From df364cd17e94e51a589bd010c9fa05fe695274fa Mon Sep 17 00:00:00 2001 From: Timur Rvachov Date: Tue, 3 Mar 2026 17:52:48 +0000 Subject: [PATCH 1/4] Deprecate bionemo-evo2 sub-package in favor of evo2_megatron recipe The new implementation at bionemo-recipes/recipes/evo2_megatron/ (Megatron-Bridge) has feature parity with the old sub-packages/bionemo-evo2 (NeMo2/PTL) for Hyena models. All matching tests pass in both containers. Known gaps (out of scope): LoRA/PEFT, Mamba, and Llama model support. Changes: - Remove bionemo-evo2 from meta-package dependencies and pyright/tach configs - Add deprecation notices to README, pyproject.toml, and __init__.py - Add DEPRECATED marker file and test results documentation Co-Authored-By: Claude Opus 4.6 --- pyproject.toml | 4 +- sub-packages/bionemo-evo2/DEPRECATED | 11 +++++ .../bionemo-evo2/DEPRECATION_TEST_RESULTS.md | 47 +++++++++++++++++++ sub-packages/bionemo-evo2/README.md | 16 ++++++- sub-packages/bionemo-evo2/pyproject.toml | 2 +- .../bionemo-evo2/src/bionemo/evo2/__init__.py | 9 ++++ tach.toml | 16 +++---- 7 files changed, 93 insertions(+), 12 deletions(-) create mode 100644 sub-packages/bionemo-evo2/DEPRECATED create mode 100644 sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md diff --git a/pyproject.toml b/pyproject.toml index b91e9fb58b..0dedaa1665 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ version = "2.0.0" dependencies = [ # **ALL** bionemo sub-packages 'bionemo-core', - 'bionemo-evo2', + # 'bionemo-evo2', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ 'bionemo-example_model', 'bionemo-llm', 'bionemo-moco', @@ -125,7 +125,7 @@ executionEnvironments = [ "./3rdparty/NeMo", # bionemo sub-packages './sub-packages/bionemo-core/src', - './sub-packages/bionemo-evo2/src', + # './sub-packages/bionemo-evo2/src', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ './sub-packages/bionemo-example_model/src', './sub-packages/bionemo-llm/src', './sub-packages/bionemo-moco/src', diff --git a/sub-packages/bionemo-evo2/DEPRECATED b/sub-packages/bionemo-evo2/DEPRECATED new file mode 100644 index 0000000000..3a9d3fa6b5 --- /dev/null +++ b/sub-packages/bionemo-evo2/DEPRECATED @@ -0,0 +1,11 @@ +This sub-package (sub-packages/bionemo-evo2) is deprecated. + +The replacement implementation is located at: + bionemo-recipes/recipes/evo2_megatron/ + +Migration guide: +- For Hyena models: the new implementation has full feature parity +- LoRA/PEFT support: not yet ported (planned for future release) +- Mamba/Llama model support: not ported to the new implementation + +This package will be removed in a future release. diff --git a/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md b/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md new file mode 100644 index 0000000000..024e366d86 --- /dev/null +++ b/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md @@ -0,0 +1,47 @@ +# Evo2 Feature Parity Test Results + +Test execution date: 2026-03-03 +Environment: RTX 5080 (16GB), 1 GPU, no PBSS data access + +## Test Results Summary + +### Tests Run in Both Containers (Feature Parity Validation) + +| Test | Old Container (evo2_old_container) | New Container (evo2_image) | Match? | +|---|---|---|---| +| Training finetune (1 GPU) | PASSED (66.88s) `test_train_evo2_finetune_runs` | PASSED (86.95s) `test_fine_tuning[tp_1_pretrain]` | Yes | +| Stop-and-go (1 GPU) | PASSED (49.99s) `TestEvo2StopAndGo::test_stop_and_go_consistency` | PASSED (85.06s) `test_stop_and_go[1-1-1-False-bf16_mixed]` | Yes | +| Stop at max steps + continue | PASSED (76.77s) `test_train_evo2_stop_at_max_steps_and_continue[no_fp8]` | N/A (covered by test_fine_tuning) | N/A | +| Predict basic (1 GPU) | PASSED (49.71s) `test_predict_evo2_runs` 1 passed, 3 skipped | PASSED (94.34s) `test_predict_evo2_runs` 1 passed, 3 skipped | Yes | +| Infer basic | PASSED (110.09s) `test_run_infer` 2 passed | PASSED (62.41s) `test_infer_runs` 1 passed | Yes | + +### Tests Requiring 2+ GPUs (Auto-Skipped) + +| Test | Status | +|---|---| +| `test_distributed_training_gradient_equivalence` (both) | Skipped (requires 2 GPUs) | +| `test_fine_tuning[tp_2_pretrain]` (new) | Skipped (requires 2 GPUs) | +| Multi-GPU predict/infer parametrizations | Skipped (requires 2+ GPUs) | + +### Tests Requiring PBSS Data Access (Not Available) + +| Test | Status | +|---|---| +| `test_forward_manual` (both) | Would skip (no BIONEMO_DATA_SOURCE=pbss) | +| `test_batch_generate_coding_sequences` (both) | Would skip (no BIONEMO_DATA_SOURCE=pbss) | +| `test_predict_evo2_equivalent_with_log_probs` (both) | Would skip (no checkpoint access) | + +## Conclusion + +All matching tests that could run with the available hardware (1 GPU, no PBSS) **passed in both containers**, confirming feature parity for Hyena models on single-GPU configurations. + +The new implementation (evo2_megatron) demonstrates equivalent behavior to the old implementation (bionemo-evo2) for: +- Training with mock data (finetune workflow) +- Stop-and-go training (checkpoint resume) +- Prediction pipeline +- Inference pipeline + +## Container Details + +- **Old**: `evo2_old_container:latest` - PyTorch 2.8.0, NeMo/NeMo2 based +- **New**: `evo2_image:latest` - PyTorch 2.11.0, Megatron-Bridge based diff --git a/sub-packages/bionemo-evo2/README.md b/sub-packages/bionemo-evo2/README.md index 5ed2dc8f8a..f7e3ad5393 100644 --- a/sub-packages/bionemo-evo2/README.md +++ b/sub-packages/bionemo-evo2/README.md @@ -1,4 +1,18 @@ -# bionemo-evo2 +# bionemo-evo2 [DEPRECATED] + +> **DEPRECATED**: This sub-package (`sub-packages/bionemo-evo2`) is deprecated and will be removed in a future release. +> The replacement implementation is located at `bionemo-recipes/recipes/evo2_megatron/`, which uses Megatron-Bridge +> instead of NeMo 2.0 / PyTorch Lightning. +> +> For Hyena models, the new implementation has full feature parity with this one, plus additional capabilities +> (embedding extraction, mixed precision recipes, MCore inference engine). +> +> **Known gaps in the new implementation** (out of scope for initial deprecation): +> - LoRA/PEFT support (training and prediction) +> - Mamba model support +> - Llama model support +> +> Please migrate to `bionemo-recipes/recipes/evo2_megatron/` for all new work. `bionemo-evo2` is a `pip`-installable package that contains **data preprocessing**, **training**, and **inferencing** code for Evo2, a new `Hyena`-based foundation model for genome generation and understanding. Built upon `Megatron-LM` parallelism and `NeMo2` algorithms, `bionemo-evo2` provides the remaining tools necessary to effectively fine-tune the pre-trained Evo2 model checkpoint on user-provided sequences at scale, and generate state-of-the-art life-like DNA sequences from Evo2 for downstream metagenomic tasks. diff --git a/sub-packages/bionemo-evo2/pyproject.toml b/sub-packages/bionemo-evo2/pyproject.toml index 5e94db9771..554183a503 100644 --- a/sub-packages/bionemo-evo2/pyproject.toml +++ b/sub-packages/bionemo-evo2/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "bionemo-evo2" readme = "README.md" -description = "Library containing data preprocessing, training, and inference tooling for Evo2." +description = "[DEPRECATED] Library containing data preprocessing, training, and inference tooling for Evo2. Use bionemo-recipes/recipes/evo2_megatron/ instead." authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] requires-python = ">=3.10" license = { file = "LICENSE" } diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py index 9981337fda..7313887c9d 100644 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py +++ b/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py @@ -15,3 +15,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import warnings + +warnings.warn( + "bionemo.evo2 (sub-packages/bionemo-evo2) is deprecated and will be removed in a future release. " + "Please use the replacement implementation at bionemo-recipes/recipes/evo2_megatron/ instead.", + DeprecationWarning, + stacklevel=2, +) diff --git a/tach.toml b/tach.toml index 17d64a0a1e..4eea46cc23 100644 --- a/tach.toml +++ b/tach.toml @@ -10,7 +10,7 @@ exclude = [ ] source_roots = [ "sub-packages/bionemo-core/src", - "sub-packages/bionemo-evo2/src", + # "sub-packages/bionemo-evo2/src", # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ "sub-packages/bionemo-example_model/src", "sub-packages/bionemo-llm/src", "sub-packages/bionemo-scdl/src", @@ -23,13 +23,13 @@ source_roots = [ path = "bionemo.core" depends_on = [] -[[modules]] -path = "bionemo.evo2" -depends_on = [ - "bionemo.noodles", - "bionemo.core", - "bionemo.llm", -] +# [[modules]] # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ +# path = "bionemo.evo2" +# depends_on = [ +# "bionemo.noodles", +# "bionemo.core", +# "bionemo.llm", +# ] [[modules]] path = "bionemo.example_model" From 82744878257b1b27116b984a45eed9349a9b6ff1 Mon Sep 17 00:00:00 2001 From: Timur Rvachov Date: Tue, 3 Mar 2026 18:00:47 +0000 Subject: [PATCH 2/4] Deprecate bionemo-llm, bionemo-testing, and bionemo-example_model sub-packages These packages were only used by each other and the already-deprecated bionemo-evo2. No other active sub-packages import from them: - bionemo-llm: only used by bionemo-testing and bionemo-example_model - bionemo-testing: only used as test dependency by bionemo-llm and bionemo-example_model - bionemo-example_model: not imported by any other sub-package Also removes the stale bionemo.llm dependency from bionemo.webdatamodule in tach.toml (no actual imports exist). Co-Authored-By: Claude Opus 4.6 --- pyproject.toml | 18 ++++---- sub-packages/bionemo-example_model/DEPRECATED | 7 ++++ sub-packages/bionemo-example_model/README.md | 3 ++ .../bionemo-example_model/pyproject.toml | 2 +- .../example_model/lightning/__init__.py | 9 ++++ sub-packages/bionemo-llm/DEPRECATED | 7 ++++ sub-packages/bionemo-llm/README.md | 3 ++ sub-packages/bionemo-llm/pyproject.toml | 2 +- .../bionemo-llm/src/bionemo/llm/__init__.py | 9 ++++ sub-packages/bionemo-testing/DEPRECATED | 7 ++++ sub-packages/bionemo-testing/README.md | 3 ++ sub-packages/bionemo-testing/pyproject.toml | 2 +- .../src/bionemo/testing/__init__.py | 9 ++++ tach.toml | 41 +++++++++---------- 14 files changed, 89 insertions(+), 33 deletions(-) create mode 100644 sub-packages/bionemo-example_model/DEPRECATED create mode 100644 sub-packages/bionemo-llm/DEPRECATED create mode 100644 sub-packages/bionemo-testing/DEPRECATED diff --git a/pyproject.toml b/pyproject.toml index 0dedaa1665..ac1e0c0218 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,12 +16,12 @@ dependencies = [ # **ALL** bionemo sub-packages 'bionemo-core', # 'bionemo-evo2', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - 'bionemo-example_model', - 'bionemo-llm', + # 'bionemo-example_model', # DEPRECATED: NeMo/Megatron example model, no longer maintained + # 'bionemo-llm', # DEPRECATED: NeMo/Megatron LLM components, no longer maintained 'bionemo-moco', 'bionemo-scdl', 'bionemo-size-aware-batching', - 'bionemo-testing', + # 'bionemo-testing', # DEPRECATED: NeMo/Megatron test utilities, no longer maintained 'bionemo-webdatamodule', # external 'nemo_run', @@ -44,13 +44,13 @@ nemo_toolkit = { workspace = true } megatron-core = { workspace = true } # in sub-packages/ bionemo-core = { workspace = true } -bionemo-example_model = { workspace = true } -bionemo-llm = { workspace = true } +# bionemo-example_model = { workspace = true } # DEPRECATED +# bionemo-llm = { workspace = true } # DEPRECATED bionemo-moco = { workspace = true } bionemo-noodles = { workspace = true } bionemo-scdl = { workspace = true } bionemo-size-aware-batching = { workspace = true } -bionemo-testing = { workspace = true } +# bionemo-testing = { workspace = true } # DEPRECATED bionemo-webdatamodule = { workspace = true } [tool.uv] @@ -126,13 +126,13 @@ executionEnvironments = [ # bionemo sub-packages './sub-packages/bionemo-core/src', # './sub-packages/bionemo-evo2/src', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - './sub-packages/bionemo-example_model/src', - './sub-packages/bionemo-llm/src', + # './sub-packages/bionemo-example_model/src', # DEPRECATED: NeMo/Megatron example model + # './sub-packages/bionemo-llm/src', # DEPRECATED: NeMo/Megatron LLM components './sub-packages/bionemo-moco/src', './sub-packages/bionemo-noodles/src', './sub-packages/bionemo-scdl/src', './sub-packages/bionemo-size-aware-batching/src', - './sub-packages/bionemo-testing/src', + # './sub-packages/bionemo-testing/src', # DEPRECATED: NeMo/Megatron test utilities './sub-packages/bionemo-webdatamodule/src', ] }, ] diff --git a/sub-packages/bionemo-example_model/DEPRECATED b/sub-packages/bionemo-example_model/DEPRECATED new file mode 100644 index 0000000000..7f8c0fb01a --- /dev/null +++ b/sub-packages/bionemo-example_model/DEPRECATED @@ -0,0 +1,7 @@ +This sub-package (sub-packages/bionemo-example_model) is deprecated. + +This package provided an example NeMo/Megatron model for documentation +and tutorials. It is no longer maintained as the framework has moved to +self-contained recipes in bionemo-recipes/. + +This package will be removed in a future release. diff --git a/sub-packages/bionemo-example_model/README.md b/sub-packages/bionemo-example_model/README.md index 3611af6941..1d59049022 100644 --- a/sub-packages/bionemo-example_model/README.md +++ b/sub-packages/bionemo-example_model/README.md @@ -1,5 +1,8 @@ # bionemo-example_model +> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to +> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. + This is a minimalist package containing an example model that makes use of bionemo2 and nemo conventions. It contains the necessary models, dataloaders, datasets, and custom loss functions. The referenced classes and functions are in `bionemo.example_model.lightning.lightning_basic`. This tutorial demonstrates the creation of a simple MNIST model. This should be run in a BioNeMo container. The BioNeMo Framework container can run in a brev.dev launchable: [![ Click here to deploy.](https://uohmivykqgnnbiouffke.supabase.co/storage/v1/object/public/landingpage/brevdeploynavy.svg)](https://console.brev.dev/launchable/deploy?launchableID=env-2pPDA4sJyTuFf3KsCv5KWRbuVlU). It takes about 10 minutes to deploy this notebook as a Launchable. As of this writing, we are working on a free tier so a credit card may be required. You can reach out to your NVIDIA rep for credit. Notebooks and a shell interface can be launched by clicking `Open Notebook`. (Note: This links to the nightly release and may be out of sync with these docs.) diff --git a/sub-packages/bionemo-example_model/pyproject.toml b/sub-packages/bionemo-example_model/pyproject.toml index a5fb915597..bc1a030ef3 100644 --- a/sub-packages/bionemo-example_model/pyproject.toml +++ b/sub-packages/bionemo-example_model/pyproject.toml @@ -8,7 +8,7 @@ build-backend = "setuptools.build_meta" # now include bionemo-* sub-packages explicitly. name = "bionemo-example_model" readme = "README.md" -description = "BioNeMo example_model: Example model for documentation and tutorials. Do Not Distriburte on PyPI !!" +description = "[DEPRECATED] BioNeMo example_model: Example model for documentation and tutorials. No longer maintained." authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] requires-python = ">=3.10" classifiers = ["Private :: Do Not Upload", "Programming Language :: Python :: 3.10"] diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py index 25e6abfbc5..8bdbf443c0 100644 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py +++ b/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py @@ -12,3 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import warnings + +warnings.warn( + "bionemo.example_model (sub-packages/bionemo-example_model) is deprecated and will be removed in a future release. " + "This package is no longer maintained.", + DeprecationWarning, + stacklevel=2, +) diff --git a/sub-packages/bionemo-llm/DEPRECATED b/sub-packages/bionemo-llm/DEPRECATED new file mode 100644 index 0000000000..4dcaeae022 --- /dev/null +++ b/sub-packages/bionemo-llm/DEPRECATED @@ -0,0 +1,7 @@ +This sub-package (sub-packages/bionemo-llm) is deprecated. + +This package provided NeMo/Megatron-based LLM components for BioNeMo. +It is no longer maintained as the framework has moved to self-contained +recipes in bionemo-recipes/. + +This package will be removed in a future release. diff --git a/sub-packages/bionemo-llm/README.md b/sub-packages/bionemo-llm/README.md index 68bbb82e1c..5729ccac18 100644 --- a/sub-packages/bionemo-llm/README.md +++ b/sub-packages/bionemo-llm/README.md @@ -1,5 +1,8 @@ # bionemo-llm +> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to +> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. + The Bionemo Large Language Model (LLM) submodule contains common code used in submodules that train LLMs on biological datasets. This includes data masking and collate functions, the bio-BERT common architecture code, loss functions, and other NeMo / Megatron-LM compatibility functions. Sub-packages diff --git a/sub-packages/bionemo-llm/pyproject.toml b/sub-packages/bionemo-llm/pyproject.toml index 9f4dfac490..b947ce3b4f 100644 --- a/sub-packages/bionemo-llm/pyproject.toml +++ b/sub-packages/bionemo-llm/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "bionemo-llm" readme = "README.md" -description = "BioNeMo Large Language Model Components using NeMo and Megatron" +description = "[DEPRECATED] BioNeMo Large Language Model Components using NeMo and Megatron. No longer maintained." authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] requires-python = ">=3.10" license = { file = "LICENSE" } diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py index 25e6abfbc5..625a5ad983 100644 --- a/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py +++ b/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py @@ -12,3 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import warnings + +warnings.warn( + "bionemo.llm (sub-packages/bionemo-llm) is deprecated and will be removed in a future release. " + "This package is no longer maintained.", + DeprecationWarning, + stacklevel=2, +) diff --git a/sub-packages/bionemo-testing/DEPRECATED b/sub-packages/bionemo-testing/DEPRECATED new file mode 100644 index 0000000000..d322a722dc --- /dev/null +++ b/sub-packages/bionemo-testing/DEPRECATED @@ -0,0 +1,7 @@ +This sub-package (sub-packages/bionemo-testing) is deprecated. + +This package provided test utilities for BioNeMo sub-packages that +depended on NeMo/Megatron. It is no longer maintained as the framework +has moved to self-contained recipes in bionemo-recipes/. + +This package will be removed in a future release. diff --git a/sub-packages/bionemo-testing/README.md b/sub-packages/bionemo-testing/README.md index 859987972b..15d9bb77c9 100644 --- a/sub-packages/bionemo-testing/README.md +++ b/sub-packages/bionemo-testing/README.md @@ -1,5 +1,8 @@ # bionemo-testing +> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to +> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. + A package of test-time requirements and utilities for bionemo sub-packages. In particular, the `bionemo-testing` package handles downloading and caching data and other assets for running unit tests and example notebooks. For more information on test data handling, see [BioNeMo test data management](https://github.com/NVIDIA/bionemo-framework/blob/main/sub-packages/bionemo-testing/src/bionemo/testing/data/README.md) diff --git a/sub-packages/bionemo-testing/pyproject.toml b/sub-packages/bionemo-testing/pyproject.toml index 04b0c3ed08..2766758e30 100644 --- a/sub-packages/bionemo-testing/pyproject.toml +++ b/sub-packages/bionemo-testing/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "bionemo-testing" readme = "README.md" -description = "Utilities aiding test creation for BioNeMo sub-packages." +description = "[DEPRECATED] Utilities aiding test creation for BioNeMo sub-packages. No longer maintained." authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] requires-python = ">=3.10" license = { file = "LICENSE" } diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py b/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py index 25e6abfbc5..ba74fdfbd0 100644 --- a/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py +++ b/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py @@ -12,3 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import warnings + +warnings.warn( + "bionemo.testing (sub-packages/bionemo-testing) is deprecated and will be removed in a future release. " + "This package is no longer maintained.", + DeprecationWarning, + stacklevel=2, +) diff --git a/tach.toml b/tach.toml index 4eea46cc23..6a36ed09f9 100644 --- a/tach.toml +++ b/tach.toml @@ -11,11 +11,11 @@ exclude = [ source_roots = [ "sub-packages/bionemo-core/src", # "sub-packages/bionemo-evo2/src", # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - "sub-packages/bionemo-example_model/src", - "sub-packages/bionemo-llm/src", + # "sub-packages/bionemo-example_model/src", # DEPRECATED: NeMo/Megatron example model + # "sub-packages/bionemo-llm/src", # DEPRECATED: NeMo/Megatron LLM components "sub-packages/bionemo-scdl/src", "sub-packages/bionemo-size-aware-batching/src", - "sub-packages/bionemo-testing/src", + # "sub-packages/bionemo-testing/src", # DEPRECATED: NeMo/Megatron test utilities "sub-packages/bionemo-webdatamodule/src", ] @@ -31,18 +31,18 @@ depends_on = [] # "bionemo.llm", # ] -[[modules]] -path = "bionemo.example_model" -depends_on = [ - "bionemo.core", - "bionemo.llm", -] +# [[modules]] # DEPRECATED: NeMo/Megatron example model +# path = "bionemo.example_model" +# depends_on = [ +# "bionemo.core", +# "bionemo.llm", +# ] -[[modules]] -path = "bionemo.llm" -depends_on = [ - "bionemo.core", -] +# [[modules]] # DEPRECATED: NeMo/Megatron LLM components +# path = "bionemo.llm" +# depends_on = [ +# "bionemo.core", +# ] [[modules]] path = "bionemo.noodles" @@ -62,16 +62,15 @@ depends_on = [ "bionemo.core", ] -[[modules]] -path = "bionemo.testing" -depends_on = [ - "bionemo.core", - "bionemo.llm", -] +# [[modules]] # DEPRECATED: NeMo/Megatron test utilities +# path = "bionemo.testing" +# depends_on = [ +# "bionemo.core", +# "bionemo.llm", +# ] [[modules]] path = "bionemo.webdatamodule" depends_on = [ "bionemo.core", - "bionemo.llm", ] From 12ceb5f429efe6496785d2a852bfa715e01806d9 Mon Sep 17 00:00:00 2001 From: Timur Rvachov Date: Tue, 3 Mar 2026 20:49:19 +0000 Subject: [PATCH 3/4] Remove deprecated sub-packages: bionemo-evo2, bionemo-llm, bionemo-testing, bionemo-example_model Hard-delete all four deprecated sub-packages and clean up all references throughout the repository: Deleted directories: - sub-packages/bionemo-evo2/ (replaced by bionemo-recipes/recipes/evo2_megatron/) - sub-packages/bionemo-llm/ (NeMo/Megatron LLM components, no longer maintained) - sub-packages/bionemo-testing/ (NeMo/Megatron test utilities, no longer maintained) - sub-packages/bionemo-example_model/ (NeMo/Megatron example model, no longer maintained) Deleted CI configs: - ci/benchmarks/partial-conv/evo2_pretrain.yaml - ci/benchmarks/partial-conv/evo2_finetuning.yaml - ci/benchmarks/perf/evo2_pretrain.yaml Cleaned up references in: - pyproject.toml (dependencies, uv.sources, pyright extraPaths) - tach.toml (source_roots, modules, stale webdatamodule->llm dep) - .devcontainer/framework/devcontainer.json (extraPaths) - CODEOWNERS (removed entries for deleted packages) - README.md (removed from sub-packages support matrix) - docs/ (SUMMARY files, getting-started, development, megatron_datasets, release notes, contributing, slurm guide) - sub-packages/bionemo-core/ (README, data README) Co-Authored-By: Claude Opus 4.6 --- .devcontainer/framework/devcontainer.json | 3 - CODEOWNERS | 6 - README.md | 10 +- .../partial-conv/evo2_finetuning.yaml | 119 -- ci/benchmarks/partial-conv/evo2_pretrain.yaml | 130 -- ci/benchmarks/perf/evo2_pretrain.yaml | 98 -- .../about/background/megatron_datasets.md | 14 +- docs/docs/main/about/releasenotes-fw.md | 2 +- docs/docs/main/contributing/contributing.md | 2 +- docs/docs/main/developer-guide/SUMMARY.md | 4 - docs/docs/main/examples/SUMMARY.md | 1 - docs/docs/main/getting-started/development.md | 13 +- docs/docs/main/getting-started/index.md | 99 +- docs/docs/main/getting-started/using-slurm.md | 8 +- pyproject.toml | 11 - sub-packages/bionemo-core/README.md | 2 +- .../src/bionemo/core/data/README.md | 4 +- sub-packages/bionemo-evo2/DEPRECATED | 11 - .../bionemo-evo2/DEPRECATION_TEST_RESULTS.md | 47 - sub-packages/bionemo-evo2/LICENSE | 202 --- sub-packages/bionemo-evo2/README.md | 481 ------- sub-packages/bionemo-evo2/VERSION | 1 - ...inetuning_train_curve_500_steps_256gbs.png | Bin 106324 -> 0 bytes sub-packages/bionemo-evo2/examples/.gitignore | 14 - .../bionemo-evo2/examples/configs/README.md | 8 - .../full_pretrain_longphase_config.yaml | 450 ------ .../full_pretrain_shortphase_config.yaml | 81 -- .../examples/configs/test_preproc_config.yaml | 52 - .../test_promotors_dataset_config.yaml | 9 - .../examples/fine-tuning-tutorial.ipynb | 687 --------- .../examples/zeroshot_brca1.ipynb | 1245 ----------------- sub-packages/bionemo-evo2/pyproject.toml | 47 - .../bionemo-evo2/src/bionemo/evo2/__init__.py | 26 - .../src/bionemo/evo2/data/README.md | 234 ---- .../src/bionemo/evo2/data/__init__.py | 14 - .../src/bionemo/evo2/data/fasta_dataset.py | 79 -- .../src/bionemo/evo2/data/preprocess.py | 485 ------- .../evo2/data/sharded_eden_dataloader.md | 282 ---- .../evo2/data/sharded_eden_dataloader.py | 937 ------------- .../src/bionemo/evo2/data/tokenizer.py | 79 -- .../evo2/data/transcript_extraction.py | 421 ------ .../src/bionemo/evo2/models/__init__.py | 43 - .../src/bionemo/evo2/models/llama.py | 238 ---- .../src/bionemo/evo2/models/mamba.py | 389 ----- .../src/bionemo/evo2/models/peft.py | 279 ---- .../src/bionemo/evo2/run/__init__.py | 17 - .../src/bionemo/evo2/run/infer.py | 236 ---- .../src/bionemo/evo2/run/predict.py | 712 ---------- .../src/bionemo/evo2/run/train.py | 1124 --------------- .../src/bionemo/evo2/run/utils.py | 52 - .../src/bionemo/evo2/utils/__init__.py | 17 - .../src/bionemo/evo2/utils/callbacks.py | 60 - .../bionemo/evo2/utils/checkpoint/README.md | 74 - .../bionemo/evo2/utils/checkpoint/__init__.py | 17 - .../convert_checkpoint_model_parallel_evo2.py | 403 ------ .../evo2/utils/checkpoint/convert_to_nemo.py | 81 -- .../checkpoint/convert_zero3_to_zero1.py | 147 -- .../utils/checkpoint/evo2_remove_optimizer.py | 194 --- .../evo2/utils/checkpoint/nemo2_to_hf.py | 49 - .../bionemo/evo2/utils/checkpoint/params.py | 63 - .../utils/checkpoint/zero3_conversion_lib.py | 699 --------- .../src/bionemo/evo2/utils/config.py | 101 -- .../bionemo/evo2/utils/logging/__init__.py | 14 - .../bionemo/evo2/utils/logging/callbacks.py | 114 -- .../src/bionemo/evo2/utils/loss/__init__.py | 14 - .../evo2/utils/loss/embedding_variance.py | 192 --- .../tests/bionemo/evo2/conftest.py | 61 - .../tests/bionemo/evo2/data/cds_prompts.csv | 4 - .../tests/bionemo/evo2/data/prompts.csv | 5 - .../bionemo/evo2/data/test_fasta_dataset.py | 89 -- .../bionemo/evo2/data/test_preprocess.py | 89 -- .../evo2/data/test_sharded_eden_dataset.py | 545 -------- .../tests/bionemo/evo2/data/test_tokenizer.py | 240 ---- .../tests/bionemo/evo2/models/__init__.py | 14 - .../tests/bionemo/evo2/models/test_llama.py | 117 -- .../tests/bionemo/evo2/run/__init__.py | 14 - .../tests/bionemo/evo2/run/common.py | 71 - .../tests/bionemo/evo2/run/test_finetune.py | 202 --- .../tests/bionemo/evo2/run/test_infer.py | 79 -- .../tests/bionemo/evo2/run/test_inference.py | 104 -- .../tests/bionemo/evo2/run/test_predict.py | 506 ------- .../tests/bionemo/evo2/run/test_train.py | 637 --------- .../tests/bionemo/evo2/test_evo2.py | 900 ------------ .../tests/bionemo/evo2/test_prompt.py | 140 -- .../tests/bionemo/evo2/test_stop_and_go.py | 402 ------ .../tests/bionemo/evo2/utils/__init__.py | 14 - .../bionemo/evo2/utils/checkpoint/__init__.py | 14 - .../checkpoint/test_eden_llama_roundtrip.py | 104 -- sub-packages/bionemo-example_model/DEPRECATED | 7 - sub-packages/bionemo-example_model/LICENSE | 1 - sub-packages/bionemo-example_model/README.md | 126 -- sub-packages/bionemo-example_model/VERSION | 1 - .../bionemo-example_model/pyproject.toml | 40 - .../example_model/lightning/__init__.py | 23 - .../lightning/lightning_basic.py | 665 --------- .../training_scripts/finetune_mnist.py | 108 -- .../training_scripts/predict_mnist.py | 68 - .../training_scripts/pretrain_mnist.py | 94 -- .../lightning/test_lightning_basic.py | 187 --- sub-packages/bionemo-llm/DEPRECATED | 7 - sub-packages/bionemo-llm/LICENSE | 1 - sub-packages/bionemo-llm/README.md | 9 - sub-packages/bionemo-llm/VERSION | 1 - sub-packages/bionemo-llm/pyproject.toml | 44 - .../bionemo-llm/src/bionemo/llm/__init__.py | 23 - .../bionemo-llm/src/bionemo/llm/api.py | 51 - .../src/bionemo/llm/data/__init__.py | 14 - .../src/bionemo/llm/data/collate.py | 120 -- .../src/bionemo/llm/data/datamodule.py | 159 --- .../bionemo/llm/data/label2id_tokenizer.py | 123 -- .../src/bionemo/llm/data/masking.py | 167 --- .../bionemo-llm/src/bionemo/llm/data/types.py | 57 - .../bionemo-llm/src/bionemo/llm/lightning.py | 452 ------ .../src/bionemo/llm/model/__init__.py | 14 - .../src/bionemo/llm/model/biobert/__init__.py | 14 - .../bionemo/llm/model/biobert/lightning.py | 283 ---- .../src/bionemo/llm/model/biobert/model.py | 622 -------- .../llm/model/biobert/testing_utils.py | 56 - .../llm/model/biobert/transformer_specs.py | 257 ---- .../src/bionemo/llm/model/config.py | 186 --- .../src/bionemo/llm/model/layers.py | 62 - .../bionemo-llm/src/bionemo/llm/model/loss.py | 169 --- .../src/bionemo/llm/model/lr_scheduler.py | 142 -- .../src/bionemo/llm/run/__init__.py | 14 - .../src/bionemo/llm/run/config_models.py | 444 ------ .../bionemo-llm/src/bionemo/llm/train.py | 292 ---- .../src/bionemo/llm/utils/__init__.py | 14 - .../src/bionemo/llm/utils/callbacks.py | 225 --- .../src/bionemo/llm/utils/datamodule_utils.py | 161 --- .../src/bionemo/llm/utils/iomixin_utils.py | 134 -- .../src/bionemo/llm/utils/logger_utils.py | 114 -- .../src/bionemo/llm/utils/megatron_utils.py | 51 - .../src/bionemo/llm/utils/remote.py | 157 --- .../src/bionemo/llm/utils/weight_utils.py | 154 -- sub-packages/bionemo-llm/tests/__init__.py | 14 - .../bionemo-llm/tests/bionemo/llm/__init__.py | 14 - .../tests/bionemo/llm/data/test_collate.py | 209 --- .../tests/bionemo/llm/data/test_datamodule.py | 204 --- .../tests/bionemo/llm/data/test_masking.py | 271 ---- .../model/biobert/test_transformer_specs.py | 84 -- .../tests/bionemo/llm/model/test_loss.py | 176 --- .../bionemo/llm/model/test_lr_scheduler.py | 78 -- .../tests/bionemo/llm/test_lightning.py | 223 --- .../tests/bionemo/llm/utils/__init__.py | 14 - .../tests/bionemo/llm/utils/test_callbacks.py | 115 -- .../llm/utils/test_datamodule_utils.py | 69 - .../bionemo/llm/utils/test_iomixin_utils.py | 106 -- .../bionemo/llm/utils/test_logger_utils.py | 147 -- .../bionemo/llm/utils/test_megatron_utils.py | 61 - sub-packages/bionemo-testing/DEPRECATED | 7 - sub-packages/bionemo-testing/LICENSE | 1 - sub-packages/bionemo-testing/README.md | 8 - sub-packages/bionemo-testing/VERSION | 1 - sub-packages/bionemo-testing/pyproject.toml | 33 - .../src/bionemo/testing/__init__.py | 23 - .../testing/assert_optimizer_grads_match.py | 363 ----- .../src/bionemo/testing/callbacks.py | 63 - .../src/bionemo/testing/data/__init__.py | 14 - .../src/bionemo/testing/data/esm2.py | 69 - .../src/bionemo/testing/data/fasta.py | 63 - .../src/bionemo/testing/data/load.py | 31 - .../src/bionemo/testing/data/resource.py | 23 - .../src/bionemo/testing/harnesses/__init__.py | 14 - .../src/bionemo/testing/harnesses/mode.py | 25 - .../bionemo/testing/harnesses/stop_and_go.py | 369 ----- .../src/bionemo/testing/lightning.py | 66 - .../testing/megatron_dataset_compatibility.py | 127 -- .../testing/megatron_parallel_state_utils.py | 371 ----- .../src/bionemo/testing/subprocess_utils.py | 129 -- .../src/bionemo/testing/tensorboard.py | 60 - .../src/bionemo/testing/testing_callbacks.py | 296 ---- .../src/bionemo/testing/torch.py | 79 -- .../src/bionemo/testing/utils.py | 93 -- .../tests/bionemo/testing/data/test_fasta.py | 41 - .../test_megatron_dataset_compatibility.py | 88 -- .../test_megatron_parallel_state_utils.py | 199 --- .../tests/bionemo/testing/test_torch.py | 26 - tach.toml | 32 - 178 files changed, 23 insertions(+), 26687 deletions(-) delete mode 100644 ci/benchmarks/partial-conv/evo2_finetuning.yaml delete mode 100644 ci/benchmarks/partial-conv/evo2_pretrain.yaml delete mode 100644 ci/benchmarks/perf/evo2_pretrain.yaml delete mode 100644 sub-packages/bionemo-evo2/DEPRECATED delete mode 100644 sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md delete mode 100644 sub-packages/bionemo-evo2/LICENSE delete mode 100644 sub-packages/bionemo-evo2/README.md delete mode 100644 sub-packages/bionemo-evo2/VERSION delete mode 100644 sub-packages/bionemo-evo2/assets/1b_finetuning_train_curve_500_steps_256gbs.png delete mode 100644 sub-packages/bionemo-evo2/examples/.gitignore delete mode 100644 sub-packages/bionemo-evo2/examples/configs/README.md delete mode 100644 sub-packages/bionemo-evo2/examples/configs/full_pretrain_longphase_config.yaml delete mode 100644 sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml delete mode 100644 sub-packages/bionemo-evo2/examples/configs/test_preproc_config.yaml delete mode 100644 sub-packages/bionemo-evo2/examples/configs/test_promotors_dataset_config.yaml delete mode 100644 sub-packages/bionemo-evo2/examples/fine-tuning-tutorial.ipynb delete mode 100644 sub-packages/bionemo-evo2/examples/zeroshot_brca1.ipynb delete mode 100644 sub-packages/bionemo-evo2/pyproject.toml delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/README.md delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/fasta_dataset.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/preprocess.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.md delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/tokenizer.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/data/transcript_extraction.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/models/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/models/llama.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/models/mamba.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/models/peft.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/run/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/run/infer.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/run/predict.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/run/utils.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/callbacks.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/README.md delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_checkpoint_model_parallel_evo2.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_zero3_to_zero1.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/evo2_remove_optimizer.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/nemo2_to_hf.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/params.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/zero3_conversion_lib.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/config.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/callbacks.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/__init__.py delete mode 100644 sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/embedding_variance.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/conftest.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/cds_prompts.csv delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/prompts.csv delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_fasta_dataset.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_preprocess.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_sharded_eden_dataset.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_tokenizer.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/models/__init__.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/models/test_llama.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/__init__.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/common.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_finetune.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_infer.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_inference.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_predict.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_train.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/test_evo2.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/test_prompt.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/test_stop_and_go.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/__init__.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/__init__.py delete mode 100644 sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/test_eden_llama_roundtrip.py delete mode 100644 sub-packages/bionemo-example_model/DEPRECATED delete mode 120000 sub-packages/bionemo-example_model/LICENSE delete mode 100644 sub-packages/bionemo-example_model/README.md delete mode 120000 sub-packages/bionemo-example_model/VERSION delete mode 100644 sub-packages/bionemo-example_model/pyproject.toml delete mode 100644 sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py delete mode 100644 sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/lightning_basic.py delete mode 100644 sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/finetune_mnist.py delete mode 100644 sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/predict_mnist.py delete mode 100644 sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/pretrain_mnist.py delete mode 100644 sub-packages/bionemo-example_model/tests/bionemo/example_model/lightning/test_lightning_basic.py delete mode 100644 sub-packages/bionemo-llm/DEPRECATED delete mode 120000 sub-packages/bionemo-llm/LICENSE delete mode 100644 sub-packages/bionemo-llm/README.md delete mode 100644 sub-packages/bionemo-llm/VERSION delete mode 100644 sub-packages/bionemo-llm/pyproject.toml delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/api.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/collate.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/datamodule.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/label2id_tokenizer.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/masking.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/data/types.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/lightning.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/lightning.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/model.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/testing_utils.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/transformer_specs.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/config.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/layers.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/loss.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/model/lr_scheduler.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/run/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/run/config_models.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/train.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/__init__.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/callbacks.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/datamodule_utils.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/iomixin_utils.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/logger_utils.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/megatron_utils.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/remote.py delete mode 100644 sub-packages/bionemo-llm/src/bionemo/llm/utils/weight_utils.py delete mode 100644 sub-packages/bionemo-llm/tests/__init__.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/__init__.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/data/test_collate.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/data/test_datamodule.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/data/test_masking.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/model/biobert/test_transformer_specs.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/model/test_loss.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/model/test_lr_scheduler.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/test_lightning.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/__init__.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_callbacks.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_datamodule_utils.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_iomixin_utils.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_logger_utils.py delete mode 100644 sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_megatron_utils.py delete mode 100644 sub-packages/bionemo-testing/DEPRECATED delete mode 120000 sub-packages/bionemo-testing/LICENSE delete mode 100644 sub-packages/bionemo-testing/README.md delete mode 100644 sub-packages/bionemo-testing/VERSION delete mode 100644 sub-packages/bionemo-testing/pyproject.toml delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/__init__.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/assert_optimizer_grads_match.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/callbacks.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/data/__init__.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/data/esm2.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/data/fasta.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/data/load.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/data/resource.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/harnesses/__init__.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/harnesses/mode.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/harnesses/stop_and_go.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/lightning.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/megatron_dataset_compatibility.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/megatron_parallel_state_utils.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/subprocess_utils.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/tensorboard.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/testing_callbacks.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/torch.py delete mode 100644 sub-packages/bionemo-testing/src/bionemo/testing/utils.py delete mode 100644 sub-packages/bionemo-testing/tests/bionemo/testing/data/test_fasta.py delete mode 100644 sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_dataset_compatibility.py delete mode 100644 sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_parallel_state_utils.py delete mode 100644 sub-packages/bionemo-testing/tests/bionemo/testing/test_torch.py diff --git a/.devcontainer/framework/devcontainer.json b/.devcontainer/framework/devcontainer.json index cb8804375b..a6434a543b 100644 --- a/.devcontainer/framework/devcontainer.json +++ b/.devcontainer/framework/devcontainer.json @@ -42,9 +42,6 @@ "settings": { "python.analysis.extraPaths": [ "./sub-packages/bionemo-core/src", - "./sub-packages/bionemo-llm/src", - "./sub-packages/bionemo-testing/src", - "./sub-packages/bionemo-example_model/src", "./3rdparty/NeMo", "./3rdparty/Megatron-LM" ], diff --git a/CODEOWNERS b/CODEOWNERS index f778f6df78..cbaa18c9f7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -58,14 +58,8 @@ ci/ @dorotat-nv @pstjohn @trvachov @jwilber @jomitchellnv @jstjohn # ## LIBRARY CODE # -sub-packages/bionemo-testing @dorotat-nv @farhadrgh @jstjohn @pstjohn @skothenhill-nv - sub-packages/bionemo-core @DejunL @dorotat-nv @farhadrgh @jstjohn @pstjohn @skothenhill-nv -sub-packages/bionemo-llm @farhadrgh @dorotat-nv @jstjohn @pstjohn @skothenhill-nv - -sub-packages/bionemo-example_model @jstjohn @skothenhill-nv - sub-packages/bionemo-scdl @jstjohn @polinabinder1 @skothenhill-nv sub-packages/bionemo-noodles @skothenhill-nv @jstjohn @edawson @cspades diff --git a/README.md b/README.md index b35461f5bf..67a3918dc1 100644 --- a/README.md +++ b/README.md @@ -80,13 +80,9 @@ A core use-case of the BioNeMo Framework is to help digital biology scientists a (Click to expand) sub-packages models support matrix -| Directory | Description | Support | 5D Parallel | Megatron-FSDP | TE | Sequence Packing | FP8 | Context Parallel | -| ----------------------- | -------------------------------- | -------------- | ----------- | ------------- | --- | ---------------- | --- | ---------------- | -| `bionemo-core` | Model Config/test data utils | ✅ Active | ✅ | N/A | ✅ | ❌ | N/A | N/A | -| `bionemo-evo2` | 5D parallel model | ✅ Active | ✅ | ❌ | ✅ | ❌ | ✅ | ✅ | -| `bionemo-example_model` | Example 5D parallel model | 🔧 Maintenance | ✅ | ❌ | ✅ | ❌ | ✅ | ✅ | -| `bionemo-llm` | 5D parallel base model (BioBert) | ✅ Active | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | -| `bionemo-testing` | Testing Utilities | ✅ Active | ✅ | N/A | N/A | N/A | N/A | N/A | +| Directory | Description | Support | 5D Parallel | Megatron-FSDP | TE | Sequence Packing | FP8 | Context Parallel | +| -------------- | ---------------------------- | --------- | ----------- | ------------- | --- | ---------------- | --- | ---------------- | +| `bionemo-core` | Model Config/test data utils | ✅ Active | ✅ | N/A | ✅ | ❌ | N/A | N/A | diff --git a/ci/benchmarks/partial-conv/evo2_finetuning.yaml b/ci/benchmarks/partial-conv/evo2_finetuning.yaml deleted file mode 100644 index a07bf0f413..0000000000 --- a/ci/benchmarks/partial-conv/evo2_finetuning.yaml +++ /dev/null @@ -1,119 +0,0 @@ -scope: partial-conv -time_limit: 3600 -key_segments: - # Modify keys to be renamed (str) or excluded (False) from run identifier. By default, all args under script_args are included. - dataset_config: False - dataset_dir: False - data_base_path: False - num_workers: False - limit_val_batches: False - val_check_interval: False - experiment_name: False - workspace: False - restore_from_checkpoint_path: False - activation_checkpoint_layers: False - lora_enabled: False - lr: False - min_lr: False - warmup_steps: False - accumulate_grad_batches: False - clip_grad: False - weight_decay: False - attention_dropout: False - hidden_dropout: False - precision: False - seq_length: False -script_args: - # All arguments referenced in the script string must be specified here. - # Arguments not referenced in the script string must have the 'arg' field specified. - # See jet/core/configs.py for the specification of the configuration class - workspace: /workspace/bionemo2 - data_base_path: /data/evo2 - restore_from_checkpoint_path: checkpoints/nemo2_evo2_1b_8k - nodes: 1 - model: evo2 - config_name: 1b - num_workers: 1 - limit_val_batches: 20 - dataset_config: training_data_config.yaml - dataset_dir: preprocessed_data - val_check_interval: 5 - seq_length: 8192 - warmup_steps: 10 - activation_checkpoint_layers: 2 - lr: 0.000015 - min_lr: 0.0000149 - accumulate_grad_batches: 4 - max_steps: 1000 - gpus: 1 - clip_grad: 250 - weight_decay: 0.001 - attention_dropout: 0.01 - hidden_dropout: 0.01 - stop_steps: 100 - batch_size: 2 - variant: finetune - precision: fp8 - products: - - variant: finetune - lora_enabled: "" - task: finetune_from_ckpt - experiment_name: evo2-finetune - lr: 0.000015 - - variant: lora_finetune - lora_enabled: "--lora-finetune" - task: lora_finetune_from_ckpt - experiment_name: evo2-lora-finetune - lr: 0.015 # 0.000015 * 1000 since lora is used -script: |- - WANDB_API_KEY=$BIONEMO_WANDB_API_KEY train_${model} \ - -d ${data_base_path}/${dataset_config} \ - --dataset-dir=${data_base_path}/${dataset_dir} \ - --ckpt-dir=${data_base_path}/${restore_from_checkpoint_path} \ - ${lora_enabled} \ - --model-size=${config_name} \ - --max-steps=${max_steps} \ - --experiment-name=${experiment_name}_${batch_size}bs_${nodes}node_${gpus}gpu_${max_steps}s \ - --lr=${lr} \ - --min-lr=${min_lr} \ - --warmup-steps=${warmup_steps} \ - --result-dir=${tensorboard_dir} \ - --micro-batch-size=${batch_size} \ - --grad-acc-batches=${accumulate_grad_batches} \ - --limit-val-batches=${limit_val_batches} \ - --seq-length=${seq_length} \ - --clip-grad=${clip_grad} \ - --wd=${weight_decay} \ - --attention-dropout=${attention_dropout} \ - --hidden-dropout=${hidden_dropout} \ - --num-layers 4 \ - --hybrid-override-pattern 'SDH*' \ - --devices=${gpus} \ - --num-nodes=${nodes} \ - --val-check-interval=${val_check_interval} \ - --create-tensorboard-logger \ - --activation-checkpoint-recompute-num-layers=${activation_checkpoint_layers} \ - --disable-checkpointing \ - --early-stop-on-step=${stop_steps} \ - --wandb-project=${wandb_project_name} \ - --wandb-group=${model}_${variant}_${config_name}_${task}_${target} \ - --wandb-job-type=${pipeline_label} \ - --garbage-collect-at-inference -tests: - - logic_type: static - product_identifier: { "variant": "finetune" } - logic_spec: - exit_codes: - - 0 - baselines: - consumed_samples: - operator: eq - value: 800 - val_loss: - operator: range - max: 1.34 - min: 1.31 - reduced_train_loss: - operator: range - max: 1.34 - min: 1.31 diff --git a/ci/benchmarks/partial-conv/evo2_pretrain.yaml b/ci/benchmarks/partial-conv/evo2_pretrain.yaml deleted file mode 100644 index 3c76b20039..0000000000 --- a/ci/benchmarks/partial-conv/evo2_pretrain.yaml +++ /dev/null @@ -1,130 +0,0 @@ -scope: partial-conv -time_limit: 14400 -# artifacts: -# # Artifact data mount paths for script execution, specified as mount_path: artifact_tag pairs. -# # See Confluence Onboarding Guide section 5.4 for more details on locating this data. -# # Needs update of script_args.data_path: /data-jetart/evo2. Cannot be enabled since Evo2 does not work with read-only folders as data mount. -# /data-jetart/evo2/data : text/opengenome2/processed/2025-01 -key_segments: - # Modify keys to be renamed (str) or excluded (False) from run identifier. By default, all args under script_args are included. - data_path: False - clip_grad: False - lr: False - min_lr: False - wu_steps: False - pckg_url: False - file_name_wheel: False - seed: False - ops_kwargs: False - -script_args: - # All arguments referenced in the script string must be specified here. - # Arguments not referenced in the script string must have the 'arg' field specified. - # See jet/core/configs.py for the specification of the configuration class - workspace: /workspace/bionemo2 - data_path: /data/evo2 - pckg_url: gitlab-master.nvidia.com/api/v4/projects/180496/packages/pypi/simple/ - file_name_wheel: subquadratic-ops - model: evo2 - variant: train - precision: fp8 - gpus: 8 - nodes: 4 - batch_size: 8 - max_steps: 490000 - stop_steps: 6900 - pp: 1 - cp: 1 - tp: 1 - seq_len: 8192 - acc_grad: 1 - clip_grad: 250 - seed: 3735928559 - lr: 0.00015 - min_lr: 0.000015 - wu_steps: 5000 - wd: 0.1 - products: - - config_name: 1b - ops_kwargs: "--use-subquadratic_ops" - tp: 1 - pp: 1 - batch_size: 8 - stop_steps: 6900 - # FIXME: mamba training is not finished - # - config_name: hybrid_mamba_8b - # ops_kwargs: "" - # tp: 8 - # pp: 2 - # acc_grad: 1 - # batch_size: 1 - # stop_steps: 4000 -script: |- - INSTALL_FLAG="/tmp/install_done_${{SLURMD_NODENAME}}"; - if [ "$SLURM_LOCALID" = "0" ]; then - pip install ${file_name_wheel} --index-url https://oauth2:$JET_GITLAB_TOKEN@${pckg_url} --extra-index-url https://pypi.org/simple/ - touch $INSTALL_FLAG - fi - # All ranks wait until install flag file appears - while [ ! -f $INSTALL_FLAG ]; do - sleep 1 - done - WANDB_API_KEY=$BIONEMO_WANDB_API_KEY ${variant}_${model} \ - -d /workspace/bionemo2/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml \ - --dataset-dir ${data_path} \ - --grad-acc-batches ${acc_grad} \ - --fp8 --fp8-wgrad --activation-checkpoint-recompute-num-layers 5 \ - --enable-preemption \ - --ckpt-async-save \ - --use-megatron-comm-overlap-llama3-8k \ - --overlap-grad-reduce \ - --clip-grad=${clip_grad} \ - --eod-pad-in-loss-mask \ - --seq-length=${seq_len} \ - --seed ${seed} \ - --lr=${lr} \ - --wd=${wd} \ - --min-lr=${min_lr} \ - --warmup-steps=${wu_steps} \ - --tensor-parallel-size=${tp} \ - --context-parallel-size=${cp} \ - --pipeline-model-parallel-size=${pp} \ - --workers 8 \ - --num-nodes=${nodes} \ - --devices=${gpus} \ - --micro-batch-size=${batch_size} \ - --model-size=${config_name} \ - --max-steps=${max_steps} \ - --early-stop-on-step ${stop_steps} \ - --limit-val-batches=20 \ - --log-every-n-steps=50 \ - --val-check-interval=500 \ - ${ops_kwargs} \ - --create-tflops-callback \ - --create-tensorboard-logger \ - --result-dir=${tensorboard_dir} \ - --wandb-project=${wandb_project_name} \ - --wandb-group=${model}_${variant}_${config_name}__${target}__slen${seq_len} \ - --wandb-job-type=${pipeline_label} \ - --disable-checkpointing; -tests: - - logic_type: static - product_identifier: { 'target': 'dgxh100_eos', 'config_name': 1b } - logic_spec: - exit_codes: - - 0 - baselines: - consumed_samples: - operator: eq - value: 1766400 - val_loss: - operator: range - max: 1.26 - min: 1.22 - reduced_train_loss: - operator: range - max: 1.24 - min: 1.19 - TFLOPS_per_GPU: - operator: geq - value: 390 diff --git a/ci/benchmarks/perf/evo2_pretrain.yaml b/ci/benchmarks/perf/evo2_pretrain.yaml deleted file mode 100644 index a01be5b69d..0000000000 --- a/ci/benchmarks/perf/evo2_pretrain.yaml +++ /dev/null @@ -1,98 +0,0 @@ -scope: perf -time_limit: 1800 -key_segments: - # Modify keys to be renamed (str) or excluded (False) from run identifier. By default, all args under script_args are included. - data_path: False - clip_grad: False - lr: False - min_lr: False - wu_steps: False -script_args: - # All arguments referenced in the script string must be specified here. - # Arguments not referenced in the script string must have the 'arg' field specified. - # See jet/core/configs.py for the specification of the configuration class - workspace: /workspace/bionemo2 - data_path: /data/evo2 - model: evo2 - variant: train - config_name: 1b - model_type: hyena - precision: fp8 - gpus: 8 - batch_size: 8 - max_steps: 490000 - stop_steps: 600 - cp: 1 - seq_len: 8192 - acc_grad: 1 - clip_grad: 250 - seed: 3735928559 - lr: 0.00015 - min_lr: 0.000015 - wu_steps: 5000 - wd: 0.1 - products: - - nodes: 2 - pp: 1 - tp: 1 - config_name: 1b - - nodes: 1 - pp: 1 - tp: 1 - config_name: 1b - # - nodes: 2 - # pp: 1 - # tp: 2 - # config_name: hybrid_mamba_8b - # - nodes: 1 - # pp: 1 - # tp: 2 - # config_name: hybrid_mamba_8b - # FIXME, issue: https://github.com/NVIDIA/bionemo-framework/issues/814 - # - nodes: 2 - # pp: 1 - # tp: 2 - # FIXME, issue: https://github.com/NVIDIA/bionemo-framework/issues/815 - # - nodes: 2 - # pp: 2 - # tp: 1 -script: |- - WANDB_API_KEY=$BIONEMO_WANDB_API_KEY ${variant}_${model} \ - -d /workspace/bionemo2/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml \ - --dataset-dir ${data_path} \ - --grad-acc-batches ${acc_grad} \ - --fp8 --fp8-wgrad --activation-checkpoint-recompute-num-layers 5 \ - --enable-preemption \ - --ckpt-async-save \ - --use-megatron-comm-overlap-llama3-8k \ - --overlap-grad-reduce \ - --clip-grad=${clip_grad} \ - --eod-pad-in-loss-mask \ - --seq-length=${seq_len} \ - --seed ${seed} \ - --lr=${lr} \ - --wd=${wd} \ - --min-lr=${min_lr} \ - --warmup-steps=${wu_steps} \ - --tensor-parallel-size=${tp} \ - --context-parallel-size=${cp} \ - --pipeline-model-parallel-size=${pp} \ - --workers 8 \ - --num-nodes=${nodes} \ - --devices=${gpus} \ - --micro-batch-size=${batch_size} \ - --model-size=${config_name} \ - --max-steps=${max_steps} \ - --early-stop-on-step ${stop_steps} \ - --limit-val-batches=20 \ - --log-every-n-steps=50 \ - --val-check-interval=200 \ - --use-subquadratic_ops \ - --create-tflops-callback \ - --create-tensorboard-logger \ - --result-dir=${tensorboard_dir} \ - --wandb-project=${wandb_project_name} \ - --wandb-group=${model}_${variant}_${config_name}__${target}__slen${seq_len} \ - --wandb-job-type=${pipeline_label} \ - --wandb-run-name=${batch_size}bs_${nodes}node_${gpus}gpu_${max_steps}s_${precision}prec_tp${tp}_pp_${pp} \ - --disable-checkpointing; diff --git a/docs/docs/main/about/background/megatron_datasets.md b/docs/docs/main/about/background/megatron_datasets.md index 2050b039c9..c88189139d 100644 --- a/docs/docs/main/about/background/megatron_datasets.md +++ b/docs/docs/main/about/background/megatron_datasets.md @@ -53,10 +53,10 @@ for sample in MultiEpochDatasetResampler(dataset, num_epochs=3, shuffle=True): ## Training Resumption -To ensure identical behavior with and without job interruption, BioNeMo provides [MegatronDataModule][bionemo.llm.data.datamodule.MegatronDataModule] to save and load state dict for training resumption, and provides [WrappedDataLoader][nemo.lightning.data.WrappedDataLoader] to add a `mode` attribute to [DataLoader][torch.utils.data.DataLoader]. +To ensure identical behavior with and without job interruption, your datamodule should save and load state dicts for training resumption. Use `WrappedDataLoader` (from `nemo.lightning.data`) to add a `mode` attribute to [DataLoader][torch.utils.data.DataLoader]. ```python -class MyDataModule(MegatronDataModule): +class MyDataModule(pl.LightningDataModule): def __init__(self, *args, **kwargs): super().__init__() ... @@ -83,10 +83,10 @@ class MyDataModule(MegatronDataModule): ) ``` -!!! note "MegatronDataModule" +!!! note "Training Resumption" ``` -Users will see non-overlapping training curve if their datamodule is not inheritting from `MegatronDataModule`, unless similar logics are handled by the users. In `MegatronDataModule`, `self.update_init_global_step()` must be called right before the dataloaders are returned to ensure that training resumes with the correct sample index instead of restarting from 0 everytime. We recommend users to inherit from `MegatronDataModule` similar to the pattern above. +Users should ensure their datamodule tracks the global step for training resumption, so that training resumes with the correct sample index instead of restarting from 0. ``` !!! note "WrappedDataLoader" @@ -99,10 +99,8 @@ WARNING: 'train' is the default value of `mode` in `WrappedDataLoader`. If not s ## Testing Datasets for Megatron Compatibility -BioNeMo also provides utility functions for test suites to validate that datasets conform to the megatron data model. -The [assert_dataset_compatible_with_megatron][bionemo.testing.data_utils.assert_dataset_compatible_with_megatron] -function calls the dataset with identical indices and ensures the outputs are identical, while also checking to see if -`torch.manual_seed` was used. +To validate that datasets conform to the megatron data model, test suites should call datasets with identical indices +and ensure the outputs are identical, while also checking that `torch.manual_seed` was not used improperly. !!! example "Example datasets in BioNeMo" diff --git a/docs/docs/main/about/releasenotes-fw.md b/docs/docs/main/about/releasenotes-fw.md index 25335caccf..9d627ba338 100644 --- a/docs/docs/main/about/releasenotes-fw.md +++ b/docs/docs/main/about/releasenotes-fw.md @@ -75,7 +75,7 @@ ### Known Issues -- Evo2 generation is broken (i.e. `bionemo-evo2/src/bionemo/evo2/run/infer.py`). See issue https://github.com/NVIDIA/bionemo-framework/issues/890. A workaround exists on branch https://github.com/NVIDIA/bionemo-framework/pull/949 and we are working to fix this issue for the July release. +- Evo2 generation was broken in this release. This has been fixed in the new `evo2_megatron` implementation under `bionemo-recipes/recipes/evo2_megatron/`. - There is a NCCL communication issue on certain A100 multi-node environments. In our internal testing, we were not able to reproduce the issue reliably across environments. If end users see the following error, please report in issue https://github.com/NVIDIA/bionemo-framework/issues/970 : ``` diff --git a/docs/docs/main/contributing/contributing.md b/docs/docs/main/contributing/contributing.md index cdbde067ee..96ccda7c18 100644 --- a/docs/docs/main/contributing/contributing.md +++ b/docs/docs/main/contributing/contributing.md @@ -293,7 +293,7 @@ To publish your sub-package via "Trusted Publishing" to PyPI, you can follow the - Dispatch the `bionemo-subpackage-ci.yml` workflow from GitHub Actions to test, build, and publish your sub-packages to PyPI! - Required: Input a comma-separated list of sub-packages you want to test and/or publish into `subpackages`. - - For example, `bionemo-moco,bionemo-llm,bionemo-webdatamodule`. The sub-packages will be tested and published in separate parallel environments. + - For example, `bionemo-moco,bionemo-webdatamodule`. The sub-packages will be tested and published in separate parallel environments. - Optional: Set `test` to `true` if you want to test your sub-package. (Default: `true`) - Sub-packages that require pre- or post- installation steps may require modification of the `install-and-test` job in [`bionemo-framework/.github/workflows/bionemo-subpackage-ci.yml`](../../../../.github/workflows/bionemo-subpackage-ci.yml). - Supported `pyproject.toml` Optional Dependencies: \[ `te` \] diff --git a/docs/docs/main/developer-guide/SUMMARY.md b/docs/docs/main/developer-guide/SUMMARY.md index 8b082f947a..bcd720c695 100644 --- a/docs/docs/main/developer-guide/SUMMARY.md +++ b/docs/docs/main/developer-guide/SUMMARY.md @@ -1,10 +1,6 @@ - [bionemo-core](bionemo-core/bionemo-core-Overview.md) -- [bionemo-evo2](bionemo-evo2/bionemo-evo2-Overview.md) -- [bionemo-example-model](bionemo-example_model/bionemo-example_model-Overview.md) -- [bionemo-llm](bionemo-llm/bionemo-llm-Overview.md) - [bionemo-moco](bionemo-moco/bionemo-moco-Overview.md) - [bionemo-noodles](bionemo-noodles/bionemo-noodles-Overview.md) - [bionemo-scdl](bionemo-scdl/bionemo-scdl-Overview.md) - [bionemo-size-aware-batching](bionemo-size-aware-batching/bionemo-size-aware-batching-Overview.md) -- [bionemo-testing](bionemo-testing/bionemo-testing-Overview.md) - [bionemo-webdatamodule](bionemo-webdatamodule/bionemo-webdatamodule-Overview.md) diff --git a/docs/docs/main/examples/SUMMARY.md b/docs/docs/main/examples/SUMMARY.md index d68c2c627c..35b166001a 100644 --- a/docs/docs/main/examples/SUMMARY.md +++ b/docs/docs/main/examples/SUMMARY.md @@ -1,3 +1,2 @@ -- [Evo2](bionemo-evo2/) - [MoCo](bionemo-moco/) - [SCDL](bionemo-scdl/) diff --git a/docs/docs/main/getting-started/development.md b/docs/docs/main/getting-started/development.md index 1266d6d851..757680f1ec 100644 --- a/docs/docs/main/getting-started/development.md +++ b/docs/docs/main/getting-started/development.md @@ -12,23 +12,14 @@ The BioNeMo codebase is structured as a meta-package that collects together many this way with the expectation that users will import and use BioNeMo in their own projects. By structuring code this way, we ensure that BioNeMo developers follow similar patterns to those we expect of our end users. -Each model is stored in its own subdirectory of `sub-packages`. Some examples of models include: - -- `bionemo-example_model`: A minimal example MNIST model that demonstrates how you can write a lightweight - Megatron model that does not actually support any megatron parallelism, but should run fine as long as you only use - data parallelism to train. - -We also include useful utility packages, for example: +Each model is stored in its own subdirectory of `sub-packages`. There are useful utility packages, for example: - `bionemo-scdl`: Single Cell Dataloader (SCDL) provides a dataset implementation that can be used by downstream single-cell models in the bionemo package. -- `bionemo-testing`: A suite of utilities that are useful in testing, think `torch.testing` or `np.testing`. -Finally some of the packages represent common functions and abstract base classes that expose APIs that are useful for -interacting with `NeMo2`. Some examples of these include: +Some of the packages represent common functions and abstract base classes that expose APIs: - `bionemo-core`: High-level APIs -- `bionemo-llm`: Abstract base classes for code that multiple large language models (eg BERT variants) share. ### Package Structure diff --git a/docs/docs/main/getting-started/index.md b/docs/docs/main/getting-started/index.md index 0629578db2..ab9b21db6b 100644 --- a/docs/docs/main/getting-started/index.md +++ b/docs/docs/main/getting-started/index.md @@ -9,23 +9,14 @@ because this is how we expect our users to use bionemo, as a package that they t own projects. By structuring code like this ourselves we ensure that bionemo developers follow similar patterns to our end users. -Each model is stored in its own `sub-packages`. Some examples of models include: - -- `sub-packages/bionemo-example_model`: A minimal example MNIST model that demonstrates how you can write a lightweight - megatron model that doesn't actually support any megatron parallelism, but should run fine as long as you only use - data parallelism to train. - -There are also useful utility packages, for example: +Each model is stored in its own `sub-packages`. There are useful utility packages, for example: - `sub-packages/bionemo-scdl`: Single Cell Dataloader (SCDL) provides a dataset implementation that can be used by downstream single-cell models in the bionemo package. -- `sub-packages/bionemo-testing`: a suite of utilities that are useful in testing, think `torch.testing` or `np.testing`. -Finally some of the packages represent common functions and abstract base classes that expose APIs that are useful for -interacting with `NeMo2`. Some examples of these include: +Some of the packages represent common functions and abstract base classes that expose APIs: - `sub-packages/bionemo-core`: mostly just high level APIs -- `sub-packages/bionemo-llm`: ABCs for code that multiple large language models (eg BERT variants) share. Documentation source is stored in `docs/` @@ -89,7 +80,7 @@ $ tree -C -I "*.pyc" -I "test_data" -I "test_experiment" -I "test_finettune_expe # Sub-packages represent individually installable subsets of the bionemo codebase. We recommend that you # create new sub-packages to track your experiments and save any updated models or utilities that you need. ├── sub-packages -│ ├── bionemo-core # 🟢 bionemo-core, and bionemo-llm represent top level sub-packages that do not depend on others +│ ├── bionemo-core # 🟢 bionemo-core is a top level sub-package that does not depend on others │ │ ├── LICENSE │ │ ├── README.md │ │ ├── pyproject.toml @@ -114,68 +105,6 @@ $ tree -C -I "*.pyc" -I "test_data" -I "test_experiment" -I "test_finettune_expe │ │ └── pytorch │ │ └── utils │ │ └── test_dtypes.py -│ ├── bionemo-example_model # 🟢 a small example model that demonstrates how to write a megatron model from scratch and train on MNIST -│ │ ├── LICENSE -│ │ ├── README.md -│ │ ├── _requirements.txt -│ │ ├── pyproject.toml -│ │ ├── requirements.txt -│ │ ├── setup.py -│ │ ├── src -│ │ │ └── bionemo -│ │ │ └── example_model -│ │ │ ├── __init__.py -│ │ │ └── lightning_basic.py -│ │ └── tests -│ │ └── bionemo -│ │ └── example_model -│ │ └── test_lightning_basic.py -│ ├── bionemo-llm # 🟢 shared model code for LLM style models, eg BERT variants, transformer variants, etc. -│ │ ├── LICENSE -│ │ ├── README.md -│ │ ├── _requirements-test.txt -│ │ ├── _requirements.txt -│ │ ├── pyproject.toml -│ │ ├── requirements.txt -│ │ ├── setup.py -│ │ ├── src -│ │ │ └── bionemo -│ │ │ └── llm -│ │ │ ├── __init__.py -│ │ │ ├── lightning.py -│ │ │ ├── model -│ │ │ │ ├── __init__.py -│ │ │ │ ├── biobert -│ │ │ │ │ ├── __init__.py -│ │ │ │ │ ├── lightning.py -│ │ │ │ │ ├── model.py -│ │ │ │ │ ├── testing_utils.py -│ │ │ │ │ └── transformer_specs.py -│ │ │ │ ├── config.py -│ │ │ │ ├── layers.py -│ │ │ │ └── loss.py -│ │ │ └── utils -│ │ │ ├── __init__.py -│ │ │ ├── datamodule_utils.py -│ │ │ ├── iomixin_utils.py -│ │ │ ├── logger_utils.py -│ │ │ ├── remote.py -│ │ │ └── weight_utils.py -│ │ └── tests -│ │ ├── __init__.py -│ │ └── bionemo -│ │ └── llm -│ │ ├── __init__.py -│ │ ├── model -│ │ │ ├── biobert -│ │ │ │ └── test_transformer_specs.py -│ │ │ └── test_loss.py -│ │ ├── test_lightning.py -│ │ └── utils -│ │ ├── __init__.py -│ │ ├── test_datamodule_utils.py -│ │ ├── test_iomixin_utils.py -│ │ └── test_logger_utils.py │ ├── bionemo-scdl # 🟢 │ │ ├── LICENSE │ │ ├── README.md @@ -217,28 +146,6 @@ $ tree -C -I "*.pyc" -I "test_data" -I "test_experiment" -I "test_finettune_expe │ │ └── util │ │ ├── test_async_worker_queue.py │ │ └── test_torch_dataloader_utils.py -│ ├── bionemo-testing -│ │ ├── LICENSE -│ │ ├── README.md -│ │ ├── _requirements.txt -│ │ ├── pyproject.toml -│ │ ├── requirements.txt -│ │ ├── setup.py -│ │ ├── src -│ │ │ └── bionemo -│ │ │ └── testing -│ │ │ ├── __init__.py -│ │ │ ├── callbacks.py -│ │ │ ├── harnesses -│ │ │ │ ├── __init__.py -│ │ │ │ └── stop_and_go.py -│ │ │ ├── megatron_parallel_state_utils.py -│ │ │ ├── testing_callbacks.py -│ │ │ └── utils.py -│ │ └── tests -│ │ └── bionemo -│ │ └── testing -│ │ └── test_megatron_parallel_state_utils.py │ └── bionemo-webdatamodule │ ├── LICENSE │ ├── README.md diff --git a/docs/docs/main/getting-started/using-slurm.md b/docs/docs/main/getting-started/using-slurm.md index 5fca07f23c..b614426546 100644 --- a/docs/docs/main/getting-started/using-slurm.md +++ b/docs/docs/main/getting-started/using-slurm.md @@ -69,8 +69,8 @@ echo "*******STARTING********" \ && echo "---------------" \ && echo "Starting training" \ && \ -python /workspace/bionemo2/sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py \ - -d /workspace/bionemo2/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml \ +python /workspace/bionemo/src/bionemo/evo2/run/train.py \ + -d /workspace/bionemo/examples/configs/full_pretrain_shortphase_config.yaml \ --num-nodes=${SLURM_JOB_NUM_NODES} \ --devices=${SLURM_NTASKS_PER_NODE} \ --grad-acc-batches $GRAD_ACC_BATCHES \ @@ -181,8 +181,8 @@ EXPERIMENT_NAME=EVO2_SEQLEN${SEQ_LEN}_PP${PP_SIZE}_TP${TP_SIZE}_CP${CP_SIZE}_LR$ After setting up all parameters and mounts, the training script is launched within the SLURM job using a compound command. This command string—stored in the `COMMAND` variable—calls the Python training script with all the environment-specific arguments and hyperparameters defined earlier. ```bash -python /workspace/bionemo2/sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py \ - -d /workspace/bionemo2/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml \ +python /workspace/bionemo/src/bionemo/evo2/run/train.py \ + -d /workspace/bionemo/examples/configs/full_pretrain_shortphase_config.yaml \ --num-nodes=${SLURM_JOB_NUM_NODES} \ --devices=${SLURM_NTASKS_PER_NODE} \ --grad-acc-batches $GRAD_ACC_BATCHES \ diff --git a/pyproject.toml b/pyproject.toml index ac1e0c0218..e70e2990b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,13 +15,9 @@ version = "2.0.0" dependencies = [ # **ALL** bionemo sub-packages 'bionemo-core', - # 'bionemo-evo2', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - # 'bionemo-example_model', # DEPRECATED: NeMo/Megatron example model, no longer maintained - # 'bionemo-llm', # DEPRECATED: NeMo/Megatron LLM components, no longer maintained 'bionemo-moco', 'bionemo-scdl', 'bionemo-size-aware-batching', - # 'bionemo-testing', # DEPRECATED: NeMo/Megatron test utilities, no longer maintained 'bionemo-webdatamodule', # external 'nemo_run', @@ -44,13 +40,10 @@ nemo_toolkit = { workspace = true } megatron-core = { workspace = true } # in sub-packages/ bionemo-core = { workspace = true } -# bionemo-example_model = { workspace = true } # DEPRECATED -# bionemo-llm = { workspace = true } # DEPRECATED bionemo-moco = { workspace = true } bionemo-noodles = { workspace = true } bionemo-scdl = { workspace = true } bionemo-size-aware-batching = { workspace = true } -# bionemo-testing = { workspace = true } # DEPRECATED bionemo-webdatamodule = { workspace = true } [tool.uv] @@ -125,14 +118,10 @@ executionEnvironments = [ "./3rdparty/NeMo", # bionemo sub-packages './sub-packages/bionemo-core/src', - # './sub-packages/bionemo-evo2/src', # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - # './sub-packages/bionemo-example_model/src', # DEPRECATED: NeMo/Megatron example model - # './sub-packages/bionemo-llm/src', # DEPRECATED: NeMo/Megatron LLM components './sub-packages/bionemo-moco/src', './sub-packages/bionemo-noodles/src', './sub-packages/bionemo-scdl/src', './sub-packages/bionemo-size-aware-batching/src', - # './sub-packages/bionemo-testing/src', # DEPRECATED: NeMo/Megatron test utilities './sub-packages/bionemo-webdatamodule/src', ] }, ] diff --git a/sub-packages/bionemo-core/README.md b/sub-packages/bionemo-core/README.md index b3ab3b312e..4e6fd4a332 100644 --- a/sub-packages/bionemo-core/README.md +++ b/sub-packages/bionemo-core/README.md @@ -4,7 +4,7 @@ Common code that all BioNeMo framework packages depend on. Contains highly reusa abstractions and implementations that are valuable across a wide variety of domains and applications. Crucially, the `bionemo-core` Python package (namespace `bionemo.core`) depends on PyTorch and PyTorch -Lightning. Other key BioNeMo component libraries, such as `bionemo-llm` obtain their PyTorch dependencies via `bionemo-core`. +Lightning. ## Developer Setup diff --git a/sub-packages/bionemo-core/src/bionemo/core/data/README.md b/sub-packages/bionemo-core/src/bionemo/core/data/README.md index ebcdd58f07..15ab172545 100644 --- a/sub-packages/bionemo-core/src/bionemo/core/data/README.md +++ b/sub-packages/bionemo-core/src/bionemo/core/data/README.md @@ -20,7 +20,7 @@ are accessible by external collaborators, but require legal approval before re-d ## Loading test or example data -Test data are specified via yaml files in `sub-packages/bionemo-testing/src/bionemo/testing/data/resources`. As an +Test data are specified via yaml files in `the test data resources directory`. As an example, in `esm2.yaml`: ```yaml @@ -65,7 +65,7 @@ same NGC resource can therefore be used to host multiple test assets that are us ## Adding new test assets To add new data, first ensure that the data is available from either NGC or `pbss`. Next, extend or create a new yaml -file in `sub-packages/bionemo-testing/src/bionemo/testing/data/resources` with the required information. Owner emails +file in `the test data resources directory` with the required information. Owner emails must be provided for all assets. The description and `ngc` fields are currently optional. If the `sha256` is left unspecified, `pooch` will report the downloaded file's sha when loaded. diff --git a/sub-packages/bionemo-evo2/DEPRECATED b/sub-packages/bionemo-evo2/DEPRECATED deleted file mode 100644 index 3a9d3fa6b5..0000000000 --- a/sub-packages/bionemo-evo2/DEPRECATED +++ /dev/null @@ -1,11 +0,0 @@ -This sub-package (sub-packages/bionemo-evo2) is deprecated. - -The replacement implementation is located at: - bionemo-recipes/recipes/evo2_megatron/ - -Migration guide: -- For Hyena models: the new implementation has full feature parity -- LoRA/PEFT support: not yet ported (planned for future release) -- Mamba/Llama model support: not ported to the new implementation - -This package will be removed in a future release. diff --git a/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md b/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md deleted file mode 100644 index 024e366d86..0000000000 --- a/sub-packages/bionemo-evo2/DEPRECATION_TEST_RESULTS.md +++ /dev/null @@ -1,47 +0,0 @@ -# Evo2 Feature Parity Test Results - -Test execution date: 2026-03-03 -Environment: RTX 5080 (16GB), 1 GPU, no PBSS data access - -## Test Results Summary - -### Tests Run in Both Containers (Feature Parity Validation) - -| Test | Old Container (evo2_old_container) | New Container (evo2_image) | Match? | -|---|---|---|---| -| Training finetune (1 GPU) | PASSED (66.88s) `test_train_evo2_finetune_runs` | PASSED (86.95s) `test_fine_tuning[tp_1_pretrain]` | Yes | -| Stop-and-go (1 GPU) | PASSED (49.99s) `TestEvo2StopAndGo::test_stop_and_go_consistency` | PASSED (85.06s) `test_stop_and_go[1-1-1-False-bf16_mixed]` | Yes | -| Stop at max steps + continue | PASSED (76.77s) `test_train_evo2_stop_at_max_steps_and_continue[no_fp8]` | N/A (covered by test_fine_tuning) | N/A | -| Predict basic (1 GPU) | PASSED (49.71s) `test_predict_evo2_runs` 1 passed, 3 skipped | PASSED (94.34s) `test_predict_evo2_runs` 1 passed, 3 skipped | Yes | -| Infer basic | PASSED (110.09s) `test_run_infer` 2 passed | PASSED (62.41s) `test_infer_runs` 1 passed | Yes | - -### Tests Requiring 2+ GPUs (Auto-Skipped) - -| Test | Status | -|---|---| -| `test_distributed_training_gradient_equivalence` (both) | Skipped (requires 2 GPUs) | -| `test_fine_tuning[tp_2_pretrain]` (new) | Skipped (requires 2 GPUs) | -| Multi-GPU predict/infer parametrizations | Skipped (requires 2+ GPUs) | - -### Tests Requiring PBSS Data Access (Not Available) - -| Test | Status | -|---|---| -| `test_forward_manual` (both) | Would skip (no BIONEMO_DATA_SOURCE=pbss) | -| `test_batch_generate_coding_sequences` (both) | Would skip (no BIONEMO_DATA_SOURCE=pbss) | -| `test_predict_evo2_equivalent_with_log_probs` (both) | Would skip (no checkpoint access) | - -## Conclusion - -All matching tests that could run with the available hardware (1 GPU, no PBSS) **passed in both containers**, confirming feature parity for Hyena models on single-GPU configurations. - -The new implementation (evo2_megatron) demonstrates equivalent behavior to the old implementation (bionemo-evo2) for: -- Training with mock data (finetune workflow) -- Stop-and-go training (checkpoint resume) -- Prediction pipeline -- Inference pipeline - -## Container Details - -- **Old**: `evo2_old_container:latest` - PyTorch 2.8.0, NeMo/NeMo2 based -- **New**: `evo2_image:latest` - PyTorch 2.11.0, Megatron-Bridge based diff --git a/sub-packages/bionemo-evo2/LICENSE b/sub-packages/bionemo-evo2/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/sub-packages/bionemo-evo2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/sub-packages/bionemo-evo2/README.md b/sub-packages/bionemo-evo2/README.md deleted file mode 100644 index f7e3ad5393..0000000000 --- a/sub-packages/bionemo-evo2/README.md +++ /dev/null @@ -1,481 +0,0 @@ -# bionemo-evo2 [DEPRECATED] - -> **DEPRECATED**: This sub-package (`sub-packages/bionemo-evo2`) is deprecated and will be removed in a future release. -> The replacement implementation is located at `bionemo-recipes/recipes/evo2_megatron/`, which uses Megatron-Bridge -> instead of NeMo 2.0 / PyTorch Lightning. -> -> For Hyena models, the new implementation has full feature parity with this one, plus additional capabilities -> (embedding extraction, mixed precision recipes, MCore inference engine). -> -> **Known gaps in the new implementation** (out of scope for initial deprecation): -> - LoRA/PEFT support (training and prediction) -> - Mamba model support -> - Llama model support -> -> Please migrate to `bionemo-recipes/recipes/evo2_megatron/` for all new work. - -`bionemo-evo2` is a `pip`-installable package that contains **data preprocessing**, **training**, and **inferencing** code for Evo2, a new `Hyena`-based foundation model for genome generation and understanding. Built upon `Megatron-LM` parallelism and `NeMo2` algorithms, `bionemo-evo2` provides the remaining tools necessary to effectively fine-tune the pre-trained Evo2 model checkpoint on user-provided sequences at scale, and generate state-of-the-art life-like DNA sequences from Evo2 for downstream metagenomic tasks. - -## Available models in NGC - -| HF Model | BioNeMo Resource Name | Blackwell FP8 | Blackwell BF16 | Hopper FP8 | Hopper BF16 | Ampere | Notes | -| ----------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------- | -------------- | ---------- | ----------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [arcinstitute/savanna_evo2_1b_base](https://huggingface.co/arcinstitute/savanna_evo2_1b_base) | [evo2/1b-8k:1.0](https://registry.ngc.nvidia.com/orgs/nvidia/teams/clara/models/evo2-1b-8k-nemo2) | ✅ | ❌ | ✅ | ❌ | ❌ | Low accuracy on bf16 (eg ampere) GPUs | -| | [evo2/1b-8k-bf16:1.0](https://registry.ngc.nvidia.com/orgs/nvidia/teams/clara/models/evo2-1b-8k-bf16-nemo2) | ✅ | ✅ | ✅ | ✅ | ✅ | Fine-tuned variant of the 1b-8k that supports bf16 as well as fp8, enabling ampere as well as hopper/blackwell. | -| [arcinstitute/savanna_evo2_7b_base](https://huggingface.co/arcinstitute/savanna_evo2_7b_base) | [evo2/7b-8k:1.0](https://registry.ngc.nvidia.com/orgs/nvidia/teams/clara/models/evo2-7b-8k-nemo2) | ✅ | ✅ | ✅ | ✅ | ✅ | The original 7b models have good accuracy across the board at bf16 and fp8 across tested hardware. | -| [arcinstitute/savanna_evo2_7b](https://huggingface.co/arcinstitute/savanna_evo2_7b) | [evo2/7b-1m:1.0](https://registry.ngc.nvidia.com/orgs/nvidia/teams/clara/models/evo2-7b-1m-nemo2) | ✅ | ✅ | ✅ | ✅ | ✅ | The original 7b models have good accuracy across the board at bf16 and fp8 across tested hardware. | -| [arcinstitute/savanna_evo2_40b_base](https://huggingface.co/arcinstitute/savanna_evo2_40b_base) | | ? | ? | ? | ? | ? | Unknown, likely has the same support pattern as the 40b-1m row below since this is the same model at an earlier step of training. | -| [arcinstitute/savanna_evo2_40b](https://huggingface.co/arcinstitute/savanna_evo2_40b) | | ❌ | ❌ | ✅ | ❌ | ❌ | The original 40b-1m context trained model only supports hpper fp8 | -| | [evo2/40b-1m-fp8-bf16:1.0](https://registry.ngc.nvidia.com/orgs/nvidia/teams/clara/models/evo2-40b-1m-fp8-bf16-nemo2) | ✅ | ✅ | ✅ | ✅ | ✅ | A fine-tuned variant of [arcinstitute/savanna_evo2_40b](https://huggingface.co/arcinstitute/savanna_evo2_40b) with broad hardware support (fp8 or bf16 and ampere, hopper, and blackwell have all been tested). The original model only has good accuracy on hopper fp8. | - -On the CLI you can access the resources in this table (and others) with: - -```bash -CKPT_PATH=$(download_bionemo_data evo2/40b-1m-fp8-bf16:1.0) -``` - -In code these resources can be accessed with: - -```python -from bionemo.core.data.load import load - -ckpt_path = load("evo2/40b-1m-fp8-bf16:1.0") -``` - -Or you can follow the links in the table above to the ngc registry and follow the download links from there. - -Note, in the following two sections, the model described as `ft1(step199)` is the model that was released above as `evo2/40b-1m-fp8-bf16:1.0`. - -### Loss evaluation - -| device | model_size | is_finetune | fine_tune_desc | precision | ctx_length | average_nll | Notes | -| -----: | ---------: | ----------: | -------------: | --------------------------------------: | ---------: | ----------: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| a100 | 1b | FALSE | None | bf16 | 8192 | 1.242033 | 1b base model works ok on b300, but cannot handle bf16 precision (and by extension ampere) | -| h200 | 1b | FALSE | None | fp8 | 8192 | 1.076465 | | -| b300 | 1b | FALSE | None | fp8 | 8192 | 1.084777 | | -| h200 | 1b | FALSE | None | bf16 | 8192 | 1.243525 | | -| b300 | 1b | FALSE | None | bf16 | 8192 | 1.243527 | | -| a100 | 1b | TRUE | ft | bf16 | 8192 | 1.078681 | 1b base model fine-tuned for bf16 can handle both bf16 and b300. B300 accuracy is also more similar to H200 accuracy after fine-tuning to handle bf16. Ampere appears to work fine as well. | -| h200 | 1b | TRUE | ft | fp8 | 8192 | 1.078623 | | -| b300 | 1b | TRUE | ft | fp8 | 8192 | 1.07901 | | -| h200 | 1b | TRUE | ft | bf16 | 8192 | 1.078671 | | -| b300 | 1b | TRUE | ft | bf16 | 8192 | 1.078694 | | -| a100 | 7b-1m | FALSE | None | bf16 | 8192 | 0.995102 | 7b model got lucky in training and generalizes well to bf16 precision as well as to blackwell and ampere. | -| h200 | 7b-1m | FALSE | None | fp8 | 8192 | 0.995265 | | -| b300 | 7b-1m | FALSE | None | fp8 | 8192 | 0.9951 | | -| h200 | 7b-1m | FALSE | None | bf16 | 8192 | 0.995109 | | -| b300 | 7b-1m | FALSE | None | bf16 | 8192 | 0.99535 | | -| a100 | 40b-1m | FALSE | None | bf16 | 8192 | 1.702023 | 40b model got unlucky in training. It is sensitive to fp8 and within that appears to have memorized the known difference in hopper that leads to lower accuracy when using standard fp8 computations. (see Deepseek V3 paper where they point out the hopper difference in the “Increasing Accumulation Precision” sub-section where hopper uses 14 bits to accumulate partials rather than the typical 32 bits). It does not work well on bf16 and that seems to carry over to ampere as expected. Note if we set (use_split_accumulator=True) to True by setting https://github.com/NVIDIA/TransformerEngine/blob/bd55e7ba5f0235a80eaa63d49adaa8fb7c6ced50/transformer_engine/pytorch/module/base.py#L56 to True then the fp8 is more accurate which breaks fp8 on hopper, making it seem more like blackwell. | -| h200 | 40b-1m | FALSE | None | fp8 | 8192 | 0.922422 | | -| b300 | 40b-1m | FALSE | None | fp8 | 8192 | 1.789 | | -| h200 | 40b-1m | FALSE | None | fp8-delayed(use_split_accumulator=True) | 8192 | 1.791161 | | -| h200 | 40b-1m | FALSE | None | bf16 | 8192 | 1.70015 | | -| b300 | 40b-1m | FALSE | None | bf16 | 8192 | 1.700162 | | -| a100 | 40b-1m | TRUE | ft0 | bf16 | 8192 | 0.962564 | The first fine-tuning run used a global batch size of 4 rather than 16. The training loss curve was very unstable which could have lead to a lower quality fine-tune. This was successful in that every hardware and fp8 precision combination works to some degree. The accuracy sits between the 7b and 40b checkpoints. This is also reflected in a 1% AUC drop on the BRCA1 notebook. https://wandb.ai/nvidia/evo2_40b_finetune/runs/Alp3KXuC/overview. Note that the accuracy on hopper or blackwell bf16 seems to closely track with ampere bf16. | -| h200 | 40b-1m | TRUE | ft0 | fp8 | 8192 | 0.963434 | | -| b300 | 40b-1m | TRUE | ft0 | fp8 | 8192 | 0.95985 | | -| h200 | 40b-1m | TRUE | ft0 | fp8-delayed(use_split_accumulator=True) | 8192 | 0.959287 | | -| h200 | 40b-1m | TRUE | ft0 | bf16 | 8192 | 0.962654 | | -| b300 | 40b-1m | TRUE | ft0 | bf16 | 8192 | 0.962621 | | -| a100 | 40b-1m | TRUE | ft1(step119) | bf16 | 8192 | 0.955813 | The second fine-tuning run has the same accuracy in the BRCA notebook as the original model, and maintains similar accuracy on hopper at fp8 (0.926 vs 0.922). Unfortunately the accuracy drops somewhat on bf16 as well as blackwell, but it is marginally better than the previous fine-tuning run. Accuracy closely tracks between ampere, hopper, and blackwell at bf16. | -| h200 | 40b-1m | TRUE | ft1(step119) | fp8 | 8192 | 0.926986 | | -| b300 | 40b-1m | TRUE | ft1(step119) | fp8 | 8192 | 0.954112 | | -| h200 | 40b-1m | TRUE | ft1(step119) | fp8-delayed(use_split_accumulator=True) | 8192 | 0.953928 | | -| h200 | 40b-1m | TRUE | ft1(step119) | bf16 | 8192 | 0.955881 | | -| b300 | 40b-1m | TRUE | ft1(step119) | bf16 | 8192 | 0.955859 | | -| h200 | 40b-1m | TRUE | ft1(step279) | fp8 | 8192 | 1.379552 | Interestingly if you keep training the model, the accuracy continues to degrade on validation slightly, but note that the model has now shifted its sensitivity away from the fp8 rounding pecularity on hopper to requring the more accurate FP8 implementation on blackwell. Perhaps fine-tuning at a lower learning rate (I used the final minimal learning rate from the pretraining run), with more dropout (I used 0.1% dropout), or more weight decay (I set a very smalll value to nearly disable it rather than how the model was trained at 0.1). https://wandb.ai/nvidia/evo2_40b_finetune/runs/Ji2IRcrz/overview. Note if we set (use_split_accumulator=True) to True by setting https://github.com/NVIDIA/TransformerEngine/blob/bd55e7ba5f0235a80eaa63d49adaa8fb7c6ced50/transformer_engine/pytorch/module/base.py#L56 to True. | -| b300 | 40b-1m | TRUE | ft1(step279) | fp8 | 8192 | 0.958749 | | -| h200 | 40b-1m | TRUE | ft1(step279) | fp8-delayed(use_split_accumulator=True) | 8192 | 0.957551 | | -| h200 | 40b-1m | TRUE | ft1(step279) | bf16 | 8192 | 0.959398 | | -| b300 | 40b-1m | TRUE | ft1(step279) | bf16 | 8192 | 0.959373 | | - -### AUC Evaluation - -| device | model_size | is_finetune | fine_tune_desc | precision | BRCA1 SM AUC | BRCA1 Bal AUC | BRCA1 AUC | -| -----: | ---------: | ----------: | -------------: | --------: | ------------ | ------------- | --------- | -| A100 | 40b | TRUE | ft1(step119) | BF16 | | | 0.86 | -| H200 | 40b | TRUE | ft1(step119) | BF16 | | | | -| B300 | 40b | TRUE | ft1(step119) | BF16 | | | | -| B300 | 40b | TRUE | ft1(step119) | FP8 | | | 0.87 | -| H200 | 40b | TRUE | ft1(step119) | FP8 | | | 0.88 | -| A100 | 40b | TRUE | ft1(step279) | BF16 | | | 0.86 | -| B300 | 40b | TRUE | ft1(step279) | BF16 | | | | -| B300 | 40b | TRUE | ft1(step279) | FP8 | | | | -| H200 | 40b | TRUE | ft1(step279) | FP8 | | | 0.5 | -| A100 | 7b-1m | FALSE | | BF16 | | | 0.88 | -| B300 | 7b-1m | FALSE | | FP8 | | 0.88 | | -| H200 | 7b-1m | FALSE | | FP8 | | | 0.88 | -| H200 | 40b | TRUE | ft0(step2600) | FP8 | | | 0.47 | -| B300 | 40b | TRUE | ft0(step870) | BF16 | | | 0.86 | -| B300 | 40b | TRUE | ft0(step870) | FP8 | | 0.86 | | -| H200 | 40b | TRUE | ft0(step870) | FP8 | | 0.86 | 0.86 | -| H200 | 40b | FALSE | | FP8 | 0.85 | | 0.87 | -| A100 | 40b | FALSE | | BF16 | | | | -| B300 | 40b | FALSE | | BF16 | 0.55 | | | -| H200 | 40b | FALSE | | BF16 | 0.53 | | | -| B300 | 40b | FALSE | | FP8 | 0.48 | | | - -## Quickstart tutorials - -Two Jupyter notebooks are available to help you get started with Evo 2: one demonstrating how to finetune the model on your own sequences, and another showing how to perform zero-shot BRCA1 variant effect prediction. - -- [Finetuning](./examples/fine-tuning-tutorial.ipynb) - -- [Zeroshot BRCA1 Variant Effect Prediction](./examples/zeroshot_brca1.ipynb) - -## Installation - -To install this package, execute the following command: - -```bash -pip install -e . -``` - -To run unit tests, execute the following command: - -```bash -pytest -v . -``` - -## Preprocessing - -To train or fine-tune Evo2 on a custom dataset, we need to preprocess and index sequence data for training from raw FASTA files into tokenized binaries compliant with `NeMo2` / `Megatron-LM`. For more information about how to configure your data for training, refer to [data/README.md](src/bionemo/evo2/data/README.md) and [utils.config.Evo2PreprocessingConfig](src/bionemo/evo2/utils/config.py). - -```bash -preprocess_evo2 -c -``` - -## Training - -Given a preprocessed collection of preprocessed datasets, and optionally a pre-trained NeMo2 checkpoint for Evo2, training can be executed using the following command: - -```bash -$ train_evo2 --help -usage: train_evo2 [-h] (-d DATASET_CONFIG | --mock-data) [--dataset-dir DATASET_DIR] [--num-nodes NUM_NODES] [--devices DEVICES] [--seq-length SEQ_LENGTH] [--tensor-parallel-size TENSOR_PARALLEL_SIZE] - [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] [--context-parallel-size CONTEXT_PARALLEL_SIZE] [--create-tensorboard-logger] - [--wandb-entity WANDB_ENTITY] [--wandb-project WANDB_PROJECT] [--wandb-tags WANDB_TAGS [WANDB_TAGS ...]] [--wandb-group WANDB_GROUP] [--wandb-job-type WANDB_JOB_TYPE] [--wandb-id WANDB_ID] - [--wandb-anonymous] [--wandb-log-model] [--wandb-offline] [--sequence-parallel] [--fp8] [--micro-batch-size MICRO_BATCH_SIZE] [--global-batch-size GLOBAL_BATCH_SIZE] [--grad-acc-batches GRAD_ACC_BATCHES] - [--max-steps MAX_STEPS] [--early-stop-on-step EARLY_STOP_ON_STEP] [--val-check-interval VAL_CHECK_INTERVAL] [--grad-reduce-in-fp32] [--fp8-wgrad] [--use-megatron-comm-overlap-llama3-8k] [--tp-comm-overlap-backend {nccl,mpi,gloo}] - [--align-param-gather] [--model-size {1b,1b_nv,40b,40b_arc_longcontext,40b_nv,7b,7b_arc_longcontext,7b_nv,test,test_nv}] [--add-bias-output] [--result-dir RESULT_DIR] [--experiment-name EXPERIMENT_NAME] - [--limit-val-batches LIMIT_VAL_BATCHES] [--log-every-n-steps LOG_EVERY_N_STEPS] [--ckpt-dir CKPT_DIR] [--wd WD] [--restore-optimizer-from-ckpt] [--no-average-in-collective] [--seed SEED] - [--workers WORKERS] [--gc-interval GC_INTERVAL] [--enable-preemption] [--ckpt-async-save] [--ckpt-format {torch_dist,zarr}] [--eod-pad-in-loss-mask] [--cross-entropy-loss-fusion] [--no-fp32-residual-connection] - [--debug-ddp-parity-freq DEBUG_DDP_PARITY_FREQ] [--hybrid-override-pattern HYBRID_OVERRIDE_PATTERN] [--num-layers NUM_LAYERS] [--create-tflops-callback] [--log-parameters-and-shapes] [--lr LR] [--min-lr MIN_LR] - [--warmup-steps WARMUP_STEPS] [--nsys-profiling] [--nsys-start-step NSYS_START_STEP] [--nsys-end-step NSYS_END_STEP] [--no-renormalize-loss] [--nsys-ranks NSYS_RANKS [NSYS_RANKS ...]] - [--activation-checkpoint-recompute-num-layers ACTIVATION_CHECKPOINT_RECOMPUTE_NUM_LAYERS] [--disable-checkpointing] [--clip-grad CLIP_GRAD] [--seq-len-interpolation-factor SEQ_LEN_INTERPOLATION_FACTOR] - [--overlap-param-gather] [--overlap-grad-reduce] [--hidden-dropout HIDDEN_DROPOUT] [--attention-dropout ATTENTION_DROPOUT] [--save-top-k SAVE_TOP_K] [--metric-to-monitor-for-checkpoints METRIC_TO_MONITOR_FOR_CHECKPOINTS] [--save-last-checkpoint] [--no-save-last-checkpoint] [--no-activation-checkpointing | --selective-activation-checkpointing] - -Train a Hyena model using NeMo 2.0. - -options: - -h, --help show this help message and exit - -d DATASET_CONFIG, --dataset-config DATASET_CONFIG - Path to the blended / weighted training dataset configuration YAML. (default: None) - --mock-data Train with Mock data (for testing/debugging), either set this or provide a dataset config. (default: False) - --dataset-dir DATASET_DIR - Absolute path to the dataset directory. Defaults to using the absolute or relative paths (dataset_prefix) specified in the dataset config YAML. (default: None) - --num-nodes NUM_NODES - Number of nodes to use for training, defaults to 1. (default: 1) - --devices DEVICES Number of devices to use for training, defaults to 1. (default: 1) - --seq-length SEQ_LENGTH - Training sequence length (default: 8192) - --tensor-parallel-size TENSOR_PARALLEL_SIZE - Order of tensor parallelism. Defaults to 1. (default: 1) - --pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE - Order of pipeline parallelism. Defaults to 1. (default: 1) - --context-parallel-size CONTEXT_PARALLEL_SIZE - Order of context parallelism. Defaults to 1. (default: 1) - --create-tensorboard-logger - Create a tensorboard logger. (default: False) - --wandb-entity WANDB_ENTITY - The team posting this run (default: None) - --wandb-project WANDB_PROJECT - Wandb project name (default: None) - --wandb-tags WANDB_TAGS [WANDB_TAGS ...] - Tags associated with this run (default: None) - --wandb-group WANDB_GROUP - A unique string shared by all runs in a given group (default: None) - --wandb-job-type WANDB_JOB_TYPE - A unique string representing a type of run, which is useful when you're grouping runs together into larger experiments using group. (default: None) - --wandb-id WANDB_ID Sets the version, mainly used to resume a previous run (default: None) - --wandb-anonymous Enable or explicitly disable anonymous logging (default: False) - --wandb-log-model Save checkpoints in wandb dir to upload on W&B servers (default: False) - --wandb-offline Use wandb in offline mode (default: False) - --sequence-parallel Set to enable sequence parallelism. (default: False) - --fp8 Set to enable FP8 (default: False) - --micro-batch-size MICRO_BATCH_SIZE - Micro-batch size for data-parallel training. (default: 1) - --global-batch-size GLOBAL_BATCH_SIZE - Global batch size for training. If set to None, infer it from the TP, CP, and PP parameters. (default: None) - --grad-acc-batches GRAD_ACC_BATCHES - Number of batches to accumulate gradients over. (default: 1) - --max-steps MAX_STEPS - Number of training optimizer update steps. This controls the total number of steps as well as the shape of the learning rate curve. (default: 500000) - --early-stop-on-step EARLY_STOP_ON_STEP - Stop training on this step, if set. This may be useful for testing or debugging purposes. (default: None) - --val-check-interval VAL_CHECK_INTERVAL - Number of steps between validation measurements and model checkpoints. (default: None) - --grad-reduce-in-fp32 - Gradient reduce in FP32. (default: False) - --fp8-wgrad Faster option that is maybe less accurate (TBD) when using fp8. (default: False) - --use-megatron-comm-overlap-llama3-8k - --tp-comm-overlap-backend {nccl,mpi,gloo} - TP communication backend to use. Defaults to 'nccl'. (default: nccl) - --align-param-gather - --model-size {1b,1b_nv,40b,40b_arc_longcontext,40b_nv,7b,7b_arc_longcontext,7b_nv,test,test_nv} - Model architecture to use, choose between 7b, 40b, or test (a sub-model of 4 layers, less than 1B parameters). '_arc_1m' models have GLU / FFN dimensions that support 1M context length when trained with TP<=8. (default: 7b) - --add-bias-output Add bias to the output layer to enable learning a simple prior. (default: False) - --result-dir RESULT_DIR - Path to the result directory. (default: results) - --experiment-name EXPERIMENT_NAME - Name of the experiment. (default: evo2) - --limit-val-batches LIMIT_VAL_BATCHES - Number of validation steps (default: 20) - --log-every-n-steps LOG_EVERY_N_STEPS - Number of steps between logging. (default: 1) - --ckpt-dir CKPT_DIR Directory to restore an initial checkpoint from. Use this for supervised fine-tuning. (default: None) - --wd WD Weight decay for optimizer. (default: 0.01) - --restore-optimizer-from-ckpt - Restore optimizer state from initial checkpoint. Defaults to False. (default: False) - --no-average-in-collective - Avaerage optimizer state in collective rather than dividing by dp size and summing. (default: False) - --seed SEED Set random seed for training. (default: 1234) - --workers WORKERS Number of workers to use for data loading. (default: 8) - --gc-interval GC_INTERVAL - Set to a value > 0 if you want to synchronize garbage collection, will do gc every gc-interval steps. (default: 0) - --enable-preemption Enable preemption hooks. If enabled this will save a checkpoint whenever slurm exits. (default: False) - --ckpt-async-save - --ckpt-format {torch_dist,zarr} - Specify checkpoint format to use. Defaults to 'torch_dist', as 'zarr' is deprecated. Only use if resuming training from a zarr checkpoint. (default: torch_dist) - --eod-pad-in-loss-mask - Do not predict EOD/Pad tokens (typical default, but not default in original evo2). (default: False) - --cross-entropy-loss-fusion - Use the faster, but maybe less accurate fused form of cross entropy, which also has bf16 grads internally. (default: False) - --no-fp32-residual-connection - If set, turn off fp32 residual connections which may be faster but may impact accuracy. (default: False) - --debug-ddp-parity-freq DEBUG_DDP_PARITY_FREQ - Set to value > 0 to debug DDP weight parity between ranks. (default: 0) - --hybrid-override-pattern HYBRID_OVERRIDE_PATTERN - Override the hybrid override pattern in the config (specifies hyena layer ordering and type). (default: None) - --num-layers NUM_LAYERS - If set, override the number of layers specified in the requested config. (default: None) - --create-tflops-callback - Enable tflops calculation callback for Hyena / Evo2. Defaults to False. (default: False) - --log-parameters-and-shapes - Log training parameters shapes and dtypes for debugging. (default: False) - --lr LR Learning rate. (default: 0.0003) - --min-lr MIN_LR Min learning rate in cosine annealing. (default: 3e-05) - --warmup-steps WARMUP_STEPS - Number of warmup steps in cosine annealing (default: 2500) - --nsys-profiling Enable targeted `nsys` profiling on the training loop for a defined step range. To actually get profiling output you must run the whole program with `nsys`. For example: `nsys profile -s none -o output_report_name -t cuda,nvtx --force-overwrite true --capture-range=cudaProfilerApi --capture-range-end=stop [regular python command - here]` (default: False) - --nsys-start-step NSYS_START_STEP - Start nsys profiling after this step. (default: 0) - --nsys-end-step NSYS_END_STEP - End nsys profiling after this step. (default: None) - --no-renormalize-loss - Do not renormalize the loss weights. (default: False) - --nsys-ranks NSYS_RANKS [NSYS_RANKS ...] - Enable nsys profiling for these ranks. (default: [0]) - --activation-checkpoint-recompute-num-layers ACTIVATION_CHECKPOINT_RECOMPUTE_NUM_LAYERS - If set, override the default value set in the config. (default: None) - --disable-checkpointing - Disable creating a ModelCheckpoint callback. (default: True) - --clip-grad CLIP_GRAD - Grad clip value. Note that when using DDP this may need to be inflated. (default: 1.0) - --seq-len-interpolation-factor SEQ_LEN_INTERPOLATION_FACTOR - Adjusts the linear scaling of ROPE (Rotary Position Embedding) for context extension. Set this factor relative to your base context length e.g., for an original context length of 8192 and an extended context length of 524288, use 524288/8192 = 64. (default: None) - --overlap-param-gather - Overlap the parameter gather with the optimizer step. This is currently disabled due to a NeMo bug when using DDP. Making this an option defaulting to False is a temporary solution until the bug is fixed. (default: False) - --overlap-grad-reduce - Overlap the gradient reduce with the optimizer step. (default: False) - --hidden-dropout HIDDEN_DROPOUT - Dropout probability for the hyena layers (default: 0.0) - --attention-dropout ATTENTION_DROPOUT - Dropout probability for the attention layers. (default: 0.0) - --save-top-k SAVE_TOP_K - Number of best checkpoints to keep. Set to -1 to save all checkpoints. (default: 5) - --metric-to-monitor-for-checkpoints METRIC_TO_MONITOR_FOR_CHECKPOINTS - Metric to monitor for checkpoints. (default: val_loss) - --save-last-checkpoint - Save the last checkpoint. (default: True) - --no-save-last-checkpoint - Disable saving the last checkpoint. (default: True) - --no-activation-checkpointing - --selective-activation-checkpointing -``` - -To supply a pre-trained checkpoint, pass the NeMo2 checkpoint directory to `--ckpt-dir`, and the script will dump newly trained checkpoints and logs to `--experiment-dir`. However, if there are existing well-defined checkpoints in the directory specified by `--experiment-dir`, the script will automatically resume training from the most recent checkpoint in the experiment directory instead of starting from the checkpoint specified by `--ckpt-dir`, which streamlines long training sessions. (To disable this behavior, supply a new or clean `--experiment-dir` when restarting from `--ckpt-dir`.) - -Training data and sampling weights can be specified using the `--dataset-config` argument as a YAML file adhering to the following schema: [utils.config.Evo2BlendedDatasetConfig](src/bionemo/evo2/utils/config.py). For more information about dataset sampling and blending during training with Megatron-LM, refer to [megatron/core/datasets/readme.md](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/datasets/readme.md). For example: - -```yaml -- dataset_prefix: /workspace/bionemo2/data/metagenomics/pretraining_data_metagenomics/data_metagenomics_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.18 -- dataset_prefix: /workspace/bionemo2/data/gtdb_imgpr/pretraining_data_gtdb_imgpr/data_gtdb_imgpr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.24 -- dataset_prefix: /workspace/bionemo2/data/imgvr_untagged/imgvr_untagged_data/data_imgvr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.03 -- dataset_prefix: /workspace/bionemo2/data/promoters/pretraining_data_promoters/data_promoters_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.0003 -- dataset_prefix: /workspace/bionemo2/data/organelle/pretraining_data_organelle/data_organelle_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/metagenomics/pretraining_data_metagenomics/data_metagenomics_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.18 -- dataset_prefix: /workspace/bionemo2/data/gtdb_v220/gtdb_v220_imgpr_merged_data/data_gtdb_imgpr_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.24 -``` - -## Inference - -Once you have a pre-trained or fine-tuned Evo2 checkpoint, you can also prompt the model to generate DNA sequences using the following command: - -```bash -$ infer_evo2 --help -usage: infer_evo2 [-h] [--prompt PROMPT] --ckpt-dir CKPT_DIR [--temperature TEMPERATURE] [--top-k TOP_K] [--top-p TOP_P] [--max-new-tokens MAX_NEW_TOKENS] [--tensor-parallel-size TENSOR_PARALLEL_SIZE] [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] [--context-parallel-size CONTEXT_PARALLEL_SIZE] [--output-file OUTPUT_FILE] - -options: - -h, --help show this help message and exit - --prompt PROMPT Prompt to generate text from Evo2. Defaults to a phylogenetic lineage tag for E coli. - --ckpt-dir CKPT_DIR Path to checkpoint directory containing pre-trained Evo2 model. - --temperature TEMPERATURE - Temperature during sampling for generation. - --top-k TOP_K Top K during sampling for generation. - --top-p TOP_P Top P during sampling for generation. - --max-new-tokens MAX_NEW_TOKENS - Maximum number of tokens to generate. - --tensor-parallel-size TENSOR_PARALLEL_SIZE - Order of tensor parallelism. Defaults to 1. - --pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE - Order of pipeline parallelism. Defaults to 1. - --context-parallel-size CONTEXT_PARALLEL_SIZE - Order of context parallelism. Defaults to 1. - --output-file OUTPUT_FILE - Output file containing the generated text produced by the Evo2 model. If not provided, the output will be logged. -``` - -As in `train_evo2`, `--ckpt-dir` points to the NeMo2 checkpoint directory for Evo2 that you want to load for inference. `--output-file` can be used to dump the output into a `.txt` file, and if not specified the output will be logged in the terminal. - -``` -[NeMo I 2025-01-06 17:22:22 infer:102] ['CTCTTCTGGTATTTGG'] -``` - -## Prediction - -To run a forward pass of the Evo2 model, you can call `predict_evo2`, which processes a batch of sequences and returns either raw token logits or, if `--output-log-prob-seqs` is set, log-probability scores. - -For example, to predict the log-probability scores of a batch of sequences saved to `fasta_path`, you can run the following command: - -```bash -predict_evo2 \ - --fasta \ - --ckpt-dir \ - --output-dir \ - --model-size 1b \ - --tensor-parallel-size 1 \ - --pipeline-model-parallel-size 1 \ - --context-parallel-size 1 \ - --output-log-prob-seqs -``` - -An example of using `predict_evo2` for variant effect prediction can be found in our [Evo 2 Zeroshot BRCA1 Notebook](https://docs.nvidia.com/bionemo-framework/latest/user-guide/examples/bionemo-evo2/evo2_zeroshot_brca). This notebook demonstrates how to use Evo2 to predict whether single nucleotide variants (SNVs) in the BRCA1 gene are likely to be harmful to protein function and potentially increase cancer risk, by comparing the model's log probability scores between the reference and variant sequences. - -## Context Extension - -Evo2 supports continuing training with longer context lengths beyond those used to train a prior checkpoint. For example, when training the original Evo2 model, the first phase of training was performed at 8192 context length while the next phase continued training at 1m context length, but starting from the prior 8192 context length checkpoint. We call this process context extension. - -To change the sequence length used in training in this way, supply the prior checkpoint as the `--ckpt-dir` argument, and set your new desired sequence length with `--seq-length`. Only doing these two things will run, but one issue is that the model's ROPE embeddings may not be scaled properly out of the box for a new context length. The way that Arc institute handled this was by setting the `--seq-len-interpolation-factor` to linearly scale the ROPE embedding for context extension. For example, if the base context length is 8192 and the extended context length is 65536, the factor would be 65536/8192 = 8. There are other ways of accomplishing this as well that may require some minor code changes, such as the approach used in llama-3, which is also available in megatron and could be added into argparse as an alternative. - -## Checkpoint conversion from hugging face to NeMo2 - -The following conversion script should work on any savanna formatted arc evo2 checkpoint. Make sure you match up the -model size with the checkpoint you are converting. -The pyproject.toml makes the conversion script available as a command line tool `evo2_convert_to_nemo2`, so you -can try replacing: - -```bash -evo2_convert_to_nemo2 \ - ... -``` - -with the following if you want to run with `-m pdb` or something: - -```bash -python \ - sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py \ - ... -``` - -### 1b-8k - -```bash -evo2_convert_to_nemo2 \ - --model-path hf://arcinstitute/savanna_evo2_1b_base \ - --model-size 1b --output-dir nemo2_evo2_1b_8k -``` - -This new checkpoint `nemo2_evo2_1b_8k` is ready to go in nemo2 format in downstream pretraining or prediction workflows. - -#### Optional steps if you want to register the model with NGC - -If you want to register the checkpoint with NGC (typically only NVIDIA employees) then you can do the following. - -To create the checkpoint for distribution in NGC, first cd into the checkpiont directory: - -```bash -cd nemo2_evo2_1b_8k -``` - -Then run the following command to make a tar of the full directory that gets unpacked into the current directory which -our NGC loader expects: - -```bash -tar -czvf ../nemo2_evo2_1b_8k.tar.gz . -``` - -Finally `sha256sum` the tar file to get the checksum: - -```bash -sha256sum nemo2_evo2_1b_8k.tar.gz -``` - -Then register it into the loader for testing purposes by editing -`sub-packages/bionemo-core/src/bionemo/core/data/resources/evo2.yaml`. - -### 7b-8k - -```bash -evo2_convert_to_nemo2 \ - --model-path hf://arcinstitute/savanna_evo2_7b_base \ - --model-size 7b --output-dir nemo2_evo2_7b_8k -``` - -### 7b-1M - -```bash -evo2_convert_to_nemo2 \ - --model-path hf://arcinstitute/savanna_evo2_7b \ - --model-size 7b_arc_longcontext --output-dir nemo2_evo2_7b_1m -``` - -### 40b-8k - -```bash -evo2_convert_to_nemo2 \ - --model-path hf://arcinstitute/savanna_evo2_40b_base \ - --model-size 40b --output-dir nemo2_evo2_40b_8k -``` - -### 40b-1M - -```bash -evo2_convert_to_nemo2 \ - --model-path hf://arcinstitute/savanna_evo2_40b \ - --model-size 40b_arc_longcontext --output-dir nemo2_evo2_40b_1m -``` diff --git a/sub-packages/bionemo-evo2/VERSION b/sub-packages/bionemo-evo2/VERSION deleted file mode 100644 index 6b4950e3de..0000000000 --- a/sub-packages/bionemo-evo2/VERSION +++ /dev/null @@ -1 +0,0 @@ -2.4 diff --git a/sub-packages/bionemo-evo2/assets/1b_finetuning_train_curve_500_steps_256gbs.png b/sub-packages/bionemo-evo2/assets/1b_finetuning_train_curve_500_steps_256gbs.png deleted file mode 100644 index 20c513cd9da6974ac63f827c824df62cbb4de838..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 106324 zcmeFZcT`j97B`G2qN1Qur7J25(xkTp3!q{^>Cy!WB29V?2#6q6Mk&&jUP22!Km;i& zy_W!i&HMcY+Bh&U<_-OWsZh+yH zjk5~%>yH=HFHVaH3cmXC4sq=J=T~=HPd~1EeIxfv;H69CKYRkn`I#a)Ey#`uk(%N7MYB31CRge9h2Lm@D|ZmEwo|vfokBf4 z^4i=e>5d^s0(*dybJ(%0~ z+S#VIyX0)14eZfmR|Q82#o6netSPIC!duCozHkWcP1ElpQWc@;dGg4K5AdP>nP`WK zGpO6)4pEmw6R6{#ela4?*Tj=@#xdoe-;?DNw?sc~N>5Lm{vvsX>|3$d^oKJ?&VD9u zfB)tCDAcnB9zmUis#Fx=q?qu&**dTbP2i~P))to0RkF%aVdVY%~g2JxG z`Vm>+mm@Ek)P0yA(Jyp87FxBcs}W5oRgx|f9;kO+b+0vHs(7^9{=Enr%U<67OLnLl;`sGwHRGFF}ZBO&tb!h?pWW>1apC%ngSl6|BRrtnUz#6+60S zoS9B5uqKBJ)k~Q82--$lMH@E@J>*Se5f1)Xd&9Uu`+AO8?|IzYGmU8`RQZYpX8BqL zT>a+r?yTq1Uzv=u+SZg^)??3Q?R$;%VX0{ujaOfBcHZmd z+N@f!+E_pP*G+x`Coty{7;Azu-uAzJ9W;M7^L+WWoBV~>9?*-@tafBCHjY{alyEAX6J}Ut zY+}ehkG#=9t60}lr(Z`~r&QM#6!zBZ!@KwS;Q=4|xjQZiT`IVIo$GsK@P`{8EF)MW zcCN`s*}V&llDgJ&as4jy75J3}zW6IJy$VA&gEv=AE`ANa{y|b;^&UfH&u{DD!&lvJ zJ3VrGtZdMFpI?44V@R~z)z#Lu#3Rn*;|~3nBt@1_#O<8hA8R%I7#)Nv&d7Qj+`=ad zT>HTz#S6XG%R_ygpEr|t>-yyNVjkJYSFa&HB!6i7;2y!KPNy!TZl@8W!JdiLc#f|+zqQzr-7Vgf8Do)>jxbj=hOl-Q;$+m2H{n^}jpL5^|n8R4Rj@Bw9n5|pDE(%jY37E7}JWO2@> z@yr(<}xOj7W(JQM(Z2~r@9w{ot*4JGct>p zrfY4@9iF@tbsSzCoK~=<+Ud~G>N(gZEpEEsYA08Q=8ZP`e@^R{8L`P(Y|L1Ar z9Frd=Qzi?>ye5W?l8rreeD&@PiGhZY6Zf7&>aM)GV({?u<$CV*D^1*H+%)OW)9F%{ zR9n<&()nKwmz^cC9ml8*iqz6@!2mZkyKm?GXF;@5x*zd#+fIAUr2Z* ztG{Kpgx zdHh6PZPn-1j2|6G6}5OwcoefOvvsmfvk4jkY(WxYc5$%ERE6BMjHz)#0S27mm}Qlx zmdy+HmJD~rx>kAZe!1WKz2W%mo6m1l(tlH4Rwl+RT8LO#TK;CmWBw*qIA*%_bzC{o zcb6ufuY#`_j)lvc)eHn91X}&#S)}n!)dX_f?WV9Sux9CF^eObG4fp~Tn-E)NYf|f2 zP%fi8@G{l--F@#F@6MX%cDzUJ#C|;S->*j&k$b zK8pMLrSVmmaMKgs{)j-8n8XSY66RSMQ&z$A@`yLp2|c+{a&pGAWv{+a9OLKw`>j1M zeNMg~GdM%4fM;!5-iDf5n2aN@_>PXMeV?1NTbP?mmm+(2lB_~_@7U`T+{*5tb`QY( z3pCZf4~0O;c!28@WXFy$lTiRyM}QaT2+O~&6^>jZJNolJIT@M1IoYv4o>2zg_kZ32 zul+W^zaRbW4H+fy={)dqNh1IAX-a6)(Lb-ri-7OQZmHe5e;;^PGqE!@wYGn5<1l@s z$sM?H^5wlJ_GDxXSN2~=?yFv20{ZVWSJ!sXhCGxqv9aQR`pm}Il;6eb<$gb8GA>fU zrIo3}Q%)BvOKW>67uiccpO6Bs_iqba;{5rDgN5uRZO9|eJ2rNvoMQZf{DPN2r#Lw| zW$d0orBv@K{LvivBzx((gTqTH0Rd-cXMSg4ej7V80U=3ANddu|0yl5+0Z;JRyIMOu zb>XwN=lZ>qKl{0BYHwm^{?ftR#+q}#->1eljt;VyF6|HWubc+4yXIB^&44jLVv;vyem?(i-~2V=ftK2TwG@(& z6gk-Rz*m27s$p+xcgMyG7}Ejt*M9xc_~4g+G?Wq8U-|)3{7&becL73!PRR)TYtuld zP7-vwfqi5(zpJ7SyaQae|8qnK_;c;|_x)>omiMZr6J%s^WcTmhQg=BrKTKK1sxb^+ zGQ9TbHT6l+i*nCz)vRc7YSP_0_nPs}`4}Iz)RUhwm^d8*C>|X%@1za>EOJEt@nz1W zYcbl-FMhfT{9cyZvlnZA=8PXHAuLW^-D+MPP53A|-8^pwU9E0gt(uNASS{qUJa*zN zr`#*DBjo@2qFvD2z-Qpiz+CiyedK5BD@ST_7`+dF<_yExW-%sh!has=&qqKY~M!Cz|6#c|*Zt zDW4|;d2`NkCpz3N$P+Xi4MtQmwt=MKqfD+7V+A7>I0mAU-Pk98j!+isN=c|0zLOv6 z!81Zka2B}9iUC=xS8fdA>UR?yuo*L5=_&% z8y;F|65wzi^3-j#cDUFbBVpdTBML=a_Xe+jk2m?Yf#!RS%)tNVGq62Wbm=d`8+`}5 z;vZC_P#C73dJ|9r$PgjzS=KraT4l2WpAr>KzFZM0*|+O zf)+y$5Qd#+2^%EPzzyF0h+Wh)Fi+iFfQvH{PpwjHj1cUX`zO9rG}1@upO2+xR-bov z9>Y;`FnhU=;!~|D5W{d}61Zlboa)K}LY#0=mlH%kzP^57)1JqC=~4mHBA{)C?)DK8 zTA{^#21tj6(h)b_#hq3C<-E7=jmt->#@Tewgf5hzOQv8gt!e(d1Qdyu^dl_@wjQBn zf@l#7;SN^b>g9V!7B!v@PHwHY!RlRUI2;~Bd&R9>ML69Q5 z>t%MgnX%}brY~|(d)o!B8?xeTMfX#T3gK3ZL*=RXh_r6V6pM|Bp#5OZT6y!$rlOuA z?2wW`6)Vl&YT|O6*DkN-6Vr(Po5F)HmLgMeUSmpzA@@mp&W};&YAoFZ3|e=fEphbz zte$r_^b`2?1**6h^<6w|7u2<#;~h;>Bev-pE4FhXi6B{a*R=u{5u(lDn)!fqCs=I6 zCAryX)oE8PRvhDoD2sxKjYvf5yvQ^L)9WBn4vStS_iKw)`0DuzVdDkQU1J1SPx;IW;YZ4!LjnvBTyIm|&fm zPdJH~m9O$!!pKEtf41$AnJ%+i&kf={kni@Guz|dJt-WWfuNt_n_sn$_?4umWJuIMY z0cDf(Xgut^HFCju`8|~* zejY66%2eDlT4THCIvsg?07V+-pl96K*(289G9*5*o&RnrGUBWluPt;NA@M|0+ayF3 z+uWsMF6y#F@Y-W(^u$RoTk?L=oqORosTI&SUCDku?xC)*?*IwPMXe?nRhzp}qNc*m z#(^G7mM`urJXc~Cw_df`!dJ@B_=3JOU?5!4LwKVfR;rWE1)1D|<2_16>07R87r){KDseIb((b~x|Ub9^10CiW&ISKk9qx4*E zS^gGMU3@?*$t58HwKU9^1wKIXiW`sE71o>5oZ|cp`0J27Vxncrf-lV;V#cKXYHZyk7+F@|U0}X8yHrKUW}1v) zPQC6%X)VDS%fI>7WjU2G$aCd3!e`(k0$L9udh(~WMTZ+!tbNQynTtg%1Yc;Y|B$Js z)Ck)2zBurFyo-mtf+G%Q9gN(VmPgawwIc2j>bD2z+SC$VE-h9O_AbdOsU0DC42gE+ z8Dc(r8ZbH5&Xg^bqMDqQ5;OObEO+~-5t-1{+1I>0&!lA~qZhs*H}c-njw=~TW?(Jg zrHd7*vq9;QrZiE{i4J2r$E*)&-*yM;88u~mD{U2PTUIc&ol1Q1sl%&^e(9i2yb+*Q zGPgjXlB;X?+E9;m6&Gdor6IDO0P7k;FoWq2lpzO0z^Ybg^6Ao zX50usJwForqjRltt43r_h(j_a58`fEqF~DGxLx;NWvVAe%y=W^y-G-|r@fNxa8an` zsn4B+Z+L-_&8pFKMulz9>yKJX=|HJVQigpZeqHJDcB*T5)GdQ6S0UTZtQ##{Tohl- z(I$kpx&p;#0<6ZXE}V3qyE!@9di(;8~gNcz0V z-pK0sYzM=RWF(rOS~WBvWV^`dnz>G#^Ji>S8w&;RT@T&eu&po95hDPKWwTzuD*|E< z$`bj3%*Z*guVmiVfP!=4O3OW9-psIj>pboDB+j!@n&H5`gPlbc?u(+Iqi;HoemOH< zsKn0$QPR;T{WP7LY7s@L9Zrlp6N>T?B0e@@vV<&c)Jm*B8}nEHGkkLsezRTaKTT$g z)dN9dA4>Kt2GEn(n9Qa39PzaHRs}leXGCliih-6J!lMDm=N>sLaj3y`gwyK%}L(1ktwjSWkFy5ZfJvTJzh)dQ;L7T|WW4vu<-SWbI+s%e?mx zqHcBSbgw+slSmw2EnGKZ%-N0Yy|FjD1-2G?APczwa_AqSx41%@ zXq;|9Vu~TQ7=L!pXG887SpA+rV_1-gYB`s#&a?z1HO|I-VO$?vrBsc3f6%gk&;pi4 z$e`ar!e_`iG8K=#I+7|X+|>;)E&*diFDkIZEN$m9xB%t0Sgu%LXmIb=<5R(%kqYd4 zQ)#%ME&`P6DPS)K7=s~x9{LPq>#b=9@DWvs(k5#vg?UjEp7GUI6em9ln}yW`?R=tv zUL4M>_tf`O5l zvBnkS7G5AwHR&rw*TvG}k%`(9P?YOLhb=*tKt~wSY}ZhCLoT&>L4pOm9BTr`VG5_i z!eH~sHi_^Zm)X=PCAIM>#il-9*yfm`F{+ItIx(=?0!}Z!vmI6M>GmxVIa zL2o9KwzXt-mXZgy3&ZNZhJl#FK-=2QZtgrY>ZB^XtIE#i5QnCCNa*X>9+Y2i!d8$# z6(}PJ zWHyah*Fx^_?s0PNK9!GoK~zf|lsvwMPc%%cH;ajM-wc2$S#V5qy9=PDod(StJHHJ$ zTOk}h`wt@hF=iglXsc^^tp_Zm48^46r|Khw3|fUoD->Mdb;WjIlks?nIFDmQZ%Eam z7OE8)(uPMFvaS{9(Ge&8{LVEmmJkz!U=6`ou|eicoqbAX8)D9;Czcz*hUZqT7C3;k zYt3@xqMF(~bFo)FkoZ`?{ytNe+rI)Y~oDqFvO&FmYThtqw35 zc%`0L_4HsPV;O^$1(YlGGuTtB_S|sw0-n`I%oFNxk0N(RBAq85*%lCEv6WWgMQZd- zEq-t4wYxr-7Orp~FS>>ph$&8dE8J-<@VDJ{-EvUK;81?ZJaUOvSKpS(mbT4 zs23`WE>Pr%2zeL>W?ub~o%fbwcavLQoMZb7B{oM`g?A&HD#%389c_gQ>PU}cwy9iJ zsPEGk(zSPLPl6ctXlRB=<14ZyN-rNr^mO^8FWC=?YB5~x@k>wel-gje7%eRU+`qbb z=OOkgxAm-ym~B~M;?Y3v1dl?jOEu!FBItZ$*tYTw)XvJ*C+zB5?u20BR%TklFsTo# z?fMi5kzhZ(SGln#dkvia@UN%=nfKyZK*jif1|5*l8BOoj6TToPW5tH?D&M5ia@IkYPh6eGE$cg=*nrYj@$U_8Oje?Lv}bT+uE7;L341wP*=Zt zzGw*78x@J=371v4xRRj|MZmr}Vp%k~IPk5Xj|=V_r(ou}fTNr~rZf>Ske=vyb}FqJsGpJW!W~eECi5D_q<5T<_i?=u?4%%E;k)&{XmJ|@d1v$8f%nQX z)mZH;JpF~*R_*d*4c?+U{GoibZr4e zx+5Ro>uEL_(W&#pq*0$G%FtB<@RWCwWaGi}?Y<`XF{;~>D`GzmNYUAuKp-Z?>o$1) zK+NmxJ|EF8R&$3kxjNViaO{5bA74{r44%88T%=aEUV>>@=1x%W)-hS{QTjAq*u<~P z+C;^62AZAzWN(bnSaf1VHL+*R4sNtDZ|_Oh_w1Hxb#ZC6g@FL9dVA1fxq!`?Ux9H$ z-vZvroKS^$Ss;vFo9!|wl?CZ`OspgVaU?6L46Jr_SF@2*Z_Yr$cx&Ph0e$AGZM3d+ zAPAOe0@Tr{FiNRjOc^AkPR zYrL$k-9olNDO3=bA#pB>D+G$4ar$9yR(D2xL|xqmrz4+QN`)$)?3Gwfq41*X=y+6C z!9hGorROSuH=FQo^}sz5{UqV1Pb$jyeWCm@VjY!0we}D$lRGRlY+n(kCd4XK^2IZTtFScr)gbl@#Db7Vzy zktH6;@i4EzafSFt1yFN$j%+bbXngh}oxz$h8ukMQx`erE%31Nm-vD(V8QR2Ol~H71Ga><>CZBjXoaT=~=q zsPxG+sRK?{2;Ru!W4TXw^H70?CZyqBE(qwQIJ5WT<0cv9dazICC+!DXTG@@cGY zBdbPH8lhr#wLt^Yo$dPB!Sf(Pb~k8f96%=3J!#qdYaAlBf8S+ z%fV84*e$@KH4%*UX<8vPc!FcCO3ZCRAs;|9EdJ^; zgjK}!O0HE2f6slcxvqdA$6f>J>W{k{oiP5Ywa!%g+4%jqOhaBC>eCFK*Kpg|W9&;C z5RE<{n`dYE0bAAi{+$0`TEN?ycIKhBre_D20y{13qJbdW3#lmb!84SnzbVYZqbcsI zjhElBe28YY$T^29+lbMA>PP>8xJ8oL>Q)o7tgGhp>2v`3Wlbi7SH)%##YQE)*M?S{ zTwMV(0Q;Rg;YS*)XR$*@v5}Zx>UBC^4)c$MaG&vy!e$IyG+oOgsrE60l9>H);ATgd z*KhbpklMZCnr&Q^&v1Hzn^c15)W=JYswQfwjF(3Fc<0jd9XCNl`Ix~{H;blgJys5a z?h9>=jRY?j<#nt2S9`lA4Z#;mNJ_Duh|aYt^CJDM*b*R&>%bqv z4@rKgiyk3W6Y9;=gG2M!9bU@peB(-e0m$U+M2Z(F-NDQJn3=BbcBmnE#zM%Wzx}h$ zDUC+cu}KPW)wNEC#sw=wnWa~oD^b<$p9hn}WEmPgaMJKM;hDFX**tv(}7ICzW`7Ca=FC_}WUem}% z)GmD5E9KmvNuJJ9&g%Q`^J~J5bY67f9C>>Wxxy4-)98Yl<2bFj2VD7WvV_XA)m$V` zXy4#{)Yl+^iX^(a^i}hiRVDtdCp$ZgsGMk?N=-lwJdC!-F-d3j+`25y;^=uQzh$Ih2`opjCSFUE9qV|rbh z32mV$*(JH4$6v1NJ?yGtclk11816@v77SnHhFo-?n>D+8YNT^_0?osl$BKliv2d*Q z2sDz|-L6${PUcOfGlPu9^k-6|3_@vDq*Ci=W*zupc`LK=IM+T;lD{<89V~&o^!#BL z7mBEl{*d+q(seb_Zt#Nb*AVekCH_bt$C8eUf*jci!g*?Jht7I+e#1#yqu|DKOFPeE z*rh|BDdVndk#5N`S5`qIq!uzZA3WPuFOAs>)pj0sl&9r7Ix`drnCouu(U{>42E_)S8=W3{`Be-PzXMb9h9!bv#;Yyj7OOOuRZ%^< zb6gbgS?7=B%OVg*X4BZTj>T_cuNC+H+stmOjl0obCGtF+G$20k@n}E~$Xrq)_xhBMBR&U-6Ak9CJ1sUhyJo zTl+jS9)X!gm{)OR6SLE<7v@#36jq=u?)@^;b3RXOjvSE5V<&FVd9&D5NbcF;Vfqyt zrJ<5MngAU0uNPbv0qtjji_E)lds(~N>mhflm)HNTL!T%3zZBaq9slX!F!LMLS$=)1 z|I0jnStnp5d0)|&6GWz@!!Vw_LzFB=1ew@4rP?M=_Kn z&?eziv7zE?jQ`JHMGNvh*b?5?i$jO#xQR;qTx(PWy@XWb6mF!*~70Y41135kPMCVVwP z<6+0X-0HjLqkt{u?nHzZiwPPD-i7wj#eZB#(z32T9$`J2n``%I&ghCQEWG8pd6k~$~!a80-EY< zGD#}I%wqZ2=)?Ee?VVf*5anIek;Hfk>UJpgIlVZJ(8m?Z<48M1FI$6>;|rCRe1lKF zk_13&pB8kwpJ-G5@Ey?Ila#i0ed!dWdqLVRo1yUs&BGWev_s=0=Pz-)zcVev5%2m- z(ec7&$`2Fw^oC#rSSo3%^&<<%@R)BwBRjHARf(_1g#C`9&p=7g80$no*b#?@gufN zjk+VwuDia@dVI&>!$_z*shgn2BVb~_G-x^8k<6@Vy zsF@!<0yFOXK+$z6efRO#d$BDlUc#$& z0371a?soaOO|d+yL%a8?^uk?YjC02WqDiQvkio~@2lM$~{f*n&7IhHjvqroNLC5uF z4ISt@bOd8>_jZh5WfWaCDk<(L%rrJgf1dImk>$^&%8gUZHPfJJpvdK3i{psley@?9 zSexn#Tw$#A`x%+}!X|F48gf25Q70o2aQw}6upNEMxlE$EIE8G*US?5UrSC$Atxi@Y zsqr{sziL%xwYdnW5QLayrbhBiK$~4ao_ni?jUb|j*uHy(b#Wc@reqIR;;+tfm`Td> zx_F#sq;hBNHg73?bBU;w*zi*OMVLt^ZJzq;QlA08jcfXc9kP&1KgP_^+!`w`Fx3i6 z#zI@A@mQCMu}0%2KJg#{*p_{LIpE~-L>jow8k1(z6W(?y6Q8DlMoDo;Ui9Iwz%b;9Det9aE=Eo0S`e{HtCEB+1paQfEVWPcHoEIeKGVXkK#+wGMQXwAk zMz^G2P7rZ)s&{CZBn;@p@3^??n;u4>V{Qp@UwsCq6Wl?{5_`eauyN-qV*GNwz@k=n z)$RtkJ}(!US%gV%3$)$7(8xHQu&+8tcM|32z?g0Lu(+lntk`8^V0IZxB@jt107PAd zRx*4|7UuO-23`Ogc6q-2;_ib|3FGEEtftybOi2w(4wEmDaD6vwBRgG2?1fln|1T)< zH(6e@^k%=%oc3yc&9cr?hil^~N57cDe3h9W7SMKFex=SBn4t`5h>Cu?v#1emL#pnd z>`DyK(0o#k9jYBM7l?)=L)A{8do^Vz3yezm_@)mX3{c(_2>o3*1hgg8ybD^_z+%%6 z)w*5>yU22dxQ(+nh8On+c<%NFnno>LP-E1>xWJ7{5H&B>KQyKZ=W3Q5cE}}Z`s7-_ zQOWdU$K6nXGA4@FJ+qwZ{nz*Ff`!{}RdlyQ-K2YMgr^eOqrPYv%b#ar$#fyfv0{Np4jG z4mT3hTy|d1nwR#gZpM_{SDP|KN-R5lN?5nfCgvYT@>jptkiUP!cAW;g8kB=r+$lC# z-E{+5_#t|BKZK<~raPab@2ga*p6d){1_>S~Byx;smX;pg=PqEdZ!bs<7e21BtOzfu z9hiG0hFYG>d`E#O4^}x$*oJul^W30K%-!OdU@ZTTjs$v;uKs;mYv?{|5-j5fd~P3h zDod_(KMWXwSZHNtYfj77@<+f!B3tkd`!p6hhM@2xKV+l4t?8eGqvNCW^R!s{9ICAn zZ&pB$AtsZJ&{U>fa1F|Gct|cEkvZ@%4vf_EmRjxD@Ey1kRfI;l3Il=KE>{V3_`71M z=;Bt(Qm_--fpp`)A^(XQOIkx8Cw#-PZ0YQ`Yv{_{Ln(Gl>MB68XtmOA9Xg*U zxMruOULPLDc|Mf325x53}b{*uM`5} zaa}L)AcOx(5u>+2f?jM{h|q0R;uim8e@UmJ4ixME4ZqH6Ujm8&As)>^4+JgZ2d-Ar z@gF!O@b_Wz@1FSSO|iNaKOOTxgG?P{MWZ3Vqo*>(Kke{?KX&IwQ$Nl}PK6q(Xecdt z74lKvL%Qn%Bi*6mSFwLa;^xyv>6z~}^P4BTi~)hGk2=@P@c*G%N+!Z&t?YY!lu*3w ztxx~9{RbpIsR@uFep;tW-vrj?G@Hk=@yq?;x*8tkbR(fCcekHbc-i4r_#l86B@10} zGe#g~<0dsuHO?^plYD*-4GUZ`n8&nE7 z@^9Npfda0#pZABVjZ70v5UhH4{-4Mrb@``Df=kz8fk|RYR4xpEKHD4_77Qs(xZMv3 zeH1JxpyudybZgn|I_m$<)?dzZJu(9gdI}=zPKfOV@t>-(1UMU>8O&C$VUf;y7;noZ zJ&+UBrRe-NEPpW?fe5wszG{SKb5*+^bDy+B_2}a zGOzi-k?J|ZXc+!Y!Y`xzIspb7K;mf=@t^6MBUQ}e^0;-`nx`6L@6D(l#t01e_gitG zfmT-|MdEF@bP6Y*T1h1WD#4WHzi$K}uW&%!>6XrJ2nPZ7u1|l>E>T_LL!K;eG(720 zCm~4#7%^L97^u>sf?AvewmdTMP<8(ogZu9@xCsD$INr~~|D932>-P`e)KMZOjw8~9 zt+7-ShmjyCXP>FVtZh#~X|(`u+P6pU${&VgocS|9ak81Ad-0pjuYP@D{>`>AhW zc?MP<;?wAj>&d!$J_F0NY4rfS1le4T=DYSw8RFmYg2DL@G^=OJeT%WNJqhK8#Iq$g zLii{CQPsr&+#7QvM6#^xIKr25?JeLDi5-XJ9m)_#e=-CXd%pQ<;W?A540b;Mq>Uj! z3rJ)GD_yf#gT|qqyH)JdTIIRzseeK8Ob8&mk2Mcq;;(GI&+$pBfauP4v{%(jMze`} z7@c0kq<`|1I!xU2GXN7Ic5{Lu-NY>LET5!4hup)zxWfSEV{Dgkrb41 zHfS?%^&84H^MB%bDT`E!&1tsIyne02=9{m5s{3PD2}d)3qz=`z5?|}@UTJr}Hx*^C z#$gWo+J5eUmZKdDNMp%9@!kHB_*6g(s^1#ew1<2K0GBZIz8aL#2;gCj4>-P*>%UPF z@M<>p3y0vwiM4w_2MsbRv6|oK$`oz`p(}RihpmU0FgJVv&bkUJ+OLSaH3#r9RgF}4 zA@Dx|Zld@97r^7MluqVF79)nv!Iga04-U2EXnvXz7jIYt@d+mvKyCUNu>*dNsMQ_+ zHQrXoly`YgVhLU0VsnS>oHpH0K$l8l>1z_XTgROzOwJxnD#qu<}Q|et}=0D~;&9)Om-{fqGG`QYLhT5O} zCE@uuevo~T)ap^RYgD2LYmz5I83;fiAiXR6rPEsRd)z4pU1Zw$VM6Hk+fO+$u{{mV zKH5_vfo7)A1+9qTli}O0Y=@0|jDcc*GZQ<110s}<3n$f>xu<#+S~XQn;=sT4&P`9l z4%yVR+Uh^4s0w8F(K;DYP7|7saeDClV$G_Z0w&Q|rOnHK!=?jlrTqcOjzK)!fVR@+ zbMs8H)^>i_{Yb&gJI_>;g?N>S7Z8&;XKmhZEp1ze^uw8-0Yvy?H&Y^C8Xfd}4 z3E<;6iT;^*Pqwi-jBE_1KNmlOWoo`!y<${ceAW@5w{DK4u0_Q--Mq3JHX^WM@}e~@ zN0idR!$yBcoEfp-xuMvt*jDxr#P%JKae#~Jl+5E44FtfEY1i2+YXS#!in>h@1H!YOD_W2-zmkj06_$EEkv&y!Zms291xPxtN*16`@kLnO!@`O2^3{x z)%5gy2U^c-`scb5OZNSVsRy7#fQdo=Ct+iZoIp+%YRpk8T6(gDn!qF&ds2Vx|Cc3K z0NGdcLJHjZTC~2A8e1zhdwaTZ$l%=SWU@Kq+UL7}r~3gRGV1sV3z8?Hyai3^BDvHC z6o?4TjY9@wu=+X2M5B@`PVB9{F7LJeyz4|?xpbN-;ti--? z8gEo`dVIkcbhE?*Hc%O9Bo?)FNED&nu%<8$>w=(RvcRhMxhzlrfYE`oO@1*oj;BB`SbO*|O8jJh(q=4YO+A?3o`Q|hu zLIut`PQ&1Y|`E1wsf6O3*8fpQ*gDz+9x%%Ich%^7?vB5J?eC2Aw=I(+Q&!Tlh@brVp z++zsZt3xb(db>f|*5#ANq_#6*@KW};$?R3eLP)Z`R{}+i^YD69r=x1V)bp;8AYA(G zQ!jzx(Lw@LE~xFEqgXclP$AYsW%yjP)8hBwL_xj=m1qNze{z0;qUDf)pF)tF7&Up| zzfTWp>&mg-erT-oUD6Spmf@wT)_4}~gqP5iVl(hg_{9R?T)0*@a748aJChtLz{cX* zT~jdjW|y~>uz#w5I^m&?yAz-&<&!_%*=_*Bih;1h89eaIZQuyW(F!0#2ApDI4~B@N zFH_5|@zVDsdM)}%>}_@_H7c{hEOQDP=IE#0=R%E2>{S#5(UJnihk$}xu3XrDf%(ex za+DS)^nFLasJQA>MV+t``Ur61lSplBMy zTB`&j4L!Fca5=O}1abEb-hn;4pPbztB9&+8(Al#guXxU9;ASv20|kQZwdyY?Fn(1m zNs<6o`vciojZ(~|Xt)%H4LPYs&#~zT8<)ioRD4E@)Z50)m9;&98ROuTW@_vtVS}dL zc>Ncdsm3ipY}6U$j_Ws1XEQFJbki!rwZ;RbqD~da=|uMe`B>3jJ$QXV3H91y)$?F# zYn-&h)X54r5k=&G5|yjTt|lGgQSvWsP0CLc{BGnXoH zxv08+3(qG~`}Mcl+4z`(9pI3&aGP>ix&73K(7cK{9@|ZE6oUo47%Gom9Q-JXi-;Dr zzh%^~zw4Bec;$NB?iimi)=LFtkRRtfx-I|YP~)wNJf2PN3EpwVNtQnvwp8=hc1%S; zU>Flsy;BY_cqG?lAZblM-e}0NfArX6v0-v7z7{N$aw)UKil}&jVan z+DkFN(%~w;9H7Fqu!=?Ed>JynVi2=VK%MQwpo~(MA16K$7c;ck%5};bDl;Cpf6B^al}{`LI0~$)&xER42rb*&#(Ws`T(_XClO8EB z)!!}Z%RqEpaBfs@m`#XGIfoeMei#JU!;E3Z-Amp=WuzU>YKdPeum7&2ll-aHqK%xC z#VD3(yk2@fjNg8iQ?mWScCEwzgF;>JIAJQ*rU5vjE#5hui(Fup#_trkRm^UPwaHX* z8Z}EBnhjUGc9@;yDFe9_vLxvE^HMEuW$S1h&v&`>|*r7jtPLKfN@Nl{N~KO zhoa7*=5+wM?4@Wn_ZETiT;$&Q0ponzFVl)2YP)k|SiN>;mv`(1>L@|^R>A%5ZMjZ; zdi(&_`9i9I$J!-!e+N5lo~%w=)dALw9Iu*W16ws1KHiWWf+XQ zxL7vcmioip)2X!osXvXTQ-?B9r!P^K7$E_u45i)E90g_`OTK8$jVIMwiloUF3w;#U zV*o(1)5)~*HJ04@>W9==JkODf-!dlFSoT1zW35mm^ZiGdBSVa-o$x@7*(6*{L z>WS0!Zv@H-)er^B)42E1Q`p=~x? zY|)WOwM@idK88%hYt1lhlmXdQcJG!7Faq}rdt9tOSpSVAILNth_C};grM+(Oy$8h)U#UMYJV51gZSI7 zwx7cU5$D$=WLboWKrZZG%#M|gQhBP!YMJ~9@L6BvRd4I;h{d*Uj<}N;1gbVw<(ud= z^)5Q?t|>3q&{$~+`ILU{yghiXxt9_XZX_0x#Rm8gz^noerC0v`&THk#TJ+9Gi&-fE z3D`IK3t3R22xJA(rX0j*=yH7udSt0$V6DL-w_IUut|uG1>j!%zsEeV!XVDnSE?)jz z25^cQj?4!L(V8s10tvn7|MEHEQ01F&slDB(B;DhCf*r?Cx;YB!vVEDF&JOEy7KIL6gyDvQ5kc%8LRM9=R4(DwVa;ckNbz| z2Af&kIX#a|NXj51zCY$+h>gE@RCx#1fu0ri+c>t0X z{28W``gXLxH}wBd_U7SGcmMymq>!=}l|4yi8^zd{QWT+5WG6Lbmyn&ZB+FEivA0Rt z8Eck7wyYt`*!O+x`;3|I8G7G$-M^3N`u)?@)z$0Fc|G^%g_z?hYc4et|YPe%bqSCJdJv!^KE>fF_{Lyl<{fcQf zyg*EQFNIL!YqckZ9{a@Y@BxLrU_>cvo57`q)j_6CYf&N}JhF1R9_@Sv(ZK6Y;3ka|y-Y?ZUO@H~X z=luSQZ*{=NU{}pLK(E1G$OA&Zn-eIR4NwS*rTQr zA42fkXQYH@dG=9x6W7brU;rQRQDt|3JE`>lDV!L70V5&#Xx?lMAXiS@FU<7_`yuug z@l&!3DwiSB+w*e-OexedEQ0@d7YV3^>&Ho;)3CR#_bHGD)iwa7l#PIY6r_VG2S*v;Nwqx?m6SR6*o7Iaa?Tdnb84vh5uSQcMU0y`bMV#H_+}%6&kgEYs z4$7bp>6Y#MkRGcqVr_r?P&8%6#*vogDp1tbDK$45g+Wslr9mLZWs{7EYJd)czSJWw zHCkrHkq;w#eW6{J8qw%!&66Mjn|-(7T}ihhCyvnO{l zXRSEewI0Dp=*3%siD(oGjl$f(8^5mV$VuhOAOz{GT9338fYMl7zy34&04=F$Mm5H5 z@}u`^I&`*bBiEyLf&zK}sKMzsz|lR>AThv}hDYO-8s1o*CNWk2=zlT2)%pDeOW+yp z_oSSQz+A-Fn=S~w>Dn&@KtKJI8YM~PeFg90nAz5G1)>+Pm#XzZ7QOf&STh#9M-Jj^ zzE-zx>#;^acE3%)2@l*BQ!AVKfx&8Blk5Qu zn^L=a(g7n)iD-Ce>eSb4K(mo#E)|M_lw3!r%=r+hdsD+tf!!^FNi{I&v8h7 z1k!r@JyMkEXJx>;z;$D-f!N9&-|OhQ<+|4;3aVEpH38z7^8!c}9XCkeFcV2279~H4 zB?dnVt=QYGhpR{TSVmPzaO&uJ2pOE%m-j$rBs&C@C(Due#3BRx>m*yv*quNV=a$o4 z_)o?-ek+C6s8zITgGhM>WCAGkyG9OT>JCKj2dNt2HO&q|^Ty~>fhrJ`(1PPX!{g!&USunW$7|2cJo-PbH9S#m)l?(Ds{HD<1HAO z;~i!DZ@l|AXe*cj)I9dc^hJ*{E0%ew&TRJ)OF%HNKM+D-p1u0>+|HlrVg+R2=^M#N z5a`wO)kW(7oE!Q^E#h?ieo2fov{TLckE2Ha4dE6bD_MRKoj@4pUdXuLdjKTg!=#{| zV4%uC_(e!J+l(l$z3c~r5QKH(lKqiggJ9{a)7v;ROFfiQIwXo6j6Ej`Yq8gZ>7tA@ zPbm+RLnJ~Mv)gu47_0r4CGiJ~x6l^Qm!(CWK;uz9Np%#>EdQG*xMdJ@pA2qHV!OULnp zPw5cro&Vez`k!S!YXe74*JsB7M}8|-5NMqHeLzV0UnBg_f4_5an+5s-s=~V+E@#TD zV(p?xuqD}Nc1Y9yOT4J9!QsQYIdMy&Hcx7Sx)}9|FL`?1iiT4RaEj65St`CNg&q z4>{C3`m%Hz8sry1VBnJdP91S07pgnp{(*v~-(B44b21z3QwAgcqlw>w;M-Urr`YR$ zYx|H#?zZ1aGJr@+iZPu^gnhgD&N>ACNpyF|Rfq>!nl(lO2(HuM;B6=(#2?Wq1(O~Y zN^#sBVmcT^w3$d|qRmuZZ(cJ^Tw_-|+v8SOH6kH(r9aOi*RP7iJYOjzkTtLIRK5Tr zRWWBXYH~m?0tZ*ia(%KGNx}a`=dz+R%IlAN-kZ{7I*NKMC#7{Kzpm$mQFMEq7>npj$ z++<}hMS{Bp^-fkhwmPzrC_xW?B8t%kglnQ0(PU8C>q!0wNJW7h3uuz~MvWDs&sntl z{(?+eyj$z#UGpNDj}MX9ln4RNomqG^$qL+AmlURn{C>qH8P@GrU_YL4v3I@1h{OVT zW^++QY;-hD04+PMWqS&BVth$1V*!hy#9@GZ6%Ni1ej5z2y~ENdg;g`bbhV!SV&Dia z-EDghF0=7ch0^M`cD#T20pbX3;Jac4W{3vuEndQrQ;77}5!AQAQyf~nC}x{u9bg*%jKHHB2 z>@>4;{e&;=zKKBzx8%Y zve6QGyE!jrx!rgHHiRs_5H`Vx3LGrC-Bka|IM?jcT^+ht24c}H^|0=blr@%|{Z0G7 zGU-(f>3NbUn)hEZ9(aO&+C<>$ah+xl(tOlAx25%XT2FX3a+0xrwf>gC^o1;by}A@n zHM;AE>tTC)i@GLtLrJW-sYzT>!(+hQlXeTD z*G+S#M{58sC(e|X|5#Mt{)Q(6A3=vRdc24(cq4Tg^TFR}qv^vSKODt8-*2~ZY_Lc? z<-r$Hx#+E{k2rokvW-RXbEh04)@VZZ=5>=}YjgNc*+Fop+<{?MabHY#x?9e~xy^C8 zN;)j3)FTP^k;cMrG{S5LFIcHa(lXzR*LulDq0(r?jV;^f;zZ2d3~wNXmnJz6I{ zrjGb;Lb_TcHuGFszv>6fys5tN;k|Y62P|3+)1~ZzBT)jUyj3W4w+qCAM0KG5j5wHp z#rO2+uTR7iZg-*xZiB%c@eFeFKJo`GJL0r1j9RF!S6Su66>AByW+^pw%tmLT<%Xw~ zzg4`+G;|YdB5}g*M}k5>II0Neq>Ep#==YtJB`~SWZH(A0HFSRiPCfN1A#N75{rYZS zra}$ezAcw%=baK_&9W)9)L1-lAA6FW?RSjvKgHd$A93+DPkhkeU;w8oiyQy95!IKEgrBu}B$EWDPR8tdNOmnXAi$k!wRZG1)dr7Pr$1s65UL7do z^fil*rDlc3aH>m9eq<_jn!`U_@1QIxdA!xDZOD;Uw5Y9a-f9T~S}~MoMMi@y`;Ebd z)J~Zf7Qj${>M-r@Mdn_f>v|ArkV!(>-3s)^<8?Vo&I=^xKo`+k$Jm@ip#eBruoj$hg_Hqiqjx;s(^vb^H&75C#mZRg@Sgg55~|_^I4rF?`Sd-IFr? zXN|WW5LQ72_A`&Yd1sxO>gCpYtny7wDrdmG^=#leD~w6EtzoQ;f-BAN$lbCF-NsIV z;~FYXZt`aiKLM5hV8&(RZUeVhOQ#X+^4Oo1bFFVSl&o8t|GZ4(52IDs0ySX(aJ5ZN zS!Z7!o}~{=ZV5>&^igO{AALrX3moo zYP-Wj;uqt#YrsR>&p!Tq`eO+GvKJ@<`HBLKS1A}-b~j5|0P0%XJ*Pk(x=Z)tdYI;U z!9AGu&1OjEeJk(2AIX0R>JO{gZv@bd0X7;KYXwKfyWHL!J{LeLTpavQ4ESwP15WQX=S`S_Te-ea$h?M?nSgv|?ALAqCvmV?;;Y4i< zXW)9td1>#ksqo!o>GN(|e{Dn^P}-gDk7O^sx}vh1x=Vi$39lFnV$-)*;`LlT?Z8J@ z-rY28Yz`2VL+ao>&V`vhY_-lXO1qoIjDNoBzugi~0DHi?QfEv3JsUJxO)2XuI7mGyYgw_P=g>FoTDv3Sg+^>yjVpl8&_9 zv5CKb=&i_~KlJYg6^vi(lL8Zt;BaJc`+OlajCDU}=^y|yb}TPtIa(dS6O)NX`daZV zm0~2q#T_unUwBe&MVtcJ7ENnfx$F%4u!dTD6OwR@0-5kdboWPZfxCoLUnBDYVA)yS z`;ffX-3&Tg7z4@c(R{G$+tYJhKgopPCVP!KgN$fHM)kC>P|&7i%YO=;BS-%at}3b(;yNhei*GSt6OX!-bBp=eRe zMjvK5G=a?m4jyDs>tA5+f!fW?fXns*WVUcgPs_G!CM#c5i2fPs2&?!RBssKK8Nyn( zv3uz+%JYL*7(IUR@=1U_s*~?BA41JUVn&6lSkV5K`O>HJEcjIZ6!iDA-Q`bgcE=0+ zZ|u0BbTC4!7~K4Es*_tF`Vg!;)w>}VUQ?Tr#ePvYorDKp23RAFr6)KNS`w5TOZ_qL zROs2m9%j%j5At(L!s@KU4WFHvJa? z!(AsvH^Kp;GY1%do>{H9+#lKCX#^c&sHZI>oHTs-M=<;?CQKaLb|-G4J5l-9?nhp} zU}*?pWjJE(B}3u6`*U1B5MZ++Q}o->=KFOtCV1&ddy#dpY214Zd>BfqG58ATMJL-k zUjf-K8t%`#FRRR|j~E(XAxPk?7KhEj^)kZ2ayREG0Gjd<@Mvca282`H%R3+X9H!kM zloMkAGuIqDJW=xdH#2^FRiZ=t1StX18rf$C&zglTtbE!`q@+s{X^ z#)FV3_&%iAUOM}YnI!aMUMm2kk#~PrhUq;)OH{Dml}lIOH8*_*bLD!{3zmDSU`9y0 zTd4|tN$3QvYFPIHSaK{)+kf8qx3J~{yi+DTDvcUu`Q*0K4X#2dXdPqvdw^^$tXR42 zP7drLZ==S^laY}T2vgcE3jh1$=@~oxJ64x;0K>~Yeoefd;OiB$+!AG z-6F;78eSLgcP%!pPTJl3d)jO^wKn=Rx7=6UZBujR+~Vujx^H?|&gJy2l+10W=B%N| zWpbJ%9WdsWom$-UfdT0~hsSA*T)Nd9Zz&iH__+0NytL?8a_(=BL19*?95*l+(T8b@m3EJGX9_`o8nV(Ec~4!LhR^_kjg+ybsBV` z>9Q`15fb9ouQzMe*lX|99AiiOfA8#{f2Dm=dc`AK4RVYH`i|$1Pw7&7Xr0Wr4 zA1psL>ou?_a-gJf{pdbvQZEXYGiD&CLIxQNN?%3@I=pl8AlE^J9Hckvh^IVpk))LD zT&cL~(S>jrC40+f9y=}T`zu_H8U2pF ze`ojzX{EHPk(oGEZYYCF9s16d>2x4cMy#8a)Bi6o_?BglFWL|>CUOxG8^B{Y#`mI9 z#}~bEY?$Hy$ItC&$Y0J(ZrGyJmz(|8ag6hWF(t{Tfx<5Wg%>*=9>1n4(AB+hRLWQ9 zY1wQeLq0(^B3hNsP_}Wp*VY5GAon)~k&!ns9Q^`!qCwKfg^-ocSe>@dCi4-fUv#XZ zQK;hG)rhxco%jYk@mv!QDLi3y@zAN#j$?F6N|gVn(f?ywTtA}8%B^f~wH{)CC7!SN zmPIk^F|HiCccee^(BNthyBWM2?X$b{?5s1PL1vOGpZxs|6*7!r$k>EDC;fbh-NMVU z@=E?gMz%%s6ayS2CFt{%*Beqe^z%f08ddc54^$)R=V`fD%PG@EBRGm&XXdtrhkqq4 z^RDoufx99}Q_X+^6F3sc^J}SlCb=UcxZXUm@wt-XpQ4rqBaEy@@iP2cF&9WL!wX!6 z|H+O4DlydvYNoWkl=|zDDK~*Egkx=?%7=VLXpg8{=aR87;8pWV&p9dOxf|RWWYpIDOVAy65GA zhQBzG@i`#D?^IZ61q7z)KploLe=U|%Qe8o#de!9>RLalT4WHHxeUcJXuvR?Uc)>`O zP4e`r{kgxzMIYI1C0B57YbK29im#(d_VP*03ocV%bZ)vb2P3NqHwD1xo>-GS@-#RfJ7VH?R_r@c)ZDSu!Xf41$(#um(E4&mO;AwL`G=M#;1zQ|qVFP~|=#u-RDsX~~5Nk%HH+CPPK2QWedl(@WN?IR%4 zKk+{vi*S(EFl`voUY5YdL0H5b^byE*+G?Ef3xbQX2pLK?5`G1Tt#t%D>4ms5;M=Am zwxgItVwkeQ#)1SB8zuFMbd)qc1~B(ohGwrZkd8wk0|-L@se}HM=M&4l>yfAQ7y%Q=S6%}2rg?tH|q=< z)74q?x1|1vVd5}Me)%1p_?%0Vyk(s0;&-N!F?v|yIzBLO>z>2P)UdJXi*K=SY)|=DxHQZP26 zdUOCe=S2vJ>VMpC%MKrJR+i5}$15V(r?Fi*mda12y;}99T_3Jt0q`D`zMgF48PhK` z$5lVOIP;?WIE9Kv#n%a7@Kplzdr2o=YkKgK4mag-iniUi8o292SGEe8WSeo|D@vvZLG7Ie6-4N3vi1tfk0WpVDX;U*o#8cP_pq20`s(DwyPA`cu9c0kz`25ea~;>cA)^L0 zrNeIGWBWI}qP*toP%SYYljt#i)S~{9G-` zYh}3nUa#s?))jeQ^yQ49M;g>4>4Kq&cZKzfZn7OJw_>0PCvAH(3_iuc>gRpDdW*1L z*lN~#aJ20~$w02QsIX^qP?U!IqRP_GFb$h2LQg!lXiOQO%xPW%oISJVk<7-3rW%el zOLi5VEFOtkzHermWSN{l`i2F%06#>IRUas?R92_TARBD)Muen|Hlr>1 zDDYjPuPzOivpvs{+tHlq?4s^|&t&Yp~@JTIk%rFL?IH zu6eGXZ&2p+e7~_^>vg|hsFQS=YNtX~w49KaCe`xvFf86>EOM;ca~Wm*-hDXF%VDXZ zAIF(?Acr})ACda<9X^|}26cX#ZBd&Q1!|F%F+=R;*AWp9w|%8le|gj8lXfr4t%A1_ zJ*T|PxJrA=CI0Ko`e-VrBMIt$Ir4dH*6dG*4{`{ z60~ND=WsPuO^A}{P#ozzNk)F2LE?_dD#q4Jd1+F^5B$YBKH_b zM;*qL)VCuboBMclNsh}m){=J*ZekDBjj!&{?jMoXKR5v{LrDjbrlQ;Pp~LzcxBeb? zOh<{jns@{cF5a%Ctv{Fnc4&Vg{o_#Y3QbDJixyO=cu{LL#GxC|T!H$p6Ra8oea=m> ztkQVWbq<-&Rf@06Wh!oYkm#(l&lA5Z1;pc*&7w5eJYwe~JkUlqz~{%meAQiD*qZnd zZ-Z|#A~cLO5EfXY!RKlcCZ>Dm@YM~J_%@UrDLJ z;JUsUi+%;sbDI+LR9*$;Y5&QhskQr+-u;nBsQ(gnb1%HTjR3jVEE>4i*d;^x)el}~ z4!9~?!q^xqYGFFG+i?ELcs#ukC;o$msQgyzoJE>JVLNW6!$WPUx8I{F(|RU&Wh>2j z>*5k6iV)DiQnJ`@*@qhwho-wU#5M#OJ~DXgG4&-e$_w8_xW5F}#DCzkP8aK+=PW9b z^ukN4SM>fb$7)lrho#nv#D#`5#3QVkV8i@y8# z(huXSWNj@<@Y8crxgPyk&*Y6n!bk_S+N`Ost7zD)&$EYF+SG$k=(R==LoEjpESEqm zavGkiZw|{(-yG703O^pCiGq9K2FJe3yFJt-@ER>njRg?u2t_V~H@4EOsE~!{m_{;H zDOAd>o~0C&tU6X~o*fY=8soFKET0w1}AFgDiJwntLmkZu*bsE3Lq%j`{ zMgTye@}`u@X`K2V!)*U18+zBwrMe1!q>aDO&Z;`hELK%6W_!ctw(hee&QzRNLR-6z(kUQi5xRSsZWQ`gR``qzGD1?2^ONN%dE>4j5?;NTQ`G>ZF#X<4M=IdiZEkle z<2#*-A;VGrdf1>=xuqInCDCG>?6sdxW&AgBInvd#gaCPO`Ap7-J$WAIA)xBED&`{g z{M;|7uH199*K|C$lKZ)?{BvAx!`4qPb(8kWdmBv~U*bsT9G|@>c6u?oXU(|*PauVu zV_bM4rWc5*A(-c4PO4zlsX;NCT)A1CV@)6Qp8Pnohb)S_B*TwnBpjFrC|#OF_0IeD zG5lcqEEEG1RrzVinj}NR7kCz9mD^(I=9&tikssgT7#Oh`6t65-pO zqSJP#IAu(=9+t@nQ#InzXSKVESV_N>wlbZxR&*~2!q51cbU}6x^o@}j`XU=0o1`il?(mQ>>Nua3I32m+w)2# zg$i7RHn8kq@{NoW43iPMGre7QU1 zI@cTahC}qS92Ug#3eBTEoY;BlofzJ;lk#6OpFdYIC)c^PQl?@o7?>bbKjyUJVC1oe z6kZt))Oq=q^h$r>%vT)>eA}_y9e|Tk`R(#+3+9UesoPFiJRUB;$HCl_FI@Y{ zZe>mGrQL=oNJlrj?cm0tf#yFkij$C2?|szlEN9=}))t$<<` z_>HOS&-JoC(2`c2EblM0>2O%{6u$8cSpB5_ zdW(0>T zZhcbUS4Zm@=^dJatP}bUnnhPWbH(n_(^4{W?zAc`*C^=JaB^r)0ffIyOnxEB6+PDf zB_z=Xv~(vTK9q25u`>wFKF2U19bRG_6dkIGHwqm|S2)$tl)ZYLeN6%>H1F5mTIG;!Kj+j{yiO-pLxp15v2P8yXV`L2>O?Nbg6m>admv2+R$hf{Z#6-DL5qhyZv8$9q>q2 zA@$@dAcH7W@=_?#zsN{RHs_yz^fPD`YBL-*x(QR(CF7mZ;8A@0IxgygaqyjG6X+zt@^ ziv;bqE%xRwi)E)6I-|YyCgs}JckQ+>DS@fYeaeXN)2yqEPfglojDZgA0JHsZ2A%l&U=%)Ung{!+@xa{UT)#xvMgC(36^>JPib z?I&Mot0ba?cRzy_hYy=TgR%yhOtkKu;r>5h;eV>SKb2ri0SMZ9r>#>~%#ZFHEd@ZN z?>?L$CzTO89f<6OI@dOPvCrovg$n9?G%8A>uu-E@1tGX`vtKAa>n~8>0A$-%;lFHE ziLpilwd(RU=VX0VYZfR7zQA7kq37)Z!k4_7&}sbu0{P65{q(*~{f^zIO`zl9Ua~5#8W9e){qp{cu&0M@#6?eV=#ME7DX4vqWIaVe%dN|;; z`{cW7;?Tl*@xOR&K&0;u3gWRwTQnU+6Kka1Fe)-PI z9BWemb3qj-&dN$j0_~-Z+cmJoKTC3fhNKnh%&27v+bG^f361jJ-awr?}oZ3Ylmb=?O51?KfhA-S1|3;eMiimDiu0RCqC# zCVKBk=uU7kCf`Gi^tgNz-EmPxH2m3dC!zW&>fX0G7=GaML=gf2&-~Tf;a&e#(fL%H) z*N_laje;sCkf_BI@Do>c*;Tz$+X7-~N~e^mNgF!(4rwz(UJ$1hL4{0o96Pr5fa?bc zd!=VNxa+I50ZH-r^6ltzUV@pM_f6XU41Li|RFWjo>KV4}l7ixwC~GP7Pph{Z{1ErmsSN>BorOG%e3S^hEH0NTX3 zn)Z0lw$>~YZ-^T?6G@uUr}zX>A&+)AX8JpkrAJMm6rBRKN?l?m?bFB_)~zf%R!1w+ zL1$+F_MZ0qvnQ4E6q+w8d|q1_Qx0GFtb6;mHo~%}3bP@sckOVkP9UV^sFK!#0wq|I zR>1+vQ{Q-%$%wvFket^9T9PCY6d{zI8^U#|o&Ewb50_tiVto}MSSj7?GISxdAe&xB z#NaDQ!*O~}%)hOmLLuf~fZ4#VAYT1G9KKQASFhh=wJO| zwBn8PufQI+Yg!Ka)6_`D8?sxk-cDf*u!}lam3tm-_7U+S7|B!RGe@550MHYO9N zvo|(Lh_Up+w%u6mAPc~506ok5c1$!Ywkv=NIX?FlbBf`P=klf0(DAdXE*(`8+ShO- z>kisLqE?WB|7s;aCGZVe{eeHkZ8j8ny9^BG`C|ZjT{QLB>x@b6@<&_i;Og%ge}O7Q zfEAe@t^-V;P!ocRzLVKUAiuq2OuYL{|N4O;-Ds;@{#>r0u9K3v(LNLhId1O_ktRQG zK(c>PA~R_>7AOWxvZx78(b$b5>vPws7i0YvAt@`}840;KU#nGcFWwWmJwEe!IP&4Rn&3YaHVZlBmC}73(6VzuG5AN;Us8#HyEv3!F|0fDcxr_> zc>x9|#y>T^ke3u*Nv*soO>%;2V1Q}|(Rj*}Q@n?}-UcrFk~?F+kEAae7hc0C*P92w z{|vP0xJmuB)*Pdtbllg?R+6)O5*T9o(%^Xwk#G@;LL7$3eA z?LuBZ%kXon#(9+mnrV>#cr^dZ$v3rTN5i||(kxE;0*wud9;0|R67E1op-w{#fo8{l zg+OMiD>9B3a$ft>#3m$Kj#lsM78t9wrTUQBODG*xJLdHe*OPa6Dc`}Qda0oS09I7h zBtrvn!024WWeWxvOWAWxs+og(sFMutCeXy{TnZ`keQ~^dqtLeh^+Gk4O`8Mb`#8Eg zYd)#yr*K|!i5ASH1M{_NDwlLNRX@MIea?TkPqxBmp!y*Hg)SrPgDigK2#m+y>vkqh zy4Z~ze&wdZF&-`w*c=y*v3-^#n$Av^9mx1y%!6Z%-m84^?pYhH z@>^C8b%J@K@oO%^g|m+o2zQyAR=j4wfFhHIOmmE^o8{80h@uhy$!mC9iEC~K_y#uM z&lxm6dr#7zm`DiVu$?U&Z)|M7dN16&3q4rFMf1_aSEuqjzS!2Lu39oJ4}Jv3zze3P z9I57c8TyVPNn!2MN^ey;Q?i|DkmrM!1_R{L?(5%B-9>~vxlX`6#9KBNuhvCz;>zY? zXEq|u;vNVYd1h>sQhF9{h%4jc{}|c`^AH{c(xgsW_br{Jr!s*;2_S+wF-j*=A%&jT z#t&68WpaBO5=#@lTJCZywVp!(inzz1Pw9w{;#eHXS~4Y-%~s*1@5^Vtc`XNyjpO@- zp>U7YOs0EaeB-x?4k_0I9pb(^iDhyWTp6c)-j!R8w2x2fhH!&Qhz41p=23KzVWs7% zHavOrc{zY0I8FV~^sfSfwG1AF*`Mndk@a zA(8K=D437H96IB%gxq zNrylvk42Sr6=P_f2^XZ$9Y2x8>@-H@EqjtP~=}Ze6D=vU4y{z?wR~L zpEpF$?7SNdN<>=vCfh~hL@M8BXyw--AHP9!OIBHj!LKY2b^7V#sLrR#woPprAVS=S zjF_LV+)Km-yg{zBlE(w0J@!ar}e>&p-#veOkM2LHkzr&EN=&Ku~_eyJ1NtP`2f4T_T%o0=JlY#SXD?VAO6bQw)l z9nld%@MbM0nDcE!ut*$gdUI}ZEK^Og5$EiP)2zBeg8QS?_$Ly);y-{qzmZP-qsfND zZDwICv&oPUQqz+=vLKjOluEeO41H&EVh|p} zcg^7+ELozK(8IHmETfOD?6QZUk|lXA@z%2h7D8RM#)yoz(4e@jJfYYqCYjXWr_Yg{ zQId(W`1I?7 z*L0+4{!R}f7Szaq64}mj1K!jXbMeIKg%8T7eIPXOjXYCX@9rd2SKckAq4+{F#(3l#j$hD!9Wo5y zlGRsalhA~tLh4DVj>|ncQ0kw&Wsg;86YK*})*%kHcP2;yJl}}t4$PomDa&WfA3Xvj zPjKlgjWCtNw@~TU%9ml^QX{6P=?{8Lx>*4Uw)IF*ULbt{~;w zE892%+fH#oMxn$+Wg?cljzPC{P@BCJp+T7X>TJ}M&v&6vQ>L2gtVfv$BlZQ-zY@(Z zFrLph-ffcZI4+WzY;Tv>>>Pe;Y|V3yF3KT&D{G9>Yu+tgI(3<_oG9PwKEwnUHCj#Z zDsbsrir3Z{D8O%zuXO%1zB1_VQAx-wYr$0eMD5=$2li0S2>8(RsXj^JOG#AuAVh=c z`y`k3fa&xwH5v>}f3Ewaua~`yd%YCstHklT!lIz7J*@1x2C2GyFKF;s00HRZQiB6i zi4B?>vZ>$HHo(khT>~c$&j~fck9s9+ln`d*Wo_K>FXuOL{^+EZ~OG}`xNjv5K z?_h|%-i!=!f~!?S-r@Z$7-E{U3hiIVfI*vKanD}+ZZJ-Y8*^#0WN?BDM)Y4m7{rM% zfE?*amA@Xf@0zV9G|PTa)7m;=GY4IdezO!k9oWmrG#WyhV5N;Fj#MPdtxS%g+^|i{ z@7Hb{ZD~iNHpB^GrrqFU#d%R)n;M?h4(`M;S82~NOtp39(mOhj{Ecw0b-4P^Hrjut zMv}zRb-~!-S(_(dVvB~+?p<@GCgH3xop7{(`&Ttz2D~?YL#LWCQn}*{xjik))bQfk;;mI5hz0O>3aq z06%3){ko!RUhEd40sIF1l{f2(OSB$BaJ0nQ1o}`4uy-yKY;(szhL??owCpnMP7 zvtO88q+iv2W!3Wr*X}12Kw}{Gt1%!W4`Qq-e}Ab(f9~o%1}&q(WH~7<&bDcZi06^h z;3MANXE_}z>`=!|IrJtcji`|?X@*i@_vGskw!Y}C80oBCTI|voim!fXOXb1FY~PRn zB7|h*Zface250xQUapZTje_tSExz* zMsIC#(OYt*&u!Jj2<%p1_?+;0xoMsU(F;}@Jqj{{>E~v^ad%!OXVeUjzQo7E-S+7m z4U+M=;^`2tg#Me3d(O`gDM{y$I62w0i2|kq;yD>64anF+P58r~3qQ^U)h$L02gYNY zpv|A~HhIf(8gA-*!4Ty6u6PE;JIoPfn;PEYB`1-cd2ZKZ1aCptm)i(iW%fm*Uk=d&Aus|s-~Y~0BRY2? zaeBV?qLmy64`h=mo8j|vcS<$&EvJ*vuNl!>Ny*)x)f0j$ZfGBe-~9roXw)s9d0!9h z(|SINalr5gKxrK+rS8d7Ap>LzgLeb3w?MX8H4{Sv9`W2oHsB1>7mb4<(VCn*La;rz zFPg3PI?c$vR%F&`L!G)ZwooUhvJvb-9i7P;z1Xl-+~imnjZHEfJ$gge1N)}&e#m2w zr7k|{21jc_OzWN8mzMMVTc?(x54lh6QZ9W0%s{M)or0GydQS!f_?ARi76ljn%z+`dJrZ#Jx)+hjdtJ!&&VJ2D|uWO?aZ!XvM zXq(mzB~g5m8?Ny{LXwYixHZygsK&fTRi|Jmc|0|99=N9k%qQDp^yJ5NCs-Y9q%-xx zx_eAZIe}PwJoxOW()qA*lp}vO$85|*m|+~ii$w4mY=>{4+~6wWkVvKV!;7XB%+z9w zx0%%249|8yJd2K4tOs4JXhUXCL|%9m;KFeC=3bRsam44pzMCWnkok^43DtN)z2)q% z;()M|Vta27+X5xP_6M?=Bz!x+_R|Ry`R~nhpIo>8^~J_Sc4k@0rrKksTde^!BWj9b6Vd#zPQ)CO-etk z+a@?hfati|fBOjD*p<5Ue&4@ssPV6`A}u2PwI9HRZcq2Q zfw~*dRKw{$!XU_7Kb9{v@8t2glegY~{-|DuZGj4;1;Im^9_Fo+q#2)^oADjP{qXC0 zG^$Fn%JUaxrpc`oK4=!&zdsbm@N>vH!ggLiOnH9I!Sv_d&bxf$Z#p)aTTh4*HL0oW zO)z(y`sAKY8Pm^~;xd=hF>vL#!->YXiN){GF9Mp(r;<85FoyBeQUH&N3Py_KCM;e^ z&N0wOX!Z47f3m|vpJTbw}RQW`@;tyh^Go56uJGtAQ6(6G!51?9uWfX9-h0%{Qu# zTTVWG%j<`hQ4g_BIy(NuE?5K?Ue&&uV^_;Nei~MItIFNNk~PfA|4pNfAcyV;7YuEK zNAzk5ZO2eH{}!9Ddu99T>ty%BE6k4EtNAF2MoBY! z{ya;8Sw43&6Dan3!DF}8grj&9P3DcCW1c7A+|llz;Xx+W)>+zX{Lqr_46nv2gb9rkz| z5f-_*C*Et~)xICkZZFMkm8-uu8R0|;vU0YJ#vs$*nTAMY27X`pK>v#K%k%F!Y*{Zp z`ZjMz>t85xaugX${K-a2C60V9bR9R!leg4H$GIyMxHN5d1j0K;&mHmADSi!i6rHp1R8&CSvGbNTD zV%n;~iOddZo9>1+b8?}V3+6PB;Y^1Wn?0g(9;Vr=!4JJGt`F$)qdU&r|&@+MC#z^dy{;yH|)a`3UFBYg?>!dU+{ycw^$}b>8I6Q}}rHR#G(B|;3#~mV%^Y1d`6YukC_^WM$aNa>^vQ$*KC!+ z+InMTVbY(sBgizA->vDLC8|zYuVhvldnUvHYPz_5M{~_XXwLJv~S4m882ODZEYzgK$Wp5pSHmPd*Z9jiQ{HP4a ziYsFu?R~+?o*%M5Wj@Yl(NL(Y8NBRxm(pusTy&3CN<1PaT{5j-^<3%NEWhdXSR3XG^p?;CmsuKxT1uY0QEd{?HA7JUD`-rBV6*b z^C!zd3#Bmk${Ef7$JCq0 zC7reXegA|?k#Q!%A{jz zCWZ!z3sx?;0l6e9C@Bgl?kLJ4e(C=G?&tOV$6vfYuXC>JI_J9H%W+}g{`?(81{H%T zijj7Mtec?-hV;DF4UJ=fdi{2PR3+ujOmyK`bYK>wv^@1AIO95(MCT0Rbd>`@_OX}j zN#o8VT-Au8862^Dr@BApSmV(in*ucK)A7%@CpHYrQmTGCQOKMBQ*!G|yf}-4+?~@WoCg23wEPT)F=(muGDO&Yg>UnwYm?+L zPLoZDh_r=ZRt>M6!9-d{ifvzRV2RJUy|EET@G0L{VEWPsXuUZfe90Q+C8N`Qfm$17 zhzY=J4Pmn@voVGad0x%LxJ~dE1YtWg?TN=+^1csr2XV7j@2w*eUw^mM8@+cx**=gY zEd*Hn+?wUq?BanNvtic*$s8P1zca%gnlF_IbxBPhT*b+Y&ts33<2c9Isb4y-b$GSN zRtn0Y5|}Y}HEVS9%No5}|7z_yixYM zYNY>uACp^icG%CiOrs9|*7_GsnLld0*Gi@y^8d$8Eq^&V3` zM#v`BZBgpI-jp0t)^&xhs!i&rns-tqfBMu~q|bn_9JI^F1W?sf?2MJTJV887X%3{p zdT$;6cc4JqUoovmZ!ds@8d2+%o?-FL7ZNE%+{3=Mgo} z7d9=EzSlx5YGFW>kg2*1@Y1B#v$ilj|A6#@rNMK;EoNi((hoeBdeN3Y9=Z>sNjib; zA|zaxssU(s{I~UpP5ynN@8olOBx<|{9MEeeGwfY3&ZlX=gwy0ZNKdqJ6@Ra|+-#b> z=g*&@y18)(r|H(n&*eQyveE>Whk=8CP5#2j*@oRm;sVXnB*5-Y$sKO_ ztCf@0`to#enf9p75`;coK&4-629z-idRvtl#6KQynls6i-Yk^;{r%gMfw6V28Hkr> z98$8WvzyddvNU=D(09nHX2}dRUA1t2BDbE5@Ls)|(Pc`jga@dbhkUQwxiVI%EH82A zqr_x{s7agB5#56UFzuyr{lww}veyFhC# zcp%TEbYq-{cZc^rrY-D!sLU`~q}N*)>XTU2l;N$IK%ryK#$i29fu@JvO?`~p8{+y$ zsfO43n)}UKh|7ZliF>9-g0|H7I4j<;S5jKNAaZ83S=x>c&(-)twh4$5T5Q{6#33en z9q?xjKd*JsCIUsOfb&*&-s~~nsGKoxns;3<&K8rzS7gp^t=&~K8i9KEb=%@3*!K*lB{nAX5hsG`)`Q=%K{Ppyuq_Uq zP6=~dUwFC@#L8EFer>X|oW4 zyo87Fb_2oCy@!&JOu($-Fy%$Ftmd)Al2QB$un2^GHnhMlDzF@2BzE0+@5UpD;MOo_ zFw%FnI6hNp=Tv`AflG9}F@etekKpNEsNcT_{@3H=n5O4)ek-`k{9Bx`k}H_^u+#ba z6=8Rh_s8|*q_k|l-D<`7P^kYXb*7M~5ytsnMy}N6rSRJE`H)DTq?KXfOv2G@i}4y! zQ*ZtT(tk!TCTJlhl1GIK`}1J!eJhvjD1PR^6oWd88;iV+k*?$!iE_$|Afc~r$a6R*!%2cR zITLRPZ#}%ASy}6#=H18kavwnabYSE#aNxLIHQ+=`9!BMiiJ}}C{Ez!nOwhcChNa~2 zgHe%YQ5{V_+0_I7qz7V!e>tjWw4>3W@jIJ@mt+IQW(Drt&X_sh;Nl0(_&auGeZrTSm>qs)|IY9|gB~CMQGG z@8FmEAXC-S__=t+J$@O|)l$ws^8VKB%9U=!Zm~g4?AE-{pOL)WhmT>qcRPcH=K(23 za;X>W`nVTzI;q-=JN&SISB)9PaJ6OL@AM_)angWYTu#Jw@#S)&{;D0V4}Jr$6}%7* zO7Wd*)P?znGon3S=WLgWE=JHWkrp<#8%>?mGsABmZGT^CQwrL0Oq+`>%LqtOmls01 zarvtsw(QoWTQ!k+%UHJjL0Ag#_c56EBrNZ`#gy+-FbLIS? zDX)Deboo&8v}z4$ z(IQi1m<@|9w?s1qQo*xGbkQ^#qN;!P6!mw6=_7IE9F=mAC8#<**?JJ2b-pF!GG_dl zadsK5-M(q@X8H79snfNFHJbyPQs-9^5H}fv=m8;`YM|+7du;+Gd0=fCE3R_$ojcEq z`xxTDJWkro7lfudlf2*wnQ;HUehpvl%0E;W{(-OJmcR7yziDtFjST9`T*v)qJKaC_ z+I_VAF}8)mvfI<|6q%rMDG@o_MWl_#)_JBK*cwBv+wDs8JSEjned0beW1vX^9>}(y zg%9GZe~K!8u&hH!I`c%l(0Fl!aBGR%>8~#7lPCC3>qYU=YTP#l8$t{QXW?4EA&2*V zlwaJvv5!gh?YE63IJn5fupU^PB}^(<$7A}uMQ5-vj`AG&PSP==u<%^U@Ad7?4}-%% zNXPVQ4(2E<1EIsDV-V*X17}pL@Qz0@|JY25r%t?m=v20TY#ZfMXhdash8&(m&yVCZ zwW{~9JSC`0hW$P@-Oi%eU&(q^iqnT&)!b9K0JoYJFf47I?)LuG=9;7H8!g-`oHBs~ zl(~r;RgvbV9pw?Gf5(_snTX(IcnvC`>e@sKY7ZIdukfo-mL1zb1uX%sD7NF8k6SEaX2Ez{D!=KCgsCc3-gzS!E2bsRTg$u0xsYiiJ^|KViva{{y^v*6_v^o( zzxIT;@WpdXGa%MDDE+7z-+ugNb*D8_2)b~m*Dmf$M(nCXL?wjVtzo=S#N{I~hNgne z+=+jMv?7Vh+=voOC0yT!&4t}@>K6$sIyUU;*%rL|nOr5EGw;g8WLAKC9?ncP%q25^ zU}4N$W-|j)Ns+ZYR16e@KVDpKuFk1FXsn{LJbXan94TuB%ZibYGiA+M=aRv2KU!e; z!jOVHwLbGoJ_}!>S5sT9Wv~w)9cMXfH#Alm0pe&tn`;6F!39SHz2&8e^j^7Nvd*!Xu5Xosy{! zMA4fK_~W{R&}^GK7MNDrRzp;lg}@=|RH*6z+##@cYaQ#dQPtRk7D{bHk2e~go=Lh( ze$2VHPry~~my9xh!UyUo-I%PE*aeN+vg64A$)$e@h{3v$s0!5f=G)V@I(j_pue(#h z2qRi%s&~`#RA0t=%k8y*(j;D{r)J-BRGXQ_Vv%yH0N3O`pLjaTeEq3MeXkM-jmq}5 zoObo+ffAsCn#|U7d7zp|Z)vHoUX<*)j0Vhg7u2>pC{Cp?@OSdjY$6&YK5pchOd5&3 zxp(BJkyX>WdxI1#l?~s51EipMOU9Ds2OqzH)BII&SIZJ%)P5%%9_0@^)v=Cli5~xK z?TTmTHsnqX&rSDAf8-H)0}<%vuIJk6uchw7)J8KP9Fj}|W7_>JAUu_-H=}JMhlOh* zk?jGrGN;RH*yKf}fs24_V<@9o$lnEX*G*8qXVoFUbGBA68`RNGma>bRBXtIM=9Hl! zcr558&xJ0+M#8YLAkq3E@Fl6ux?&1Wolg375zZEV;AR{X`o51nO(4T3hXQQ2okf8- zknE9qB5D@Lp-QL;mnX|eSi zx`9?sbjvyEihe*-u77G{jOfreF#v6SQmOsom|`>92qMh^hTgEDJ$B{)p5Kt; zd*KQ&PYjH1J=`WeI%TaiN58kSjeuJ{<-1up{Scb@5LuG8jt0Gr%XAJj7$%+D~AQc1yL0?XjSA$!BR zH_#wJNOd@9AM6rat?+IX%>e$y{WiabB_bD{L?gJwZJK={&sKkfnu(jK7=cK24HVdW zW=XIfUlIayLCfSe{_-m_7*E~=KH)wP6L<&PlvRl38w99+FpYXyMI6OkRFpzi{{XMnFmi z@-DG~Z8@v4N7>D_-l&eslpNj+<; zLlQh}Su6BrV2CiO#^+}JOIkkr-!Zkf$KSH|Mz!gI!(}Q|>u3Mk^aO!oS|r0^ zT`U#L=W32^98xkYFTuVi6tvEp#&-}-Zoiz-Yg^vuPl~xWUaO3<;C`Nk&B>z2k8_;O zxL{kqq%t)QeUnGd3}%T2EJP26&fkXiaBDKlVrPIa`e3=)H2XR0y!4>cH&3~Ce4a9rxv9&I zpd_0}0LSEZAgrKnjXH}S@3g2lBU#dF33Y3#I;fFHq`x$tB@aLccn9SUKUzf0*s*gr zJtDl>-$ISNxuMgq67e$)*Jd%U8DrF-%1!63tlL8Pt;&^I8|#{C+Dc_M?=r+elis+{ zhC5jl!~8WT)2GiF#OI1|$GoxRW^%jDD~Y2=`;eS*PUy=+c)ZK&G(Q|vXFlUc504Du zH@*G0JcHG-^b72=^Dufud;Ai2aG4U#e~uv|1Yca*F3nzc^ZP#vRt@00>p+*Co*|jT zN{@_gCdry6K49j_`kB}V`|TzzzZM6K*ROg$Q^t0%8I7HXkSNAuDC0;TDWVG0l7o;v z=Sx8qf#7h!S-a7yKKym4dDDb}S<+0ou+VArVj^Pq?E~oPshi6^?_Z(d(m`?NEeoHT zDE<8GZsnT=G3F%8-_R#Z2%MpPvg3~MIstE3Xv_6XNmYesvES zFdfq$Y}Hz(d3Ro0Jd0aZtkKMr)OhFN$|*SLydj~E;~s|SY3&)KFUsv<>jbaHsExtH zuJCeJCk_FF0u$zGn1d;rT(^l9a6?lGWBF-I@(^2m*0e3Op3cINAJ;Rb=w1m-zpN{uFoXASU1d%=zXj;Oc-Zbdg_-EDiLTb2iVEWaBF?ow=H&n* z^$GS`&k|d{yVKfazmt8hOIJEFmbG4Z(AJIR?XN21U6NKKHCzAc0YltT5OY4+t-Z3v zQsIXZICo06@+PcisxSm=n&^aPWb_v)9fn340bZ1m^(P%@1HgmpFljc!ftvXrD}^Gx z8N8_m5)w#VWnVzieJ5KsH(Dl_^=m+@WWvtY?%77O(=(q}E^WEdI~p1p-;Hi{;u)#c zPV$`&cRA|6YS%QNv;oB^^Vgt9+B~Y#brnT#st&Y9zp;?bG%R9%vORIuWT_-GG z_gIcO>WsP`!3CMGOTj^(V+dKAbIv!=Da47BqP%Jt$fc3EuEqZAtkcqXET3+n=)7@Y z`*_Nx58Ou!oreJ}I_S~A7vfla8 zb@Z@lwiBs!l|XOU9?_AEEYS7@c>mf0;e z>$sX&iHO9fnY1!Q=jy}Lz4(_9wf@FY9Ru_dp*?d;@mcto_2I0pdkgfTjg3fQeU4ix zduL6vQKS9C_5{25ku*aUb||G^Z+PX=S?qeZiG63%n{>phM+7N5B6+$c83DB|POrHS z4rqxI;E)FIJ#eUuGE^Tj*}$RgbNz*^^V<2VPIsE!M)E}d2G)Y-yGxQt@ZZIMkGN%0 z*BU>gi9#)497`yc1lkP}xER;`h7BBXmfNxrKKGyu85(+t;9&66_( zA1>lZgp$5~@5 zFpv?K7VDRzbb(i3&oPekkg|a9p|h)gVT=Pf?kawvYE)fFO!b`I!D-HV6LJaMY%!J_ zrI{3q?;K>n{SDnlX8EkBbG3*tRt5Q>m6QcLNcgOe4bnfKzX%h)RZqtcM>p$lHO-F& z<%q7crJK^R9D!}}Wban==qcI$;4sG_eAM|VI2M?18&Zac$VOYU9HC+nC|Ez3d;T8p zn5H(sI&+_8u`myfW;Dmamc8&8Hk`J|LyUF40;9!KE9Hw~E_<09dKqEsjT%rTXYaS8 zS{)LgxvEMwHz1Wjt}LTH(f(+0U14JybV02y6bX+IoHMALof4bv9%Q&B#WKiz-1=jX zc6D|Dtasyt^VS-L1CKheaaT>G4v&tYf0=hQs=i$#X)*$GQK`QBVYoe6sXjf4bBbAq zPD(5yA{>m{Gt@AwkX3K!;>7Qj@k9u5N0D(4`iHCp_cYxy?&*#RZtXe2)X4B~Xmfun ze0)XtOku6n;Z<*XFjS)7hx;@l z78>b2cCKZ?lGH4C6F#!8HzDL$RObFoE{J5pKh*_n-Frac#jnrmKsCH^mygW>( zpJyjKxZ%QDcML6Q2-LX&h|~!Q&S#1kZJkaZQf(=$t+UP>gK?}x(imQJIJOv;>Ddhp z85-saldVQY3CQc9s12F{k2e&TIw1XF8-bK6@~V|rA!b|}KtvnE++3B!6sL2Uc>l{@4dmuKk1mgY{Ud zS)9KVDww#DF2te86G=R!oNB#VxdMedSJl1Xmq z=~rg9SUt)H1RFZPy50&_#CL9HgWvrkTwL`m+{K|R5%Z?(rMjUJ5bm@Fux#`txTzVn zOv~z;yK%#{m5>K~XqbRwL(MMQ!Gf9Tkn}q|fhUTCx>I@pjG?%0FL~a`(A3N9F*jUW zNFUW5jLrb7P)71JRF))FNq{-kw3wjQblo;d588ZOLTqtjd^q1Pi4z)Ox#)~+9IlxE($1Vz zt+AX$bP0jhIZa&vYnq7v-)8ySFAOB$*O~{5x^}e+{`~RtBXvJ>yS^SfT^sxKNmSlP zt8BBm+DgB|fzxc1w24pPKPIcJ3+N0&&8OL2B`s>>p&b4wA`=s?b*{JG^T|%~W9)4VTH$9&hG>^H` zh%&F<34MT|UnKWD??odO8oY2wA~tHAWnQr=HC=6`69wGQf;3lU11wwCTh& zUN1#8XD|ARTj^dV7V$MOlIJiAMF423&tw%mGqHI;0RYTby`ikFi{&CD5XMRD!<)Iv zD>Fx>&#ZMiimIZESLrMO25b`9sQ!o=(k;orq*uE&KP-?>42h$JKdB5Lc6Z7_Lx;Svp3 z0+dP??$i<@U!71AAYucci6U7|bG@g?ro>=PL0}p#gr{$Pse z9j2^+t(v$B(wt?A4eFj&WvvDWwfFvSd-^~3#otfsXEE&b@pZy8N?CRTZo3>~abuyg z^iEAP%70;xHv{l;R6YW8YQ5dHAV1NN<3Uabu%`2Hc9)4@J?3$VI-R2DW(DHZ~A6{YVR&kh;D|?o9M|7<;UU2ytPpdAU8YPTs73- z>MzxfThH(jj^ECyimhdUT`Y{(@3HP*ri%*hISEWRxVE9 zaTS@y8-w@w3{LY!r_m0^MqF|K3l`#lKyKPM`)sdI1eG*8U1#)c#T zwdL2rJW6PG^*(|L`M@?%mQALQ8I3ks@UwdrOLC`3%1)H zD-?b)wt+^pM%(D>CNC|wGcqnVsix*y1%CWYNS3&Y~c+ia8?z ztJt0CCc^~XME1+{+9-{1sHR#eSjT-)seb=o5cFTm249VTzw1{4VNdJr>w=S8qK^_O zzCANZo_U*=F7$gfwkLS@#3f8H^yNkaJ;@?#qaQD-OvnWWnvo2gB3}obJ2~@os~sdV zRh5u-HB;$q*hBoyg6fZQtO)t=7qj|LPniEo%)R{-?d_k#E@QEk?dkx#*O=xW)9k8F z#|@w2$54{CKgwX8Q#QqoRM)i_!7m5eG4aJU3?!=6%Tg~Y(tPDd-iT@s4b?66mNp^p zt(D&)ak{aNR|O|=`0!Ya>M?vaU|s43;3o}*5h>7~mJ=1hPSk+5r6&aObIMSs=ah)s zIc9TADP74Sdmv7)DxM<9J{f*j!j1fgr~Yas+bjayB2etx2P74=y!TKeCqu4yT*|_u zEEh}P)TkG|W5Wk2`BeMj&=EMJWoc;3my68oKfF!<&3{(!Z(A#LasLf{??4BW8;hE% z#%97cK|`pug2}2H$l74cPcpQjr;h(@bL#|3s9Is9wSMO?^Lzkv5%St#weTk=s+Ss< z6;5mXPAvTyTtf2wh#i`nX&Rm|ZS{lJa19jaS7dZq(QmR!${j+V&4&3=IB3E)R*Ro6 zej?qytyYIzZk`{MZj1!BvTzi$Oq))xgv@J39Ea@crF9Ez&eWZ(G8KrS+HCPA?!xn# zt}^;G1)6>+lx^U~Ee39xi^*q)(;2Miy|KS01{%-l!{#;4dlA?@y>U8Lqh_%=jamug zLpUe#B}`9ikeE7yyHjzqCU?e2mPvBx^jqbsMurW)=Wfwk z>e5thTiBzGxOwvwB7qldN!^^#A}0OzL(I?B&6 zC*8zeGKU%axKc|6&lPo*+7iN8h||}2f2P(le9%+UU1fJ0tdcyV1Xic zWWAXB>{011F=O8RqiFT&?CuiFRf2|Lmhjp7@XUVTX423j?HMBPy5L6Mh{7?Nq#9FI zxofJK2OeE%uQ?w(OnF1>zdi*f3UYCOZc|`V=`gy#Ib~_&c#>0GXL~QWCXu1`!8S+o zls2}HeKN|{uC;D>s=>C^a)#J2S{MB=#ANo^@)7yuzyBNPF=y;Gjk3VvDGaG=l%y$pVSD#_x~T;1I=t8 z?HiTL-%IZ4t()f8ai9JXl53YTE5%|ke5JNU&^Jw6e(rA9Ef-O(4#>N=u4@`4sb>?n zvL>I3T_t?IWGpBqppiU-)*&C@kScdLC1_}(fvcDXIljQUR1&ZG)TPUoXN`tt7^{GP z`s7U2)wa;c1DGa=Od@&5)K!jpSaA7KpOI%)w3l0w z*qZOL4^iqu4~NXO38&>!T61Ix>scd$Z9W#=(gkWEMKnjw;r*NUKk#eT)92izG+Qcr z(WNpkQiMeECdZ>YMOD=Yc0(87#B>g_mh;f&&uUavuF3W&&7*msF^2?tQNhLbylB6K zfb|-bBC_0or^wdfu*ukdyUrY_)r@t|@*zUH8zHUK>2Xyn*#+mXMqs+dIcKviraFkq zt#>&F=X`L(_fg#|tMpu+G6Tv8*6+h*;8wi_G%w*E0*$JJ;IMkwq6zQfdd1DpmG7AkutdY8+orHcJb zOK6V|_&r!TyZW2~&oRl`$Gs_I8Fv@Pe=yd>zLCvO|Ln?X~rt>pS~4-Xk>4cR(M}Z zyEad+?ezJX6?4*Oqit1Na-m24k-|_KN76AG%)=5#MU>p(t6`T%YqE0P7QI)iz@B-X zIpUlA7U+Cvli#Zd)1+sqoqyY7PJ7l!&&be8QR%S=6Q`bEY|**z>O>CWsuhu{oziX* zvDK#ZdSy_~8M%Z|OSszVnn5qHpH5kW zXZ}{VO68P+iNt=U6v|F=fGwFA0z4@GVAGvgI}T!eqm*m_z`^2 zf|ZNS>3Mhrqo6v+zQbxHdZ-MzsWr z*Cmw|6iW_9ng{~L$Y!w2CNILfryM?h$0+TiO@6KSGmbyU1X}Yxj*L!}N0_6N>JJ&! z@I-ihLYEs1`5irbbpy@hHgDus=hw0#E=dJ5Zt|XO1F0F*(}nC@gPssd7f)(%%+c1 zhgT#<_0H6Gdl_t#F)#wS4y+zK`utn(U8v+<}L=ieDdUf(JePv{#L;NbMKbazH{^RdN(p? zYtt{i0h9Tc5j1pFKOp>%pR-EAo?4ItqhAW6o#+II`yqbOI=)@R6+TznY8_S#+T6(O z;Ce5X4)I;KN?4fQVK=!{-Ma_F&-QsONqw>0kz-JDP_=2oK0s-lf%Ts2qRxxkAMUJrY+>*T|ADtm6-mn0m-n{l6;E z@7kCLltDszr6sSEvbs!r(xOE3bkXX6xnbYfjztc%)nt3_E^3(rMW*InxV5B@BT|%~ z>z0Ivb!s%qKzazlo-KDRYUsct(nrREoF*Q1o>=)8ryO;IL>^$iV6f}XF%0SsJFAB# zW_)6cy@$Fzssp#KB@ipjGmp(liFe1R?J>pSLZ58|r=Us;88ufmgt|`Ba6!9a-LONF zhu&~>(rq0uOe(n+X23R?YWFjgo_`{?2uf*HMMI`Psih;Of}HAh)1=RWSc_}(8senq zDe--#QS8)x%O z17%{2Mx7FY)>?#p##^``vfvaq{3UK(7w1{-DocE)J%h#3U*E+WYe|k+P2doNbtLR1 zhZ06e6`0;fxnuy}T~~u89;u%Z(HNFedYLYQaYq;&Mx;kpcx)Icfj&mAv?3Hy*sj|2 zGq0vR^yx7jj;#dGqlp!RwmG?+sLka9JiZdlG}NopLGOF)CJFs~NzVMan+TI5RE%7#D(iL{$x|r#2@?o-$evE14~HoXWzb349>gFbWu0C>p?g?uU&SY) z(_*W+5&FzB1GiX647&qSRK&Gvfol4M{Q*;qmk7)7SK=v=!KxyaZm?>2CY%1|FlYk2 z@P#no|G%xw{jcvg9J_~neG3(k8@0O1VpAqBJCx~{(g?>}fnjy&YeA<8Nwdx}3arQO z#=g>pJmuiU2{Yk zph0`xZ1kf4NXSP50*O z+@zU#g1mG7Ti_$lp=-*5fA@7o=w>jEb4CYtQc*F9(%-*IjxvL4wtc3y{nY!mK&7biHz?0F7XCw~fO zFSj)-;P8>2k%T%&-e-w)&oDJ9LAiudDRy4(z4;``WiJ zzrKDSaPaM(;I$v}z8{DJySBW`cwGu^y?Ni=?8O8zXC6iQ;Iy{qU=ZCFVi)yzv2^15 zgot~q`qtc9I8pJHzSGNerj+ojsJM5h>h6PB(iA{r{(B+z&HA3bo28$fUj&I>`<07y zBG8OC$lY^`(&xxohc_;#Ri6W9M7MqgalUFUaBc5LqxHGff6g!xGWcundeG}BN5nId z!z252TF4iK)6<(nBkM4yt*cE|Owi7?{LP%s0He(Nv`EnBf>!W_0Jp}y={9TkE@y=~ z{-hIF^NBRb!+7(&3^peDmD)ka8O#`W{!aYs)QPLZDJbTBfxSu3Z#6KS5SWGPnf*q; z$Xsz@>{cJWp>vHH5Oi7k0RV|Alm5*4DszGEveHnzm%GsYZA?yURvh)Z`OL3Q91l#t zi)6nK+j!w9VXK14O7{hVAB&b#&vo5iqx_Xwob>QMKOk8Rq9fC|{*%aoQf3NxwQI)j{qYyWX&%tlz|Djk+u((VaBjfQ#zj2O zseie=dAIMjr%%RXtMz+{S&2?rXz*hT1=iwk?SM<@y{sdu#!Fh>E5ASYi@tpY&8K=I za9Z{v`pH&4^-TR?$L|)Jp|I&UyL11n)~&{*QG7&F#i>~d7h z>7)7@glLUvFjsdujEn+`vG*90&7u7p=_5=K>wjv)0gml`S$FMpQCPEva@;m>#@eklO5B0)aN3J zjjKp<0-8V5i(C~0B>X*xnVyp7Pxfg`n`si5Ns*&Z`{4T#d|jojp1;x!!2zU9#=ia2 zA`N`?57V@X{}JkLF;dgj`*Gsm|M_40t@e6nPO zOMuV$-3s9DST?p?%M*uNcwKbiVq_42VRY0vK` zO_QX3>lstPtg%OR>>bQXhip&RSOXtRdcoU2J74N=0iEn+^cQ=88rn%>z)0z5p!4pv zCP#(uP!$LEcz?229?sf;-;&YB9MM5%fS5`dfX} z#ipv0I&(rKTGjX(Xo$49eOCPlwAb8Ef5P=jptlb*)&7EfZ%vL4;?0e%(IxSI1<>wp z;+1`_BbevpdD9od%ENCUY3B!$=`QHsu_fHIDAz#6Sm~C+_1F10NQKWq0(d$FHu9B@ z6L8^nIYtZOwCw76rRR2J^k!7YgL=5#pXGDEIx$^a8MYrH?WJl%_I(V>;)3vk*zXgC zd%nCqbGTST-Cb{vR`H2dMR0zubGyadpsuc%pF0df4&~zwypvc(!ZJTxSh?H<-K*QL z3%J`R^qkwzfS>tMeU)TjBz{jn751~f_>(Uf+(VIes^-?AV&2MlzR)w0k#~u%nyG~k zJ~!HOR}7kP_tsXgq>jdLZvyIpO>Dm~seTPX3%6!5*eR)Lc^m1qGX@sxbxHQNQoqje z!pjqyF#h@ats`b{3AqEA@BjI%aR25W+HOHt=T=)B`DcmjzY{B=8EJinCpRrX2)zCT5^{*@lC>}&T@yX4t6T~G*Q;J)op z2J`95(vkB^&QlF!PX;6T`H=~O^-RnZ4H z)ML;k9~LN2xmO?HiSo^|4t30b8r654L9)tlbE-)d1n+bj+Yif$UtSMLP0?%IG`$a8 zEPM_Xr^mjsF%@6Ye|4WveI{w+9Nj#x!%2m|Q@-n7(cRflhJE-$)AJ4~x1jyhN{MDe z30ZXTVArmdb?qNf*taGd3Dxj#RBoX0VIh*ZHB`WS=|NryNUfA*F=HmBJB{vrkkoO~ zrxN_BSDmtrkkO}|ZbW2Vv}Z)#?!}e@g8Gx0^f4PD@m(M=Tn&8X)e8TPxBmoMLf8p2 z4_*7kJE;GJ5BzjTs;EQyi)BLAMKHZVFWvUjAM}1|l?sFx#FM`jH@(s73LB=PgRr7G z`YpcrUH0?mf~EnFnzD=;beoNS$BS(C0TmiNWQZ?io5D(NT_ z-`dM%<~YG}hZ$^-93w!<*!t`Nf~_8x64r)a&L$r<$e!B|o_D>u@j;K{b)5fOG>6cH+=>#G!}yX^mGR;F&qnSCUzbF4;3@;6z=C zg=R(WG!Q)%E>KhjST`UbSvh4tb-{uCRkrC(SFEdM3C?q;=e2*(GcU*eW;Z3PY}yRN zqBbz8ns?B@YJCDZ=#L&iE|_{~W|+A2p#ORSto@Y<{}4JJly!BDHApL!&#d$>VFvOj z@dh7zYVME*cYxK&cg}^*aSVudTLIoPxzgK)jf}@u%4ZI+#NAn?-zA4c>8C_127Db;a@ti0}q%L5D&7An1jz=!knbhY@CXWnd$bYvg(VRn8tyI zyJ+<0ho!b-I#hJwyJ46%%UcLM%q$@STmtDxMdDG`%_~tG!KXV#@j3OK_aTdXQx`HD zxkD#IF42cN6OHNOsn=On3aI^MEk6nhC?Q(za!*F`41Nrc5BE5@u#zSb1)3zIMXum7zHci-dR#vk}( zwK-VQ7XOwre&LL5)cH$x{%?w0So(BLyt8b8vi>m~8XTFT`}%s(D09=36@cqQb4F)2 zfyIXA8v}ly&R>Og_O5vPM=9OowH^TYM$DdZCdAsvXk$d4b-;sy)haQ#{`4*%aN=1N z%#|NAE{^upUzV=_%-#oP8kD-}C@@+KT2VUua?+A1ID9(uNutn${{D)z9gvQ`-8$Su z;zt`S?DyH+)OYG9T<>{|M4>;eL?69iKb;=X7}R!Hfr$wE+XeK|@VtV6gN!{fMPJ88 zIp{18V(#M(W}inoo;fkUPp3odA`RbjL+w@E{t<~V*U7B3eLFY;PR_i^@f1IkvA=Cl zr-sasu_H#ePu8Eb6h45>vdlAg8nM6Kx>Xzze5ha>1R|d1mrvrN__vkK9|1wJ5K8<1;@$_+5`=9m1x~Jaqepdr8S}*)mwh~ zz3J*#j)#@+%HG_c0%q!YY*he(7cl4uFPqg*G4BEBFBhx(%2i$98s$NYn2@gRz)f4Z zD&-G0e98z|Z{&bJ2`SOLg@L!0Hm>Sv%ZjsM(MMg0D)mc`-O#FsyE50P`*Ckk{^6-B z$2#b3`Qpk6(}s|YeJmaJZ<`9x`xLI0A`zP<9?q0ZGTTkGvWl9r`oU9^+*~r%(RykYUV1Tl)|^j=e)s8jB@Gu?_*Qsx%u!fKI=K z4LxR`UqpO-%j~)+toSZcu(KEE;uw1rpsknW1+s}lWr|d5Xe$q^t<9*g+S))mpebmE zt1H~@Bdgly&$F`TcNsDeW3fMO!)cLyW9KeQUq|*`@g7fuXe#-&Z%@?xPvpO@y2uM3 z4PA4PD>U`NSU+va{a-@hcS?wo z2uU@fG~d8ovYQXzx%=Z8=FZqp-O%!*KB{;NW-E&e+pjck$bmLkAX~Alfft?k(g8z5T93a4TH1 z4thqoG!Olu;<_x#!ubne6LS0QqjK0~px2IUR&(^8xA`)l!CHPAGnH&UmmUAoF`oWi zKj;ALp#_#`G)4c8T-EtE_$6R|EA#AX??;oZ>)JCV0CdBI2cvLD1gbD$cRSav1dT-@ z`J`C(0p_RgkD@nxdfYMgH_kC5+h85v*d6>xI)70i*XO@6iS=I}8n5d z4!Gl~$9*VOVb^1(@(BJtFv!B1RV1^whZi`J3}da(^E12BH>X7?dyuTl10(7Hho1f1 zStx2n%y9J>(-z&r<6Ky@_T4LV$yJ1?9X$0O*90!3iXRRxdD73Ic7q~gk-gE)Loz1< z4}Xuwc?@(zFHl`)priW|1e1KG!JEH&(j4 z)!YuGZg!Q~BGf_a8|36qT1?NLdu0>yrk#&5pdb+~`WEo0pqoZnM4a;L+B{Y*BRy$rb><9uDS_`eA9pUd0K=4xc^%D%z3=Vvvu zM*oaD0o$e7zz8S1k&JRvNB`7hJn`E?TFW{Q1^-*Q-R`dTA3?x)C5?cxk>Bm>EqtpH z@yn~(=Rya7v~w6=?6NL^ftPPI$cQ(-J0JAa~A55$IXryZ8bqI)`9 zL_8E;8NM~9gEeXEv9hh!_YgX*A#oR&K6NN!qheXN9!L$LH7&`Gm|{8)5Rl7xbTycK zCy?k?SvKz}Gx0)C%xutfL|hBEO`2ucs#VG&lX(eWj~TA9pSzBm64qEHAcKcw>HX(g z4?ksR03GIrvM_rKaP0vX@MZ5blup^`z;RWR$Ev538zimRCxGx|n%mNsP9qjsXJZz6 zZmZl88#8MOsoki?x!Z!o&0*o9W-OOy5otr)zS^SWNKc7+P!~LFrDF$YcYZipxso2% z7h7p@jA%zAwH*76mOce)Tc^X7!|vEUJlUIFxDM3*M28n}nsP;R#CLM&8i{;8hUM0O%I3{B1F^T$V4sPheQ5Yll9dMo;=q zZWIx_f#=A0pSsN#_7=S`(pf;CW&0Q9P15@JBe7fZv$m|87uzN>)&gu+y0KN+TAQoZ zj>>S;u`d=bz+R*$lKjTz!sz-1LXO;rN-m_tae9RJ(J#Ks;uzM-p2qKbEhb-v3Qp{H z{}TpQtgwSAmV;6H)~c|Wir`E82T&B;5sA5W^8e5zVOLZ|P6)9)_R%}UHVXZ+B|j*f z`dsq*&+orhE@!c5k<3!(!a)dN`+!`A@2j9K= z8SBO@_do%uR!q^N@M$jQfag+Y!%t^uNI^1tGAIQIf_(hueztcgEyo0VBi<} z8v>srtEJ37s2Ivhr&X}RG~Zn9g;V@HL?!!4RwWkdYtqt>ee|%@q{+ztu+4BrcRzMu zQEyKR#$OoHUgTn)ll(w>u)#n&nTP$hkQv9%HEmnA*i#DV(}^OWF$g=+{RgU)1@TT7 zNl+hF1zYU4?dE|KOM0O08gUpHLW85O%*SZ&ohi%E{etz2yX^6bGnI^fA2d z5JoNSXF7$>iT{|#1@-0N9K9!-Vg0?15y=~`P9^1AoLZH2MyQPzxq|bm6z43-CMk@A z(^pGFhprd7adz_$6DeY-!k8&}JHm*)370&I9lMK7K$`AkT_zW@9Y&0|_~RhmGrK)C z2p5QV-&W9W_}2L#G)A~XGwH>4euBw40zr=W#q_C<$xSLYuio4MeLuPJl1RSE&j%v_ ztvg&NlbE!jTh!>W0cIsUT@}sRzJqkM&6ey(`k0TS227! z^g%555O!0KCJP|%GiCX#KJV$dp@XRSdeQQ>GOojtk}(|nMcShQV(1?>^5Ug_d(wVm zr;6|Xi!kf|f9CbzpU0e0Eara&-oac_zzW*7ru%axlCz1e~OGfH_s`%diW-GCySh&t2w3Q9Uk*eYMVX9$MJ0 zv*m^teX>&RrrPfj(G|;IE8VN*IBJZsao-Zy2JT|50q;NNVUd)s*7^H0JpI&nK^qG4 zWk0$@T_tPSsJR#T`tF}1Y~Qg=d;P>RoSQB8aC3aa;DeCW1i*sK+>E1tsuILzcM2 zdt5O*FZPSOpE(hR_Uq&L!wIy#Yjq8*L=sV}Q+vz!xlgU-X9DNmVb|5?CTyw~uB8So zAL?O^rZ^JYH6sm1(OCTc(q8q7ur|qqY|#V`o--?69%6%EXRU+*h>Vj$^pNlk2{ELF zHU?5`yrcl!1mMG`Zd6Z3^@``?eW;jEr_-tO52;qh1{9&2eztRFh&fD0xGA%G?`;aF zKZ)`Og|T+-f=nRP{DoNg7{*7>b&x2mfTqu;VHDDx%q@OORCy0HKHEUMasMDL+5AeJ zh^4!PJ3$QSjCkHM^K~zFC_O_@U7l{))oaNHAo3J$Qb!v$o9ETqQxQ5BOM9Z&@RBgj zAodfGp@3|38$6iyfJoIup(q{K>3z1gaDLsZCJ%dH4EDr;Sn^}J@}5S@k369v#J=zi zlIG70)|S1qvq+lismLR%{vsHa$KTVM1M3~Nk(SNOqipK}O8GAwRR=@9#e-^GYI}$P zbd(@pHM=9e+TI+_=)_!w|F7|1+Xgv-AA5bke&pgru-;4c;A>$a58n-k1t~f`7YdHE zL0j_6df#L&g@dcz(B`CID?^)+sCz>&c5AZlyT;ifSr;m(d!ZQ@1H>WoO2Ml3@G0a) z4RWB{N>%m-3^=xn=b_Qj_O|ROtZ_m>zOKG8W^{M>)2+a*t^02`%5l4Lcq3X&Mxysv zs6jU;8&3SmhM>y^KV75io5Vechdl}ea<3}8*6JhbC%)(gJ7m{WB=0x)#HqTs*+N6p z@N4=iXfwfwsRxSWv$_#yD7dd+x&Ng(2mNn(qtWk(&=VUG2RiD|#ai@a6+QQ;wDG5Z z;nyE$Gq>k1*Uwi$3DY_OxnA8IsBGw~QG0h8LGQGnP?&_W=HX30sl|>p+dq9WmnSC0 zeQjD^wS7`!NJ}@S{88vn+&c=P;)yS*qVvS|2L#SlQlF2)u8MeEYu?W$@yW73(N1UJ zFtf~PwXPJ`JwRAdKlW6}k<#QYFc_uy#FSUkyE2CfQS8Jk@nSm_qY_Kl1^|S!7*=aJ zZMT!9^chB9hOb~RuD0xyth_?-Ip0kGD z^%#bOCv&wxh5CW)tAjwuh9e^)+bAht4D!MRH|1|ZPo#*n~u(s2dsmQ zBle#mk`(*WEFuKpK+zNEl8m&ON4A4S-?JCOYk-Q0YOX8FYJ7)5mGoz2>m5nJH^SP0 zt=;BAmRb4hiL=8Q*>5xTJypu4!y9YFe=+O(9TJFfx8{p4@qhyaNeeq&feoixJUSiB zj6!>_^Z?FQi>bKqG@j_z4;At;8zTcl(~-*cM0Mf^cYP`Aw7~8+Py@DKcjEGU#gtpK z?!TRA45;420|7-_X-TMLg%3xj9H8x)4m0sS@Z z5LzB7i>_mUCVoJ11Ph8rIKO?a2yOUi#@0cXn=}M`@#N?*je+aB(216rBO1HyeC5s!lc&2>`OF-JY7il^^fCuofuAY!e|a~P(nxfGSK83 zv|y^mB6o{X#ZD|^=7eqyb413X#F}3+Dw5iM-vh>$7B9DF^v0)X* z(-FFXgt_jvKpS=bhKv|%MsvQDhG-Nn51~WRmQvGzX-V4dPWoUVtV9PNMSdAYNL9r z(~Rp7fkVlQG!=Q7*2ZQ2TmJA}*&%BI^J~@xK{VHS3He19|Evxnay?^WI_l}!?4bl@ zU~v#dTi;<23SUbZuh7?ES4x2a;$>i(HO3zNB%~x^@ItZt-WZF(EG(aEVz(vev<2 zz=w@e|SJ{JErhURGiFz+WO_~Blz&)GfjseAbF_v4pZT!#zVr?{)|Nw30fb$f#f zN@@rhkRzM&x3T`|Qz#38S}7Ad=GmL1>}^#*X%<1>)RA_~Eo`Fru`W$Zl#qd>qnmRu zaE9@2_BYWzaSf?EF~VHMjnS`l?n!iP)I$YLu>szJufHVop?;yOnwA6Bu1ilw&a}5l zWXVKl-tXM25xyfub|C#8#tI&2)*hefh#f)+|0&Iw$iqJ7A?D%2Lm?&J!=^^u)M~NTZmfRY?k@(Tmy8Pz{ zrxyi6#5LRo)v5}I-8Io&Xk}XCByO=dyZO}uC5ui0mLcxY*KPO0%WRNdYF}jMFwZuX z_{2sIiTGi3+3-@Z1FFrTq{u1<)67`c?Ks|ijxrdZ=2P5f)*QBQG-nCkZ85P{dRAiM z6%%E-8oMNQjd1heY7X@N6x5cdCBHG$_*PRzKdtR`73Pu{#ai-l5%gpKqWI>QX2uiW zi7n&g>Z;@s1Pu7Sr zzv6N=3H20V5fo&QrVJF%!)j)!wRsfq1*=&S9^pZ(d!^^hwmoREV|T^pDj0oX* z2a~IY@eP%VOOQ+A662txeSiIBC)n~|XO*T`weWd0^@Grr} zUZ*@Jgpkm*j_#15{aKI%mEIvOD4!5kT&QvbF2*|{vlib|M>|YHZYpZnCi zWc%?WFKEVV+V=B#=$rX_vsTbN?(&7qJr3NXBr3XcoloBmviJD-MUf{Nk|qljwP$RH zf4mH;{^4#l92CAjQ)o8a zZz|eRPkV!*DEZ_nA(@}0P|}P4d5sCcuNoH&P7y8Q_%*xd*7cai-Sorl-$eH-aLXAK zl0%liVQ2J&-~le(ovb~OS3@9XmA!^8UT;*5LSk{(Z(pKZVlExc>DJ4e(o+wjn5}+9 zq13zB^@eu!T5G*668968Ze)k^fX;X<-K@C>E3bSIkNVUjD>@Vr3Rr`0=utdr8d5>j zBh@FR#8!@Dul1mKdlWahu_#=oI%~65XL4fw)6?-G#8vL&4Sreu19o)if}S(9$ez&G zKge-o8^*OIoz)#PdC9?m-4)kx`^6GjEBxw0D`|B? zH;{r3EVdkp1c@|L)Q{9=u;>yKVPV=B0B`>x05-&YCl0 zU$J?a%FZ+%tH)#CSJ}Bh9lx37L* zBUGbGW!?|IdDc%-x`IVsHX8+YD#9m&=*s;%TAqEHjSI$%1kUM=o z^IfAlC~&1_>gLBA>~B*p`6}PsJf5s11af#*x&*-q2w=*^N60yjJ?5sk*D^FsCN*DI zD(hEq#}h%f&Jntrz}WftDf!Kp$qt%G^|h9=)%!$GWBIAim@;&p{XdxP6X_!A5V!zf z6`ss_#}qNyxExqw(mUMQ-6b#06!Ko04z`}+2^4_?s~>A#s606IO44s#2@{zuNwEBv zfjoC;41R1nDtx%hHr!{(9lH1QQ}q;@<{MhQg(W%wU)4uc?Ritq%D>toUFF58qM3nJ zl_nOxD`-q{dr!p7N~%D#auYF?*0W1!EI(!}sIe2ZC=RJxpaW2!4Ip_ zOSuL9qNg-X{_6PEuR!bEY1t%#{DG(jDAVq4cqRG4V?O>Ev&t$Ykd3$z!$IPl>`PT7 zk=Dm?{MU54WTyu^KP1@a(@6-&vk0lN8f_c+6f8=Fot$97=)8(6yX1lMsy!5XddD@|L$UvAwnR<-vXdE%iB^?#y5y~YQ2-j|1tXtX# zZYlp+9jj}D?`ogQg^_YC84+55{e3K{-|o^|OJ6FUlW@0SjM%0f?6Q~~oAL(Iz;?Q$ zt*s7Cd)^PUq(a=*3Fw1;nVT2trxW=WG&DIBpW7hwD7%OHj-t$ zlVEEUCbYs-8@hu`o4V);=ygdAjD~6ET?sGkih?dMVwW6_rz$+(t^?bfMW@#sH!`Vw zmpUZmc42Ax7%%n4UaIAP8S^vOe<|J;{EzhZF-%RS*0kn<5Yxivf3vXo#-F5E1If-x zaYAQ^uR?H|Q)ID`yJQ9(jGxj04TReMl)9IVKGm z367(bs_83Ghejf=PNx-9p>Fd>ayV`khB5kFIug#gVd}o04bJmi#UWP3St&FP*}05- z7*ah&GloQh>8~SBaq}S!WV5BqpKSE}IJKstdUs{P_xCw%C#Uw6UiXTD&GE?J;I;lS5ON$Jfvb${4^llTZ0Bx8BGOm z3R2f9vU`rEYb!6qnJJSJ=SEn~g7Zb+7qdy)^AD&zJJk9@u`t8g?%1)wY(E;Ddv?CN zU^mo8P(v9sES%Pgp4P#dmL8A?xX}1IlPOq{I%?5yH{Y-=&sz1m1`1e0NZbI+!p*jQdh~F)mYF>S!2s2O1FI27G(u~c4u*k{$`_-e8y;}5@ z!#Zq<fAb&Gj&7YV#|H0e6J-70L_O*JSvtH#s)jvsE)BL1}%l{ zqUYvx#G_N<(IBc8t4Os`c@_S+N>?II9>m~5lA&A0IOmwj&#+Sc!Y?_@Om&54yfY>B z#*_#+;A-1n?l4|$vqA2SfwY+Wrnhe5=63#So*Tl_L6b4t~XMeizx+)2Y) z=n0|3*`D`8=~ZV2?kgzP$KpUD0p?speARQ+5)N8UJgY!c9v10#VgO^ypEz}M5t^rU z)XSuUw>)4kg06cjKdTu&4_P4nK3(Xk7a!D@OepE|z(p8T`gIOixw~LXmhl&rH^q*! zE=xf(c%5A_c-l;~!+UI6YM5ylraR(ncd9y`;NoE-Dywk0+DJiatT6NpETW&0iP<$u43eEby zCk_6a0xNT9jo8&@lp}gC7qQ3=rs}L7Xp`5OZp^Ov%NYDTd{DMwMS2Y&HAEt7H+oQ# zn;;D9#J8qbqUOt*hYCqMpR*Ximu3vGO2}?UC_=aIoUdE5B3eV>dr_j?WpW*@?8ju_ zkSq9>kmA<2d)~WQH9(T_rF{>oWE$hGLz&F1=yS@!_cI#QDg-@q?l*1&%EA8T! zko^bhLJ+>;-+GAP+PMHoa4Nugf841)!0b>+*~j9N255 z2YN~2*wA^NPsNz~Xxd?gn~g_mVWe9YKCglXqx~=mTwc{Z8k?g5J#~=CIM_`T>~$dA zEN+9&=ngimLFlR|`hKJ4_4-_Z@3h4kYJw~&8D*O=)CuZ%T)~F)vrH!5Y<*O0;k?(bg z^yy3w8;Z_`mF4`fyW|0dwasba+ZUV>QK*)N_K#6Xn2XAVL^R(Ct;)~Vosn1p3k7pj- zk%jPwpD_49EUz%VtSUuYLt;5E_GV>mJH*yFVfJ~?jtt`*=SP}06VtpOQ9|Mm_LGm% z`v;_q#Md8uI>60uOEH{H!{F6!J?P5?O)H)O92#9hCW#AefodBJSDj{8p~-GOaZ+e& zdaKEa_Y3jeNq&BwxO=|n0lH4#BD{nN4Aa}jLy62IcQ5#d~#Wc;ARKf-zOiu;S^0BO%`#MP4TMN|NpNREgcL_{d1=HKC(G{J&lwAuVsC;9{f$s!K5ak9#R%jr zxfGpXSNr~8ZfvQ(1Ft?OZM7S_?ieg*9l}M}h%C+-uIN@1eVv9ZE-l>4wWL-?XI~5Y zCW=CMjBwFmb1HVAu#pGzfcaS8-qn`0@)bKKyVtFYxXMqg64a)ap#!Itt-aAVN3G~= zY;0?Rk8MIW$58%pc=V4*052`dZGPwoVC`kN+X7=VNzHqxcsbTZM6@i~dwTtLHmlo$ zD#JAT$`f|7^YnC)l#~M03ytXaPL*Z>7XZCa2YGNM9hr&i;|K*A6&5-3;k)qdWqxoe1)0dLt%kC~m@T>3K^|a6( zPb8`d7J4Z#-ZFX*mt9AsZonX8G?5+{Ndfwiw%e>6J6QRZKErN}Y%~rbsZ3~UC_A3H zYg=EmWb5m(A3|}m4;F^_7eG(%} z-UTe!=4Eqrd=wVy120!X@zYkbtP{S+K|pvuhPFT6`kd8>C|Yb~5elH_umCUBeBYZ9 z+1hkQK=l|$kO#P1f+!a9Sd5sn;;vmG-plM0luF0l=HJiMs z>p=Rb`53}j6{VJsnBz+NA;2HPI|JOQCHAyKf~E0OJXf1i9R8-Z-G)__uj86Er>gOM zUhH0woslimzT;~@7J*{tMow6z@+BNB#cA$GZM~T(l7!0L6LkLp<*SrO_XGCU9;}S} z1E95NJ(}aOzlE2~5@vpdRlQCgoCf?@ew2bBCuyTouEcB(0c$M{G@{4p7e)u3sDjJ$ zYqSRx^}x}54JmiB&~qfx0BE+{9ew+bNO$}_NOANN|dz7!T^&CR;&3WQ9iSB#`~)M zlyHEU_5p`{AO!46nUvKWOZhmkDg=vxMBD?yakJ{eQprt3DQ>7YA!uwyd`dxI%W$Zf zuEhp@>RI0Ft$g9!TVTI@B)b073YXLh zC~)h<@@|<*t_0n=z(2R~>2S5B9c+mnbw?jv*UFZfeA`E&bMzISxY(hc8(Y@nxYLi+ zR9>y5`RM05v-@Jr?d3T{lFchoa?}_V$Ot}_Ab|A-tDgR9QG8A8cN!urgM;FE1eCOb zb`5(lFoQU@=ZnE-qTOwv>fr`(3_;ts^!E&FpMLmTl|FpLDd_+3ODZG&8@KzAYu%k= zzy7cSaP-xBqCq1B519Aw!G~r3=|Yh1jQV@y07ho~M$7C^i*~s%^YBg{QJ7w?AMaTt z-F`_St?%BNzIB03F9lj$dQ>Xh3N6U+@9D=DX{xph$S>%1`wj6K4rT;g>JU4%hX=wh zwP5lkhMdaEC;cPOEY8LH7-|vH{w1^?ea_jdBXC*+I;7I4LP`{JYQl&snWkGQ_KpFv z?tUL4_Yg&?LM$+1w|7uwsb`0`qW6`MFPisf4j;4TOcL?o4Mk-uxs7WdExnw3-71cK zi)5z2iF%k%P*(IZ@IaZXjo@sfztSHz1|U3jR`wdyShIi^mcurS3<-6!{tM3h!=_JB zWx9o}!)U0-`q5*7q3sQy7sYB;NXpu=bYqWfK#BseKsFJ5iCtOHE7WH!c}BRI>hi$+ z4{H`@k-+#hFPppifc$wx(xS3;r$uOa)V@74@Z z?5;>#5F7i1BpS5Pdb-H8W=Y34yA}y!g_2W`)O>$4z;%{t45XV!*oy>En^$3HPrNYp zn~le=cY{-?R%R13>zCU5z@I;~j-tER3JVUfu5EK*(Lw}QWM6$$w${KmTp&e;bgVt~ zNtGL`_)A66wV?m1v8tGd%av^q$0&!tdes-1Ub0RdWnR{0Vst8?bqIhA{NmZ|@`m4; z*O|pX9L;&?kqVCf!HRdzAVLY<4Z60P+G2VqM{Od{V!4tO6!*pkQGq7zI|J9>RTl$41mA8-$Bjd+ z3w&ROkv70P1=8c3A7J?z$c%RVs`f;j!!CUX3aMwe#hnPX-`P&%DxGV4O{QA^a|1LP zHnONX1OW6jK57F(&q29ujS()P$beB_(q<;{PU0$<#wCd!VMjM_=d_+dOvSI&hz|ng z!T&mvlWxr9HY%pUN+f_juGa^vnt=n?FfM>K$Ex0MqDQcIQWo&%(eA}ZeM((rx?##wPwE4KDvV(9(G zz$TT1*Z+h3vu2Q4P8z1LVodCr1CZ5Frl!%v^TbT8sFySdCw}=th3(z2yC>GtbOP7= zz(I96DtBPm^~M`?6`C!15iICU{dVXn(&2whVpCx5w&0?r9?{tl@}n?FFe9t$eCyNT z9qf-PC!)KY*jy^V=(C;hNjE|%W9+kgies>3bp+S^C6opIXV?k;yl zl!&TTSCY<`olWhzyY*)K&zKH1(UC%~as9hg`K5iRi21-YjrmXKr9$06l!g_mo{Q_2 zd6b;5$~F@XF@8CQo6`nuMQPV^0Rm){s(7G5N4(j71`O!i2I5s6Rl4&Xh9Z1c$eJB= zUs&p8CXc!rOk? ziD{etnhNVkQ^lY@BPH7{Rs5n@(-HFURN*{hXmeRRndZy^u`i2j*5E-K)FtVWe~wEX zm+iQ)t)m>bT{q?U88W8h$3c3cU1fhKtnJK+QR8k~eJwO|;eghBl8*d&$f#_ord6sL zyGot#^-b}lnXp*LyK=b-Ewa7nQ|#mz?gx-vNa!xj2-tX!D$9)9c{2R!Uf>bJkn4Py zSjVz9A$0CBPN2L8Tpq7c+-;+RW?}UfAsp||S~U(TlZDUvN)#2J4X)%P5B65>^lq9D?PD6+E*19$i2Y_=T-4Z zjh7y*BEW~!`|A?k|4&Tp`+fUR%6K6+zT4_2nTty2U_B!U`%?}K|BIMx*Bj?9-#vMK zzOZl`$b93Jx%oa`nf%sCF<_hgW?|$VxGqp^E!sSL$KL>d<|>i+lc_aP_-UuoSalh6 z6?pf1ZoTM;=J$Cr@WD_++$%OBE#+NpMMy-6McGj8)VK=HG9EEkl31MFEQ>!051~sN9ty1>KiQcgF(q)(J=?+=Kxx}*dTNlB8&(&n-=wTl> zF2lN-MZZ1m>C-9A9g;e7c|cRbaK9)XTelJkY>g-Z>dfCIV>v-#M$D-(>=7Dlz3H%% zdBXCKDtq6K9EK)7$2sK-THpqnY+>YU&XOtoH^}B0>T3bRu4eX_X4I_mApUis;9{O; z>atLjSGKoACxCU98tY019eJA*r)e6!b+pVsMBu4YGM^kUH2<;R{afgVk z3$-Id=2gy%%i*2=IR-YF5O#L!JxNW{-cmyrC*T2g*7hf#6v*4#rUs*;pK#-GZv>*# zr|{@1%dxLY%env1^xfbp1}EwwjAPRr2FP(8DPvlzztZ6wT8F=-GV;!zln+d5B)b6No^wFGLTB1oQ>zE#H3px&DG|`ktFlZ9!#YZ$G17 z;?32Of{rH0W3sU3RfD_i3KCWUs@8ojL|_9GbO^{vm0x9OW)UH03^1%c9X$gnn3x=4$u0JkOUUSWis~KV z#F-atl+Sx#&^xU@JBItRMZ+gKTvAQifz`KYl`2)3<*(KFgsGj8Ue8WnSmpn2(+Spbi0;ygs2u4k!~D$l!KZFi%+7fx zJ*f|O7+%KJ57EMbr8+)lhITJL2=^spy4i(T6*FFgtwVY*Y%H>%RX z2bssLJYea2U$G}PqZ_B*dHLJR$|HT|{5m~tBV$tFgi5$Rwi`AO1zgRwT0<-jk!LZ* z3X|C5aRQeQ3v_U@X4h$sm5F}b=6cV0qb=iGhSI&N(`j}~57rN54IVEfXTnxaU6lgF8&0 zFIn9AUum7d{1DpXd2c# zh<7KGH*y}k5Z?lEz98n~j24Q-@#2f-DcRF*Q1R(~YPP(*0mQWv0y1UnqYYGBvq0WM(~O@eob* zQIHiik}^plIZQc0;!B-CvU6zRtj;j|vKDI+T=VACZ4Vf<5&D8LinO$*dHt49MYYHk zYl{AkeRz=B0ml{$Pa&?LTXA+TA*{ZG?A9}YHn!+q;kvhwly`XQ;L%!xG*}tV26mx{7mqXQZl71 z`^WSsAk{T|w?Y+u0$*33hkLXA7nLd(A?E?7q_hTa4y@O5rf;R6>A{ zM6cq%Lco6t}IJ` zH?1ug;1wRr;h03%d@lk66WK}e6o2X?O9zYyrpdV%{E!j|d9k=BrX+^f&g~nb8$WGf zfCSp(7xPGnBnuOlL#`N<14Gh1zn|J`U<4t(7P>#>g8T48l5-XJwd$FOVd7R1wE`Qq z^EOQ-i(>a`hakiZ&~nn|CVBpz@=A(vg2W?6rBF>Ziwd=ZA6Z-iMadgppc?nv3yPvo zv_&|JOa+CT@SxM|uMtNeA_503NCT~$IZhB<$6i<4cu*M9(d;wd3xZJ(PSP@Ha0+Al z!N!8fYcQ(Z4M0J6Y7KNyxzPeWA-QdMNwfxt_XH(I#gzZYS4+N0to zN;Z%|T&*<@fDt|N9E69DZ?iAajO#qp$X2l#%Yw{*gD6zs!Fk(spbkG?9St=C+L+Oo zJ#~e$-)%~^6HXF+q42R;3)80#n@ONK=h5jgpz}t`>CRdldTnrY%SX=QHCk}Tn>D@< zRI!IM8dlnizkcz#cdmA&-K2+hx1MlM^==K!Rd9kTw&wgA|5%J!gsW&B6vbeD1D@Zo z&Sc~9=3U?GMGfZ2QB|(^5I0J-yZFn)<;i<0B$qDkxaRXeO1}J<@t)yTM@Uav*N%!= z*4(czN_3syomnkUi}@uyN8ZE+$DQ$uqd9F(mp#O-&5;aVymVc?{39yvvIAwX_E=Pg z!;E6vH$B&l^r@*Xg6NC#SoB=2sZ?$0ZJ5x@xD><9u7GFjFp2N%qxS&lW!|do87fCJ zm7K=NXea22e_#H5mX01R3FLr;ny-y$`2o}CU<3>oecVC{<;(~pI}WVfQ{J{0W5ZMq zl;D)>@88LuS){cX9%N%R9l9Z|IE0NdZ*iw^ki0FoL*2L)@wQUJjFFEEcF!E zzf=|V?At4LAPx7cN@w7A^@fpmc422<_wF=C!}(&br!4OyUnX=UOPJSF+unDyh`f5b za8N%uQ0idgR~AWjrT<0YSIk4-H2&#o->av_9!t#=C(C|!7=0F$nuSuiLMYaP`Py;Y zam>HKI1$Gfc>hq_nMR3(u9j6)R6tHFGF$oUBJY86^@$I9D6y)Du|+4%f8x&HqGBFK z?^UC7HSVnT(AYN zUc`$OGgm2_7x_0^G{5eJ*PEACkKEHhkf2ie4*^@hZ<$AF^s;>!OgEB~L~bK1Qrr1=6QT6o3bME&dB5dW0q z8>-pJIrlzAbydJrd3Mi`LVn56l~CT9t>|vawm(y=$*SA>9j_-dE|@A%`kUoXO$ah4 zR|}Q1S5BAx&5GUn1%kTjure7v2_|S>bU;tor1H$eDE$D#-W2P)$T^DRJR+e(oA-4O zC1CPjre2$!uKHPy-VrBfTIdDjn~Ri^v8bPtEs=B3fsibV{5%R&3J`W&?W-O|+j900G2^e$I4 z&fCd(ms3Z~XUH?(oWw?TSo?9Yy&10GttWv7bA}g@+U1`QaJ8|aLA~}%x0u1s9n1=U zuBtYoePi(_ai&jN_@Y-zYQG*k`lX$o?xFv?3*eNU*~YBQt2wVsBoOb%JW^dqk@tgVmIBt^2ymdX0Fn^ls!Zikm- zeLtD~ZF7`gXYt*6?TIUX=ty;stNrPQ`O?K;1qwec`C2tNf*+a6v)?+dA2}c%6tj84NDNY00=jQGzkHu| z`Dym+bv16I&psG)=LJl?39E!14SN|5Ow?X}0xq>`HS1#U>}i)HGmL z^pll?YoHi94Ty&IuY012$?7FnVhPY_RaoV7!;_hsqYchz;_1)mzC*kLX56XC*sXxG zo$7e1grqvx;DLU>Q_errfw(Bmx}LTE(5?Ot9i$(D{-m5eL+y7yKpBdFbwk>tciCYV z-`4J<&}oNP87Bp^(F?Y^@lnG5qqZEE%1BnK&H1>%Y1?j+0(>wwEHk7dGw7l5TB^6s zXV+NfKdO%ctyL71iSiIPy)%t`qxCyM6?q~C*$#iij!b2*lksH62M~0})v}29*((yv zG4SmhW}jIlu`13={X3H1AVdA*vlP*07Q~$0_&eF`yjI)>Z04*MbFu#OV(hwNl?4HQ zGia0s!QV#kkauJ6Kq0DUAuK1mz4U*RM)35{8!o7DwAFFVJsL-AZ)9Ifd3yCgM*fbd zADxz6+JWkEe|XKRgC?(9ulEI;j-J$+oU?rY@yf(It&H{`=4pvHs>|3d`tfXU?=Ycc!iV3Ls@V zDqpNBz4wuH?8dbiS<;;FJD_M&~rEXoc{sGj700aHT028S+X5aVSm)N zovivvLJ)h8WXCE7PxlZOVz}XB540%NP-$<^*SZneVt`W0a}BvC84vod8Y26FqsFBT zYkW!Mmr^&?6fl@_4fcPhfP&+1AOr@W#lj^mGvxYBy`MK6zFnb&$h+L{{jtz82%5dt znE&Z2?y9N3$RKl=W{q4nG(11IWrAq&+ zjZA*c_ZoKZy?27NMvO=8rS8IOS3RnqOdavl82vN?E{xdak{7b=wR=4_w1Sw2vu~cr zOlINrovIl0T%Nmci{*NwcYqN5jbzhFAJ;fcC&D`j53N=+L#a= zdXaEJ4v+07H_^mf2QtUli&_Yiv0buSOzUk^f6Vd8SCY>&^O9yOXbh1W*_lWGBpt6J zELrrM^Ho2gLX4v15b^AoLA+1TX52RyNW=Mz!I)9W`059W^2j`s9XxZ~73x1SHIWwg zH*Dj9>)TYz5##x)!TFxgM&Hem-J7G|R0#8JXZ1jd>|YDPGat2>sM_w-gIlBg==(I|>5V^~O_MBCLt#nvjfrv~ z`ZDkDg37~jbZNzTpU81ff&b=}*z0cBavOYQ6cy7p~V6dpxi&emNVWaJxE6-U_rywXB@_emi?- zEb5}2+1P2CLAUKTNNw=som;3(n-tf&bL*#jh|r&5Q)P=0=@{5kp~T?Z)Kn{P&(eeR zx}2N!V)@=%PM9A(eavaBZfU~SP&F74`R%WtzBh+SIw8)3cRtWtbV!CHW^o;BGb@s~ zVR=xaGE#-hs(u6`Buv?kxffS2xolh>yPXx38OaY|@1bO;2yvWcnwRwPJawIaWWIa^ zZ3h%!AWJz^;)+p*nZPAsW%6j7!OiOX`~u+QJA z-_kD0vGS!v*taQn`ELSJ+}w0{^G`kPz0n)4LwK|LI^!1A<69UopbKFckXlV%pWt)%#%Od#GSq0^_JgT*1OhvA!QS` z>D#cG3n`XVRQfTXgg#||3GjMtS#rkP^H{S}n3z&9UQKMK$H{&W{{k)6C9>H%%UP@E z+CfX!GP>B~_*Aus6KCyo=jp_4Sf2M^DoSneUblJ=Y3D&ncLn~H$iXkozOFW8FeuW# zBZko3ac=q7Us?fkbNjErCqmmjHqJ^a0!Ps1c;6}2FW4*;Ttg--kL{>-g`tq`7MCYqiLtcW zKE%ZQ-h2%;WqeUr(rHMrvn^cEoP=7mrEaE*~F>Wp+7;K3838Mx_QLEQF7pH4;mXRC3S2tV(WsLEK` zp~U)`DO14&y=n70_LT8I)I$Q(P>W^G`;1h3BOh$0aJwh_O#}tPYnx}Tp*ITq`;d7&DUFgB> z9W|fIs9r7db;^#)k;^dVY`SeSb|==fJ4e6xaOq6^w>{k~XTaU7r{=2|YYj+^imkm~ zzo&N(y)WY?Y%jCA*w5|-^lkMRx@9K&65sDvzHg7&v}f3V#xy7UjABZo)2#%9z~6yw z215$s1YE^sQn;hVNj>D#5B*1|=9Ffx=CVlQSh=GeZrzM1YjL<;Ps;_LZIP~gK7LdO zrr^X2g?dMu$IKsZP+6p>eY;ant#FCcZ8ClU~6;Qb5Ikcqw z+rdUPjlNeQn?heICd{BwSya2ib-3U*afG$q5@Y1VSYF!L(Mn4N4cWmX$ZCVv`M z9?28i&HiNw{=lF6dTq!x!LNSfeFs#1-gWUK{!A~VKvGcdmF;f5S^N(+$)KjkSRS~9 zFJu3_2*^qbKG#TgU)85}3cL%)=yfE3ulaOVcPT#f?HU#w5LFdyC%qc*M^uScHK$yo z_M%@POWi7FRty#|3@0*HV-WEZEM`nO50)x}X+_Mg=z@3he5PJQZdcZi7xsClt@lqn z57|V<7(%zdv&y!*1Lk6T(Y4FHqm`JmV1|m1z27ll6EzGqXpZ)J7+ZRu^pCmVjLSuL zLc3Feja$cf1j|#m+uLj^#-D!DRA0A6)N#ij{FN0gEwmM~~-i z6c507zQtcQ{&T3yHvUb_`h&h0c%i{`xvqVGAhd2Oz-U^4J7(k>GpM%7wr}CAV1~a< zWKQE`c;p+{!-aC4@|ZymOm@%Em#&|jPudT{zuI& zDd9Ee^)UGjGB&lL0?tQQR};%tiF=n2RC;^-PO>)Qi;cj5K4~aiz3r~8|F?&irtk5h zdt^JGK)=481@#DZ4u1y!Dw8JkO4bd}SBI7Bz42!&@yg0{f8;HX z;L$P1F`MO}`Hfj#e^hQJ@y}~rrVAD-+!OR`^^ax$tQK<9R5p#DWYV|1)-G#|37IAK zMpur2)#=0B`FiAv`5mUya-yx*pWlV7z1(Wj0W#eJz>b2V4C)TB`G`{Y! zWl%Zd9AUaQdp2nP5f#RJme$4ZJtMSUrN&a=-@0;=XhE*__2{rA+==B+*)1GYgQwop zbGX4P;7h~T1(Gmfo*^;YqZ2Crp2|BbT78Zvg3pAtFqv7&{~5Y7Wxn9XENwBrk2j6A z26}0Zo4c@(qa?D`d9AwoQtC9#w;3!sugEG{ea zMe)A?L*9CC{etk?qIXIsl)o^3h06Yq7euiU4;M!~{; zM?Kv_^j@vrnAEX$s_qardLavz3NJTW=teSOK~3$@(c4!dqbxY5HZx#GKC(bZ52EX# z;7QV7sFpYFSZhP*=Ig3o>G*1X#EQVej2QF0$ktauAaol_X|x~E0?D?bLTLMKW0f4? zww{I57=Em(&CpJd2EJK(BOB=x^*zAHQ1~ij!#t$x+KMOG0B(OWROcd)G(_Sw0)Inr zB@CBmIQ2gbWq4bEtDB6)e2iWMNK6a22-Jo{V*+p1bi&iyCdyWogstz3Sv@ zr=T5y!xxjfTnm8N0)s`NFaL@GEw}<=bNG*1OslZ>yL?-)r&|$zOX!poaIs+vxTx?L{eFcv4&AdWGd#+jBb9wTjH8_36O)ol$&8KpOrqHK*p2Ch-a?**a!q-IIQ5~DRZzodw);P z%Go;`0GeAK9y8-U`~VhPB{>-Qt4@4rtlDJW_@mVTlR@s*!yv~;N2B%JJL0=MZTV~F z&O5a*O)Y$&lqZp!p10RO)=Fp3i;fgITGwMb3zfDT4ij8yQ-Wd>h+|X-x!}E@n$b?c zrg8M;+U(rZA-{%FBK90=?e_G>^NuX+00+%>DbfLntf&&=R?7R~Ajj5E$MvC+M{|9; z-f_dGh>CYspSu6X(i6$n8VQ=)>qhsUNVhgiuLTf-As-O5zI!7q)*IG7;_N(xx z_H=V)7~7|=@bOII10xPb@+u^6gpW>LWCVpNlI5-Deeq19@eicq&qdgR}N&|gr( zDT^}}fyzSld}=x(pf_gpTP-;-!6!?2{K>@1vwd6o#DY?z;E*om`D_m)J-Aa6tfWuz zz(!M&VQ4+Vc5`JPGBw0g#epphWfg!O~N^z;u2RKd7vtnmV{pQ8}#aR=&Qx zpa{&({IPs#*wGy_r!VtEG%| zIJHX;S@WmcIo1$lR5^yI4J0l2(C3J$M+F^zHqYuqXze~UDn7@?Z0EX-ch9L#wK3`O zl2%`G@OV}g<%e;{L^;)Q5cvd~bON<4nYNv^f$Bq`m_e&eQAVTLss32gTJD$i1=imp z4j_A%BD7(@v6Ru;ac*zIS!%V%a*(E~tjRuy-jB&JZ-0 z8R`$&F>?BhQ6j3_AmKHgZvIAA9`z@F_tpyqZwCdoT!H8*)h`K>@M0LCB1Ed3U#=m_ z^1y^&Vq`W{|DZ3~0OimHzsg$Q5x2_GN+R)vfYI$-#UR#i{`nlYis71p(_%AY0n<^I z$hoP29-uxLIGzfW3AF_od8v0^RXu7t=rTGu6xTGjq(VgZ?extfEjH>&Vgam{z8J(t zUs*fJV|3@*pWF`m6zm;tYrwv5Z_EUQJs+{o7`eXqr8{RjM)ebwPzB3QsGE7`vu>yP zg;6j9>{@De8xPcz{}Da>lmhnT*l3pVu-V-x#ssKu%nNOAz_mpAi-9H59gAVIg!89N z^F4PKQSQ@Ul!T#aystoocv0-9V(W<~iBTo_PRCP9FJ=?l7n6q9oT9uvj|q!wU2O`% zSBqv2J+*?X`;pOjr+RQ>0{)5Zy`Eg*itD14+r+I!pYa@9o0{nS9=;yr2fUa~djA_( zLdzF;p=Uvt*vG_t+{#P2Tnv-Ul36OtQo_wDcIuGEz?={Rzfwo<1L-?EK>D9#eyU`zdoeeD?wYh6nm<&DY~|tB_8Xodsx^Vj`Eq? zPkHdHexVwwN3`{{9j$aP^h*WeJi#crbxdf>``V?Q#HKt_Ko{;jTr??v5m9_l7yNK2y4B3V?nGi7G2Xb=-hq{ zT+`qf#;0^E2Dy`!9Q9pQ=Fj}Mo#v%+KVvK4Twq$8IksnQ<~nhd@Y`>p)fjQZ63N&c z9vIwCUc!V>i3_y^hUOn`@ug68lC!Y^5ftoTNt?;t8j-CBe?N?ml7p{Gxq%m3wrYrI z)UqEdBD*`i=1T2JXC=rlg_Ym>u2m(Ijj`$5+d#nH1*|F!1|j#XR_Rdi@MQ`wlR}#I z2Va9TU<3pITDmBS^#DAVC|4#7wBB#cp*#kgLzt4XDM*G61e*U#=UZS_dSVnh!8^W` zVKbK4lvzKQ3=@=}f(TFGsO{nOagPCrv#nEVxu7dHt#8Q|ho)|`HanMYY?p8X~&svcAAFK-tYh{oDUCg~VFDu*0PYd$bGm z#Dl)$4f~F^0nVc#m)tBJ;9sAF=C28flvNP*Qv38yE8?=>E+QQwvzc?D*D zLaiplLSfu(UQ|wUvVy=bU>&8Lc88*~5wJ0|Xz;QPDy1QyD7cx9ne+?GD*vuSExz9Y7}0#7#1bsu#Nw;e$U9T$E*gr~M3)vk}DsV}~s zbnjR$Z!9O_J^|?edfLt1J7Hs5%FL~;CyhutA;-dR5iCVlM-pUckN;p0o2U)6KQmaE zKmEpp2aCH>>}MmkM-uF5KzFtZIp)n`%!9C%BQB9XrKnE$-M;cSAV&zif4KCQWPjuNz4-@aa`Y) z&eYicnt7$``fj3U<;FLr2y^_8j7MPP3**PE3}wNLVQ@^~UD_{yjJumVwpDRKbJX@7 zla9lUh!V<&zclB*v{)Vrd25nr-ng3}CmjwM-F|A6N@WPdmCn|~MSK0&34ht@=+zhu|;bF0RlGb?!$Yu5s zINGnWVMnFZO0DVoPNgJf0ad4uU4vGqR2!oR0XaSRWMgRr6T+$;{xS+;v`413FION4 zx!9f%(t1p9jd>bjyYLneqQwe;UPBoR*G>-o-h=<8URkSBUp)MojtZVoSO3iuWBa&~ ze~Q%Wx(8A_d|=S4>4f=bd>yTmgdb*3d#8;c>Ix?$SMd)izy z4&+E}y+6Z97Pl>V!eCdVIM>g`{s~rFI<*if$;=jM39^{q5g3iK@L3vs;>T_d zi3{0T!b0%7U<*=wCa?l6;l`%W-zOZfyEb@FH0)cvLa4X<*13HnGUz^#br{^( zTNfBnVvYUo`KQ@cjBoo_WgNeMwbHl*L&r-@{i|WY9;NPFAelNn_$UQV`g@|O&-4sM z<<%D(On05%hqIu95vP}A>jt#ny@wwtSKIuPF}&+M21R?9x061L3Vg0?)}YpO7K+=z zE*p&}nkt%=>jFnJ}F9 z``+05(cE?zQAnBymfLZ0974=Arn^9zf^l<(-7WfmIt+@vpy+vM7%fE|uX~`Z&jO^31w{IGz5Ysik_= zgyCwjfTYeQ$*+BD9-R_##*w$I#pF<~m(8n7H@3>9KT$gxRO+t@S*gJvwF-@))Hi z&%6~nmA?rexfs%jR{w(A#o-2_OpUTr&-!ZtLyfo=cWERswOQgM)o9%}RN1hknRp7p zZeBALiJnCru8!J2fed(oUjI(Q$4#4n95-PI?;u_LyL=2*BK7I_*PW6~FusjGlnYiG zCw7f?>5I5E{|;*MHdb=<@QMlRbDr;l4;0wmlXV3Z97YZZTXsk)LVJP*sc$CafUcys z7`q8HRbeS|sjq1Ld~!2tV!LaLh=6NtLQuxNQ^;pi!gru_j9fbF#U+f}o6yZI8XhNZ z^Yq(GL-g8EMkZ>tp32H%~$=z|=M%q#r2;-C?8THvq+?PT;r;kCy=-qMw z+UpnWI)g%5LF9h5 zhUy9%USTGx$qUO^efHxylIZ(T{>>ha181IT?B{xZaQaloF_g}kdCra5n>G7B4TpcG ze_^F0oKN}*{iUCAm6~-;Ks9mSs86(sg{T00?|VbTi1~(x_JT(=GB|j)x%Mg0TRHA@ zF@}dtaSg5|zSo`g*B9y(WZUO&IIlI9oX~Hehds||C|LeTC^tDd@6&W_qucFrsD;Oo z&TcERR3TAt6s$Vml`08gmEDY&wQ}G0-pGiyiMu~iNifM8kozl_A~o-k4?IEjK*5PQ zw6NNk`_$NaR~a0oG8(+I>SS>5$2B)-9vx+wp&91qo%eAnXk$q>RaNY$RR>Q;&^{*w zofX4tIU*D9^sIOHB zyOp-994+fskwc}`ayo9litE#`t4PThKE#&l9u7UuF=YRFi@0I7jBkV*uzmq&L=U$X z(3{?_i=-zB{1P;Epq|s{))Ki&6%XxLC~@FC^3#7W&Q2rK?&I_OqVN2ONTT{moc`>lg|=S>eMsV9hf8Tagt1*3{E?@{ zSb6qEm8@hKhf`O5r$tg4Diy^%FITrxl0s<#W_CVtE0<1zp^BZSSrmxJ8$Xl4u)HfZ zvCe)&r?5<#6_u`)<;I)0Z@=wXx6$P<76t9+4|a4ZX!hzF7Tu57yN`95G*VtXjbJIm z7Z>YHa^iFMUrIgeYUj7qC--W9sbV3zQT+*@BdJtreExZ`Fk)I$^Y%RUQ~4Xn&ZIz( zea8ITYfBZu!ODz{*w4Pi_+kyGp@yaHRe{<1)_W4+<4DuP64&WnVkUmVFL-Rhh16uB4H>+vh2d`s!8gfDUMqkXjcT_L5*3gfn239V$q_k~s! z$qi2hm7Dq~`DiHRPtF*eAr@LTBd|Hy0z0X#>e|D5@TOGmJKUaQfRBZXvNlVIpS4grO z-y>oMmpH0%W}3q&qc8O+S>K7G<}^k> zG@!6NQu1){cR{{FdYArm$pKotgqXCICs)cmwv=158%ZvIZ&==xQs>m}WWnXgPa}y` zXOds$q<1EJ)+R3lWU;L!8u_jNiNfCQ_(IE{li$pnW~DlA-kAirks5SsA9bUNJA1s# z?K2doFMy^+;Rzr0Vr^qDo96#K*~g8!_Jq_ITk9X&$Pf83nCy%72McgHM!p$rFAsCC zh}=>k`Y5mcbSuA_TDIIRnHFG{W0?97X5&KAryK#v2e+@(8XWPH4sayZV2&N#h3a~+k93bz%H z5>{I4v~`Ac9KIeDdpcxg?}fpS=!Sy95fa4D;&resU?34<2p8I()H5G1n|^rM$gR(y zseb$k&6Io2;WV*MT4GeTjj}|ga+4%HW7N04G?=R7j6}fF!A?m`^ZE7~U+o;sVDnZw zXs?I@fKe{VXO-T*8^}j~JS?*Hu3Z*F@K>yf{-mYbYT3skYI2h#kxYGZ)$DgpN2G$G zR?Jnyo5ZId79=ZC3v%WWohYStMf{=ias+*6VW}*g*H2uJxgOkgp;qw8(=88w^~d-E z#hMl)sUwA*H7)}fujzM3x9Hwo3uhyeo>lu_G$de+EYj2hL*8( zbT6eXGR5@u`r9b@Z=u$ldD&14eCQ>j2}h}eOb6{K8^Ly^aO5XsFAh?{)8U8@1lpv^ zK>By0;=fc62p(}LU;Yv(t`2ylJA)h(cF)}9hV`8!jS<~9em^cNYu5JYpufkOe z70cE62YRZ$jUR%=<-AR8C{UV=yC`pU|NeQ)+ItDZmD3Ymt4;8!wRqQa7v+g%`zJq> z&92|eF+^Q@%`slHB#`5mBb&R|gzs&<oj1-o4})89ZKSS(wc8m#qL%(x19^!mE_$%;M86Wv9;!>sReh8eR~* zjmTV*OXe*KEGu>Nu%pun2&?X%rSNhcT70lgo7t>lHzBgdjPJ6|$wNt&xzS5YGBJ2--^p3fF^|*XTOZ7FTJ0&5oLy!7JtkT1Z#viZJ6g#F z+WLJpHx3`Wex~9r;iq_v+J#a_X1dQ%uyOUDObBiN)K2e$f0Km9^?;S9m#!+hjw_m` z&X$w=M|CEo^y7EVDZ&QgcxA5}Zw=X-t6KFLrKxG#Ca=7)>HZWUdVZc!oDi$9wE%=n zyrkT%4%^%$p@Elv*!|(@$mI*vjXr0;no%B`+0U`jrPH)@cl(BCsK@I$hP{~%>vz+6 zTesvYK5>dq(4WbUWZe_D*cj%tx>oH^7AG+&Nn5V?46Hs8g|S~rQ#S{C<8@9 zT^OXoGHZ-c`PLh;P!few_+Z-{+^z0ks;8JbxGf4nWFAMYZq+VZXC}GYBy7DVinM(V z<{2jmddu--gA?JiJMFpu#E7d3zvASprFrn z$Bvz2E6?`9fUvzacZS3+I$MfhE$<)4;|dUhej+ul&fFZO*(dC&_c`nEIu>btc{6gO zy*otUXyHf;=!mOnzOA9(HAP85)=s1whdF9s;8R)XFb#rp4=b_cy%&}pd6My1OQyw7 z-Etyu9zOcRt-oO@PVC_Aojvjvc4icZ=RCKt^yLW9UxkY5Rtg@M`FMqYt~p5Egc-P8 zs}lk`?Rp`^sp4-uDS`V+m!CbT)%o~%ykGJ&(KB_n|DN%y>`mb$<@I(q_=n^(j=uN8 zN;Si1#Tpgi*=6a&L7R(hq7v6DSxkCz;HsgpYKtN4f#0%MRZ75Abi)^SkKfP5X5Z_! z{A@dszcg2VGI#&ZNtvD2{TZ9>HY1hS6eC!VD;)&A81tNtiUG<<<_#-YnMu}-+5%By zKIUSE<|wbBU3GG?4}Yvd)VPvCr;Uh$1~l1Qv+w*U#73eAV7B^0*2qIc~$7X{94lBgo>!6OSqWlkLZ!# zLIEO9$sH3_53`OZI+kt-xZ)ugTkxxqJ{@k$NYE5 z{88~39COe=jozLkQRH84;Usg9K5z=m3W;|+YynGfA$6i6Y=|9i;AkjL(^lcz59dC| zpYfemj|eb%N)^N>9xZVPt*L_p{D4a=wr1QZJ@pK)!Lz|r{kQ4A1`rO6!B3%9iot|_ z4Ovok#^`}kq>=3on88s>dpnk;&9^@NjGvtVy$2f4n=iNx6@{jF-Buz<)ccCrg6LV! z<660mhmRJ*iK1pe$TV#b^~h~-h{ui?Qh#y_q{$L6x(+JR@e2#z%to@>4UTM|NOE?$ zPh9owbt&{CO%Gzsg74UYOd|ItT@(;VEC;ZhDK2jX?sDYC{J4~Ux6b>Unyd3MzkEV?-coG!`j=xzQ+y&bf- z?Z>d+&5Bvwu=|$RFmHH76=4${8?hF0_jT&`%JXrG;5=k%W%-86VR#7`LDO00hZu5a zxL;@NVkb)R1m7Df*C_$bs(p{5Kwa@QFkIYw@rd&_-THD2-HAXt+^V)hKSA8}-7lUg zH;`t3QzWvu{5oz2=dm;w17ZH9Tuoo~wCKt4R%H3Q;AMMs3xYNIF1sADj49q5M}1onSl*$cX{SFRHXWN^XmY%Q;J*S zad<$t{)N}QUvFB(ssMGBsHI+4yfXi|^Ybm`U22!J?ln(yI=26nI3Nztcb4<&W&_`Q z#gm$VKUmXwmT+f5y`R&ukJvhL>Ea_ZB#btf^Nj=ApQu5$HkgA^;>e&Ids^9`=k zf%Y>~`pon6<j4VBT561~@fS41rx855A zn_BkfOch=kkpEmBBA zdxD&KAy;5cA1A0u8GXdn57(+~1f?1ZgdDY&mY1C>R^cXJ8cjg)6#TG;fd`? zLv?ILW=PP!Qb6LT18g0?8W`ABobHz3D4l)PnZs75J#5X|SJH@QtC{V=m1CFdBT^e& zgVZgAIjCnelCFzIU!}j=!!axiT#v>ap-L6*2_5czQygLe6q96l2M*R5ag?6eo_y~M z7|BJ=SFxYI)fYO%Lsw~g!R|S9$oj75_8_R zuezRMI?xes_ICE)^4;Cc zcj)Xpo6zA$vBl3tlBBLQ@B9GE76Gx-;^0H6LN=nnNc@WjrqOHP&9JXEe{ zI$Z8t?o*KL`|k-G`;G6`IWBQI-g_H!*S;dfSTdLV%xgj##Wv6X^tAu_XN<<}P>XJr z??%TG`J_JP%pT~-;rTG|UmyN&V`9JSh{>}C*P9KlM$4Rz*t zk9qBP@@CtME@0l)#-PNTz!26ff#AYc4B;mVSh@yqeeyJC2|!e!9H7Z$}nfZO)`zlku4W9-GojeMt2tJk%D5>oUtz-L3d_;Px?w(NTBfvEmcc?UADHFT^ zR?+Y6*#l5gq>CF@V7LKnhqw7SOpiSr3^1Je&EHP*fU5EWw(Imw z^9(OA912_Cn8)`3#9oztmv{9_=&%=fMeS+(sgzE3K}nQto9e@M#{dERgaKYM5WYPe z2wZaj*A`EKe*qn?-{UY<2jSWS5POLC@hm@`v*F~MrlTN09+Ay?#@TaW0D03K@7sS|YSxVfLbKD@I{z4M z_rS7{jMH^7(;VXpjtfZG6^rtqu%Us1rT59JQ-*&}va@22Mc|5ua|V`KHW z@6nWs-B$Iuw)~^9g>h4btjfs8Wb_|oVnEPLFgRNmNj+~Jf=6J3BKcW&a znmwTxfyz|YpZDwt`QP#I|NQ5Hvl@0F&SR1P``b;}*=mot%)byCAn3pS)sY(j!W}rj z8{_`ZZ_fl=XH=%H<^M*c#+eryYYnc&7W$gc6|abib;V5b{x^({IDqHg{ph~i!)E=M z*mLyZwDiAOKgm4uA7Tny+m+3!D#&^Mj{VS*4t6~LKht{T|1X>Z!jdDmqyf12@1?`m zqg}DxJPIh2c1+p-avPxh2SR56hg%x|oD=p43{DaICivem`Y4)z_Y-qw@89aqX(OTr zsRp#3t@CI9%^qfVWpni@@|I{KANFxl*gr}7Z`Em+;QvQ->i-_6FmcXGZX?o=9eF5% z=3HFnK5%2C0w!*nZ+2I6J2GKT>!XVdoBjqr?&SXWcENPn>>$#33sgF|T z@izYB7D6D`JF?DN9xu4qNG~loRO6d3{h;X9T&LAV-^KoT$Ij$iQuiukE?zwFas8sh z)J114hv=7VlRkK^$ZPhpx0vl2Df5C4-*sO0W@=w^9;=e~S-vC?k@H9sAmB{gv-Rj3 zf@mmGGVkt%RaL^p*%9Cgq(i)H=VKw8;qVZy^!~}0yV+l>#qj!NuK-60O!EAGs7L59 zhr;!3k)+hBSZ%Rix#^xIr+x3|Zn(WtWdBrII)mRDzE{zq&FQM!;vp)jk}%WXXB3CF zi7~3dOT}a2BA8%Hghh#h`|zNfWvfF1Lwj+sP=Hb&eD1a-CFn0*z`!n-G$bp+W`~_@ z<2)R@-*=)wO{CkS6@}T9YIQfeGw$I|=WZumzis)Cz~yp5!^T4C%N+KxD#33A)jXvL7i2)TZX=soIGM{ZW~zKpKo5tc|We~f?2a5`ypH> z9+dr&4BY-LNrIPU+`V9x<2GKCP4!*4N}VH;mty$y6XWgBI4mx&c;KhMTvpuctk0XB zhj!D+69yVEU3K8}v9;mcKG&{tgZx6=lfr_RyvR86{Ei(?PC;{6GYlPPS$@0XqCmE_ z@C7%nEQwn7YxB6$LZ&Q|To7u@hhB4(?y^%Qa9RLR<)2)k-|&ZHedkiq&126;mU|nP zOwDcXKJw3sgJpg8bvhqtS1}|F94b;HF(uq;@a)zSKWa{>Eks-+s?wYalA8D>0_T1kiznRaN8A6;!| zC?IYVkObUJEg75e{d@S;m1HT`K2#9vL9ti%Sm1nE$zVT+>PiM|kVukZYt{TGP zd!f4Hf#w>PHjyy8s@bBDJ;Eq89W7q#7B}6#+WBd|w8HEuFp>Hgm^dow#){00O{ixZ zjgJ)f+3w@uSKt1^$a+#aKHB~fE+^}`Gu`$hH|eS;p3`9}$8f5*C3|3PS^5HeZd@sd-pTtTum1jAFiwnaD@9_#B3c zU2{TQiUD8M5mG4dKqcT)WsfF~V_6s4%DaLeU1QC@=9v&cQAaM^n^c`cqRKoBD<-hy zMDHQOvI4z@kwC}P9}i)*e(Mk`qjX z{Yz-l(6nHJ8Q}^ZMAqq`&ZEq(P3n;!D+JmNT72k^<4gDrz^%l{j zl0T|8VKKp^?T1Nk(0Z!^VC{F_a}8n$XY$PoMY5k7(GW?(LYKeQ-ZX%Oe- z7FXt(WXHi)8!< zqe{1C+dlRs?%NIdXV4l$4F&g5UlCz6uXY*y$34Tq6z?HXHEWRj&_$<21g`e9_hZ+j zi_Z2^4>{RH?h9>-Oc+YITa#~|1hy^mSkyMj=SdCsp!4M4ZP%+*`bKT zA`WN6`NX98gh2v+v z`U9xqm=4>2sU7*{7#u!PM&i$sqS>AgUMhyUVs@b{%Hi2jw(~OP6b+a>O7+`1O%VDU zjplgNYhMGY>P30`X2Gkpp4{x)1DCx><>t4egcN6*&x#kmfZBm>SA%YksHrI|T?O*M zs%L(=J`v8Z%_3^a)uvy~3Wb7VcDb5 zOH0}+@~v9Q1eXvH!F1V{1YnGNI953x?kmMZ{qx<-zUX!@_g1eTU6mRJD>_fX)|!RM zwZ?|6w{r{$<(s-H<^2=!of;2)A7 zKQP6ytDJKOlxstQCjwbFO6(l?3F6%s$gApg^uw)SH%qKe)S{l%=cwJM##2e+e@mH3 zMiS!8S@d`Xm-`pBJVcLI&4uoB{{XaJydj7ghrUi^KX9g6Be7x0BS#>Bu^PFxFg|bW z)c%cIY%+hVYn0p$Od_=SY}vL{Jk8+M)+5#)#j@)RUJoW0a?=uwA*m9ED8=_@wf6rY zIRk~_|1d<&P(u``wKuzcb~=XT#B!S`vrzF@zG0+F`ccJf_*+#qz5I9pRW0I(Q3sjj zAi?dRham%|35f)Wljr$5c71vKS$1W{%g~OtVX-#!r}%EIk=w#hahQQp;Td;%Mwi*w z`+5h37320D{&NwzttFUeTW>jr*4+&Q5?+%8I2{@HdpA=1aFKrCg4842d?QOJ5NnY1 z0`1kS*TN;!OxcyaY^Z$bFcfJcu4IkOH}oBzo!5^z99%V6WWUj8oYU54jI(1U)=;qT zw=_jBkfIf@0af$^GuVbqZAfdXznUHLmzNP)bhqb_ zozq6#D}aN|->6^9Y$%8I2}T-1OKI(cqUtr=vMq<$$xx^W2QdAo-?5K6XtiAOdgI|3 zmeHkZHYw!IEb7ISEd;tf2j8~De?7&nA$wVBAQgk~HvFyMxgtM1Mgt&qBH<>a8$5rA zy=#Mg@JDHR;oW7#&x(v z+iy5Km9I;Md@++9wrtuPvorYCDMH~7OawI)JPl2}NUm4Xwybo%5Vn6a%HKQZE#ci+ z@yDBs(Ak;gmiE;avFVQCt=AqI7_RT!V#~&>ltYyJ(NHnHeWi}9$U(u@^007=kNUEV z(xuXFTh$lVU@=3gK^lBZdzAm1IwiVD(J9h^?*+`zkW=LL=K&RXOd7gIQ0|EsE2=jL zDY+!zKWZi+``%1&OHWKce|7a&Ya2hL$S$D~hn69fEsS+klCtC+I%CbgW=%bKUFp|6kYtdhT(4t4w{F z8j!`+grSdnSNWl%sXjM|3XJ5KFx>+sK?LxDvCdQnAa>4f>Ww<8CT@~^g zAU6;SSj?J+W1pgl!?yb2`nsWdFyH5!5j`#qcoPtv@^l)~NEQB-CL%_D&HT=u9h+)! zt2yIoPLC7Y|Kq-8Br^2F;|!O_b?2`gFFkLZn+^W=EW~b|a?=E0X`))0{MUk7G#x8K zi(MA*R$EefCh5#2Q?+KSwJkJKtywQ_9u>CH+_FPNpc;J7I+Nm?iI z?76k^%+j%TITP*ij+(gT5(oT5p<=vny33-}VDzjJHhs(jKE4+TWWwOikXkDY$r-F?y}0*m*5^&s=$4WL;9cf|vNHS_hV zNTxx=T^UgC-Sw!`p)Mfi4Q`&dukxH1b(NVUpvLz6Z`!3nAmUwiN8z2RxMxq!j2u~E zQk_8!hIG|B-TXFUVF5oonIYsgj{L9qj48qONNJb>MN0Wo+-g*IA-ihoQT@oklSoR8 zF@x8D3KfIq73AC64}kk1S?5`&;YB1Me6~QrY9qJFwS?OeG`pd?`f}?~!1dOh^ZW_7 zzNCGZtx1~L%hM_WIn$yX05-Qlb#Gx4=8mJIRhCxT&JbyP540fqIM zf39Lf!B`=*-zgM$Wyyw#NZap-`4BpMxRw)Zjz2LW_-mK``!{fCha?~^(ps@hbRbKk z=W_s+lWsopG<@4f4;ZAj9(-X5tq$+zg}Zv8^;Q){55=twPoC5TbPYtFz>R*RYt5U} z)U5w*E*@mAgVLRx5eS&g`HBx3+sFjsit`&Ce2KfvWX}45DTfjfo`?bT5r|Gt`U`ED z;tCinyy|?eUBae1Y+^w231UpFa`XVWh8|pBXLLBhpyXWO&wF4fj^F%zi%)-+T(clT z^SMrCBxf_Dm5cZMO{Y25EEcX)7kZtrL*po5d)u<*kU@dSuNZO9%InP9RcaQ8@(z`+ z1~MF(TIxjiIGBxnlaa5>lLH6&oqRrw)LerglfK`Yh_(Rl4)k@@%zU2DHMwTwK{JKf z1MMfMKZghiJIqd-ZX|~ajvwn;d+O?OQ!;;N&1 z&j0{zaT#RpE(mBX$+k)kib`A*2DRn40!DS=v`k1{#xbzI%7vG@zr>BYelhI<*IP*U zdo(OeyS7j|YyGk_L;7k9mfbJXt61cZ>%o;o@0}RPP_7uCcn0huDzsP)Wzb-B1YSqa z8V6D497lVD4IrpK^|nMI3`Srm#?=eR%Exf(tFmWO-+?zET1=&w@hoHj!uxVBB)Cgp-XsIUHt@)m zFLez>=L%-By-LA@AOdPJ0X0gI#{0=2vlPjwDC_0fHreUwC)X4Gp}eMAYE7x6qL>o9 z&p#*p6GYrkd2t2lf0(>qNmCY2>AhdCwe_*FzNOZbQMqTc@Yx$M)!CW8RRMBF{L1%M zqm9{}ibq}91}4dQw}NP!huYXcvY+YKwj{=FvI;nD)owVu?)Oo0me3?s?-Nx}FJj5U zL`OA?Vhvmco*Ueylz26&o3eM+_uvQ)R#Nr~=h(>1=xsZ!ZL zquK49^6aDF@6*bKlU1+<-S70Br*nRLiPa>s#F>w(EOrNjPQNcn{E}z6GMSw`LFq(G z1@+R505P>aiJle-Yf7Th`yP1I&&^}|-!tzHC}O9cWN8o=R>D8m7puMi^?Iq3cE!@w zAtO({-KgsR!#h-tfWslWZx`8{CfR*u?g%S17~~(F>2##zVvHD)PK2h~`?# zAcI`~q|AfmwdHoSz|3A-8OsDOhL|TS+-egMDvBFzhMk3p?;miQ*iaM(2pti>K~p$a z;L+_ocNiZ9QQ!Z|b((@$2 z4O>CK!KVU*qT=l|h@Zet`MVD#P@iZemf-c>2H(CoWDktH=h_SZ(D-pkG`}9Y_lJ@^ z&vRh>tE#$jKM!385}EClJ%}HS1}OFzVBB6Q?Z?J#w#%z$uh(j9JR|}c7;iK$g>?3f z=kt#TK&9CJ&v7WY+_>o#383d-u1{>R+Ut7(l$3x<1wJ#)jW!0{RK6G(C)@0Yl>Cj2 z1C85F=c~rOaZ`6-JTCSBTXTc?{9m@_n>DU4HhZvtmVZ(F34BhNo;+G|#4Yw;G7vkq diff --git a/sub-packages/bionemo-evo2/examples/.gitignore b/sub-packages/bionemo-evo2/examples/.gitignore deleted file mode 100644 index 055a26eb49..0000000000 --- a/sub-packages/bionemo-evo2/examples/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# ignore temp files made by this tutorial -# chromosome files -*.fa -*.fa.gz - -# config files -*.yaml - -# directories created during these notebook runs. -nemo2_evo2_1b_8k/ -preprocessed_data/ -pretraining_demo/ -brca1_fasta_files/ -brca1/ diff --git a/sub-packages/bionemo-evo2/examples/configs/README.md b/sub-packages/bionemo-evo2/examples/configs/README.md deleted file mode 100644 index 0ec5d3022c..0000000000 --- a/sub-packages/bionemo-evo2/examples/configs/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## Example configs - -These configs are provided as examples to the user. Note that the files referenced in these configs can be downloaded from [OpenGenome2 dataset on Hugging Face](https://huggingface.co/datasets/arcinstitute/opengenome2). - -- `full_pretrain_shortphase_config.yaml` was used to test full scale pre-training runs of evo2 at the 8k context length. -- `full_pretrain_longphase_config.yaml` was used to test full scale context extension phase pre-training (starting from an 8k checkpoint and continuing to train at longer context lengths). -- `test_preproc_config.yaml` was used to test our preprocessing scripts to generate .bin/.idx files that are used for pre-training from fasta file inputs. -- `test_promotors_dataset_config.yaml` is a smaller test file that can be used for pre-training but is one of the smaller tests. diff --git a/sub-packages/bionemo-evo2/examples/configs/full_pretrain_longphase_config.yaml b/sub-packages/bionemo-evo2/examples/configs/full_pretrain_longphase_config.yaml deleted file mode 100644 index 93ac53f9c0..0000000000 --- a/sub-packages/bionemo-evo2/examples/configs/full_pretrain_longphase_config.yaml +++ /dev/null @@ -1,450 +0,0 @@ -- dataset_prefix: /workspace/bionemo2/data/metagenomics/pretraining_data_metagenomics/data_metagenomics_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/imgpr_pretraining_data/data_imgpr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/gtdbv220_longcontext_pretraining_data/data_gtdb_stitched_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.235 -- dataset_prefix: /workspace/bionemo2/data/imgvr_tagged/imgvr_tag_data/data_imgvr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.02 -- dataset_prefix: /workspace/bionemo2/data/ncrna/pretraining_data_ncrna/data_ncrna_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.01 -- dataset_prefix: /workspace/bionemo2/data/promoters/pretraining_data_promoters/data_promoters_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.0001 -- dataset_prefix: /workspace/bionemo2/data/organelle/pretraining_data_organelle/data_organelle_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.0025 -- dataset_prefix: /workspace/bionemo2/data/mrna/pretraining_data_mrna/data_mrna_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch1_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch2_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch3_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch4_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch5_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch6_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch7_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch8_animalia_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch1_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch2_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch3_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch4_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch5_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch6_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch7_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch8_plantae_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch1_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch2_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch3_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch4_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch5_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch6_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch7_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch8_fungi_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch1_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch2_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch3_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch4_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch5_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch6_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch7_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch8_protista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch1_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch2_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch3_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch4_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch5_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch6_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch7_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_train_batch8_chromista_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/metagenomics/pretraining_data_metagenomics/data_metagenomics_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/imgpr_pretraining_data/data_imgpr_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/gtdbv220_longcontext_pretraining_data/data_gtdb_stitched_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.235 -- dataset_prefix: /workspace/bionemo2/data/imgvr_tagged/imgvr_tag_data/data_imgvr_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.02 -- dataset_prefix: /workspace/bionemo2/data/ncrna/pretraining_data_ncrna/data_ncrna_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.01 -- dataset_prefix: /workspace/bionemo2/data/promoters/pretraining_data_promoters/data_promoters_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.0001 -- dataset_prefix: /workspace/bionemo2/data/organelle/pretraining_data_organelle/data_organelle_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.0025 -- dataset_prefix: /workspace/bionemo2/data/mrna/pretraining_data_mrna/data_mrna_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch1_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch2_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch3_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch4_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch5_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch6_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch7_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch8_animalia_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch1_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch2_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch3_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch4_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch5_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch6_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch7_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch8_plantae_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch1_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch2_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch3_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch4_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch5_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch6_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch7_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch8_fungi_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch1_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch2_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch3_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch4_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch5_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch6_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch7_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch8_protista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch1_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch2_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch3_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch4_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch5_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch6_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch7_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_valid_batch8_chromista_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/metagenomics/pretraining_data_metagenomics/data_metagenomics_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/imgpr_pretraining_data/data_imgpr_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/long_gtdb_v220/gtdbv220_longcontext_pretraining_data/data_gtdb_stitched_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.235 -- dataset_prefix: /workspace/bionemo2/data/imgvr_tagged/imgvr_tag_data/data_imgvr_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.02 -- dataset_prefix: /workspace/bionemo2/data/ncrna/pretraining_data_ncrna/data_ncrna_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.01 -- dataset_prefix: /workspace/bionemo2/data/promoters/pretraining_data_promoters/data_promoters_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.0001 -- dataset_prefix: /workspace/bionemo2/data/organelle/pretraining_data_organelle/data_organelle_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.0025 -- dataset_prefix: /workspace/bionemo2/data/mrna/pretraining_data_mrna/data_mrna_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.05 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch1_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch2_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch3_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch4_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch5_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch6_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch7_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch8_animalia_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.045 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch1_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch2_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch3_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch4_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch5_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch6_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch7_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch8_plantae_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.015 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch1_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch2_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch3_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch4_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch5_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch6_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch7_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch8_fungi_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch1_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch2_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch3_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch4_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch5_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch6_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch7_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch8_protista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch1_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch2_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch3_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch4_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch5_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch6_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch7_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 -- dataset_prefix: /workspace/bionemo2/data/eukaryote_ncbi/euk_ncbi_long_sequence/euk_ncbi_long_pretraining_data/data_test_batch8_chromista_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.001 diff --git a/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml b/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml deleted file mode 100644 index c4c0609394..0000000000 --- a/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml +++ /dev/null @@ -1,81 +0,0 @@ -- dataset_prefix: data/metagenomics/pretraining_data_metagenomics/data_metagenomics_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.18 -- dataset_prefix: data/gtdb_v220/gtdb_v220_imgpr_merged_data/data_gtdb_imgpr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.24 -- dataset_prefix: data/imgvr/pretraining_data_imgvr/data_imgvr_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.03 -- dataset_prefix: data/ncrna/pretraining_data_ncrna/data_ncrna_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.02 -- dataset_prefix: data/mrna/pretraining_data_mrna/data_mrna_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.35 -- dataset_prefix: data/promoters/pretraining_data_promoters/data_promoters_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.0003 -- dataset_prefix: data/organelle/pretraining_data_organelle/data_organelle_train_text_CharLevelTokenizer_document - dataset_split: train - dataset_weight: 0.005 -- dataset_prefix: data/metagenomics/pretraining_data_metagenomics/data_metagenomics_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.18 -- dataset_prefix: data/gtdb_v220/gtdb_v220_imgpr_merged_data/data_gtdb_imgpr_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.24 -- dataset_prefix: data/imgvr/pretraining_data_imgvr/data_imgvr_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.03 -- dataset_prefix: data/ncrna/pretraining_data_ncrna/data_ncrna_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.02 -- dataset_prefix: data/mrna/pretraining_data_mrna/data_mrna_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.35 -- dataset_prefix: data/promoters/pretraining_data_promoters/data_promoters_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.0003 -- dataset_prefix: data/organelle/pretraining_data_organelle/data_organelle_valid_text_CharLevelTokenizer_document - dataset_split: validation - dataset_weight: 0.005 -- dataset_prefix: data/metagenomics/pretraining_data_metagenomics/data_metagenomics_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.18 -- dataset_prefix: data/gtdb_v220/gtdb_v220_imgpr_merged_data/data_gtdb_imgpr_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.24 -- dataset_prefix: data/imgvr/pretraining_data_imgvr/data_imgvr_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.03 -- dataset_prefix: data/ncrna/pretraining_data_ncrna/data_ncrna_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.02 -- dataset_prefix: data/mrna/pretraining_data_mrna/data_mrna_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/stitched_transcripts/pretraining_data_stiched_mrna/data_mrna_stitch_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.09 -- dataset_prefix: data/euk_windows/windows_split/5kb_windows_lowercase/5kb_windows_lowercase_pretraining_data/windows_5kb_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.35 -- dataset_prefix: data/promoters/pretraining_data_promoters/data_promoters_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.0003 -- dataset_prefix: data/organelle/pretraining_data_organelle/data_organelle_test_text_CharLevelTokenizer_document - dataset_split: test - dataset_weight: 0.005 diff --git a/sub-packages/bionemo-evo2/examples/configs/test_preproc_config.yaml b/sub-packages/bionemo-evo2/examples/configs/test_preproc_config.yaml deleted file mode 100644 index 2d81a550c0..0000000000 --- a/sub-packages/bionemo-evo2/examples/configs/test_preproc_config.yaml +++ /dev/null @@ -1,52 +0,0 @@ -- datapaths: ["/workspace/bionemo2/data/mmseqs_results_rep_seq_distinct.fasta"] - output_dir: "/workspace/bionemo2/data" - output_prefix: promoters_ab_test_noodles_uint8_distinct - # Datasplit - train_split: 1.0 # because they do manual splits of first 1000 for validation, 2nd 1000 for test, and leftover for training - valid_split: 0.0 - test_split: 0.0 - # Overwrite existing binaries. Otherwise, skip already preprocessed datasets. - overwrite: True - # Raw Preprocessing Transforms - embed_reverse_complement: true - random_reverse_complement: 0.0 - random_lineage_dropout: 0.1 - transcribe: "back_transcribe" - force_uppercase: true - indexed_dataset_dtype: "uint8" - # Tokenizer Transforms - append_eod: true - enforce_sample_length: null - ftfy: false - # Tokenizer - tokenizer_type: "Byte-Level" - vocab_file: null - vocab_size: null - merges_file: null - tokenizer_model_name: null - pretrained_tokenizer_model: null - special_tokens: null - fast_hf_tokenizer: true - # Compute - workers: 1 - preproc_concurrency: 100000 - chunksize: 25 - # Filters - drop_empty_sequences: true - nnn_filter: true - # RNG - seed: 42 - # Evo2 Taxonomic Lineage Tags - taxonomy_data: - FP002272: - kingdom: KINGDOM - phylum: PHYLUM - clazz: CLASS - order: ORDER - family: FAMILY - genus: GENUS - species: SPECIES - FP000491: - kingdom: king - order: ord - family: fam diff --git a/sub-packages/bionemo-evo2/examples/configs/test_promotors_dataset_config.yaml b/sub-packages/bionemo-evo2/examples/configs/test_promotors_dataset_config.yaml deleted file mode 100644 index b73b8b214c..0000000000 --- a/sub-packages/bionemo-evo2/examples/configs/test_promotors_dataset_config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- dataset_prefix: /workspace/bionemo2/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_datasets/test_promoters_uint8_distinct_byte-level_train - dataset_split: train - dataset_weight: 1.0 -- dataset_prefix: /workspace/bionemo2/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_datasets/test_promoters_uint8_distinct_byte-level_val - dataset_split: validation - dataset_weight: 1.0 -- dataset_prefix: /workspace/bionemo2/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_datasets/test_promoters_uint8_distinct_byte-level_test - dataset_split: test - dataset_weight: 1.0 diff --git a/sub-packages/bionemo-evo2/examples/fine-tuning-tutorial.ipynb b/sub-packages/bionemo-evo2/examples/fine-tuning-tutorial.ipynb deleted file mode 100644 index ae2089298b..0000000000 --- a/sub-packages/bionemo-evo2/examples/fine-tuning-tutorial.ipynb +++ /dev/null @@ -1,687 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Fine-tuning tutorial for Evo2: Adapt the 1b evo2 checkpoint for your hardware\n", - "Deploy tutorial on brev.dev:\n", - "[![ Click here to deploy.](https://brev-assets.s3.us-west-1.amazonaws.com/nv-lb-dark.svg)](https://console.brev.dev/launchable/deploy?launchableID=env-2uGqijcTiNxv3V8LZJxXAa7KlKC)\n", - "\n", - "### Background and motivation\n", - "To motivate this tutorial, we have noticed that the public\n", - "evo2 checkpoint in hugging face for the 1b model is sensitive to `--fp8` status in training, the zero shot inference\n", - "task, as demonstrated in the zero shot BRCA-1 notebook, produces near random AUCs if you do not use `--fp8`. \n", - "If you want to infer or score new data, you need FP8 enabled since it was trained that way. Interestingly the `7b` checkpoint does not suffer from this\n", - "limitation and seems robust to FP8 being activated or not. The consequence of this is that if you have older GPUs with\n", - "a compute capability less than 8.9, which do not support FP8, then the output that you get from scoring sequences with\n", - "sensitive checkpoints may not be biologically meaningful. \n", - "\n", - "We plan on making\n", - "a `1b` parameter evo2 checkpoint available soon that has been fine-tuned to be robust to FP8 or BF16 inference in bionemo\n", - "on NGC, but in the meantime this notebook tutorial outlines the steps for fine-tuning. The only difference between this\n", - "notebook and what we did in production was to run these steps on more data on a slurm cluster to increase the global\n", - "batch size. That said, if you run this for enough steps to get loss on the 1b checkpoint to the 1.08 range, you should \n", - "have good luck with downstream sequence scoring tasks. \n", - "\n", - "### Requirements\n", - "\n", - "This is a tutorial demonstrating how you can fine-tune Evo2 on new data and/or hardware. The tutorial should take \n", - "slightly under 1 hour to run on an RTX A6000 in bf16 precision.\n", - "\n", - "As configured, this tutorial requires an NVIDIA GPU with approximately 45GB of ram. If you have multiple GPUs with less\n", - "memory, or you are having trouble with CUDA OOM at the training step below, try reducing the `--micro-batch-size` and/or\n", - "increasing the number of `--devices [int]` to match your setup and also setting `--tensor-parallel-size [int]` to\n", - "the number of devices. This should split up most of the model evenly between your devices, which will require much less\n", - "memory. When we train the 1b model in practice we typically have the micro batch size set to 8, and run without model \n", - "parallelism on available devices to achieve the largest possible global batch size." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "import os\n", - "\n", - "\n", - "# This variable should be used in the notebooks to run a subset of the model layers or a smaller model/dataset\n", - "FAST_CI_MODE: bool = os.environ.get(\"FAST_CI_MODE\", False)\n", - "# Clean up any prior runs\n", - "CLEANUP: bool = False\n", - "if CLEANUP:\n", - " !rm -rf preprocessed_data\n", - " !rm -rf preatraining_demo\n", - " !rm -rf pretraining_demo\n", - " !rm -rf training_data_config.yaml\n", - " !rm -rf preprocess_config.yaml\n", - " !rm -f chr20.fa.gz\n", - " !rm -f chr21.fa.gz\n", - " !rm -f chr22.fa.gz\n", - " !rm -f chr20_21_22.fa" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup training data\n", - "Evo2 uses megatron style datasets behind the scenes with advanced support for randomly indexing into documents, and\n", - "packing documents together into batches at scale. The file-formats backing these datasets is not a standard biological\n", - "format like fasta for representing genomes. First we show how you can start from a fasta file and preprocess them into\n", - "the required data format for downstream handling. High level the steps are as follows:\n", - "1. Acquire fasta files locally, ideally in some shared cluster storage\n", - "2. Write a config script defining how you want the processed files to be generated from the fasta files. This is where\n", - " you specify top level train/validation/test splitting decisions.\n", - "3. Call the actual `preprocess_evo2` script to generate the results.\n", - "\n", - "The next 4 cells go through this process on a set of smaller human chromosomes. At least 3 fasta records need to be present,\n", - "one for the train, validation, and test split." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "import os\n", - "\n", - "from bionemo.core.utils.subprocess_utils import run_subprocess_safely\n", - "\n", - "\n", - "concat_path = \"chr20_21_22.fa\"\n", - "if not os.path.exists(concat_path):\n", - " !wget https://hgdownload.soe.ucsc.edu/goldenpath/hg38/chromosomes/chr20.fa.gz\n", - " !wget https://hgdownload.soe.ucsc.edu/goldenpath/hg38/chromosomes/chr21.fa.gz\n", - " !wget https://hgdownload.soe.ucsc.edu/goldenpath/hg38/chromosomes/chr22.fa.gz\n", - " !zcat chr20.fa.gz > chr20.fa\n", - " !zcat chr21.fa.gz > chr21.fa\n", - " !zcat chr22.fa.gz > chr22.fa\n", - " !cat chr20.fa chr21.fa chr22.fa > chr20_21_22.fa" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "full_fasta_path = os.path.abspath(concat_path)\n", - "output_dir = os.path.abspath(\"preprocessed_data\")\n", - "output_yaml = f\"\"\"\n", - "- datapaths: [\"{full_fasta_path}\"]\n", - " output_dir: \"{output_dir}\"\n", - " output_prefix: chr20_21_22_uint8_distinct\n", - " train_split: 0.9\n", - " valid_split: 0.05\n", - " test_split: 0.05\n", - " overwrite: True\n", - " embed_reverse_complement: true\n", - " random_reverse_complement: 0.0\n", - " random_lineage_dropout: 0.0\n", - " include_sequence_id: false\n", - " transcribe: \"back_transcribe\"\n", - " force_uppercase: false\n", - " indexed_dataset_dtype: \"uint8\"\n", - " tokenizer_type: \"Byte-Level\"\n", - " vocab_file: null\n", - " vocab_size: null\n", - " merges_file: null\n", - " pretrained_tokenizer_model: null\n", - " special_tokens: null\n", - " fast_hf_tokenizer: true\n", - " append_eod: true\n", - " enforce_sample_length: null\n", - " ftfy: false\n", - " workers: 1\n", - " preproc_concurrency: 100000\n", - " chunksize: 25\n", - " drop_empty_sequences: true\n", - " nnn_filter: false # If you split your fasta on NNN (in human these are contigs), then you should set this to true.\n", - " seed: 12342 # Not relevant because we are not using random reverse complement or lineage dropout.\n", - "\"\"\"\n", - "with open(\"preprocess_config.yaml\", \"w\") as f:\n", - " print(output_yaml, file=f)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "!preprocess_evo2 --config preprocess_config.yaml" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "total 309M\n", - "drwxr-xr-x 3 ubuntu ubuntu 4.0K Mar 10 22:17 chr20_21_22_uint8_distinct_byte-level_test\n", - "-rw-r--r-- 1 ubuntu ubuntu 90M Mar 10 23:07 chr20_21_22_uint8_distinct_byte-level_test.bin\n", - "-rw-r--r-- 1 ubuntu ubuntu 82 Mar 10 23:07 chr20_21_22_uint8_distinct_byte-level_test.idx\n", - "drwxr-xr-x 3 ubuntu ubuntu 4.0K Mar 10 22:17 chr20_21_22_uint8_distinct_byte-level_train\n", - "-rw-r--r-- 1 ubuntu ubuntu 123M Mar 10 23:06 chr20_21_22_uint8_distinct_byte-level_train.bin\n", - "-rw-r--r-- 1 ubuntu ubuntu 82 Mar 10 23:07 chr20_21_22_uint8_distinct_byte-level_train.idx\n", - "drwxr-xr-x 3 ubuntu ubuntu 4.0K Mar 10 22:17 chr20_21_22_uint8_distinct_byte-level_val\n", - "-rw-r--r-- 1 ubuntu ubuntu 97M Mar 10 23:06 chr20_21_22_uint8_distinct_byte-level_val.bin\n", - "-rw-r--r-- 1 ubuntu ubuntu 82 Mar 10 23:07 chr20_21_22_uint8_distinct_byte-level_val.idx\n" - ] - } - ], - "source": [ - "# There should be a collection of bin/idx files created in the preprocessed_data directory.\n", - "!ls -lh preprocessed_data/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### [Optional] specify or convert initial checkpoint\n", - "The main difference between pre-training and fine-tuning is whether or not you decide to start training the model with\n", - "weights from a prior training run. For this tutorial we want to tune a `1b` checkpoint from hugging face that is known\n", - "(at the time of this writing) to be sensitive to GPU architecture so that it will work with your architecture. We have a\n", - "script that will download and convert a savanna format evo2 checkpoint from hugging face, and output that into a NeMo2\n", - "format checkpoint directory that can be used as the starting point for a fine-tuning run." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "if not os.path.exists(\"nemo2_evo2_1b_8k\"):\n", - " !evo2_convert_to_nemo2 \\\n", - " --model-path hf://arcinstitute/savanna_evo2_1b_base \\\n", - " --model-size 1b --output-dir nemo2_evo2_1b_8k" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Configure the training dataset\n", - "The next step is to configure your training dataset, in this case configuring the simple single-file example we output\n", - "two steps ago in this tutorial. " - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "\n", - "output_pfx = str(Path(os.path.abspath(\"preprocessed_data\")) / \"chr20_21_22_uint8_distinct_byte-level\")\n", - "output_yaml = f\"\"\"\n", - "- dataset_prefix: {output_pfx}_train\n", - " dataset_split: train\n", - " dataset_weight: 1.0\n", - "- dataset_prefix: {output_pfx}_val\n", - " dataset_split: validation\n", - " dataset_weight: 1.0\n", - "- dataset_prefix: {output_pfx}_test\n", - " dataset_split: test\n", - " dataset_weight: 1.0\n", - "\"\"\"\n", - "with open(\"training_data_config.yaml\", \"w\") as f:\n", - " print(output_yaml, file=f)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This next cell takes approximately 25 minutes to run on an RTX A6000 with `MAX_STEPS=100`. Each step takes about 9.5 seconds with the \n", - "following configuration, so you can budget a desired number of max steps to try." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "MAX_STEPS: int = 10 if FAST_CI_MODE else 100\n", - "val_check_interval = min(int(MAX_STEPS // 2), 50)\n", - "warmup_steps = min(MAX_STEPS, 100)\n", - "# For evo2 training and fine-tuning follow the same set of steps, so we use the same train_evo2 command.\n", - "# the big difference is the --ckpt-dir argument which points to a pre-existing checkpoint from some other training run.\n", - "\n", - "if FAST_CI_MODE:\n", - " model_subset_option = (\n", - " \"--num-layers 4 --hybrid-override-pattern SDH* --activation-checkpoint-recompute-num-layers 2\"\n", - " )\n", - "else:\n", - " # By default do 5 layers of activation checkpointing\n", - " model_subset_option = \"--activation-checkpoint-recompute-num-layers 5\"\n", - "train_cmd = f\"\"\"train_evo2 \\\n", - " -d training_data_config.yaml \\\n", - " --dataset-dir ./preprocessed_data \\\n", - " --result-dir pretraining_demo \\\n", - " --experiment-name evo2 \\\n", - " --model-size 1b \\\n", - " --devices 1 \\\n", - " --num-nodes 1 \\\n", - " --seq-length 8192 \\\n", - " --micro-batch-size 2 \\\n", - " --lr 0.000015 \\\n", - " --min-lr 0.0000149 \\\n", - " --warmup-steps {warmup_steps} \\\n", - " --grad-acc-batches 4 \\\n", - " --max-steps {MAX_STEPS} \\\n", - " --ckpt-dir nemo2_evo2_1b_8k \\\n", - " --clip-grad 250 \\\n", - " --wd 0.001 \\\n", - " --attention-dropout 0.01 \\\n", - " --hidden-dropout 0.01 \\\n", - " --val-check-interval {val_check_interval} \\\n", - " {model_subset_option} \\\n", - " --create-tensorboard-logger \\\n", - " --ckpt-async-save\"\"\"\n", - "\n", - "print(f\"Running command: {train_cmd}\")\n", - "\n", - "result = run_subprocess_safely(train_cmd)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "assert result[\"returncode\"] == 0, result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The plotting code is hidden in documentation for brevity. You can view the notebook on github, run it in jupyter-lab or launch the tutorial on brev.dev if you want to view the source." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-input", - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "import pandas as pd\n", - "import seaborn as sns\n", - "import tensorboard.backend.event_processing.event_accumulator as event_accumulator\n", - "\n", - "\n", - "# Function to extract data from TensorBoard event files and convert to DataFrame\n", - "def tensorboard_to_dataframe(event_file):\n", - " \"\"\"Given a TensorBoard event file, return a pandas DataFrame with the training metrics.\"\"\"\n", - " # Load the event file\n", - " ea = event_accumulator.EventAccumulator(\n", - " event_file,\n", - " size_guidance={\n", - " event_accumulator.SCALARS: 0, # 0 means load all\n", - " },\n", - " )\n", - " ea.Reload()\n", - "\n", - " # Get list of all available tags\n", - " tags = ea.Tags()[\"scalars\"]\n", - "\n", - " # First, find the union of all steps\n", - " all_steps = set()\n", - " for tag in tags:\n", - " events = ea.Scalars(tag)\n", - " steps = [event.step for event in events]\n", - " all_steps.update(steps)\n", - "\n", - " # Sort steps for proper ordering\n", - " all_steps = sorted(all_steps)\n", - "\n", - " # Initialize the dataframe with steps\n", - " df = pd.DataFrame({\"step\": all_steps})\n", - "\n", - " # Add each metric as a column\n", - " for tag in tags:\n", - " events = ea.Scalars(tag)\n", - " # Create a dictionary mapping steps to values\n", - " step_to_value = {event.step: event.value for event in events}\n", - " # Add the values to the dataframe, using NaN for missing steps\n", - " df[tag] = df[\"step\"].map(step_to_value)\n", - "\n", - " return df\n", - "\n", - "\n", - "# Example of creating a multi-metric plot with seaborn\n", - "def plot_multiple_training_metrics(df, metrics_to_plot, figsize=(15, 10)):\n", - " \"\"\"Given a pandas DataFrame with the training metrics, plot the metrics.\"\"\"\n", - " n = len(metrics_to_plot)\n", - " fig, axes = plt.subplots(n, 1, figsize=figsize, sharex=True)\n", - "\n", - " if n == 1: # Handle the case of a single plot\n", - " axes = [axes]\n", - "\n", - " sns.set_style(\"whitegrid\")\n", - "\n", - " for i, metric in enumerate(metrics_to_plot):\n", - " if metric in df.columns:\n", - " sns.lineplot(x=\"step\", y=metric, data=df, ax=axes[i], linewidth=2.5, errorbar=\"sd\")\n", - " axes[i].set_title(metric, fontsize=14)\n", - " axes[i].set_ylabel(\"Value\", fontsize=12)\n", - " axes[-1].set_xlabel(\"Steps\", fontsize=14)\n", - " plt.tight_layout()\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following figures show various training metrics per step.\n", - "* `reduced_train_loss` captures the training loss. On larger runs you want to see the loss drop to about 1.08 consistently\n", - " for the 1b checkpoint.\n", - "* `lr` shows the learning rate schedule for training. Typically we do a linear warmup schedule followed by an cosine decay.\n", - " this small notebook tutorial just goes through the initial warmup period.\n", - "* `grad_norm` shows the gradient norm of the full model. As the model fits the data better you should see this value drop\n", - " down below 1.0 consistently. \n", - "* `val_loss` shows the same kind of loss shown in `reduced_train_loss` but for a held-out set of validation samples. If you\n", - " ever train the model a very long time and see this start to go up while the training loss continues to drop that's a sign\n", - " of over-fitting. We have not yet seen this happen. Small fluctuations up and down are expected during training." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABdEAAAPdCAYAAABlRyFLAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4VHXWB/DvlPTee6+ENEpC771Jk2JBUUHsCrrq7lr21d3VtaFiQwULVkQQQUF6LwFCQhrpvU76pGfK+8eQO/dOJn0mUzif5/ExM5lMbkLmzr3nnt/38ORyuRyEEEIIIYQQQgghhBBCCOmGr+sNIIQQQgghhBBCCCGEEEL0FRXRCSGEEEIIIYQQQgghhJAeUBGdEEIIIYQQQgghhBBCCOkBFdEJIYQQQgghhBBCCCGEkB5QEZ0QQgghhBBCCCGEEEII6QEV0QkhhBBCCCGEEEIIIYSQHlARnRBCCCGEEEIIIYQQQgjpARXRCSGEEEIIIYQQQgghhJAeUBGdEEIIIYQQQgghhBBCCOkBFdEJIYQQQgjph7179yIsLAx79+7V9aYMSlhYGNatW6frzehTSUkJwsLC8OKLLw7L99u2bRvCwsJw+fLlYfl+hBBCCCHE8FARnRBCCCGEENKrmTNnYubMmbreDEIIIYQQQnRCqOsNIIQQQgghhJAubm5u+PPPP2FjY6PrTSGEEEIIIQQAFdEJIYQQQgghesTExARBQUG63gxCCCGEEEIYFOdCCCGEEEIM2uXLlxEWFoZt27YhMTERDz74IMaOHYuwsDAAgFwux549e7B27VqMHj0aMTExWLFiBfbs2aP2+err6/HKK69g4sSJiImJwcqVK3H06NE+v7eq3rK9a2pq8Oabb2LevHmIjo5GfHw8Vq1ahR07dnR77M2bN7F582ZMnjwZkZGRmDFjBl5//XXU1dWp3aZffvkFixcvRlRUFKZNm4a33noL7e3tPf7+etP1M5SWlqK0tBRhYWHMf10/c1+/fwDYs2cPHn30UcycORNRUVGIj4/HQw89hEuXLvX797Zu3TqEhYWhs7MT27Ztw8yZMxEZGYl58+bh+++/H9TP15cTJ05g3bp1GDNmDKKjo3HHHXfgq6++gkQi6fbYS5cuYcOGDcy/08SJE3H33Xfj559/5jwuLS0NTz31FKZPn47IyEiMHz8eK1euxKeffqqVn4EQQgghhAwddaITQgghhBCjcP36dWzfvh3jxo3D6tWrUV5eDrlcjueeew4HDx6Ev78/Fi9eDFNTU5w/fx7//Oc/kZubixdeeIF5jtbWVqxbtw5ZWVkYNWoU4uLiUF5ejs2bN2PSpEka2c68vDzcd999EIlEGDNmDGbPno3W1lZkZ2dj+/bteOihh5jHHj9+HM888wz4fD5mzZoFd3d35Obm4rvvvsO5c+ewe/du2NnZMY//+OOP8eGHH8LZ2RmrV6+GUCjEoUOHkJeXN6httbW1xRNPPIFvvvkGAHD//fczn4uPj+c8Vt3vv8trr72G8PBwTJgwAY6OjqisrMSxY8fwwAMPYNu2bZg9e3a/t+nZZ5/FjRs3MHXqVPD5fBw6dAivvfYaTExMsHr16kH9nOp89dVXePPNN2Fvb4/FixfD0tISJ06cwJtvvomrV6/io48+Ao/HAwCcOnUKjzzyCGxtbTFr1iy4uLigtrYWN2/exP79+7FmzRoAQEZGBtauXQuBQIBZs2bB09MTjY2NyM3Nxe7du/Hoo49qbPsJIYQQQojmUBGdEEIIIYQYhfPnz+O///0vVq5cydy3e/duHDx4ECtWrGAKrQDQ0dGBp556Cjt37sSiRYsQGRkJAPjyyy+RlZWF1atX4/XXX2eeZ+nSpdiwYYNGtvNvf/sbRCIRXn/99W5F34qKCubjuro6PP/883BwcMCPP/4ILy8v5nN//PEHtmzZgg8//BAvv/wyAKCwsBCffPIJ3NzcsG/fPjg5OQEAnnzySdx5552D2lZbW1s8+eST2LdvH/NcPVH3+2dvr4+PD+e+qqoqrFy5Em+//faAiugVFRU4ePAgrK2tAQD33XcflixZgp07d2qsiF5UVIR33nkHTk5O+PXXX+Hh4QEA2Lx5M9avX49jx45h//79WLZsGQDg119/hVwux7fffovw8HDOc7FXDOzfvx8dHR34+OOPu/3MPa0sIIQQQgghukdxLoQQQgghxCiMHDmyWwH3u+++g6WlJV599VWmgA4Apqam2Lx5MwBFgbfLb7/9BhMTEzz11FOc55kyZQomTJgw5G28ceMGUlNTERcXp7bg6+7uzny8f/9+NDU1YcuWLZwCOgAsWrQII0eO5Gz7gQMHIJFI8MADDzAFdACwtrYelg5ndb//LqoFdABwdXXFvHnzUFBQgNLS0n5/ny1btjAFdAAIDAzE6NGjkZ+fj6ampoFvuBrs32VXAR1Q/N0899xzAMBcWGAzMzPrdp+Dg0O3+8zNzfv1OEIIIYQQoh+oE50QQgghhBiFrm7yLq2trcjKyoKrqyu++OKLbo/vyrXuijppampCSUkJgoOD4eLi0u3xY8eOxcWLF4e0jTdu3ACAfkXDJCUlMV9TXFzc7fPt7e2oq6tDbW0tHB0dkZmZCQAYM2ZMt8eOHTt2CFvdP6q/f7bi4mJs374dly5dQmVlJTo6Ojifr6qq6nahYCDfx83NDQAgFos5BfbBysjIAACMGzeu2+dGjRoFMzMz3Lx5k7lv4cKFOHLkCNasWYPFixdjwoQJGDNmDBwdHTlfu2DBAnzzzTd44oknsGDBAkyaNAlxcXHM9hNCCCGEEP1ERXRCCCGEEGIUnJ2dObcbGxshl8tRWVmJjz76qMeva2lpAQCmi1m18NmF3d09WGKxGAD6VTRtaGgAgD6HZra2tnKeW912qv5utKGn71FYWIhVq1ahqakJ48aNw4wZM2BtbQ0+n4+EhAQkJCR0K6r3Rl2RXChUnNZIpdLBbbyKrr8Fdb9LHo8HZ2dnVFZWMvctWLAAJiYm+Prrr/HTTz/h+++/B4/Hw7hx4/Diiy9ixIgRAICYmBjs2rULn332GQ4ePIi9e/cCAKKiovDcc89h/PjxGtl+QgghhBCiWVREJ4QQQgghRqFryGMXKysrAIqYka5iZW+6irO1tbVqP19TU9PtPj5fkY7Y1dXOpi5axNbWFgA4Bdi+tufAgQMIDQ3t8/E2NjbMdqp2dVdXV/f59UOl+vvv8vXXX6OhoQFvvfUWli5dyvncK6+8goSEBK1v20B1/e7V/S7lcjmqq6u7FfNnz56N2bNno6mpCYmJiTh69Cj27NmDDRs24NChQ8y//dixY/Hll1+ira0NycnJOHnyJH744Qds2rQJBw8eVBt9QwghhBBCdIsy0QkhhBBCiFGytrZGUFAQ8vLy0NjY2K/He3t7o7CwECKRqNvnr1692u2+rsJoVVVVt8+lp6d3uy8qKgqAYghnX6KjowEoY136EhYWBgC4du1at8+p2/aB4PP5g+7yLioqAgDMmjWLc79cLsf169eHtF3a0tU5fvny5W6fS05ORnt7e7cBol2sra0xdepUvP7661i+fDmqq6uRnJzc7XHm5uZMp/qmTZvQ1tbWr78LQgghhBAy/KiITgghhBBCjNa6devQ2tqKl156iYltYSsuLkZJSQlze+nSpejs7MSHH37Iedy5c+fU5qEHBATAysoKJ06cQH19PXN/dXU1Pv30026Pj46ORlRUFK5cuYLdu3d3+zy7Q33lypWwsrLC1q1bkZ2d3e2xra2tnAL7kiVLIBAI8NVXX3G65puamtRuy0DY2dmhrq4O7e3tA/7ark5u1eL+559/jqysrCFtl7YsWbIEQqEQX3/9NeffpKOjA++88w4AYPny5cz9V65cUXuRoWtVQ9fA0evXr6v9HXb9e6kbTEoIIYQQQnSP4lwIIYQQQojRWrt2LZKTk7Fv3z4kJiZi4sSJcHV1RU1NDfLy8pCcnIx3330X3t7eAIANGzbg6NGj2L17N7KzsxEXF4fy8nIcPnwY06dPx6lTpzjPb2pqinXr1uGzzz7DihUrMHPmTDQ3N+PkyZOIj49nurDZ3nnnHaxbtw4vv/wy9u/fj9jYWLS3tyMnJwcZGRlM97OjoyPee+89PP3001i6dCmmTJmCwMBAdHR0oLS0FAkJCRg1ahR27NgBAPDz88Njjz2Gbdu24Y477sCCBQsgEAhw5MgRhIWFIT8/f9C/x/HjxyM1NRUbNmzA2LFjYWJigri4OMTFxfXr32Dv3r146qmnsGDBAtjb2yMpKQnp6elqf6f6wNfXF8899xzefPNN5ndpYWGBkydPIj8/H7NmzeJE0/z73/9GVVUVxowZAy8vL/B4PFy7dg03btxAbGwsM+z1iy++wOXLlxEXFwdvb2+YmpoiPT0dFy9ehI+PD+bMmaOrH5kQQgghhPSCiuiEEEIIIcRo8Xg8vPnmm5g6dSp++eUXnDp1Ci0tLXB0dISfnx9eeOEFTJgwgXm8paUldu3ahffeew9Hjx5Feno6goODsXXrVojFYrUF36effhomJibYs2cPfvrpJ3h5eeGxxx7DjBkz8Ndff3V7vL+/P/bt24ft27fj5MmT+Oabb2BlZQU/Pz88+uijnMdOnz4d+/btw44dO3Dx4kWcP38elpaWcHNzw4oVK3DHHXdwHv/EE0/Azc2NGXDp5OSEhQsX4umnn0ZMTMygf4+PPfYYGhsbcfLkSVy7dg1SqRRPPPFEv4roERER2LFjB95//30cOXIEAoEAo0aNwo8//ogTJ07oZREdAB544AH4+vri66+/xu+//47Ozk74+/vjxRdfxLp16zgZ8Js2bcKRI0eQlpaGc+fOQSgUwsvLC8899xzuvvtuCAQCAMBdd90FGxsbJCcnIyEhAXK5HJ6ennjkkUdw//33qx2aSgghhBBCdI8nl8vlut4IQgghhBBCCCGEEEIIIUQfUSY6IYQQQgghhBBCCCGEENIDKqITQgghhBBCCCGEEEIIIT2gTHRCCCGEEEJuM8eOHUNGRkafj4uPj8e4ceOGYYs0JyMjA8eOHevzcV5eXlixYsUwbBEhhBBCCDF0VEQnhBBCCCHkNnPs2DHs27evz8c98cQTBllE/+ijj/p8XHx8PBXRCSGEEEJIv9BgUUIIIYQQQgghhBBCCCGkB9SJriEymQwSiQR8Ph88Hk/Xm0MIIYQQQgghhBBCCCGkF3K5HDKZDEKhEHx+z+NDqYiuIRKJBCkpKbreDEIIIYQQQgghhBBCCCEDEBUVBVNT0x4/T0V0Dem6UhEVFQWBQKDjrRleUqkUKSkpt+XPTgihfQAhtzvaBxBy+6LXPyG3N9oHEHL7MqbXf9fP0lsXOkBFdI3pinARCAQG/8czWLfzz04IoX0AIbc72gcQcvui1z8htzfaBxBy+zKm139f8dy9l9gJIYQQQgghhBBCCCGEkNsYFdEJIYQQQgghhBBCCCGEkB5QEZ0QQgghhBBCCCGEEEII6QEV0QkhhBBCCCGEEEIIIYSQHtBgUWJUaprakV3VhJxb/xXUNMPfyQovLgiHuYlxDDoghBBCCCGEEEIIIYQMHyqiE4Mjl8tR2diO7CoxcqqaFEXzyibkiJpQ29yh5itE6JDK8N/lUcO+rYQQQgghhBBCCCGEEMNGRXRiEHJFTfjuUiGSiuuRU9kEcbtkQF+/+0oxHpseBG8HSy1tISGEEEIIIYQQQgghxBhREZ3oLblcjsv5tfjybB6OZVQN+OstTARo7ZQCACQyOT49lYv/aKEb/XJeDfYmlsLSTIAAZyv4OVkhwMkKnvbmEApo7AAhhBBCCCGEEEIIIYaMiuhkSORyOT4/m49DibUYXZqBuSPdEefvCJMhFI8lUhn+TK3AF2fykFLa0Ofj7SxMEOpmjWBXGwS7WiPE1RohbtZwszHHsk/O40aJ4jl2Xy3G4zOC4WlvMehtU5UnasK6nQnokMi6fc5EwIOPgyX8na3g52SJAGcr+Dsp/vNysICAz9PYdhBCCCGEEEIIIYQQQrSDiuhkSNLKGvG/w5kAgBtVhfj6QiFszYWYEe6K2SPcMC3MBbbmJv16rqZ2CX5KKMJX5wtQWt+q9jEx3naI8bFHiKuyaO5sbQoeT31B+ulZIXjom6sAgE6pHJ+dzsVrSyMH8ZOq98ahm2oL6F3fL6+6GXnVzd0+ZyLgIdDZGusm+OGecb49bj8hhBBCCCGEEEIIIUS3qIhOhsTKrPufUGObBPuTyrA/qQxCPg/jA50wJ8INs0a4qs0kL29oxdfnC/DD5SK1Wed8HrAgygMbpwQi1sd+QNs3M9wVIz1tkVbWCAD4KaEYj00Phrud+YCeR52LuTU4ml7J3ObxALm8f1/bKZUjs1KMl35LxYXcavxvZTRs+nmxgRBCCCGEEEIIIYQQMnyoiE6GJMDZCtvvHY2th1KQWdvZrYgskclxLqca53Kq8ervaRjhYYs5I1wxO8INfB4PO87l40ByGSSy7tVnS1MB1sT54MFJAfBxHNxAUB6Ph6dmhWDTrmsAgA6pDNvP5OLVJSMH9XxdZDI5/vNnOuv7AAefnAxXG3MU1DSjoLpZ8f+aFsXH1c1o7pCqfa4/Uypws1yMT+8dgzB3myFtFyGEEEIIIYQQQgghRLOoiE6GbPYIVzi3O8EnJAKns6pxLKMSZ7KqmaGebBnljcgob8SHJ3J6fD43WzOsnxiAu+N9YWc59O7suRFuGOFhi4xyRTf6D5eL8Oi0ILjaDr4bfd/1UqSWNjK3V43xxkhPOwCAi40Z4vwdOY+Xy+WobupgCuyZFWJ8e7EQHVJFFExedTOWfXwe/10RieWjvAe9XYQQQgghhBBCCCGEEM2iIjrRGCcrU6wa64NVY33Q1inFhdxqHE2vwvGMSlSJ2/v8+nB3G2ycEoglMZ4wFQ5+MKkqHo+Hp2YG49HvEwEA7RIZPj+Th5cWRwzq+Vo7pHj7r0zmtqWpAM/ODetzG1xszDgF9iUxnnjs+0Qm/721U4rNPyfjakEdXlkSATOhYFDbRwghgGJIc3JJPfycrOBsbabrzSGE6DmZTI5DqRXolMqwONoDwiEMiSeEEEIIIcTYGGUR/cqVK9ixYwdSU1MhEonw8ccfY/bs2T0+/urVq3jnnXeQn5+P1tZWeHp6Yu3atVi/fv3wbbSRMTcRYGa4G2aGu0Emi0RKaQOOZVTiaHolblaIOY+dEuKMh6cGYnKws9YGbM4b6Y4wNxtkViq+93eXC/HI9KBBFZa+OJuHisY25vamqUFwG0RXe4yPPQ4+ORmbdyfhVKaIuf/7y0VIKW3Ax3ePHnSMjbHplMrwU0IRrhfXY6yfI+aOdDP4omBjWydszIQ0VJZoRXFtCzZ8cxWZlWII+TzMG+mOdRP8MC7Akf7mCDFAEqkM5Q1t8Haw0MprWCaTY8vuJPyWVAYAyKoU4/n54Rr/PoQQQgghhBgqoyyit7S0ICwsDCtXrsQTTzzR5+MtLS1x7733IiwsDBYWFrh27RpeffVVWFhYYM2aNcOwxcaNz+chxsceMT72eHZuGIprW3AyswrtnTJMDnHGCA/bYdmGJ2cF44kfrgMA2jpl+OJMHv6+cMSAnqeqsQ2fnc5lbrvbmmPj1IBBb5eDlSl23h+Hj0/m4L1jWUym/I2SBizedg7vr4nFjHDXQT+/MbhRUo8Xfk1h4nj2Jpbipd9SEB/giIVRHpg30n1QFzF06Y1DGfj8TB6CXKzx4dpRiPDU/muA3D4Si+rw8LdXUd3UAUAxm+KPlHL8kVKOUDdrrBvvh+WjvWGtZjA0IUT/NLR2Yvkn55EnasbUUBd8es9otYPdh+LtI5lMAR0AvrtUiGdmh2p0ZSBbQ0snrM2FEPDpoh4hhBBCCDEMRnkGPW3aNEybNq3fj4+IiEBEhDLaw9vbG0ePHsXVq1cHXESXStUPjzRmXT9zf392Tzsz3BPv0+3rtW3uCFcEu1ghR9QMAPj2YiEemuwPJyvTfj/HO39looU1IHTLnBCYCXhD/hkemx6IGG9bPPNzMmpbOgEoTpof+PoKnpgRhKdmBt92J5otHRJsPZaNry8UQnXurEwOXMqrxaW8Wrz6expG+9pj/kh3zB/pBk97C91scD+V1rVi++k8AEBOVRNWfXYB2+6KxbRQFx1v2eANdB9AtOePlHL8bU8K2iUytZ/PqmzCy/vT8L/DN7E81gv3jPdFiKv1MG8lqW/pQE1zBwKdrYxiZQDtA7Tru4sFyLt17HImS4SHvrmCHfeNgbmJZmLffrhchE9P5XLua2yT4HxOFaaGaP696X+HM/H52XwEOFvik7tHIdSNhqobMnr9E3J7o30AIbcvY3r99/dn4MnlcnnfDzNcYWFhfca5qEpPT8fGjRvxzDPPYNWqVf36GqlUiqSkpEFuJRkuZ4ta8f7lBub2inAr3BPVv5O3gvpOPHe0Bl0vmAB7Id6a7QS+BgsgNS1SvHupHpk1nZz7Y9xM8fQ4e9iZ3R75pNcr2rH9WgNELeoLgb0JdjDBBG8zjPc2h7u1/l0n/CO7GTuTuJFGfB6wcZQt5gZRfA8ZHLlcjr03m/FDahPn/iAHIZwsBLha1o6eXk0jXUyxINgScZ5mEN5mF+t0Ia+uE6+fqUVjhxwLgy3x0ChaiUJ6JpfL8cyRGpQ0Sjj3j3I3xQsTHWAiGNpr9kpZG946X692/zA7wAKPjrUb0vOrqmqW4NE/q5nb9mZ8vDbDEV42+vd+TQghhBBCbi+xsbEQCHpuVKEjVpapU6eitrYWUqkUTzzxRL8L6GxRUVG9/sKNkVQqRUpKikH87FHRcvyeew551YqOrr/y2vCPlePgYNl7N7pcLse7X10F+4rT6ytHYXSgk8a3cUq8DP/7KxNfXyhk7kuu7MA/Tjdi29oYjPZ10Pj31BfVTe34z5838XtyXbfPLYpyxz8WhCO/phmHUyvxV1olRE3dB9bm1HUip64Tu1KaEOFhg0VRHlg/0U9jHXtD9dbVhG73yeTA9sRGSC0d8cK8MPANrJA5HPuA6qZ27E8qg6+jJWaEudDAO5YOiQwv7U/DryoF9Pkj3fDOndGwMBWgrL4VP14pxs9XSlDT3MF5XJqoA2miDrjZmGFtnA/WxnnD1cAikgxFU7sEWz6+gMYOxbvJkbxWvLtustYiM4aLIR0HGJqU0gaUNFZ2u/96RQe+zJDjo7tiYDLI/eGNkgZ88FsCp4BuZ2GChlbFhfzESgkio6I1ur/dfiYPgLKIXt8uw3/Oi/HDxnj4O1lp7PuQ4UOvf0Jub8OxD6hv6UBWZRPC3W1ga2Gile9BCBk4YzoG6PpZ+kJFdJbvv/8eLS0tSE5Oxrvvvgs/Pz8sXrx4QM8hEAgM/o9nsAzhZxcIgCdnBWPzz8kAgOYOKb65WIRn54b1+nUnblbiQm4Nc3v2CDdMDtFOVrmFQIB/3RGJsf6OeGHPDTTfio+paGjDXV8k4IO1o7Ao2kMr31tX5HI5fk0sxb//SEd9C7cL39POHP9eHomZ4W4AAC9HK0wOccX/LY1EYlEd/kwpx+HUCpQ3tHV73vRyMdLLxUgrb8Qn94wZlp+lN3XNHbhSUMvcdrExg0isvBDw5bkClNa3YeuaWL0p+g+EtvYBNU3tWPrxRWagr7eDBR6YFIA1cT63fa53fUsHNu26hsv5tZz7H50ehL/NVV6Q8XGyxvPzR+Dp2aE4nFqBXRcLcbWQe7GqUtyOD07k4ONTuRjhYYswdxuEu9sg3F3xsYuNYQ/z1TW5XI5Xf09HYU0Lc59EJkeFuAMBzsZRPDSE4wBDsz+5nHPb0lTAxMody6jCc3tS8MHaUQOOfCuqacHGXdfQ2qlcurpuvB/CPWzwz32pAIDalk5cK27AxCDnIf4USgduVHS7r1Lcjnt3XMHuTRNooLoBo9c/Ibc3be0DcqrEWPv5JVQ3dcBMyMfiaE/cPc4Ho30djCISjxBjcDsdA9ze1QcVPj6KnO6wsDBUV1dj27ZtAy6iE/23JNoTHxzLRsGtQsbX5wuwYXIg7CzVX9XulMrwnz8ymNtCPg9/Xxiu9e1cHO2JcHdbPPb9NWRVKjpMJTI5Xtx7AzPCXWBpahwv38KaZvxjXwrO59Rw7ufxgPsn+OO5eWFqC6UCPg9x/o6I83fEy4sikFxSj8OpFfgztRzFta2cx568KYJcLtf5gdbxm1WcfPf3VscgrawRbx66ydx3KLUC5Q2X8MV9Y6loCUXh8aXfUpkCOgCU1LXi9YPpeP9oFu4a54v7J/rDS8+z8LWhoLoZD359hVlZAyj2T/9ZHok1cb5qv8ZMKMDSWC8sjfVCWlkDvrtUiN+ul3EKaRKZHCmlDUgpbeB8rZOVKcI9bBDmZotwdxuEudsg1M0GFqa3xwHTUP2aWMoZ3NilsKbZaIro+qS0vhWXcmsgFPCwINLDILv9O6UyHEhW/s2M9rXHiwtG4P6dCcxr9uCNcpgK+Xjnzph+r2Kqa+7A+q8SmOHDgKI54F93jERdSwde/i2Vea86nFqhsSJ6TpWYGRIOAKYCPjqkij748oY2rP38En7eNB7eDlRIJ8ZNKpOjvKEV9S2daGztRMOt/+pZHze0sO/vQGOrBK42Zlg2ygurx/rQMSLRuHPZ1dh6LAs25kK8uSIa7na6X5XY0NKJDd9cZd6v2iUy/JpYgl8TSxDqZo274n2xYpR3j+fxhOiLTqkMFQ1tKG9oQ3lDK/g8HgJdrBDobE3nUgbGOKpwWiCTydDZ2dn3A4nBEQr4eHxGMP625wYAQNwuwc7z+dg8J1Tt439KKEKuSFmkune8H4JchmcQX7CrNX57fBJe/DUFv986kRa3SfB7UhnWxqsvkhmKTqkMX57Nx/vHsroNQQx3t8EbK6Iwqp/RNXw+D6N8HTDK1wEvLghHWlkjtp3Ixl9piiXwrZ1SNLVLYGOu2wOsv9KUHXi25kKMD3TClBAX+DhYYvPuJHTc+j0kFddj+Sfn8fUDcQh2vb0Hru1PKsOh1O6di4Ditfv5mTzsOJePRVEe2DAlANHe9oP+XjKZHEW1LahpboefkxWcrfX3BDUhvxYP77rKWblhYy7EZ/eOwaTg/hW8Rnra4Y0V0XhxwQj8eq0E310q5BTkVdU0d+B8Tg3nghePB/g7WSHMzQahbtYIvvX/AGcrmAnpgLBLnqgJr+xPVfu54toWtfeTgWlo7cTF3Bqcz6nG+Zxqzt/y174F+OzeMXAzsJiis9kiTqF7+WhvxAc44sv7x+KBr68w7xl7E0thJhTgv8sj+7xY3NYpxYZvr3J+PzE+9th2l6Kb3dnaDHH+jszqlsOpFfjXkpEaiRn7XaWr/tuH4vGv39Nws0IxJ6S0vhV3f3EZP28aDw+72+/CKLk95FQ14d4vL3OaA/qrobUTb/+VifePZWHeSHfcM84P4wMddd4kQgybTCbHthM5eP94Frqm5T3y3TX88siEQceFaYJEKsMTPyYyjW+qsiqb8H8H0vHmoZtYFOWBu8f5YowfdaeT4SeTyVHd3I6y+jaU17eirKENZfWtKG9oRVm94mNRUzt6mkbpZW+BIFdrBLlYIcjFWvGfqxVcrM3o71kPGWURvbm5GUVFRcztkpISZGRkwM7ODp6ennj33XdRWVmJt956C4AixsXDwwOBgYEAgCtXrmDnzp1Yt26dTrafaN+yUV7YdiIHRbeKFzvP5+OhKQGwVSmyNrZ1YuuxbOa2rbkQT88KGdZttTQV4q07o3EmW8QUzHZdKsSaOB+D3akmF9fjxb0pnI40ADAV8vH0rBA8PDVw0AdtPB4PkV52WBztyRTRAaBK3K7TInprhxRns0XM7Vkj3JifcVG0B9ztzPHwt1eZvOqSulas+OQCPls3RqNL6Q1JeUMrXmYVHoV8Hh6aHIB910tRxYrBkcrk+D25DL8nlyE+wBEbJgdg9gi3Xos+HRIZsqvESCtrRHpZI9LKGpBRLkZTu3J4n7O1KcLc9a/zet/1ErywJ4Xp4AQAH0cLfLV+cBdd7CxM8ODkAKyf6I8LuTU4mVmFzAoxblaIUa1m7gCbXA7kVzcjv7oZh9OU9wv4PPg5WSLU1QYhbtYIcbNBiKs1Al1uv+J6u0SKJ3+8zkRwqCrs4eSQ9K5dIkViYT3O51TjXE41bpTUc1b6sF0vqsfibefw2b2jMcbPcXg3dAj2JpYyH5sIeFhyK8ptUrAztt87Bg/vuopOqeKH/jGhCGZCPl5dEtHjsYFUJsfmn5NwjRXl5OtoiR33j+Xs1xZEujNF9CpxOxKL6jDWf2i/N7lcjoOsrvpQN2uMD3TCdxvG4a7PLyG7SrHirqi2BXd/cRk/PTze4C56ENIfrx9MH1QBna1TKsfBG+U4eKMcQS5WuGecH1aOpm7cobpSUIu3/8pEiKs1Xl0y0iBXMA1UXXMHNu9OwqlMEef+pOJ6bDuejS19RJ5q0/8O38TZbOUMDR9HCwQ6W+NMtohTjGyXyLD3ein2Xi9FiOut7vTRXrDvY+YZIUMhl8txKlOET0/l4npxHXM8Nhil9a0orW/FmSzu69DGXMgpqk8KckaMj/0Qt5wMlVEW0VNTU3Hfffcxt9944w0AwPLly/Hmm29CJBKhvFzZDSOTyfDee++hpKQEAoEAvr6+eO6557B27dph33YyPEwEfDw+Iwgv/KoYHCBuk+Cb8wV4UqVA/vHJHNSyhvA9OTMEDlbD/4ZsbiLA6rE++PxMHgAgrawRScX1/e7U1hcicTve/usmfrlW0u1K7PhAR/x3eRQCNdTl76qyzLWqsX3YVhCoczpLhLZOZdFzboQb5/Nj/Byw77FJWP91AvJurXxobJPgvh0JeHNlNO4c4z2s26trcrkcz++5AXGbsqj99KwQPDkrBM/ODcOB5DJ8cTaP6WDskpBfi4T8WgQ4W+HBSf64c4wPZHI5MsobkXarWJ5W1ojsyiZOEVqd6qYOVPfSed2VGR7mbgM/J6sBZxIPlFwux9Zj2fjweDbn/jF+Dvh83Rg4DbFzns/nYXKIMyaHKC/aVDe1MwX1zIpGZFaIkVkp5vwtqyOVyZEnakaeSE1x3dESIW7WiPVxwL3jfXW+QkTb3jx0E2llyguGk4KdIBK3MzFdRdSJ3i8ymRw3K8RM0Twhv5YTQ9QXkbgdaz+/hP+7IxJ3j9P/lVyNbZ04kq68EDwz3JVTEJgR7optd43G4z8kQnrr6sHXFwpgbiLAC/PD1BbS//NHBmdlj4OlCb55ML7bqpv5kR7414F05vah1IohF9HTyho53e93xHgCAJytzfD9xnFY+/kl5r0vv7oZd39xCT89PIEiK4hRyShvxGmVIokqCxMB7C1NYGdhAlsLxf/tLUxgYSrAycyqbpGFuaJmvHYwHf87fBNLYjxxzzhfxPrYG2yjja6U1rdi/c4ENHdIkZBfCwdLUzw3T3cF5OGQXFyPx75PRGl9q9rPf3QyB5NDXBAfMPwXn/cmluCLs/nMbStTAb68Lw5h7jYorm3B7qvF+PlKMaepBgCyq5qY18OiKA/cNc4XY6k7nWiQXC7H+ZwavHs0E9eL6rX6vcRtEiQV1yOpuOv7ZOKecb54eXGEVuanVTS04UpBLcYHOtHxVy94cnlPiwrIQEilUiQlJSE2Nva2CdTvYqg/e6dUhulvn2IOHOwsTHDuhRlMQae4tgWz3j3NFNp8HS1xdMtUnXVRFtY0Y9rbp5jbK0d7493VMTrZloHqkMjw9YV8fHg8h9PpCyh+7/9cOAKrxnpr9AAnv7oZM945xdz+YG0slsZ6aez5B2rL7iSmq9BUyMf1l+fASk3We0+DIp+aGYzNc0L18iBQG/uAXRcL8PJ+ZfU1xscevz4yAULWCgW5XI4LuTX48mweTmaqPym1MBGgTSLtcfmcpjhbm+KlRRFYNko7f2NtnVI8v+cGE+vUZUmMJ96+M3pYB9FKb8XeZFY0IqNcjMwKMbIqxSioae6xE7gn80e647N1uh/6qy3HMyrx0DdXmdtOVqY49PQU/PO3VBy9VSANd7fB4Wem6moTNULbxwHVTe24f2cC52JET0wEPIzxc8DkYGdEedvjf4duIl1l1dNd8b741x0Rer0q4ucrRcyFfgDYvm4M5o107/a4/UmleObnJM4+7pnZIXhmNjeibse5fLx+UFkYNxPy8cPG8Rjjp/5i/IpPziPx1smhl70Fzr0wY0jvP2/8mYHttxoBAODUc9Phz5oFUNnYhjXbL3KW7Ye6WePHjeOHfIGQaJehngfowuafk7DvunKFydt3RiPQxQp2Fqawu1Uw7637WSaT40y2CN9fLsLxjMoe33NHetrinnF+WBrrqfZYk3DJ5XKs/+oK5wKHtZkQ51+YaZTd/XK5HN9fLsJrB9I5DSUWJgKsGuuNby8WMvd52Vvgz6enwM6i59+DpvcBycX1WLX9IhNZBqh/D5RIZThxswo/JBThdJaox2P9ER62eHCSP5bEeA7r8TIxPpfyavDe0SwkqJynq2NtJoSnvTk87CxY/7eAp505POwt0CGRIVfUhNyqJsX/Rc3IFTX1uHKVLdzdBh/dPUpjsa9SmRw7z+XjnSOZaJfI4GFnjmNbpvXr/cOYjgH6+7PQuyq5bZncykb/xz7FSWpDaye+vViIx2cEA1AsIWMfWLy4IFynJ9x+TlaYGurCLPM5eKMMLy0aoZPO+IE4ebMKrx9MV5u1vCTGE68sjtDKlU7VTnSRuPdYCm2SSGU4nlHF3J4a4tzjm5K9pSl2PTQOL+69wVnK/+GJHBTWtuCtO6P1uvCjCfnVzfjPn8phvuYmfLy3OoZTQAcU0T2Tgp0xKdgZ2ZVi7Dyfj18TSzkH3f3pVDU34WOEhy1GetpipKcd3G3NkStqutV9rSgQq+b2q6pu6sAzPyehtrkDD04OGOBP3LuGlk5s/PYqEgpULqzMCsHm2SHDfmFFwOchwNkKAc5WmB/pwdzfLpEiT9SMrEoxcqqakFUpRnZlU6/F9cNpFUgva0SEp+0wbX3vEovqcDC5HGP9HbAg0n1Iv9uKhjY890sy5753VsfA1dYcfo7KwYlFtS16Mfi4S5W4DfYWpnq1jP2XqyW9FtBHeNhicrATJgU7Iz7AkTN4O97fES/8yr0A9WNCETIrGvHZvWPgqqeRIez9v72lCWaEuap93NJYL3RIZMycFwB4/1g2zE0EeGRaEADgUEo5/v2HsoDO4wEfrB3VYwEdABZEejBF9NL6VtwoaRj0EmKZTM4ZkBrjbccpoAOAm605ftg4Hms+v8h02mZVNuGeLy/jx43j9f5YR9vaJVKkljYgyMWaIgoMVEldC2c/NCXEGavG+gzoOfh8HqaHuWJ6mCvKG1rxU0IxfrpShMpG7jFuWlkj/rEvBf/9MwP3jPPFs3PD9Gqfrm/2JpZ2WyHQ1C7B1xcK8PTs4Y3xVKetU4pX9qciq7IJk4KdsGqMT7d9aH+1dEjwz32pnIs5ABDoYoVP7xmDMHcbtHVKsftqCQDF/v/l31LxwdrYYTlOqWpsw8O7rnKO5TfPDlV7EVko4GPuSHfMHemOkroW7L5SjJ+vFnd7PWSUN+Jve27gf4dv4u5xfrh3vC9cbfTzvZ/op2uFdXjvaCZndXIXEwEPy0d5IcbHHp63CuUe9ubdYoLVCXPnFsHlcjkqGtuQW9V8q7B+67+qZk4M2M0KMZZsO4//WzoSq8YMrRExp0qMv+25wemqL29og0jcThdhe0C/FXJbu3OMNz46kY2yBsVO6cuzeVg/0R83K8Q4eEMZ+TPWT1FQ0bV7x/kyRfR2iQx7rpVg49RAHW+VenmiJrx+MF1th/AID1u8uiQC4wOdtPb9rcyEsDIVoPnW1VzV5X7DKSG/Fg2tygGQcyN6/1syFfLx7qoY+DlaYeuxLOb+/UllSMivxbRQF0wNdcGkIGej65CRSGV4dncSJy7kxfnhfUbxhLjZ4I0V0Xh2bhi+u1SIXRcLmXx5NntLE6ZYrvi/LQKcrbtFscwIVxaspDI5CmuaWbEmikiTgprmbl0vrx1MR1O7BE/ODNbIyUbX8uKuvGBAcbD2v5XRWDFavyJ+zIQCjPCwxQgPbkG8q7ieXdWE7ErF748dU/Hp6Vxsu2vUcG8uh0wmxyencvDe0SzI5Io5GbNHuOKNFdGDusgnlcnxzM/XUcca/LphcgBTCPVzUhbRWzqkqG7q0PmyybZOKf6+NwX7rpfCxkyIjVMD8eDkAFjrwQF0ViU3tsnTzhyTQ5yZi2i9DQG2MBXgg7WxiPKywxuHMpgLOoldOenrxmC0nkWjFde2cFYjLYn27LUAtmqsD9okMrz8m3KGxJuHbsJMyEeUlx2eVulUf2VxBOb3cUwzP9KdczHzUGrFoIvoiUV1zHEWoLiAro6nvQV+2DAeaz+/xKwSvFkhxr07LuOHDeON7v2uPzokMuy+WoyPT+agvKENFiYCvLokwqDn4tyudp4rYKKXAGDT1KAhPZ+HnQU2zwnFkzODcSyjCt9fLuTkRwOKQvD2M3nwdrTEuvF+Q/p+xkokbsdrrFU6bDvP5+PByf46j53bm1jKFLWTiuvx8clcxPs74s6x3lgU5dHvQleuqAmPfneNiZPrsijKA/+7M5p5v391yUgk5NcyK4N+Ty7D9DAXrR93tkuk2PTdNU4RfEGkO56cGdzn13o7WGLL3DA8NSsEJzNF+OFyIU6pdKdXN3Xgw+PZ+PRUDpbEeOLBSQGI9LLTxo+iMXK5HCmlDfgzpQKppQ1wtjZljrVHeNjq/NjR2N0oqcd7R7O6zQwAFE1FK0d74cmZIfBhNccMBY/Hg4edBTzsLDjxmgBwLrsaz/ycxMyrar21SvlCTjX+vTxqwMfrEqkMX5zNx9ZjWZyLVgCwLNaTc65CuHR/ZkSIDpkK+Xh0RjBz4lnXouhGP5JewXncPxeN0IuTlZnhrvC0M2dORr+/XIiHJgf0OkBxuInbOrHtRA6+Op/fbcCGg6UJnp0bhrvifbWeHw0ArrbmyL/VAV81xCFOQ8EuGPJ5wKwR6jsK2Xg8Hp6eHQJfJwvOEMnyhjb8dKUYP10pBp+niDmZGqIoqsd423Xr1u6vtk4pCmqaUVDdAhOBotNpOP6NVG0/k8d0PwKK/Oj7Jvj3++udrc3wzOxQPDItCL8nlSG5pB7O1maKgrmXHTztzAf8WhbweQh0sUagizUWRCk7r1s7pMiuEmN/Uhl2nFPmNr53NAtN7RL8fUH4kPYbNysasX7nFU7ngZ2FCT5fNwbjtHgBStPUFdfv25nAXBD840YZnp0TOuiuqqFqaOnE5t1JOHGzinP/sYwqXH//DN5cGY05KjMM+vLJyRxcylMWQaO87PD8/HDmturBdlFts05PhKqb2rHx26tMF4q4XYL3jmbh6wsFeGx6EO4d76fTJdAFNcqVTBMCnfDDxnEDem3xeDxsnBqIcA8bPPnjdWZId5W4HWu3X8JrS0dibbz+5KTvT+J2CC4f3XdM1LrxfmjvlOLffygL3/93IB1WpgLOydGGyQF4YFLfq2V8HC0R5WWHlNIGAMDh1PIes9b7wu6+5fGAxdHqi+hd3/fHWx3p5beOddLKGnHfzsvYtWFcvzq7huJstghZlU3wsDOHr6MlfBwsdVK875TKsDexBB8ez+HkFbd2SvHi3hSczKzCmyuib/sOfUNR39KBn64UMbdHetpiUrBm3seFAj7mR7pjfqQ7Cmua8UNCEX65WsKZ6XQ6s4qK6D149fdUTqNLjLcdkksU+72G1k7sulSIx6b3XcTVpsv53btfEwpqkVBQi3/9noaFUR5YNcYb8QGOPe6j/0wpx/N7bnBiNYV8Hv6+cAQenOTP+TorMyE+WDsKKz+9AMmtCz+v7E/DWD9H+GqpsCaXy/HSvlRON2y4uw3eWRUzoPNcoYCPORFumBPhhpK6Fuy6WIgfE4rQyJqx1CmVY29iKfYmliI+wBEPTgrAnAg3nZz3qCOTyZFUUo9DKeX4M6WiW2b9b0nK91RnazOM8LBBBKuwHuhiBZNBng8ShbSyBmw9mo1jGZXdPsfnActivfDUrJBhPXeZHOKMQ09PwZbdSZwLpr8llSG5pAHb7hrV74tCmRViPL8nmdnXdbE2E+Kfi0ZgLV2o7xUV0cltb/VYb3x8IocpVL13NJNT/L0jxlNvBngKBXzcFe+Ld48qupMLalpwLqcaU0NddLxlijf8PYkleOtwJnOFtIuAz8O68X7YPDt0WE9GXWzMlEV0HXWiy+VyHElTXpQZ6+84oHzX5aO84WlngYd3XeMc5AOATA5cL6rH9aJ6fHA8G7bmQkwKdsbUW53qXvYWnMe3dUpRVNuC/OpmFNY0I7+6BQXVzSioaWaKFV1mhLng8/vGDutBWHpZI95ndd7bmAnx9p0DO3juYm4iwOo4H6yOG9hS6YGwMBUg2tse0d72cLExw5uHbjKf+/xMHsRtEvx7WeSgDsov5tbg4W+vQsw62fGyt8A3D8ZpLP9Olx6bHsQU0WVyxcWTN1ZEDft2pJY24NHvr3Ub1NalprkDG7+9irVxPnh5cUS/ur2uFtTifdbwVytTAbbdNYrTSeznxD3oLqxpwRi/4R/cBSiWcT7w9RW1v4Pa5g78+48MfHk2H0/OCsbqsT46OTErZOVkB7pYDfrAfkqIC35/fDIe3nWVGUrcIZXhxb0pSC1rwCuLR+o88kAul3OiXAKcrTCqnx3gG6YEol0iw9t/ZTL3NbOyNRdFeeAfC0f0e1vmR7ozRfSCmhbcrBB3W2nSF4lUhj9TlCv74vwd4W7X+zJ6XydLRbTL9ovMe3dySQPW70zAtw+N09rqiMOpFXjku2vd7rc1F8LXSVFQ93W0hM+t/3wdLeFlb6HRvxmJVIb9SWX48EQ25+9e1V9plUgqPoP3VsdiUrBzj48j+uG7S4WcnNtN04K0UqDwc7LC3xeMwJY5oXj8+0QcuxUleKWgDjKZXK+abvTB4VRFkbJLhIctfnx4PGa9e5o5Lv7ybD7WT/TnxIQNt679sDotHVLsuVaCPddK4OdkiTtHe2PlGG943joH6JTK8Oahm5xmDwBwszXDx3eP7nFodIyPPTbPCWXeT5raJXj65+v4ZdOEQTfs9ObrCwX45VoJc9ve0gRf3Dd2SHES3g6W+PvCEXhqVgj2Jpbgq/MF3eJFE/JrkZBfC28HC6yf6I/VcT5av1irjkwmx7WiOvyZUo7DqRXdzst6Ut3UjrPZ7ZyiqqmAjxA3a6aoHu5ug1A3G+pa74fsSjG2Hsvi7BfYFkd74JnZITo7F3OxMcM3D8Rj+5k8vHMkk1ndlF/djBWfXMA/Fobj/on+Pb6/dEpl2H46Fx8cz+7W7Dg11AVvrIjqVj8g3VERndz2zIQCPDItEP86oFjKx96hmAr5eH6+fk1mXxPvgw+OZzOdAd9dKtR5ET2xqA7/93tat6uZADA52BmvLIlAqNvwv9mwc9F1VURPLW3kLGOfO8COVgAYF+iE489Ow77EUpzJFuFyfm23ZVcA0NgmwaHUChxKVbzxB7pYIdbHHpWNbSiobkFZQ2u/B2yezBThhV9v4N1VMcNyJbpdIsWW3Umc19+/7hjJnATou0emBcHaTIiX96cyv+MfE4rQ1C7Be6tjBlR4PJBchmd3J3NmMozwsMXXD8TBTU/zmwdqXIAjRvvaM6sOfr1Wgmdmhwzrz/fzlSK8vD+N81oyN+Fj8+xQ7LteyhRZAeCnK8W4kFuDrWtie82Rrm/pwNM/JXGW7P9neVS3ThUvewvweWCiRYpqey6WadOFnGps+u4axKwOLUcrU5gL+Zz9VkVjG/65LxWfn8nD5tmhWBLjOWwdWw2tnZyOSn+noXX9+DpZYu9jE/G3PTfwByu27btLRcisEOPje0brNCs1uaSBc5K/fJTXgPbBj88IRlunFNtO5HDuj/N3wLurB3ZRckGkO6cgfyilfMBF9It5NahuUv773dFDlIuqAGcr/LBREe3SdWE+sagem3ZdxXcPDWwlQn8dSi1Xe39jmwSppY1ILe2ey8/jAR625ghytcYYPwfE+zsi1td+wAU3qUyOgzfK8MHxbOSJus+QsbMwwdwIN/yWVMq8T1Y2tuPeHZfx8JRAbJkbavTzUgxVW6cUX18oYG57O1hgoZYjIs2EAswId2WK6A2tnciqEiPcXTvzRwyxQN/Q0skZYC/g8/DWndGwNBXikWlBePV3xedqmzvww+UibJiim/jMpnYJ0xAEACtGecHaXIj9SWXdmmsKa1rw7tEsvHcsC5ODnbE01gs/JRThamEd53ETg5zw4V2jeo1CAxTHtmeyREy82PWienx4Igdb5oT2+nUDdT6nmrOKSsDn4ZO7R2ssIsPKTIh1E/xxzzg/nM4SYef5/G7RRyV1rfj3HxnYejQLa+N98fz8MK3vU6UyOa4U1OJQSjkOpVb0ea4a6maNhtbObpnvqjqkMqSVNXabJeNkZYqwWwX1cHcbhLrbIMzNhnKvAdQ1d2DrsSx8d6lQ7RyneSPdsHlOqNb2oQPB5/Pw6PQgxAc44qkfrzMrFTqkMvzrQDrO59bg7Tuju81PyShvxHO/JHf7u7AxF+LlxRFDzla/ndArhhAAa+N98cmp3G5vXg9NDoC3g37lQbnamGNepDtz8n8soxJl9a1aLTY2t0tQWt+KkroWlNS13vpP+XGtmuxpH0cLvLQoAnMj3HS2Q2YXQnQV56IaDaRuME5/OFubYePUQGycGoi2Tiku59fiTJaIWXquTp6oWe3JeH/tTSyFu605J4ZCW7YezeYULedGuGFFPyIM9Mm94/1gYy7Elt3JTBH1QHIZWtol+Pie0f2Kw/jybB7nRAJQnOx8tm6MTjpjtIXH4+Gx6cHY8O1VAIoDvx3n8gfUJTtYXQO6uvJFu/g7WeLTe8dghIct1k/yx3tHsvD52TzmokhRbQtWfXYBj88IxlOzQrpdGJHL5Xjh1xucZbcrR3tj2ajuf8emQj487CyYxxb10nGqLbuvFuMfe1OYC7KA4sLbV+vj4G5njh8vF+GjkzmcAmhhTQue+TkJn57KxZa5ocOyf1f93Wgio9HSVIiP7hqFKC87vHX4JnPCdKWgDndsO49td4/CGF8HnRSF9iZy/y6Xq/n76cuWOaFo65Tii7OKzsNAFyt8cd/YAUfyBLpYI8zNBpm3MukPpVZgy9yBNRawB4oK+LwBzZcJdrXGDxvHYe3nl5jjjPM5NUjIr9VKpNUNNY0AfZHLgbKGNpQ1tDFFGSGfh5Fedoj3d0CcvyPG+jvCsYfYFZlMjsNpFXj/WJba93IbMyE2TAnEA5P9YWtugvsn+uPpn64j99Z7u/zWSp5zOdX4YO0oBLv2Pj+kJw0tnThwowx7E0tQVNuChVEeeGJGsN4O3jUkvyaWcPajG6cEaqWTV1W8SofxlfxajReA5HI5tuxOxuHUCtw9zhcvL47Q6PNr07//SIeIdd63aWogE4WwJs4HH53MYT6//UyezmLN0kobOA0wC6I8MCfCDf9YOALHMirxy9USnM0WcQp/cjlwNru6W6EYAB6fEYQtc8L6dSFcwOdh65pYzH//DBOH8tGJbEwJcUZcDx3sA1VU04LHf0jkNB+8sjgCE7WwwobP52FGuCtmhLsiq1KMr84XYG9iCdpZzRTNHVLsOJcPkbgdH2ppXk9JXQs+O52Lw6kVnH2DOrE+9lgY5Y4FkR7MRYXa5g5klDcio7wR6eWNyCgXI6dK3K2zWFVNcwcu5NbgQi43HsjH0QJhbjZMgT3Wx77bikljJZHK8P3lIrx3NKvbRSkAmBXuis1zQvUyO3+MnwP+fGoKXvj1Bg6zVr0fTa/Ewg/O4sO7RmGsvyM6JDJ8cioHH53I4RzzA4qo4P8uj+pzhSDhoiI6IVBEP2yaFoTXWYNlnKxM8dj0oQ390ZZ7x/kxRXSZHPgpoWjAJ7bqtEukOJBcjqxKcZ9F8p5Ymgrw+IxgPDQ5QKcZugDgaqvssGhsk6CtUzrs2/QX601thIetRroqzE0EmBbqgmm3ViCUN7TibFY1TmeLcC67Wu1BgDpOVqbwd7aCv5MVApwt4e9shU6pDM/vucEciH1yKhdutua4f6L/kLe7J1cLavH5mVzOdv13RZRBXg1fGusFS1MhHv8hkelwPn6zCg98dQVf3D+2xxgCmUyO//6ZgS9VltsujfXE23fG6DxiQhtmhrtyCnTfXyrE49ODtRr5VFTTgke+u4b0cm4XxtwIN7yzOoa5UGEmFODvC0dgRrgrnt2dzBS7ZXJg24kcnMoUYeuaGM5yzu8uF+GvNGV2YqCzFV5bOrLHbfFzsmSet3AYO9FlMjneO5qFj05yO5XHBzpi+71jmd//+kkBWB3ng6/OF2D76VxOnmhmpRibdl1DjLcdnpsXhgkB2os8Y+ehA92jcAaLx+PhkWlBGOFhiyd/SGR+vorGNqz67CJMBXx4O1rAz9ESfk5W8HG0vPWxIspDG+8lHRIZp+gc7+84qPcMHo+HfywcgTF+DiiqbcGasb6Dfl0tiHJnXqPZVU3IqWrqd6G2XSJlVkYBipVpA4kzA4BQNxt8+2A8Fm87x9x3rahO40X0htZOTrfnveN9MTXEBUW1imOhotoWFNW2oLi2hVNwUUcikyO5uB7JxfXMhYxgV2vE+TsiPsABY/0c4e1ggaPpldh6LBsZ5d073K1MBXhwcgA2TA7k/NtFetnh4JNT8J8/0/HdJWXGdlpZIxZvO4uXFkXgnnG+/Xr/lMrkOJstwp5rJTiSXslZlfPtxULsvlqMByYF4JGpQbflUFdNkMrk+OJMHnPbwdIEq8YOz1DwYFdrOFqZMsfwCQV1WDeAGTP9cSpLhH3XFfFTO87lY2msJ6K97TX6PbThTJaIEx0S6GKFp2aFMLfNTQTYNDWQaWoQidvx85VirR4L9yRVpWs06lYxz9xEgMXRnlgc7YmKhjb8mqiIdMmvVt88Y2suxNY1sZg1YmArYj3tLfDGimg8/kMiAMVx0DM/JeHQM1OG3NzR1C7Bxm+vMnNKAGDNWB/cN0H7+f2hbjZ4Y0UU/jYvDD8mFOHbiwWcDu/fk8vwMOvCiqa0S6S498vLzNBWdcb6OWBBlAfmR7qrjdZwtDJlBqt36ZDIkCtqws0KRVG9q8jeV5EeAIprW1Fc28qsXAGAe8b54v/uGDksF/x05Vx2NV47mKb2AvaUEGdsmROqN5G+PbGzNMGn947Gd5cK8fofGcz7eFlDG9Z8fgkbpwTidJao23GGnYUJXl0SMeDVjkSBiuiE3HJ3vC92nM1jlrA/Ny9M59PYezI+0BHBrtbIqVLs9H+8Uown1XRGDoRcLsdTP17nFIEGavkoL7wwP1xvrma6qmS/icTtGlsa2B/51c2cN+bBRLn0h4edBZP/LZUpprifyRLhTJYIJXWt8LA3R4CTlaJg7myFACcr+Dlb9njwy+fx8PRPScztfx1Ig4uNGRayhmpqSnO7BM/+kszpoHljRVSfS0z12ZwIN3y1Pg4bv73KZKBezKvBPV9exjcPxHVbXtcukeK5X25wimeAoivqhfnhBrdEur+6liM+83MSAEX3zzcXCzgnspp0LL0Sm3cncaJL+DzghfnheHhqoNqDyPGBTjj0zBT8a38a9l5X5lSnlDZg0Yfn8I+FI3DfBD9kVoo5F2FNBXx8eNeoXpfI+jlZMt1AwxXn0tYpxd/2dP9bWznaG2+siOp2scbSVIjHZwTj3vF++OJMHnaez+fk+iaXNGDdjgSMC3DEnUE8xGphmwtViui+Gt6HTwt1wYEnJ+Phb68xxWJAsTpCuZpH1O3r3GzN4OdoBV8nRTb29DCXIRePTmeJUMcqJvRnoGhPeDwe5kcOfZ+9INID7x9TZvwfTi3HEzP79xo9k1XNeb31N8pFVaSXHbwdLFBSp7jolFxcP6jn6U2qSubwrHA3zAjvPgRcLpdDJG5HcV1XUb0VBTXNSC6uZ7rD1cm5dQHixwRF4dvGXMj53XSxMBHgvol+2DQ1qMfudQtTAf69LArTQ13x/K83mCJpW6cML/2WilOZIvxvZVSPFyxyqsTYc60U+66X9BoL0NYpw6encvH9pUJsmhaEBybpNhfaEB1Jq+AUy+6bMHy/Qx6Ph7F+Dsxw+yv5tZDL5RotmHTNNulyNL1S74voze0S/H1vCnObxwPeWhnd7cLo3eMUq5S7Xl+fnc7F2nifYY9NYu+bnK3N4Gbb/XXtbmeOx2cE47HpQbhWWIdfrpbg4I0yZiZGpJctPr1nzKDPgRZFe+Bkpjf23LrwUFrfipd/S8UHawffqS2TyfHs7iTO++4YPwe8tmzksBb1HK1M8fiMYDw8NRD7Ekvx/K83mM9tPZqFHevjNPr9frxc1K2AzuMpLpovjPLAvJHugzqPNhXymQz05ax/FpG4HZkVYmRWipFZ0YjMyiZkVYjR2int+ckAfH+5CDVNHfjgrlijiworrGnGv//IwNH07jWPIBcrvLw4AtPDur//6ysej4d1E/wx2s8BT/5wnYkElMrk+Ox0brfHzx7hhv8uj6SVZkNAR0KE3GJhKsBPD0/AzvP5iPCw1epAwqHi8Xi4d5wvk+MuErfjSFolFkUP/oT5l6sl/S6gW5gI4ONoAW8HS3g7WMDbwQKTgp0x0lO/ljqp5tpWiduGtYjOHigKDD7KZSAEfB5ifewR62M/6GLk0lgvVDW24z9/Kjpw5Le6ThytTDFew91/bxzK4AxPWznaG3OH4fekbZOCnbHroXF44KsEpsM1ubgeaz+/hG8fimf+NhvbOrHp22u4mKdcWsnjAS8visCDkwN0su3DaXG0B949mskMtfzqfD42TAnQaJFBIpXhvaNZ+OQU90DS2doM2+4ahQlBvf9N25qb4L1b3Vv//C2F6Zhql8jw6u9pOJZRiYqGNk4X54sLwvvsXmLvi0TidrR0SLRaXKlpasfDu67hmko26rNzQvHEzOBeT1rtLEzw3Lww3D/RH5+cysH3l4o4mf2X82txvRCYMLoN3o6aXQLM3j+425rDwlTzJ3N+TlbY+9hEPL/nBv5IUZ+LraqysR2Vje1IKFBkxb5/LAtf3j8WM8MHf7GUHeViKuRr5cLlQIW6WSPQ2Yo5KfszpaLfRfTfWRdrTIV8zB05+N9NjI89U0QfTOxKX1SfM8pb/euXx+PB1dYcrrbm3YYB1zS142phHa7k1+JKQS1Syxo5EQVsqgV0MyEf9473wyPTgvo9/G12hBsOe0/Bs78kc2IbjmVUYv4H9Xh3VQwzM6crrmXPtRIk9XIRwtnaFA6WpsiuUjYANLZJ8PZfmfjqfAGemhWMtXG+Rrk6StPkcm4Bw9yEPywdtmzxAY5MEb2isQ0lda0aPQ6+kMONhTiaXolnNbAyVpve/iuTE7t233g/tcM1LU2F2DAlAG8dVsyFKG9ow6/XSnH3ON9h21aAO1Q0ysu21/dqHo+HsbcipF69IwKnMkXolMowP9J9yIXQf90xElcKapn35P1JZZge5oLlowa3suLDE9mc8053W3N8eu9onRVsTQR8rI7zwdGMSqa4evxmFZKK6xHbz+HefWlql3DmldhbmuDZuWGYN9JNa7NYXGzM4GJjhskhyq51mUyO4roWRXG9QoyblWJkVYiRV93Mec86nFaBDd9cxWf3jjGK3PSmdgk+OpGDnefyOcewgOLC9ubZoVg3wW9ITYm6NNLTDgeenIyX96dyBtR3sbc0wf/dMRJ3xHhS9/kQGf6rgRAN8nWyxL/u6Hn5vT5ZMcYb/zucyVxJ/u5S4aCL6MW1Lfi/A9zhOoHOVrcK5JbM/7sK5w6WJgax83VV6dao6mMQi6YdYV3h9nawwAgP3UzyHoyNUwNR0diGHbfiRTqkMmz89ip+eWSCxjI1T2eJOMvRPe3M8eodhpOn2Zcxfg746eEJuG/nZWY55c0KMVZ/dhHfbRgHEwEf9+9M4GTBmwr4eG9NDBZHD65j09AIBXw8PDUIL/+WCgCoa+nEjwnFeEhDFxCqm9rx1I/Xu+U/xvk74KO7Rw9okOmiaA+M9XfAcyoFK9XM0Vnhrnhgkn+fz+enUmwurm1FmLt29hG5oiY8+PUVTkHaVMDH26uisTS2/93OLjZmeHXJSGyYEohtx7Pxy7US5oSrQ6ZYcbFKi0V0TeSh98TKTIiP7xmNR0oakFrWgMIaRXRHYW0zCmta1HYNs8nkwLO7k/Hn01PgYTfwGSUNLZ04zlpKPWeEG+wsdL8aTtHR7s5chEovb0RRTQt8+/i3aOmQ4BjrPXBmmOuQVvfFetszMXblDW2obGzT6CDiGyX1zMde9haDWg3lZG2GeSPdmQvmLR0SXC+qR0J+La4W1iKxsL5b95+pgI+74n3w2IzgQf08rrbm+OaBeHx9oQBvHrrJFAZE4nbctzMB94zzRUNrZ7e4FjYTAQ+zwt1w5xhvTAtzgYDHw8GUcrx3JJPTMVnd1I5X9qcxA4aXjfIatgHDhuhyfi2SWRdnVo/1GXCc0VCp5lZfzq/VWBFdJG7ndBEDimOc4tqWYW1YGYirBbX45mIBc9vL3qLXuT/rxvth++k8Jibxk1M5WDXWe9iKbM3tEuSKlBe0BhItYmkq1OiFWGszIT5YOworP73AvO+//Fsaxvo5wtOu/3/XDS2d+C2plLPCyUzIx+f3jdHpUO8uz8wO4XQobz2ahW8ejNfIc+88l48aVkTqEzOCsW788F5YAxQrQf2crODnZMVpXGqXSHHyZhWe+TkJbZ2K94uz2dW4d8dlfLW++0paQyGTybH3ein+d/gmZw4CoFiRele8L7bMCR32/bM2WJkJ8d7qWEwKcsbL+1OZ1aPzR7rj9WWR/b5IT3pHRXRCDJStuQmWjfLEjwnFABTFi5wqMSejtz+kMjme3Z3MLPkDgBfmh+HhqfqZBz8QqnEufU0916QqcRsSi5Qdn3Mj3A3iwgPbPxeOQJW4nYl+ELdJcP/OBOx9bJLajL6BaGjpxPN7kjn3vbMqxqiGZwJAhKctdm+agHu/vMxERRXUtGD1ZxfB4/E43VA25kJ8cd9YjXf767tVY7zxwbFsVDcpXp9fns3DuvF+Q+50TC6ux6Zd11ChMlR445QAPD8/fFAnwW625vj2wXjsulSI//yR0S0b2c3WDG+viunXa101lqSwplkrRfRLeTXYtOsaZ1aCg6UJPr9v7KAHg3nZW+DNldF4eGog5r1/hpmhMJRBxj1hZ6L7D8Ogqyhvu25dyHK5HA2tnSisaWGysQtrmlFU24I8UTPz3lLX0omnf0rCDxvGDThH9GBKGaczSp8GKy+M8uCs5DiUWo5N03o/RjiWUcUpGC8ZZJRLlxiVTsDk4nqNrlpid6JH99CFPlCWpkJObm2nVIa0skZcya9FRnkjPOzNcfc4vyG/n/L5PDw4OQATgpzw9E/XOTFy318u6vHrIr1scedob9wR69UtOuaOGE8siHTHL1dL8MHxLE7sS0ldK579JRnbz+Ti2blhmBWm+QGAxmA7qwudzwM2TA4c9m0Y6WkLS1MBU0i5kl+LO8doJpP9Qm73oZWAohtdGyvpaps70NopHfTrpa1Tiud/vcEZ0vnGiqheO2xtzE3wwCR/puBbUteK/UllGvsd9iW9vJGzvboebhjrY4/Ns0PwzpEsAIrO3qd/uo4fN/RcZJbJ5Egvb8SpzCqcyhThenF9txU6/1sZrTcxQCM97bAg0p2Z53E6S4RrhbXdVh4NVG1zB2c+goedOe7VQQG9N2ZCAeZHemCXtRke/OoKxO2K5oHrRfVYs/0Sdj0Ub3ARINcK6/DagTTOBc0u4wMd8crikYjw1OzAZX2wcow34gMccSi1HBEedpgU7GRwdQh9ZphrFQghAIB7xnHffNldvf2181w+sxwdUGSyPaSDA31tsLMw4RTiqsRtvTxas46lV3EOfOcNYRm7rvD5PLyzKhoTWXEXlY3tuH9nAupb+j9sVp1Xfk/lnJSvn+iPicHGeSIe6GKNXx6dCH9W52ZZQxungO5ua449j0y87QrogGI4FrvzvLyhDb9d774McSAKqptx/1cJnAK6tZkQn9wzGv9cFDGkLjIej4f7Jvjjj6emMAO+FPcDW9fE9phjrEq1k1cbueh7E0uwbsdlTgE9wNkKex+bNOgCOlugizXnYkBeDwPNBqulQ8K5+OnnrJvuRh6PB3tLU8T42GNJjCcenxGMt+6MwU8PT8C5F2Zyiq4J+bX48EROL8+m3j7W0lsnK1MmhkMfjPS0hbeDsnDFHhbaE3buvpWpADPV5IsPRKSXLdhNz8mszvGhqmlq5+yPe4pyGSoTAR+xPvbYODUQ762Jxd/mhQ+5gM42wsMWvz8xGet7GX7obG2KDZMDcOjpKTj45BSsnxTQ4z7LRMDH3eN8cfpvM/CPheGwVxkumlXZhE27ruHO7ZeQUjW8K/303c2KRpzMVOaFL4zy6HP1hjYIBXyM8VMOxrvCOt4fKtUoly7HMgY/W6knVwpqMf6N45j05gms3n4Rp7NEkMvVRyX15MPj2ZwLvXeO8e7XfvaBiQGcofCfnMzpMaZJ01JUY6Z0XEQHgEenByM+QHn8kFhUj49V4vLqWzpwILkMz+5ORvx/j2PxtnN450gWrhbWdfvdbZoaiGWj9OeiMQA8MzsU7Hrje0ezhvycn57KYYrSALB5dqhWBpRrQpy/I358eDycWO8NmZVirNp+EcXDNMNHE14/mI6Vn17oVkD3srfAp/eMxo8bxxtlAb2Lj6MlHp4ahMkhzlRA1zAqohNiwCK97DDK1565/eu1ErR09L7knC2zQoy3/8pkbluZCvDu6hijWZ7L4/HgwlqaNZxxLkfSlUUGRytTzkmMITETCrB93RiM8FAeZORUNeGhb66irY+hNOqUN7Ti7b9uYn+SssAS6GKFF3pZTmsMvOwtsPuRCQhX02kc6maNvY9N1FqUhyG4d7wvbMyVJ6mfnc4d9ElqQ0snHvzmCpNdDih+x/ufmKTRpc3Brop/t78vCMf0MBd8dNdoTAzq/4UgOwsTTlFK00X0A8ll2LI7mekSBxQXSfc+OhEBzprr6A5ysWY+1nQneqHK8K3h6EQfKFMhH9vuGsUpsmw7kY0LOeq7NNUprGnGVVZW/ZIYT73K5OTxeFgQqez6TiquRxmr6KyqobUTp1kFxDkRbkPOsrc0FSLUTbmP1GQu+g2VoaIxetIRORjmJgL8646R+Gp9HJytFQUQEwEP80e648v7xuLi32fhpcURnPf0/jznw1ODcOb5GXhqZjAsVf4tk4ob8K/TdXh5f9qwFRf13eesjlMA2KTD1Z3sC6Z51c3d4gwG63wPneiX82vRwHr/1YT3j2UxcUQJ+bW4f2cCFm87hz9ulPfrby61tAHbWf8mLjZmeHlR/+ID7SxNcP9EZdNSXnUzDt4o6+UrNCe1TLlvcrIyhccgBk5qmoDPw9Y1sZxjto9O5uJUYSs+OpmLlZ9ewOjXj+LJH6/j18QSZpWhKnMTPjZNDew1TkdXwtxtsIh1vHg+pwaX8tRfNOqPsvpWfHOxkLkd5GKlV6vN1In0ssPuRybAk/U3V1jTgjs/u4AslRgnfZQramIiSbtYmAjw3NxQHH92GhZEeVBhmQya/hyhE0IG5V5WN7q4XYLfk/p3YNchkWHzz0mc5eOvLInQ2xzDwWLnog9XnIu4rZPToTMr3HXAS/v1iY25Cb55II7TiXitsA5P/ngdEqn6jFW21g4pfrteinU7LmPimyfw8Ullx4qAz8N7q2O1MixQ37jamOOnh8dzBhTFBzjil0cmwlOD3YiGyMbcBPdP8Gdu51U346+0vrtdVXVKZXj8h0ROMXesnwN+e3wSp9irKSYCPjZNC8LXD8QPaiYFu4tbtWA8VB+f5HZDL4v1xK4N8XDoZ6d8fwW5Kn+vhTXN/don9FdhDbcor81M9KHwc7LCGyuimNtyOfD0z0k9Fg9U7VNZeaGPJ9fzI7l/34d76Ub/K62Cc2xxR6xmZjywi9vJxfWQaahge6OYW0TXdWSCJswId8Xpv83ADxvH4fI/ZuOzdWMwO8JtSBdnbM1NsGVuGM48PwMPTPKHqcpz/ZBQjKd+vI52ycAvsBuTsvpWzrH4pGAnra1u6A/VVUdXNdCNXlTTwgz6BYAJrFV0Upkcp7Kq1H3ZoFQ2tnWbawIAaWWNePyHRMx57zR2XynuMfO/UyrD83tucIrtry8dCTvL/scHPjQ5kHPx6OOTORrb//QmlXWBL9LLTm+Kfl72FvjvcuV7nkwObEtowNZj2bhWWIeefjWBzlZ4YJI/vnkwHkmvzMXfF47Q28atZ2aHclY/vXc0a8CrH7p8cCyb8/f53NwwgzgvDLq1kjaQ1XhR2diO1dsvIrmXAdX6oEjlmHpRtAdOPjcdT8wM0dsVAMRw6P+rlxDSq0XRHpxuxl2XCvv1Jv/h8Wyklzcyt2eFu2L1WB+tbKMusXPRh6uIfipTxCkgzNNgbquuuNqa45sH4+HA+ls7ml6JV35PU/v3JpfLcTmvBs/vSUbcf47hmZ+TcDa7GqoPfXx6kMam3hsCe0tT/LhxPF5dEoHXl0Vi10PxejE8UB88MMkf5ibKw5JPTuUM+ITltQPpOMfqAPZ2sMD2dWNgaaqfI2DYRXRNdqK3S6TIrlLmIq8Y5YWta2JhJtT8iQP75KpDKucUVoaqQOUkyE8PO9G7LInxxF3xyvdQkbgdW3Yn91lokcvlnCJ6kIuVXizZVzXKxx7urCzU3oro7CgXOwsTTA7WTDQNOxe9sU3CycsfipTSeubjAGcro9knW5kJMTHIud8RU/3lbK0YMHzyb9Oxeqw3p9D0R0o5NnxzFc3t/V8VqS3tEimuFGi+K7ovO8/lQ8J63euyCx0ARvnaw0Sg/Ee6nD/0IrpqF/pz80JhxSoyH0nXXKTLgeQyzrGjah05r7oZz/96A9PePomd5/K7rcj9/Ewe53xnYZR7t4uCfXG0MuXkV2dVNg3qQv9AtHRIkMN6H9e394UlMZ5YObr3bHhzEz5mhrvitaUjceZvM3Diuel4dclITAt10ftCZrCrNWfwekJ+LS6quZjTl5yqJvxyrZi5He1th/mRhnNe2LWSdiQr9qS+pRN3f3Gpx7kI+kB1xc3LiyLgrgcrOYhxoCI6IQbO3ETAKX6nlTUiqY+rw9cK6/DJKWWXooOlCd5YGaU3HQ6axJ70LhqmTHT2yYOlqQCTQ4wj6zvIxRo718dxCp0/XC7CR6zO8qKaFmw9moWpb5/Ems8vYffVEjSpOZn2dbTEq0sisHlO6LBsuz6xMBXggUkBWDfeTytFTUPlZG2GtXG+zO3U0kacze7/Afo3Fwqw65Jyuay1mRA718fByVp/J9GzO6tL6lo0FoWQJ2rmPNf0cFet7d/ZnegAkFfd1MMjB47die5sbcqJTNFHryweiVA35e/jTJYIn5/N6+UrgMSiOs4qhBWjvfXyvZjP53FO/K8U1qqdM1Ld1I7zrAtZC6PchzwkuEuMD7eIpIlcdLlczslL1dRQ0duBl70F3rozBp/eMxqswwKcza7GvTsuD3l2ylDIZHI8+PUVrPrsIqa9cxKVjcNz/NfQ0okfE5TziSI8bDFFx8eA5iYCztBGTeSis1/jdhYmiPVxwLQw5cWy05miHjvDB4p9kdHL3gJnn5+B9RO5F90BxTyV1w6mY/L/TmLb8Ww0tHQip6oJHxzP5mzrv+4YOajt2DAlAGasfdm2EwO/0D8QGeWNnI7uSC/9y27+v6Uju8XDBbpY4cFJAfj2Vrf5zvVxuG+Cv05mAgzVU7NCOJ3yg+lGf+9oJuff8YX54Xr5Ht8bZ2sz/PjweMT5K6NJmzukWP/VFRzV4AUzTRKprAR0stbsxWRye6MiOiFG4O54X87t3gaMtnRI8OzuJM4b+n+XR3GKzcaE3Yle09yh0agBddolUpy8qVzGagjdFgMxytcBH989mnNQ+f7xHOy43oi7vriMqW+fxAfHs1Fc270b1dpMiDVjfbB70wSc/tt0PDApwOAOJIl2bZwaCCHrb4t9sa83Z7JE+L8DacxtPg/YdvcoToayPmJ3ondK5Shv0EwXt2peZZgWfw9Bztwiem6V5nLR2cVlfe5C72JhKsDHd4/mFHfe+SsTiUV1PX7N3kRulIu+DVhjYxfR5XLgr7TuJ89/ppRzji+WRGsmygUAQt1sOEWs5OKh56JXNrZzOtb0rdvTEMwe4YqXpzrC2kx5rHO9qB5rtl8atuK1qiPpFTh/K1avvqUT31/u+bhYk767XIjmDmWczaZpgXpxnMOOdMkob4S4bfDd+TKZnNOROyHQCQI+D3Mi3Jj7mtolQ8qQ7pJTJUZambKL/I5YT3g7WOJfd4zE+Rdm4okZwZxsbgCobe7Au0ezMOl/J/Dg11c4xfxXFkcM+nzH1cYcd7HOt9LLG3E8Q3OxNapUh4rqY8yUtZkQvz46Ec/PC8XDo21x6tmpOPHsdLyyJAJTjeD8J8DZCstZ78lXC+sG1NyRXFyPP1OUKxYmBztjUrBhNlbZmpvg2wfHYTrrYlmHRIZHvruGvYklOtwy9dhxevaWJno1Z4YYPvprIsQI+DtbcSbMH7hRhrpm9R1Ab/x5k7NEfsUoLyzQ4LA9fcPORJfLgeom7XZGXcyt4XRezx3p1sujDdOsEW747/JIzn1/5rQgoaB7oYjHA6aEOOODtbG48s/Z+N+d0YgPcNSLk0qif7zsLTjLZy/l1fZagAQUJ9mPf5/IKdz9c1EEZoS5amszNcbXkVsY1lSkS2aFsogu5PM0OkhUlZ2lCZxYcRG5Ik12orOL6IbRxRbiZoP/Y3U6SmRyPPnDdbWREu0SKQ7eKGduTwh0gpcez0eI83dkhlUCwOHU8m6PYUe5uNiYYRwrK3moTAR8TiFJE53oqs8RcxvFi2nSSBdTfP9QPGdfkFkpxp2fXUBBtWYHDvdFLpdzZq8Aios72uwaBoC2Tim+Ol/A3Payt+AMJ9Sl+ABlB6lMrliROliZlWLUsM4xJgUrXuMzwlw5DRbHMobeofrbde6cp2Ws4wMnazM8Ny8MF16ciRcXhMNZZdVZU7uE8546LdRlyPMmNk0L5MwC2HZSe93oKaXKiwcOliZ6+97gaGWKTVMDMS/I0ujmagHAUzNDOM0d7w6gG/3tvzI5t/82L0yj2zbcLEwF+HzdWCxmzQCSyuTYsjsZX5/P7+Urhx/74riLHq9IJYaJiuiEGIl7xym7IzokMuy51v2q8OksESfuwMPOHK8OclmjoVDtOFG3/FyT2FEuQj4PM8OMr4gOAGvifLGllyiWYFdrvDA/HBdfnIVdD43D0liv22J4KBm6R6cHcjJPP1EphrDVNXfgwa+vQsy6cHVXvC8enOSvxS3UHNXCsOogpMFid6IHulhpLE6jJ4EuyiI9e6jrULR1SlHG6sz3N4BO9C6rx/rgjhhlB3ZpfSte+PVGtxPvkzer0NCqLK4v18OBomwCPg9zWTM+LuXVci7Yl9a34grrYuqiKA+ND41jDxdNK2sccmTEDVYRnc8DJ/eVDEyklx12PzIBnqzc2eLaVtz52UVksDKpte1sdjVSSrldvDlVTciq1NwFPnX2JpZyuh83TgnQm+GBY/wcOe+rQ4l0YUe5AMDEW5219pamnLiHY+mVQyowy+Vy7E9WrtQJd7dBmHv3VVU25iZ4ZFoQzr0wA68vi4S3Q/dis5WpAP9dMfTYSg87C9w5VpkDnlxcP6DO5IFIK9PPoaK3G18nS6xS+Tc/mdn3CoRz2dWcGT0LIt2N4iKtqZCPD9aO4qzKAIB/HUjHwRtlPXzV8GPvi1UvsBEyVPrxzk4IGbKZ4a6cE5fvLhdyBprVt3Tg+T3JnK95Z1WM0QzQ6omLDfeNs6pRe8NFZTI5JxtufKAT7CyN9/f75Mxg3DdBOWjJzsIE68b7Yf/jk3B081Q8Oj2IhriQAQt2tcFc1rLwYxmVnM7qLh0SGTZ9d43TaTYxyAmvLR1pMCebbrbmnK62Qk11orOK6MMRacMeLqqpTvSSuhbOMDlD6UQHAB6Ph/8sj+Rs8+G0CnzHuogNcKNczE34WGAAw8bY2yhVec/7Q+UE+o5YzUW5dGHnondIZGr3DQNxgxWZEOJqo7dDiA1FkIs19jw6EUGsC2vVTe1Yvf0irmogi7s/Pj6pPgbsj5TuKyc0RSqT4wvW/AN7SxOsjvPp5SuGl52FCSfWK2EIw0UvsKJc3G3NOfv/2SOU791lDW2cKJaBSiyq40QD9hV1ZW4iwLrxfjj13HRsXRPDzKcQ8nl4fVmkxjq5H50WxOlM3nYiW+Pd6G2d3OHgFDOlW4/PCOYM5+0rG10ul+Ptv24yt/k84Nm5ht2Fzibg8/Df5ZF4ZBp3aPKXZ/WnG5298tzZhoroRLOoiE6IkRAK+JyrwoU1LZwr4K/sT0Mlq4C8fqK/weayDQQ7zgUAqsTaK6JfL67nLB8zxigXNh6Ph9eWRuLHDfF4ZaoDLr6o6AKK8bE3mCIm0U+PTQ/m3P7sNLcbXS6X46XfUjiFgABnK3xyz2iDyj0U8HnwdlSe2GsizqW5XcIpPGgzD70Lu2BW09yhkYGCBdXc34UhdaIDiu7Ij+4azTnxfv2PDKa7sK65g9PNNjfCHTbm+n/RdXygE+fi+5+sSJffWVEu3g4WGKWFrrtYledMGkKki1wu53QsR9FQUY3wtLfAL49M5AxpFbdJcO+OyzjVjw7OobhaUIvLPRSI/7hRprXojaPpFchnxdbcN8Ff7y7IjAtQ5qInFzegrVPay6PV65TKcJmVdT4x2IlzvMfORQcwpKGD7IGiPB44q3t6IxTwsXyUNw4/PRWHnp6CI5unYsVo776/sJ98HC05OdlXCupwKU+zF4jSyxs5w8GpiK5b3g6WWMO6KJZa2tjr3/ZfaRWcgdV3jvFGsMoQdkPH4/Hw4oJwrGC9FrIqxVqPzeovinMh2mQ4Z5qEkD6tiffhdEd0db0dSC7jnNwGuljhhfnhw759uuBkZQb2anJtxrkcSavg3FY9mTBW8QGOiHEz4wx8I2QoYnzsmZxVQFGcK2YVmL88m4/dV5WRVbbmQuy4fyzsLU1haNjDRTUR58LuXgOAUDXL3zUtUCVzPVcDkS4FNdznMLQiOqAoyv59wQjmdodEhid/uI7mdgkO3ihDp1R5sqnvUS5dTAR8znvb+ZxqNLR2Ik/UhFRWhu+SGE+tXEz1dbSEPWuF143i+kE/V3FtK+pZWfUxVETXGEcrU/ywcTwmsDLx2zpl2PDNVc7xqKaxu9B5PGApazVErqiZs0pHU+RyOT49rexCNxPycT9rlZ6+iGMV0TukMs4qjP66UVLPGZw6KYjbjOPnZMV0gAODz0XvlMrwB2texLgAR3gOsJOcz+dhhIctAl00X7x8fEYw59xi24lsjT5/qkockT4OFb3dPD4jmLNycOuxbM6K7y4SqYyThW4q5OPp2T3HXxq6sayhxS0dUpTWt/by6OHRIZFxovKcbQzv3IDoN6p4EGJEXG3MMY+11PpYRiWuF9Xhpd9SmfsEfB62ro69bfKpBXweJwtNW53ocrkcf7GK6DHedvCw088hQIQYAnY3ulQmx+dnFEWKY+mV+O+hDOZzAj4Pn947RisnysPBj1VEL6wZevE5SyXeYjg60VV/95qIdGEPFbW3NDHYaKwHJvlzIg7yqpvx8v5U7GV1WTpbm2GKAa0MWxilPM7olMpx4mYlDiRzozKWRGs+ygVQdL9Fs3LRhzJcVPVro1jPS4bO2kyIrx6I41x0kcjkePqn692ijTQhrawBJzNFzO2FkR54ciZ3VdOfNzQf6ZKQX4tk1sWc1WN94KSHnY/xrGIXMLhc9PM5NZzb6la0sv+908oaB1VUO5MlQh3rAhd7oKg+8He24nTGX8itwbVCzXWjp7AucNhZmKjNeSfDy8POAnez5o9llDdyzvu67E0s5TQS3DfeT2+HwmpCiBv3+E+1kUMXapq55/qUiU40jYrohBiZe8cpu19kcuCeLy9zrsY+MSPYKAabDAQ70kVbmeg5VU0oYBV92MPXCCEDNzHIidMZuvtqMc5mi/D0T9c5Wdn/d8dIg46m8mV1WDe2SdDAKhwMBnuoqLkJHz6O2s8S93awAHshiiaGi7Lz4f2G4WfQFh6Ph7fvjIYHaz7E3sRSXC+qZ24vjfXUmwGE/TEp2Bk2Zsqoij9TKvA7awBgsKs1Rnho7+JNLGu/kF3VhCbWYOGBYEe5mAh4Wt3m25W5iQCf3jMad45RxmnI5cBLv6XiIw1nSX9yihv79ej0IAS72nAuJB5MKdd43MD2M8oudD4P2DAlQKPPrymutuacOQ2DyUVnDxUNdLFSO/eGfdEQAI4Pohv9tyTlagVTAR8LojwG/Bza9sTMYM6w1g+Pq8/iHwxOzBQNFdUbj00P4qy63XosixO709YpxdZjWcxtazMhHpvBvZBnbEJUYmqytbDaZ6BEKg1zqvPRCBkqwzliJ4T0y/hAR07uWgtr2WW0tx2emGncb+bquNooD/JFWopzUe1GmHubRLkQoi08Hg+PsrrR2yUy3LczgbOUfP1Ef9w7Xv+WzQ+Er0qBuLB2aAVodlxBsKs1BHztn3wL+Dx4WCuLqprpRFf+HvwMMMqFzcHKFB/eNarHf4sVBhLl0sVMKMDMEa7M7eMZlZzOuyXR2oly6cJuBJDLuV2bA8HuHg5zt4GZ8PZYoTfchAI+3loZjQ2TucXld45kcYZxDkWeqAl/sgaHTg9zYSIwFkV7sB6n2UiXrEoxTtxU5rwviPLQ6/0Vuxv9WmEdpwDYl9YOKefin2qUS5cYb3tO0WqguehN7RIcTVceU88Id+HMYdAXwa42WBip/Ns6nSVC+hAGqXZRHSpKUS76w9XWHOtYx5xZlU2cgcXfXSpEeYPyPHPjlEA4Whl3lIi9pSnn9Z5dqftO9OomlSI6daITDaMiOiFGhsfj4V7WcrMuZkI+3lsdY1BD9zTF1Ub7cS5HWCcJgc5WRjdAhhBdmBvhxnktsRsIp4W64KVFI9R8lWFhdwYC3BiTwWB3oocOQ5RLF08bZQEyb4hF9E6pDCV1yggAfyfD7UTvEufviC1zuueihrnZIMLDVgdbNDQLWNFxqnW4JTHa7RqNVolduTGISBeZTM7JHVZ9TqJZfD4P/1w0An+bF8a5/63DmZyLGYP16alczvvDE6zuz4UqXcx/aDDSZee5fM7tTVMDNfbc2sDORW9qlyCjvP9F3ysFteiQypjb7LklbHw+D7NZF9ku5dWgsa3/K6z+Sq1AW6fy+7CHeOob1cYk9oWcwbpZIaahonps07QgWJgoj3fev9WNLm7r5MxkcLIyxUN6uipF09jd6Fl6EOdSLeYOt6c4F6Jpt181jZDbwIox3pw3eAB4YX44gl1vz6XK7CK6SNyudhDMUJTVt3IGNM0Z6UZLLwnRAD6fh0emBXW7P8TVGtvuHmVQERg98XHgFoiLagdfRK9v6UAlK7JqOPLQu3jZKDvRC2ta0MkqtgxUaV0rp4igz52dA/HotCBMVokeWjHayyDfL6aFunY7zgCASC/tDPJjc7Ex42TMDiYXPa+6ibOqhYaKah+Px8PjM4LxyuII5r6ujPTBRvIAQGl9K/axZgzEBzhyht0Fu1ojnDVg+Y8bmol0qWlq58w2GBfgqPcXY1Rz0QcS6cKOcuHxgAmBPceosXPRO6VynGZl1ffltyTl79TGXIjpYa69PFq3RnjYYgTrIuiR9O4Z2QOVojJUlIro+sXFxgz3TVR2o+eJmvF7cim+OJvPyfF/YmYwrFmxZ8aM3bCRUynWeGTWQIlUOtGdrI17NQAZfoZ/9kkI6cbW3IQz/GRikBPWT/TX3QbpmKutMs5FIpOjrqWjl0cPnOpS1XmUh06IxiyN9eQUzBwsTbDj/jjYmuvf8u7BsDAVcC70FQ2hEz1LZRltqPswFtFtlSeLEpl8SBcDClQGrPo7G34nOqC4KPTemhjm79nTzpyTFW1ILEwFmBHu0u1+9rA9bYrxURaWkosHHudyo0S1UGU/1E0i/fTAJH8sZkWsFNS04F+/pw36+b44kwcJ66Lb42oyiBexutHzqptxs2LokS4/JhShQ6K8WPjQZP3vOvVzsuRELwxkuOj5XGURPcrLrtdhzxODnDkX2Y71Mxe9StzGKdYvjPSAuZqLdfqEHd+YVdmEguqhRbKlsvZNtuZC+Dga71BKQ7VpahCsTJV/l+8eycKXrGgqL3vuEFJjx14x2twh5UTa6AI7E93B0uS2XIVPtIv+oggxUi8uCMeLC8KxZU4odtwfB/4w5OLqK1eVgSKajnRhd5642JghVs87kQgxJCYCRRSVi40ZfB0tsWN9HHyNIN6DjR3pMpTis2rW7/B2onMLHUMZLqoaaWMsneiAYkbHX5un4vN1Y7Dv8UlwMuBlxvMju8e2LIoepiI66322tL612yCxvrCL6GZCPkLdKIJtuPB4PPxneRTn4uieayU4kFzWy1epV93Ujh8TipjbkV62mBrSvUN6YbRmI106JDJ8e7GQue3raIlZI/R/Fg6Px+N0oyfk1/ara7S+pQNprLzviT3koXcxNxFgaqjyMSdvVvVrddLB5HJOPNTSUcOzPxmKOSozkAaaAa+K3YkeSUNF9ZKjlSnWT/JnbpfUtXJmkG2eE3pbzdhQjQ7M0vFwUXYmOkW5EG2gIjohRspEwMcj04Lw1KwQWJjePm/k6rA70QHNFtHrWzpwKU/ZyTMnwu22vmBBiDaMC3TClX/Oxum/TcdoXwddb47G+Toqi8RDKaJnsborbcyE8LAz7+XRmuVpw122PJThouxOdGszIZyMbDCXtZkQc0e6w812+P59tGFmuCtMhcpTiTh/B05hVJvYw0WBgeeisyNgRnraGkU0lCGxszDB+2tjwT5c+se+FJTUDWz/t/NcPtpZ3eCPTw9WW3QMclGJdEkZWqTLHyllnGPJ9RP9h2WIsybEs3LRa5o7kNePzumLuTWczPme8tDZZrMuKjS2SXClH9Ex+1lRLu625hgf0Pf30bWRnrac/d5QiuhtnVJOAZKiXPTXximBsFET1xLiaq3XOf7aEKIyByxHx7no7IvqLjZURCeaR0eMhBCj160TvVFzy8xO3KziZPdSlAsh2mOsHVm+jspO9LKGVrRLpL08umfsTvRQd5th/X1ZmfDhwur4GcpwUXYnup+TpdH+uxs6azMh7o5XLll/eGr3+QXaoujQVN4eyHDKTqkM6ayuWn3PsTZWcf6OeGJmCHNb3CbBMz8lQdLPeQoNrZ3YxeoGD3Kx6vUYjB0hk1/djIzywXVLyuVy7GANFLU2E2LVWMOJZYpTyUXvT3GbHeViKuBjrJ9jL49WmBnuyrlIcqSP4nKeqAnJrBUid8R6GkRTCo/H43SjXy2sRU3T4Jp1MivEnGiiSCqi6y17S1M8qCbC6W/zwgzmgpqmOFiZwpmVO06d6MTYURGdEGL0VN9ANdmJfiRNeVJgYybEhED975ohhOgXdpyLXK4YrDlQcrmcc+Kiurx2OAS6KDvqc4cU56L8Wj8ji+4xNv9cNAJfPRCHA09M7hZroE3WZkJO91tSSf9z0bMqxZzu5WgaKqozT80Mxmhfe+b21cI6fHwyt19fu+tiAcSsgaSPTQ/utei6MEol0iVl4PExXduYWqq8CLN6rA9sDGhGR5i7DWzMlR20Cf3IRb+QU8N8PNrPvl8rXJ2szTDGT7ly7Gh6Za/d/78lcf89lsUaTjcvOxddJgeO36wa1POkltFQUUPy4OQA2LJeS6N87Yf1fVCfhLgqjzmzddyJXt2knH1GRXSiDVREJ4QYPVMhH46sOICBZqf2hj2UaVqYC2dpOyGE9IePI7dQXDiISBeRuB31LZ3M7TAdZDwHsYrog+1El8rkKK5VXkQwpjx0Y2Qi4GNGmCuidFCIZuei3yip73c8R4pKwZ060XVHKODjg7WjOLEIH57IxrXC3gu7LR0S7DxfwNz2srfAHbG952cHulhjhIctc/vPlIpBRbrsZHWh83iKKBdDIuDzMJZV3O5ruGh5Qysn8mVSH3nobOyCYml9a48DXeVyOSfKJdTNGiM8hv9C8GDFBThyiqnsBpuBSGXloduYC+kisp6zszDBu6tjYWUqgJe9Bd6+M/q2XTkXwjrmzKlsGlJc1lC0S6RoaFUeC1OcC9EGqvYQQm4L7EiXKrFm4lzEbZ2oaVZe7aaOEULIYKieKBfVDLyIrjpUNNRdB53ozsqCd11LJ2pZ+8f+Km9oRQcrzsGfigikB+xc9PqWzn7PE2BHRlibCTl/t2T4+Tha4t/LI5nbUpkcT/+UhMa2zh6/5qeEYs7+5ZFpgTDpR669aqRLenljL4/urri2BX+lKYfJzxnhZpCDruNYuejFta0ob+h59dN5Vhc6AEwM7n8RffaI/g3dTCqu58R4LY31MqhipImAzxksey5HhNaOgceycYaKetJQUUMwJ8INaa/Nx/kXZyLY1XAu/GhaCGv1o7hdggoNRqcORE0T97iTHTNDiKZQEZ0QcltgX4muatRMJ7rqCTt1jBBCBsPJyhRWrOXxgxkumqnS4Rem4zgXYHDDRQtrVPerVOAk6sWodJAn9TMXPaVU+bhIL1uDyF02dktjvbCCNYyvpK4VL/+WqvaxHRIZPj+Tx9x2tjbDqrE+/fo+3SJdbpQPaDu/vVgAVmS12kxkQzAugJtpntBLLvqFHGUeurWZEDEDWHUS6GLNWaF0LEN9EX2/SpTL0j5WFegjdtd9W6cMZ7JFA/r6domU8z6ui9U9hAyW6nDR7ErdRLpUq8wjcKZOdKIFVEQnhNwWXG3MmY81lYmu2i3q60jFHkLIwPF4PE6ki2ohuT/YeejO1qZw0kEOpGpH72AiXQpquFnq/lREJz0Ic7fhRKglF/edi97WKcVN1kBJinLRH/+3dCRnyPL+pDLsu17S7XF7E0s4XY4bpwTA3KTvjG4ACHC2QgQn0qW837EDTe0S/HSlmLkd4WHbrRhtKKK87GHGeu30FOkil8s5Q0XHBThC2I+Of7bZrOLyjZIGVDRwO1Q7pTIcSFYW0eP8HeDtYHhNKVNDuZGOPXXd9ySrogmdUuXf4khP214eTYh+US2i62q4qGpkqwtlohMtoCI6IeS24GrLjXPRRFabam6xIS7pJYToB/ZKluLBdKKzun50MVQUADztLTiFmcEMF2VfQDA34XOiuAhhMxXyOYWm5JL6Pr/mZoUYElYrMQ0V1R825ib4YG0sBKyVAS//lsYZNCyRyvDpaeXgUVtzIe4Z7zeg77OIFelSUNOCtLL+Rbr8eq0E4jblINMHJwcYbNyGqZCPWFYc0pX8OrWPyxU1o5K1enMgUS5d5qoMWlTtRj+XU82JRlw2ynAGirJZmwkxKciJuX08oxISVjRZX9hRLgBFRBLD4mRtBifW/LEcHQ0XVe1Ep0x0og1GWUS/cuUKHnnkEUyePBlhYWE4duxYr48/cuQIHnjgAYwfPx6jR4/GmjVrcPbs2WHaWkLIcGAXYto6ZRC3S3p5dP+wiz3O1qawZg3GIoSQgWDHlhTVtgzoQp9MJkc2q+tHV0V0AZ+HAOehDRctYA2w83O0oqgN0it2pEtaWQM6+yha3VAptEd72at9HNGNUb4O2Dw7hLnd1C7B0z8lMf+uf6ZWcI691k8KGPCx1yKVSJc/U/qOdJHJ5PjqvHKgqLO1KZbEePTyFfovntVFn1kpRn1L9xkWF1hd6AAwKdip22P6EuvjwCmuqXZo77+uHChqIuB1+/cxJHNHujMf17V04mqh+osT6rCL6NZmQlqFRQxOMKsbXVed6NUqmeiOVpSJTjTPKIvoLS0tCAsLw6uvvtqvx1+5cgUTJ07E559/jr1792LcuHF49NFHkZ6eruUtJYQMF3acC6CZXPSiWmWxh70EmRBCBood59LaKe22JLU3pfWtaGENMQvTwVDRLkEuypOowXSis/Pgac4E6Qu7m7atU9bnifsN1lBRB0sT+DhaaGvTyCA9Oj2YU+BNKq7Hh8ezIZfL8cnJHOZ+S1MBHpjoP+Dn93e24qxg+KMfkS4nM6tQwCre3zveD2bC/kXI6Kt4lSiaKwXdC77nWXnoztamg5q1IeDzMGuEK3P7Ym4Nmm41srR0SHCEVVSfFuoKe0vDLXrNGuEK9uKEgUS6pJUp900jPWlWAzE87AaO7Komjaz6Hij2sbOjlWm/Bk4TMlBG+Vc1bdo0bN68GXPmzOnX4//5z39i48aNiI6Ohr+/P7Zs2QI/Pz+cOHFCy1tKCBku7DgXAKjSwNRwdjcUDb8jhAyFn8qFuIEMF1UtHIa6WffwSO1jD5Erqm1Bh6T/y9nlcjknE93fmfarpHeqcSx95aKzO9GjvO0NNo7DmAn4PLy/Jha25soO849O5uDNwzdxkzV48Z5xvnAYZJchO9KlsB+RLjtZXeimAj7uGTewCBl9NNrXgROdo5qLLpXJcTG3hrk9Ich50K+X2SOUkS4dUhnOZCmGbh5Nr+RcAF42yvAGirK52phjFOvC3pH0in4VEjskMs6sBopyIYYohHXsKW6TcKKghouIFefibG24F+SIfqPsATVkMhmam5thb28/4K+VSqV9P8jIdP3Mt+PPTgyHk6UJ53ZFQ+uQ/mY7JDKU1bcyt30czG/b1wDtAwgZOm977mqZ/OomjPLp34n0zXJuASjI2XJYX4/sfYA/q3tcKpMjXyTmLPHtTWVjG9o6lUV3H/vbd79K+sfH3hy25kI03sqqTiqqw5qx6jOVm9slnJzWSE8b+vvSAG0cA7jZmOK/yyPxxI9JAAC5HNh+Oo/5vKmAhwcm+g36e86PcMNbhzOZ2weTSzHCXf1+KrNCjPM5ymLykhgPOFoKDf5vx1zIQ4SHDVJKFe8fCfk1nJ/pRkkD87oCgAkBDoP+mScGOsJMyEf7rYuqR9IqMC/CFXsTlYNjrc0EmBHqbPC/19kjXJFYVA8AKK5tRUZZQ5+rw26WN6KDFUU10sD2TXQeQADFsSdbZkUDXKxNeni0dlSLlU1yTlam9Dc5DIzp9d/fn4GK6Grs2LEDLS0tWLBgwYC/NiUlRQtbZBhu55+d6L92CbcTJCkzH36oGvTzlYklYM0mg7ypGklJA48uMCa0DyBk8CQyOfg8MPuVhPQ8BPJE/frayzfrmY+dLfjIvZmmhS3sW0pKCiR1nZz7jl9JRZOXeQ9fwZUm4mZZdtaXIymptodHE6IQYMdH8q3z5ks5FUhKUj/zJF3UwXnftumoRVJSkvY38Dah6WMADwCzAixwPL+12+em+5mjPO8m+k4z71mgvRB59Yq/lX3XijDbtUVtp/UnV7mrGyY6tRnN342fpQRd/2opJQ24dDUR5reGQ++7yZ1p4dBRiaSkGgxWlIsJrpYrukSPpZfj5MVOnM1WxsXEeZjiZprhH0d687j7n29PJGNVRO8Xko/lcVee8RvKkJQ0+HMUXaHzgNtbexu3AHnqeiasm0p6eLR2FFcrm0qEkhaj2Vcbgtvp9U9FdBUHDhzAxx9/jE8++QROTgMfnhIVFQWBwLAz8gZKKpUiJSXltvzZiWGx/vMYk8NoYuOE2NjwQT9XQ5YIgPLgf0psOGL9HIa6iQaJ9gGEaIbXidMorlMUjDpN7RAbG92vrxOdPQ9AUUUc6eOI2NhYLW2heux9QLBEjudZA93l1q6IjQ3s1/NkXy0BoCyaz4qPgrcD5aKT3k0SZSG5UtGlXNIoQWhEJCxNu5/iXDuXD/bf1x2TY+Fu178LPKRn2jwG2DpCgqWfXEB+tbLIKODz8PflcUOeRXNnYx7e+isLAFDZLIXQNbBbjEZNcwfO7jvF3B4f6Ihl0+OG9H31yUKTShzMvg4AkMoBmYMfYoMU579br18BoCik+zpaYO6ksUP6XislJbi6LxUA0NQhx68FfM5FrQdmjERssPOQvoc+iAXwwbWzzEyQ1Do+/tPHe/KvhWkAFMU/K1MBFk4eY1CZ6HQeQABFJJ/D8ROoa1E0U7QI7RAbGzms29B0QHn8GeLjhtjYEcP6/W9HxvT67/pZ+kJFdJY//vgDL730Ej744ANMnDhxUM8hEAgM/o9nsG7nn50YBldbMzSJFEV0UVPHkP5eS+q5mer+Lta3/d8/7QMIGRpfJ0umiF5U29Kv15NEKuMM8Ax3t9XZ61AgEMDOVAB3W3NU3Jo7kVfdv58DAIrqlB2nJgIevB2tOZm9hKgT66O8gC2TAxkVzd2GJgJAapkyc9jVxgxejpS5r0naOAawtRTgw7WjseLT8+iUKiqud8R4IsBl6MOTF0d7MUV0ADicVoVYX+7fzc9XSjhzHR6aHGhUxznjArkNY1cL6zEl1BXtEimuFioHjU4Kdh7yzz07wh3/+C0VXRHhh1KVQzddbcwwKcTVaPb3cyLckXs6FwCQWtaISnEHPO17HmLMzuQf6WkHExPDLNHQeQAJcbNBQr7iYnWuqHlY/x7aJVJOBJWLjQX9PQ6j2+n1b5SDRQfj4MGD+Pvf/453330X06dP1/XmEEK0wNVGOVy0Sjy0waLsoaKWpgK4WJv18mhCCOmbryN3KGd/FNS0cLJUQ92GXlwaqkDWcNFcUVMvj+Ri71d9HC2NpqBCtCuWNcgPAJKL69U+jj1UVHUgKdFfUd522LomFs7WZoj1scc/Fmqms9DXyZLTef5HShlnCGSHRIZvLxUyt/2cLDEz3FUj31tfOFmbcWZWdA0XTSys58ynmBg09A5xFxszztBNtiUxnka1v5870o1z+1hGZQ+PBDqlMmSwBuZG0lBRYsBCWPuTrEpxvwbrakp1EzcS0MWGzs2JdhhlEb25uRkZGRnIyMgAAJSUlCAjIwNlZWUAgHfffRfPP/888/gDBw7ghRdewAsvvICYmBiIRCKIRCKIxWK1z08IMUyuNspl21XioU0MZxd7fB0t1eZoEkLIQPixhnJWN3WguV19tjNbViX3WKWvAWbDIchFeRKVJ2rq90lUQY2yo97fibqESf+42prDgxXLksQqlndpaOlEAet9O9rbfhi2jGjK4mhPXH1pNn57fJJGCyOLoj2Yj4trW5FaquwIPnijDCLWseL6if5GVejtEuev7L6/XlSPTqkM53OqOY+ZGDTwiFN1Zke4qb1/Waz6YcCGKtbbnvN3eiSt5yJ6dmUTZ7VDlLetVreNEG1iN3I0tkk4+1Btq1b5Xs7WpsP2vcntxSiL6KmpqVi2bBmWLVsGAHjjjTewbNkyfPjhhwAAkUiE8nLlKJrdu3dDIpHgtddew+TJk5n//vOf/+hi8wkhWsLuRBc1Du1NvbiWW0QnhJChUt2X9KcbPZPVwcbjgdNVqCtBrE70xjZJt+4gdeRyOYpYRU72BQVC+hLDKorfUFNETynlDoeMok50AmBRlAfn9sEURcOVXC7HjnP5zP02ZkKsGuszrNs2XOIDlHFIrZ1SpJY24Hyusoge7m4DJw2ttpyrpoge5GKFSC/jKhzz+TzMHqH8WS/l1aChtVPtY1NV903UiU4MWIjKMWhWZf9XIw6VasHemVaJEy0xzMCtPowbNw6ZmZk9fv7NN9/k3N61a5e2N4kQogdcbZVvpuJ2CVo7pLAwHXh2l1wu5xS3qNhDCNEEdUX0ER69FxfYnej+TlYwN9F9HmGgC/ckKk/U1Gf3aG1zB8SsznvqRCcDEe1jh8NpFQAUHcU1Te2cwt+N0nru46lQRaCIjYr2tsONEkUh848b5XhxfjiuFNRxcqpXx/nA2swoT5s5negAcPJmFfP7ABR56JoS5GINfydLzqqQZbFeRrmac26EG35MKAIASGRynMqswlI1HffsC3yWpgIEOOv+QjghgxWiEimYXSXG5JDhGRhc3cQtortSnAvREp13okulUvzxxx945ZVX8PjjjzPFb7FYjCNHjqC6urqPZyCEkP5hx7kAg89FF4nb0dopZW77UrGHEKIBvioX5Nid2T3JZBXRQ9304+Q7SKUTiT34tCcFKj8rXZwkAxGrEs/CLgICwI1i5W0vewuNddYSw8fuRi+pa0VKaQN2srrQ+TxFlIux8nawhBdr6OXXFwoglSkjuCYFaybKBQB4PB7mqHSjqyssG4MJQU6wYjXqHElXH+nCLqJHeNgaZWQQuX04W5vC3tKEuT2cnejsIjqPBzhaUZwL0Q6dFtEbGxtx11134dlnn8XBgwdx4sQJ1NYqBppYWlri3//+N7799ltdbiIhxIioXpEebC56oUrEgh/FuRBCNMDW3AQOrJOPwtrei89tnVIUVCsfE6YHQ0UBwMPWHOYmykPMvH4MFy2s4f6sfnRxkgxApLcd2M2sSSrDRdmFqhgf6kInSgtVIl22n87DkfQK5vacCDf4GPlxXpy/MtKlsU25IkjI5yE+QHNFdABYPykAdhaK97m1cT7dLh4bC3MTAaaFuTC3T92sQrtEynmMRCpDRrlyxQMNFSWGjsfjcSJdcqqGb8YgO87FwdIUQoHO+4WJkdLpX9Y777yD7Oxs7NixA8eOHeMMnhIIBJg3bx5Onz6twy0khBgTdpwLAFQNMhe9kDomCSFawl7ZUlTb2utjc0VNYDUMIlQPhooCijzYQNaS9Nx+FNHZnegCPo/TGUlIX2zNTTgDbdm56NVN7SitV76WaKgoYfNxtEQMKyP/j5Ryzn71wUkBOtiq4RUX4Kj2/hgfe43H2HjZW+DCizNxbMtUvLEiSqPPrW/mRrgzHzd3SHExt4bz+RxRE9rZQ0WpiE6MADvSJauy/8Plh4o9f8eFVpsRLdJpEf348eNYt24dJk2apDYLzd/fH6WlpTrYMkKIMXLRUJxLEatjUsDnwZOKPYQQDWGvbCmq6b0TnZ2HDuhPJzrAjXTpT5wLuxPdy94CpkLqICIDwx4umlzSwJy4qw4apTx0okq1G73LSE9bxPdQYDYm8f7qf8ZJQZrtQu9iZSZEsKuNUWahs80Ic+XEs6hGuqSU0MBjYnzYnegNrZ0QNQ2uaW2g2N/H2YaiXIj26PQMRSwWw9vbu8fPSyQSSKXSHj9PCCEDYWsuhBmrMKOJOBcvewuY0HIxQoiGsIeLltS1QiKV9fjYzAplh7eJgAd/Z/2JQAlkbUtJXQvaOns/nmN3otPqHjIY7JiW2uYOlNQpus9V89EjqVBFVPRURH9wUoDRF3oBINjVWm1+8EQNDhW9HdlZmmB8oPICxdH0SshYyxxSWTFTFiYCzmoaQgxVqEpDR84w5aJXs87rnakTnWiRTis/vr6+SEtL6/Hz58+fR1BQ0DBuESHEmPF4PE6kiybiXKjYQwjRJHY+rEQmR3lDzytmslmd6EEu1np1QY/diS6Td4/BUsXuuvenPHQyCDEqMS1duejsInqgsxVszU1ACJuPoyVifOw59zlbm2FxjPriurHh8XgY6+fAuc/chI9Rvva62SAjMmeEcpCqSNyOZNbKGM5QUU8aKkqMQ4jKcHnVVZPawu5EpzgXok06Pdu688478euvv+LPP/9kllzyeDx0dHRg69atOHv2LNasWaPLTSSEGBlXVqTLoONcWJ3ovkY+bIoQMrxU9ylFtT0XnzNZJyYhehTlAnA70YHeh4s2tHSirqWTuU0XJ8lghHvYwJR1IelGST3kcjmniB5NXeikB4ui3Dm31433g5lQoKOtGX6qsTVx/o631c+vLXNGcv+uuiJdJFIZ0tlDRT1th3W7CNEWFxszZngwAGRXab8Tva1TCjFrKLKzDRXRifbotIh+//33Y9myZdiyZQvmz58PAHjuuecwevRobN++HatXr8aqVat0uYmEECPjynpTFQ0izkXc1onaZuXgEir2EEI0SXWf0lMRvaldwsRVAECYm34tAw904RbRexsuWljLzUynTnQyGGZCAUawClHJxQ0ob2hDNas7LYqGipIeLIv1gq25Yoimk5Up7hnvq+MtGl5xKrnokyjKRSO87C0wkrVfOnqriJ4rakZbpzKuLZJmNRAjwePxON3o2cMQ51KtkrtOcS5EmzQ7bnuAeDwe/v3vf2PZsmX466+/UFhYCJlMBl9fXyxYsABxcXG63DxCiBFiF9EHk4muGklAneiEEE1yszGHqZCPDoni5LqnGJRsleWxqhmUumZpKoSnnTnKbsXR9DZctEDlZ/R3pv0qGZwYbzsk34pxSSltwPWi+m6fJ0QdV1tz/ProRJzOEmFmuOttV4SJ9LJDjI89kovrYWMuxPJRXrreJKMxN8IdaWWKrvOcqibkiZo4US4ADRUlxiXEzRpXC+sAAFlVYsjlcq3Ol6hu6uDcdqFOdKJFOi2idxk7dizGjh2r680ghNwGXG2VcS61zR3okMhgKuz/ohzVrlBfR+qYJIRoDp/Pg4+DBVN0LqpVX3xWzZgMc9evIjqgyEXvKqL3FudSWK38GXk8wNuBiuhkcBS56IUAgNZOKX5NLGE+x+cpcocJ6UmIm43eRWMNFwGfh+83jMP5nGpEe9vBjXW8TIZmToQbth7LYm4fTa/kzDsxN+EjmIaKEiMS4qrcj9a3dKKmuUOrFyarxaqd6N0HJROiKfozgYoQQoaB6pVp1eVffelWRKc4F0KIhvmx4kx6inPJrFAWpc1N+PDRw8JzEKsokCtqZubfqGJ3onvYmsPchHJ4yeCoDoc8mVnFfBzqZgNLU73oHyJEL1mbCTFvpDs87Cx0vSlGZYSHDbwdlL/TI+mVSGV1oo/wsIVQjwaDEzJUIW7DO1xUpHI+T4NFiTbp9Ehy5syZfS7r4PF4OHbs2DBtESHE2LmqFNGrxO3wtO//yQI7WsHZ2hTWZnRCTgjRLHZMVGFNi9plsOwTklA3G/D52lsmO1jsXPSmdglE4nbOaqAuhTXKTnQ/ykMnQxDobAUbMyHE7YoBY+zrNlGUOUwI0QEej4c5EW746nwBACCxqI4zBJn2TcTYqEYM5lQ1YWKQ9uYssDvReTzA0Yo60Yn26LT6Ex8f3+2kUCqVoqysDImJiQgJCUFERISOto4QYoxcbbgFnKrGth4eqR47WoHy0Akh2sDet4jbJKhv6YSDyglBpkoRXR8FqSxPzxE1qS2iszvRKQ+dDAWfz0O0jx3O59R0+1y0Spc6IYQMl7kR7kwRXS4H2iU0VJQYL1cbM9iYCyFuU1zQ1nYnOntluaOlKa3sIFql0yL6m2++2ePnbt68iYceeghLliwZxi0ihBg7V9vunegDwe5Ep45JQog2+KnERBXVtnCK6LXNHRCx9l1helpEZ3eiA4pIF9VOpOZ2Cefkh/arZKiive3VF9GpUEUI0ZE4fwfYW5qgvqWz2+ciPWnfRIwLj8dDqJsNrt0aLppd2fNcHE1gx7ncbkOhyfDT20s04eHhWLNmDd555x1dbwohxIg4WppCyIo9GEgRvUMiQ1l9K3ObOtEJIdqgWkQvVMlFV+3oCdXDoaIA4G5rDktTZb65uuGi7AuTAOBPcybIECmGi3KZCHgI99DP1wkhxPgJBXzMDHftdr+pkN8tP5oQYxDiqvy7zq7SbhG9WtzBfKw6/4wQTdPbIjoAODk5IScnR9ebQQgxInw+j3OFWiTuf5xLaX0rZKx8VdVCFyGEaIK3ypDQ4j6K6Praic7j8Tjd6Lmi5m6PYeehA9SJToYuVk1sywgPW5gJaWAtIUR35ka4dbtvhIctTCh6ghihYFYRvba5AzVNA1v9PRDVnE50ykMn2qW3e+y6ujr8+uuvcHd31/WmEEKMDDvSpaqx/2/o3Ys9VEQnhGieuYkA7qzscNV9T2aFsohuay6Em63+dt2wc9HVdaIXqHSi036VDJW7nXm31wQN7iOE6NrUUBeYCbnllygvWx1tDSHapTqvR5vd6OyIQ4pzIdqm00z0++67T+39YrEYeXl56OzsxFtvvTXMW0UIMXaurGVeA4lzKVLpBvV1pI5JQoh2+DpaouLW4GPVyBN2J3qYu023Ie36hF1EL61vRVunFOYmyo5g9gUCVxszWJrq9NCUGIlob3scTa9kbquLeCGEkOFkaSrElBBnHMuoYu6jC3zEWKnGFGVXijE+0Enj36etUwpxu4S5TXEuRNt02okul8u7/QcA3t7euOeee3DgwAEsXrxYl5tICDFCLjbKDs+qAcS5sAtZlqYCWi5GCNEaX1ZHNjvORS6XczrRVTt99A07zkUuB/KruV31BawiOnWhE01RjXSJ8qZCFSFE9+aoRLpEednrZkMI0TJ3W3PYmCkbI7TViV6tEhNDnehE23Ta7rNr1y5dfntCyG2K3Yle3dQBqUwOAb/vTk52Ed3X0VKvuz8JIYbNjzW4uLyxDe0SKcyEAlSJ29HYpuy4CdPToaJd2J3oAJArasIID+XydfZ+lfLQiaYsivLAB8ey0SGVIcLDVu8vNhFCbg9LY73w7cVCpJU1Yv5Id4yggcfESPF4PAS7WeN6UT2A7vN8NEWksqrcmTrRiZbRmllCyG2HnYkulclR29zRr6VfRbXUMUkIGR7sTnS5HCiubUWwqzWnCx0AQlz1+wQ8wNkKPJ7iZwCA3CrlfrStU4ryBuVqIH/arxIN8Xe2wm+PT8L14jrMjXDv14VyQgjRNnMTAQ48MRkVjW3wtLfQ9eYQolWhrjZMET1Ha53oHZzbLtSJTrRsWIvov/3226C+btmyZRrdDkLI7c2VFecCKCJd+iqiy+VyTia6ryMVewgh2qO6jymubUGwq3W3Tp5QlcxJfWNuIoCXvQVK6loBAHnVypOo4lrVoaLUiU40J8LTFhGeNLSPEKJf+HweFdDJbYGdi17d1IHa5g44Wmk2DrVbnIsNxa0S7RrWIvqLL7444K/h8XhURCeEaJSrSsG8StyOkX18jUjcjrZOGXPbl4o9hBAtUi0odw3gZHeiO1ubwckAOm6CXKyZInquSFlEL1AZmOpP+1VCCCGEEKMQ7Np9uOg4DQ8XZce58HiAoyUV0Yl2DWsR/fjx48P57QghRC12nAsAiBrbe3ikUqFqxyR1ohNCtMjB0gTWZkI0tSvyz7v2QexO9DB3/e5C7xLoYoXTWSIAQJ6oGXK5HDwej7kw0MWX4lwIIYQQQoyC6jyS7KomjRfR2Z3oTlamEAr4Gn1+QlQNaxHdy8trOL8dIYSo5WxtxsnorRK39f4F4A6/AygTnRCiXTweD76OlkgvbwSgiD6RyeTIqlR2chvKsET2cNGWDikqGtvgYWeBAlYR3dHKFHYWJrrYPEIIIYQQomEeduachpBsLQwXZRfRnQ1gdSYxfHSZhhBy2zER8DlLvarEfXeiF7GKPQLKMiSEDAP2xbrCmhaU1LWitVPK3BdmIEX0QBduTEvXcFH2xUm6MEkIIYQQYjx4PB4n0iVbC8NF2XEuVEQnw2FYO9HVEYlE2LNnD9LT0yEWiyGTyTif5/F4+Oabb3S0dYQQY+ViY4aaZsU076oBxrl42VvAhJaKEUK0jD1ctKi2BTcrGjmfD3U3jCJ6sAs3diavugmTQ5w5neiUh04IIYQQYlxCXK2RVFwPAJzVlJpS3dTBfOxiQ0V0on06LaLfvHkT9913H9ra2hAQEICsrCwEBwejsbERlZWV8PX1hbu7uy43kRBipFxtzXHz1oC+gca5UMckIWQ4sDPC2yUynMup5nw+xNUwMtFdbMw4y3lzq5rQIZGh9NawUYB7wYAQQgghhBg+dvRgdVM76po74GClueGf1ZxOdBoqSrRPp62U7777LiwtLXH48GF89dVXkMvl+Mc//oHTp09j69ataGhowHPPPafLTSSEGClX1pXqfsW5sDrRqdhDCBkOfo7c7uxj6ZXMx172FrAxN4wMcR6PhyBWpEtedTNK61shkysf4+9M+1VCCCGEEGMS7MZt+NBkpEtbpxTiWw0aAMW5kOGh0yJ6YmIi1qxZA09PT/D5ik2R35r0t2DBAixZsgRvvfWWLjeREGKkVIvoXfsedcRtnahtVi4Vo050QshwUL1gV9agXDUTZiBRLl3Yw0Vzq5o4US4A4EdxLoQQQgghRkV11WR2leaGi4pUGuGoiE6Gg06L6DKZDM7OzgAAW1tbCAQC1NfXM58PCwtDWlqajraOEGLM2EX0DokMja2SHh/LjnIBAF9HKvYQQrTP094cAj5P7edCDWSoaBf2cNGyhjZklHPz3SkTnRBCCCHEuHjZW8DKVMDcztZgLnp1E7eITpnoZDjotIju7e2NkpISxYbw+fD29sbFixeZzycmJsLGxrBOEgkhhsHV1pxzu7dcdHaUC0Cd6ISQ4SEU8OFlb6H2c2HuhpGH3iVIZbjoqZsi5mMbcyEcLA0jmoYQQgghhPQPj8dDMKsbnTrRiaEb9iJ6Q0MD8/HkyZNx+PBh5vZdd92FX375BevXr8f999+P3377DYsXLx7uTSSE3AZcVa5U95aL3r0TnYrohJDh8f/s3Xl8nHW99//3zCSTzEz2SbqkWUoLhAItZRFoKRYQRIEiiwsq6w0qIqKAx4Pe+kOQWyqCHkEPNx6R1aM3HhChUpRSQISyCBRSaFm6ZGnSNslknZlkMjPX74/Zs7RJemUmybyej0cfzTWZmes7kLnavvPJ+zvaN+2m3yR6aoj+rwZP/OP5bpcslpEn7gEAADB9HZT0d1ZzJ9EDKcflhWwsismXk+4TnnDCCVq5cqVWrVqlyy67TGeeeaYGBweVm5urSy65RD6fT3//+99ltVp11VVX6Wtf+1q6lwggC8wqHM8keqK7t7wgT668tF86AWSpkb5pZ7UMn+ye6mrdTlktim8mmrypKD/dAwAAMDMl96Lv6R1Qt29QxSb8BGJynYvVIrldTKJj8qU9CTr99NO1fv16rV+/Xi6XS6eddprOPvtsHX/88bJYLLrqqqt01VVXpXtZALLMrKIhk+g9o0+iJ9e5EPYASKeRQvT55S7l59pGuPfUlZ9rU1Wpc1g9lkQfOgAAwEw19KcnP9zTq2Pml+338ybXuZS57KPuIwSYKe0h+h133KH+/n6tW7dOa9as0ZNPPqnHH39cbrdbZ511llatWqXDDjss3csCkGXyc20qys9RT39kQ9Gx1rlQ5QIgnUb6xt3Bs6ZXlUvMwgrXiCF6Dd+cBAAAmJGSO9El6YPdfaaE6MmT6PShI10y0kmQn5+vs846S2eddZa6u7u1du1arVmzRg888IAeeOAB1dbW6uyzz9aqVatUXV2diSUCyAKzivLV0x/pZRstRA8Ew2rp8sePCdEBpFNN2fAp7YPnTNcQvUDPvd827HYm0QEAAGameSUOOXJt8g+GJJm3uWhyiF5RSIiO9Ej7xqJDFRcX64ILLtDDDz+s559/Xtdff70cDofuvPNOffKTn9QFF1yQ6SUCmKGSNxfd0zNyJ/rOLj/dvQAyZqQp7bpptqlozNDNRWPmc10FAACYkaxWiw6anfg74Ed7zNlcNLnOhUl0pEvGQ/Rks2fP1hVXXKHVq1frE5/4hAzD0Ntvvz3u53n99dd15ZVXasWKFaqrq9O6dev2ev89e/bo+uuv1+mnn65DDjlE/+f//J+JvgQA00hyiN42yiR6Q4c35ZgQHUA6FeTlyO2yp9xWN2d6bSoas7Bi+MS5I9fG9BAAAMAMllzp8sFusybRA/GPywvse7knYJ6M1LmMpKWlRWvWrNGaNWv04YcfyjAMHXnkkVq1atW4n8vn86murk7nn3++rr766n3ePxAIqLS0VF//+td1//33T2D1AKajWUX58Y9Hq3MZ2t87UrUCAEymGrdTHd7IPxTsNqtqp2n9yUiT6LVupywWNoICAACYqZI3F93dM6Bu/6CKHbkTfj5/IKS+gWD8mIEMpEtGQ3SPxxPvQ9+4caMMw9CCBQt0zTXXaNWqVaqqqprQ865cuVIrV64c8/2rqqr0gx/8QJL06KOPTuicMaFQaL8ePx3FXnM2vnZMb+WuxB/cfQNB9fgG5MpLvSzuaE/8uJnTblOpw8bX+hBcA4DJddCsAr3V2CVJOmROoawyptT7bazXgFJH6obOUiREn0qvBcD48HcAILtxDcBYLCxP/Wnu91u7dXRt6YSfb3d36qBbmTOXr8EMmEnv/7G+hrSH6D6fT88884zWrFmjDRs2KBgMqqKiQpdccolWrVqlww47LN1LMlV9fX2ml5Ax2fzaMT35PP6U4xde26jKwtTLYv32zvjHFQ7LhCqmsgXXAGBynFge1D8LbfIPGjpnoVUbN27M9JJGNJZrwBynRclbUDiCfVP29QAYO/4OAGQ3rgHYm0BfMOX4uTc2y9Y58ZrUDzoCKcfdu5u1cePwzeuRHtn0/k97iL58+XINDAzI6XRq1apVWrVqlY4//nhZrVOqnn3CFi9eLJvNlullpFUoFFJ9fX1WvnZMb/1FHdKrr8ePy6sWaOkBZSn36Xrhn5IiVS9189xaunRpGlc4PXANACbXUkmfXmEobEg269SrPhnPNeDwj97RB56W+PHHFs3X0qXVk71EAJOEvwMA2Y1rAMYiHDaUv+4Z9Q+GJUn9eaVaunTRhJ+v7b3dkjzx4+OXHqpD5hSO/gBMipn0/o+9ln1Je4i+bNkyrVq1Sp/4xCeUlzfzeotsNtu0/+KZqGx+7Zie5pSkfve73TuY8jVsGIaaOhM/Kja/3MXX+F5wDQCy21iuAQfOTv0HzgEVBVw3gBmAvwMA2Y1rAPbGZotsLrppZ48k6aM27359vXT4BlOOZxU5+PrLoGx6/6c9RL/77rvTfUoAGNGsIRuQDN1cdE/vQPy75ZJUUzbxHzkDAEhL5pXEP861WVI2mgIAAMDMdPCswniI/uHuvn3ce+/aexN1LlaLVOay79fzAWM1MzpUAGACCvJy5MhNfMd0T29/yucbOlI3LKl1E6IDwP444UC3Ljq+VvNKHPr+GYtUXjDzfioRAAAAqQ6cXRD/eFdPv3r6B/dy771r70sMv5W58qZk3SFmprRPoqeD1+tVY2Nj/Li5uVmbN29WcXGxKisrdccdd2j37t267bbb4vfZvHlz/LEej0ebN29Wbm6uDjzwwLSvH0B6WCwWzSrKi4flbT2pk+gNHd6U49oyV9rWBgAzkcVi0Y/POVw/zvRCAAAAkDYHz0r96cOP9vTpqJrSCT1XW9JPkJcXMIWO9JmRIfqmTZt08cUXx49vvfVWSdK5556r1atXq62tTa2trSmPOeecc+Ifv/vuu1qzZo3mzZun9evXp2XNADJjVmEiRB9a59LkSUyi26wWVZbkp3VtAAAAAABMdwclTaJL0oe7eyccoidPolcU8lONSJ8ZGaIfd9xxev/990f9/OrVq4fdtrf7A5i5ZhUmgvFhdS5JIfq8EodybDRgAQAAAAAwHlWlTuXlWDUQjOw59v6uifeip4ToVAMijUiEAGS15O9cD51ET+5Epw8dAAAAAIDxs1ktOmROotJlY1PnhJ8rpc6FSXSkESE6gKw2qyjxh26Xb1ADwVD8uDFpEr2mjBAdAAAAAICJOKo2Ud+yaWeP+gdDe7n3yPyBkLyBxOPoREc6EaIDyGrJdS5S4rvavf2D8ngD8duZRAcAAAAAYGKOTgrRA6Gw3m3pHvdzJFe5SHSiI70I0QFktVlD/tCNVbokV7lIUk2ZK21rAgAAAABgJkkO0SXpjYbxV7oMrWAtpxMdaUSIDiCrJde5SNKensgfyslVLhKT6AAAAAAATNTcYocqixM/CT6REH3oJDohOtKJEB1AVhte59IvaaRJdEJ0AAAAAAAmKrkX/Y2GLhmGMa7HU+eCTCJEB5DVSp25yrVZ4sexHw9r9Hjjt5UX5MmVl5P2tQEAAAAAMFMkV7q09w2oyeMf1+PbkupcrBap1MnGokgfQnQAWc1isagi6UfAYnUuyZPoVLkAAAAAALB/hvWiN3rG9fjkSfQyV55sVste7g2YixAdQNarKEpUuuwZoc6llioXAAAAAAD2y6K5RcrPTUSR4+1Fb+8NxD+mygXpRogOIOvNSvrDd0/vgALBsFq7Ez9WVsMkOgAAAAAA+yXXZtURVSXx4zcausb1+LakSfTyAqpckF6E6ACy3tAQfWeXX+Gk/U2ocwEAAAAAYP8lV7q8v6tHvf2DY35scp1Lci0rkA6E6ACy3qzCRJ1LR9+Atrf3pXy+psyV7iUBAAAAADDjJIfoYUN6u6l7zI9N3liUOhekGyE6gKw3qyjxh2/YkN5q7Er5fA2d6AAAAAAA7Lcja4ZsLjrGXnRfIChfIBQ/LmcSHWlGiA4g680a8h3s13ckdgh32m10rQEAAAAAYIIyl10LKhI/7f1G49hC9ORNRSWpvJB/pyO9CNEBZL3kOhdJ2tjUFf+4pswpi8WS5hUBAAAAADAzHZ00jf5WQ6fCyZuSjaKtrz/luKIgf5R7ApODEB1A1kuuc5Gk/sFw/GM2FQUAAAAAwDzJvei9A0F9uKdvL/eOaGMSHRlGiA4g67lddo02bF7rZlNRAAAAAADMkhyiS2PrRW/vG0g5phMd6UaIDiDr5discrtG/gOYTUUBAAAAADDPwooCFeXnxI/HEqK39SZCdJvVolInk+hIL0J0ANDwzUVjqHMBAAAAAMA8VqtFRyVNo785hs1FkyfRy1x22azsXYb0IkQHAA3vRY+pLaPOBQAAAAAAMyVvLrq93auOIXUtQyWH6FS5IBMI0QFAI0+i51gtqixhx28AAAAAAMw0tBf9zcauvd4/uc6lYpSfJAcmEyE6AEiaVTg8LJ9X6lCOjcskAAAAAABmOqK6RMmNLPvqRW/vC8Q/Li+gDx3pRzoEABq5zoVNRQEAAAAAMJ8rL0eL5hbFj9/cZ4ieNIlOnQsygBAdADRynQubigIAAAAAMDmSK13ebu5SIBge8X7egaB8gVD8mDoXZAIhOgBIqhihzoVNRQEAAAAAmBzJIfpAMKz3WntGvF/7kE1H2VgUmUCIDgAaeRK9mjoXAAAAAAAmxVE1qZuLjtaLToiOqYAQHQA08o+DUecCAAAAAMDkqCp1pAy0jdaL3tabGqJT54JMIEQHAEn5uTYVO3JTbmNjUQAAAAAAJofFYkmpdPlXg0eGYQy7X1tfIOW4vMA+6WsDhiJEB4Co5O+AlxfkyZWXk8HVAAAAAAAwsyWH6Lt7BtTS3T/sPu1Jk+g2q0WlTkJ0pB8hOgBEVZY44h8vKGdTUQAAAAAAJlNyiC6N3IveltSJ7nbZZbVaJn1dwFCE6AAQdcnyWuXaLLJZLfpfKw7I9HIAAAAAAJjRDqsslj0nEU+O1IuePInOpqLIFLoKACDqlENma8P3PiGJP5gBAAAAAJhs9hyrjqgq1us7IuH5SJPo7UmT6OVsKooMYRIdAJKUF+QRoAMAAAAAkCZHJVW6vNfaI18gmPL55DqXCv69jgwhRAcAAAAAAACQEUfXJEL0UNjQ203dKZ9v7w3EPy4vZFNRZAYhOgAAAAAAAICMOGrI5qJvNiYqXbwDQfkHQ/FjJtGRKYToAAAAAAAAADKivCBP893O+HFyL3pb0qaiklRBJzoyhBAdAAAAAAAAQMYkT6O/2dipcNiQlLqpqCT2MEPGzMgQ/fXXX9eVV16pFStWqK6uTuvWrdvnY1599VWde+65Ovzww3XaaafpscceS8NKAQAAAAAAgOx2dFKI3uUb1LZ2ryRCdEwdMzJE9/l8qqur04033jim+zc1NelrX/uajjvuOP3lL3/RJZdcoh/84Ad68cUXJ3mlAAAAAAAAQHY7epRedOpcMFXkZHoBk2HlypVauXLlmO//xz/+UVVVVbrhhhskSQsXLtQbb7yh+++/XyeeeOJkLRMAAAAAAADIegfNKlRhXo56B4KSpDcbOvX5Y6rV1heI38dmtajEkZupJSLLzcgQfbw2btyoZcuWpdy2YsUK/eQnPxn3c4VCoX3faYaJveZsfO0AuAYA2Y5rAJC9eP8D2Y1rAMy2tLpYL37UISmyuWgoFFJbT3/8826XXYYRFl9ymTeT3v9jfQ2E6JLa29tVXl6eclt5ebn6+vrU39+v/Pz8MT9XfX292cubNrL5tQPgGgBkO64BQPbi/Q9kN64BMEulPVHd8uGePv3ztTf10c7u+G0uW0gbN27MwMowmmx6/xOim2zx4sWy2WyZXkZahUIh1dfXZ+VrB8A1AMh2XAOA7MX7H8huXANgtr6Cdv2/9/4VPw4WVytoG5AUCderK0q0dOnSzCwOKWbS+z/2WvaFEF2RqfP29vaU29rb21VQUDCuKXRJstls0/6LZ6Ky+bUD4BoAZDuuAUD24v0PZDeuATDLUbVlslgkw4gcb2zuVrs30YleUZjP19oUk03vf2umFzAVLF26VK+88krKbS+//DLf3QIAAAAAAADSoDA/V3WzC+PH/9rRqbbeRMVLRWFeJpYFSJqhIbrX69XmzZu1efNmSVJzc7M2b96slpYWSdIdd9yh7373u/H7X3DBBWpqatJtt92mrVu36ve//73Wrl2rSy+9NBPLBwAAAAAAALLO0bWl8Y/faOxU/2A4flxeYM/EkgBJMzRE37Rpk8455xydc845kqRbb71V55xzju68805JUltbm1pbW+P3r66u1j333KOXX35Zn/nMZ3Tffffplltu0YknnpiJ5QMAAAAAAABZJzlEDwTDKZ9jEh2ZNCM70Y877ji9//77o35+9erVIz7m8ccfn8RVAQAAAAAAABhNcog+VEUBIToyZ0ZOogMAAAAAAACYXmrKnKPWtpQziY4MIkQHAAAAAAAAkHEWi0VH1Yw8jV7OJDoyiBAdAAAAAAAAwJQwUqVLjtWiEkduBlYDRBCiAwAAAAAAAJgSRgrR3QV2Wa2WDKwGiCBEBwAAAAAAADAlHD6vWLm21MCcKhdkGiE6AAAAAAAAgCkhP9emw+cVp9xWwaaiyDBCdAAAAAAAAABTxtFDNhdlEh2ZRogOAAAAAAAAYMoY2otOiI5MI0QHAAAAAAAAMGUMDdHnFBGiI7MI0QEAAAAAAABMGbOK8vXZo6skSWUuu1YdUZnhFSHb5WR6AQAAAAAAAACQ7GefXaKvfnyBqkudcthtmV4OshwhOgAAAAAAAIApxWKx6ODZhZleBiCJOhcAAAAAAAAAAEZFiA4AAAAAAAAAwCgI0QEAAAAAAAAAGAUhOgAAAAAAAAAAo2BjUZMYhiFJCoVCGV5J+sVecza+dgBcA4BsxzUAyF68/4HsxjUAyF4z6f0few2xbHc0FmNf98CYBAIB1dfXZ3oZAAAAAAAAAIBxWLx4sex2+6ifJ0Q3STgcVjAYlNVqlcViyfRyAAAAAAAAAAB7YRiGwuGwcnJyZLWO3nxOiA4AAAAAAAAAwCjYWBQAAAAAAAAAgFEQogMAAAAAAAAAMApCdAAAAAAAAAAARkGIDgAAAAAAAADAKAjRAQAAAAAAAAAYBSE6AAAAAAAAAACjIEQHAAAAAAAAAGAUhOgAAAAAAAAAAIyCEB0AAAAAAAAAgFEQogMAAAAAAAAAMApCdAAAAAAAAAAARkGIDgAAAAAAAADAKAjRAQAAAAAAAAAYBSE6AAAAAAAAAACjIEQHAAAAAAAAAGAUhOgAAAAAAAAAAIyCEB0AAAAAAAAAgFEQogMAAAAAAAAAMApCdAAAAAAAAAAARkGIDgAAAAAAAADAKAjRAQAAAAAAAAAYBSE6AAAAgBTNzc2qq6vTDTfckOmlAAAAABlHiA4AAAAAAAAAwCgI0QEAAAAAAAAAGAUhOgAAAAAAAAAAoyBEBwAAALBPF110kerq6jQwMKBf/OIXOvXUU3XYYYfprrvuyvTSAAAAgEmVk+kFAAAAAJg+vvnNb2rLli068cQTVVRUpKqqqkwvCQAAAJhUhOgAAAAAxmzPnj164oknVFJSkumlAAAAAGlBnQsAAACAMfvmN79JgA4AAICsQogOAAAAYMyWLFmS6SUAAAAAaUWIbrLXX39dV155pVasWKG6ujqtW7duUs931113qa6uLuXXpz71qUk9JwAAALJXeXl5ppcAAAAApBWd6Cbz+Xyqq6vT+eefr6uvvjot5zzooIN03333xY9tNltazgsAAIDsY7FYMr0EAAAAIK0I0U22cuVKrVy5ctTPBwIB/eIXv9CaNWvU29urgw46SN/5znd03HHHTficNptNFRUVE348AAAAAAAAAGBkhOhpdvPNN+ujjz7SL37xC82aNUvPPPOMrrjiCj355JOaP3/+hJ6zoaFBK1asUF5enpYuXarrr79elZWV5i4cAAAAAAAAALIQnehp1NLSoscee0y//OUvdcwxx6impkaXX365jj76aD322GMTes4lS5bo1ltv1W9/+1v96Ec/0s6dO/XlL39ZfX19Jq8eAAAAAAAAALIPk+hp9MEHHygUCg3b+DMQCKikpESStHXrVp1xxhl7fZ6vfOUr+s53viNJKdUxhxxyiI444gidfPLJWrt2rT73uc+Z+wIAAAAAAAAAIMsQoqeRz+eTzWbTo48+OmzzT6fTKUmqrq7WU089tdfnKS0tHfVzRUVFmj9/vhobG/d/wQAAAMhKVVVVev/991Nue+ihhzK0GgAAACCzCNHTaNGiRQqFQvJ4PDrmmGNGvI/dbtfChQsnfA6v16umpiY2GgUAAAAAAAAAExCim8zr9aZMgTc3N2vz5s0qLi7WAQccoFWrVum73/2ubrjhBi1atEidnZ3asGGD6urqdNJJJ437fD/96U918sknq7KyUnv27NFdd90lq9Wqs846y8RXBQAAAAAAAADZyWIYhpHpRcwkr776qi6++OJht5977rlavXq1BgcHdffdd+vxxx/Xnj17VFJSoqVLl+qb3/ym6urqxn2+a6+9Vq+//rq6urpUVlamo48+Wtdee61qamrMeDkAAAAAAAAAkNUI0QEAAAAAAAAAGIU10wsAAAAAAAAAAGCqIkQHAAAAAAAAAGAUbCxqknA4rGAwKKvVKovFkunlAAAAAAAAAAD2wjAMhcNh5eTkyGodfd6cEN0kwWBQ9fX1mV4GAAAAAAAAAGAcFi9eLLvdPurnCdFNEvtOxeLFi2Wz2TK8mvQKhUKqr6/PytcOgGsAkO24BgDZi/c/kN24BgDZaya9/2OvZW9T6BIhumliFS42m23af/FMVDa/dgBcA4BsxzUAyF68/4HsxjUAyF4z6f2/r3ruGbmx6Ouvv64rr7xSK1asUF1dndatW7fX+7/66quqq6sb9qutrS1NKwYAAAAAAAAATEUzchLd5/Oprq5O559/vq6++uoxP+7pp59WQUFB/Njtdk/G8gAAAAAAAAAA08SMDNFXrlyplStXjvtxbrdbRUVF+3XuUCi0X4+fjmKvORtfOwCuAUC24xoAZC/e/0B24xoAZK+Z9P4f62uYkSH6RJ1zzjkKBAI66KCDdPXVV+voo48e93PU19dPwsqmh2x+7QC4BgDZjmsAkL14/wPZjWsAMP30BcLa1RfSbm9Qu/tC8gcNHTcvXweW5Y7rebLp/U+ILqmiokI33XSTDj/8cAUCAf3pT3/SxRdfrEceeUSHHXbYuJ5rJuxKO14zaUdeAOPHNQDIblwDgOzF+x/IblwDgKkrHDa0q6dfDR6fmjx+NXp8avD41NjhU1OnX93+wWGP+du2fv3zuyepyLHvIH0mvf9jr2VfCNElLViwQAsWLIgfH3XUUWpqatL999+vn/3sZ+N6rpm0K+14ZfNrB8A1AMh2XAOA7MX7H8huXAOAzOgfDKnJ41NDRywg98bD8maPX4FQeFzP5w2E5AsaKh3H+zmb3v+E6KNYvHix3nzzzUwvAwAAAAAAAECWMQxDXb5BNXh8aujwqrEjMU3e6PFpV0+/aeeaW5yvy1ccoHklDtOec6YhRB/Fli1bVFFRkellAAAAAAAAAJiBQmFDLV3RupVoON7o8UY+7vCpdyBoynlybRZVlzpV43aqpizyq9btUq3bqepSpxz27Jgm3x8zMkT3er1qbGyMHzc3N2vz5s0qLi5WZWWl7rjjDu3evVu33XabJOn+++9XVVWVDjroIA0MDOhPf/qTXnnlFf3ud7/L1EsAAAAAAAAAMM35AsFIOB4NyWP1K00en5o7fRoMGaacpyg/R7VuVyQkdztVW5YIzecWO2SzWkw5T7aakSH6pk2bdPHFF8ePb731VknSueeeq9WrV6utrU2tra3xzw8ODuqnP/2pdu/eLYfDoYMPPlj33Xefjj/++LSvHQAAAAAAAMD0YBiGOryB6CR5Yoo81k/e1jtgynksFmlOUX50ityZCMyjxyVOuynnwchmZIh+3HHH6f333x/186tXr045/spXvqKvfOUrk70sAAAAAAAAANPMYCisli5/yhR5Q0ckMG/y+OQNhEw5jz3HGgnFy5yqjoflTtWUuVRV6lB+LrUrmTIjQ3QAAAAAAAAAGKu+gWB8A8/YFHlkM0+vWrr6FQqbU7tS4syNVq24Ir/H6lfcTs0uzJeV2pUpiRAdAAAAAAAAwIxmGIbaegfUENvEs8MbCcqjYXmHN2DKeSwWqbLYEa9ZifSTJ7rKix25ppwH6UWIDgAAAAAAAGDaCwTD2tnlj0yUxzbxjHaVN3p86h8Mm3Ke/FxrtI/cFa1bSWzmWVXqlD3Hasp5MHUQogMAAAAAAACYFnr6ByM1K9GqlaaksLy12y+TWlfkdtnjwXhNrH4lelxRmCeLhdqVbEKIDgAAAAAAAGBKCIcN7e7tj06Qx3rJE/UrXb5BU85js1o0r8SRMkUe28SzusyhwnxqV5BAiA4AAAAAAAAgbfoHQ2ru9A2pXIn8avL4NBA0p3bFabdFa1di/eSueFheWeJQro3aFYwNIToAAAAAAAAA0xiGoW7/YLRyJTJFHgvMGz0+7erpl2FS7Up5QV68ZqXGnRSYl7lUXmCndgWmIEQHAAAAAAAAMC6hsKHWbn+ibiVeveJVQ4dPvf1BU86TY7WoqtShGrdLNWUO1Za5IvUr0cDcaSfexOTjqwwAAAAAAADAMP5ASE2dscqVxDR5k8enpk6fBkPmjJMX5uXEp8gj/eSueEg+tzhfOdSuIMMI0QEAAAAAAIAsZBiGPN5AvI880U8emSbf0ztg2rlmF+UlpshTqldcKnXmUruCKY0QHQAAAAAAAJihgqGwWrv7o/3kXjV2+FL6yfsGzKldsdusqipzRDfudKm6zBnfxLO6zKn8XJsp5wEygRAdAAAAAAAAmMa8A8GUqpVYL3mjx6ednX4Fw+bUrhQ7cuOheG3SBp61bqdmF+XLZmWaHDMTIToAAAAAAAAwhRmGoba+gWFT5JGecr/a+8ypXbFYpLlF+fFe8kTlSuS42JlrynmA6YYQHQAAAAAAAMiwwVBYOzv9avD41NiRmCSP/fIFQqacx55jjQTj8U08Y/3kLlWVOqhdAUZAiA4AAAAAAACkQd9AMDI93uFTgyexiWejx6eWrn6FTKpdKXXmqsbtSukkj3WVzyrMk5XaFWBcCNEBAAAAAAAAExiGoT29A4kp8g5vUljuk8cbMOU8VotUWeKIdpIneslrolPlRfnUrgBmIkQHAAAAAAAAxmggGFJzpz8akqdOkzd6fOofDJtyHkeuLR6KxzfxdLtUU+bUvBKH7DlWU84DYN8I0QEAAAAAAIAk3b7ByMadnmg3eVI/eUu3X4Y5rSsqL7BHN+50RSfKY2G5UxUFebJYqF0BpgJCdAAAAAAAAGSVcNjQrp7+lCnyWOVKQ4dP3f5BU85js1pUVepIDciTqldceURzwHTAOxUAAAAAAAAzTv9gSE1J4XgkII90lDd7/AqEzKldcdltwzfxdDtVW+ZSZUm+cmzUrgDTHSE6AAAAAAAAph3DMNTlG4xu3OmN9JN7fPGu8l09/aada1ZhXlI/uSslLHe77NSuADMcIToAAAAAAACmpFDYUEuXPzJR7kls4hnrKe8dCJpynlybRVWlyZUrznhXeXWZQ047ERqQzbgCAAAAAAAAIGP8gVC8aiVRuxL5vbnTp8GQObt4FubnxGtWEpUrkenyucUO2axMkwMYGSE6AAAAAAAAJo1hGOrwBtTQ4Yt3lDd4EvUrbb0Dpp1rbnF+JCCPTZRHu8prypwqceZSuwJgQgjRAQAAAAAAsF+CobB2dvlTpsgjk+V+NXZ45Q2ETDmPPceq6lKHat2ulOqVWrdTVaVO5efaTDkPACQjRAcAAAAAAMA+9Q0E1RjtJN/e7tVbH3TL99bravT4tbPLr1DYnNqVEmdutGbFpZoyh2rLXJENPd1OzS7Ml5XaFQBpRogOAAAAAAAAGYahtt6BpA08fWrs8KrBE9nEs8MbGOFR/nGfx2KRKosdiUny2DR5NCwvduTu/4sBABMRogMAAAAAAGSJQDBSuxLbxLOhwxfvKm/0+OQfNKd2JT/XqppoF3lNmSteu1Ljdqqq1KG8HGpXAEwfhOgAAAAAAAAzSE//YLR2JTZR7o2H5a3dfpnUuqIiu0ULZhepNrp5Z3WZM/Kx26lZhXls4glgxiBEBwAAAAAAmEbCYUN7egfUkFS10ujxRT/2qtM3aMp5bFaLKkvy4zUrkcqVyDT5vOJ8bd2ySUuXLpXNxlQ5gJmNEB0AAAAAAGCKGQiG1OTxq9HjVWOHLx6WN3gi1SsDwbAp53HabfHalUg/eWSqvNbtVGWJQ7k264iPC4XMqX0BgOmAEB0AAAAAACADunyBpMoVX2SyPNpP3trTL8Ok2pWKwryUKfJIP7lLNWVOlRfYqV0BgH0gRAcAAAAAAJgEobChXT39kU08Y9PksYnyDq96+oOmnCfHalFVqSM+RV6TEpY75bQT/wDA/uAqCgAAAAAAMEH9g6H4NHlDh1dNnkT1SnOnX4GQObUrBXk5SZUrzkhPefR4bnG+ckapXQEA7D9CdAAAAAAAgFEYhiGPN1K7kgjLfZGuco9Pu3sGTDvX7KK8aD+5S7XRSfLqaA1LmYvaFQDIFEJ0AAAAAACQ1YKhsFq7+xPd5LHNPKP95L0D5tSu5Nosqi6NTZJHNvGMTZNXlzrlsNtMOQ8AwFyE6AAAAAAAYMbzBYKJTTw7okG5x6/GDq+aO/0Khs3ZxbMoP0e1blciKI/3k7s0pyhfNivT5AAw3RCiAwAAAACAac8wDLX3BdTo8cYnymObeTZ0+NTeZ07tisUizS3Kj9SsRMPx2DR5bZlLxc5cU84DAJg6CNEBAAAAAMC0MBgKa2enP7Jxp8enxo6kwNzjky8QMuU8eTnWaDd5onql1u1SdZlTVaUO5edSuwIA2YQQHQAAAAAATBm9/YPDpsiboj3lLV39CplUu1LqzFWN2xUNyBMbeNa6XZpVmCcrtSsAgChCdAAAAAAAkDbhsKG2vgE1dPjU0OGNT5HHJso93oAp57FapLnFjmjlilM1Za7o75Hp8qJ8alcAAGNDiA4AAAAAAEw1EAypudOfmCjv8KV0lQ8Ew6acJz/XqtqyyCaesV7yWA1LValT9hyrKecBAGQ3QnQAAAAAADBu3f7BaOWKd0hY7lNLt1+GOa0rcrvs8V7y5PqVmjKnKgrzZLFQuwIAmFyE6AAAAAAAYJhw2NDu3v5IMB4NyxP95D51+QZNOY/NatG8Ekc8GE9Mk0cmzAvyiC4AAJnFn0QAAAAAAGSp/sGQmjt90X5yX1I/uVdNnX4FTKpdcdltqnG7VFPmUK3bFQ/La8tcqizJV46N2hUAwNRFiA4AAAAAwAxlGIa6fINqiIbjjR2RafKGaP3Krp5+085VUZgXrVxxRnvKHfHNPN0uO7UrAIBpixAdAAAAAIBpLBQ21Nrtj1auJDbxbIx+3NsfNOU8OVaLqkodw3rJY5t6Ou1EDACAmWlG/gn3+uuv695779WmTZvU1tamX//61zr11FP3+phXX31Vq1ev1ocffqi5c+fq61//us4777w0rRgAAAAAgNH5A6GUqpVGT6J+pbnTp8GQObt4FublRCbJ3dFO8qSwvLLEIZuVaXIAQPaZkSG6z+dTXV2dzj//fF199dX7vH9TU5O+9rWv6YILLtDtt9+uDRs26Ac/+IEqKip04oknpmHFAAAAAIBsZhiGPN5AvGalMWmivKHDpz29A6ada05RfrRyJTFJXhudLi9x5lK7AgDAEDMyRF+5cqVWrlw55vv/8Y9/VFVVlW644QZJ0sKFC/XGG2/o/vvvJ0QHAAAAAJgiGAqrpatfDdGqlcboZp4NHp+aPD71DZhTu2LPsaqq1BGtXEnaxNPtVFWpU/m5NlPOAwBAtpiRIfp4bdy4UcuWLUu5bcWKFfrJT34y7ucKhUJmLWvaiL3mbHztALgGANmOawCQvXj/j8w7EIzWrvjj9Sux45Yuv4Jhc2pXShy5qi5zRCbJyyJT5bHjOUX5su6ldoX/ZzAD1wAge82k9/9YXwMhuqT29naVl5en3FZeXq6+vj719/crPz9/zM9VX19v9vKmjWx+7QC4BgDZjmsAkL2y7f1vGIa6BsLa3RfSrr6QdnmD2t0X0m5v5Lh7IGzKeSyS3E6r5rhyNLvApjkuW+T3ghzNcdnksluT7u2L/OqR9vRIe0xZATA22XYNAJCQTe9/QnSTLV68WDZbdv1oXCgUUn19fVa+dgBcA4BsxzUAyF4z+f0fCIa1sysxSd7k8Ue6yqMf+wfNmbzLy7FGJ8kTE+Wx43mlTuXlWPf9JECGzORrAIC9m0nv/9hr2RdCdEWmztvb21Nua29vV0FBwbim0CXJZrNN+y+eicrm1w6AawCQ7bgGANlrur7/e/oHR9zAs9HjU0uXXya1rqjMZU9Urrhjv0d6ymcV5u21dgWYDqbrNQDA/sum9z8huqSlS5fqH//4R8ptL7/8spYuXZqZBQEAAAAA9ks4bGhP74AaOrzxifLYJp6NHV51+gZNOY/VIlWWOKIBuSselNeUOVXjdqooP9eU8wAAgMyZkSG61+tVY2Nj/Li5uVmbN29WcXGxKisrdccdd2j37t267bbbJEkXXHCBfv/73+u2227T+eefr1deeUVr167VPffck6mXAAAAAADYh4FgSM2dfjV2+KJhuT9lonwgaE4/uSPXljRFHgvIXaotc2peqUO5NmpXAACYyWZkiL5p0yZdfPHF8eNbb71VknTuuedq9erVamtrU2tra/zz1dXVuueee3TrrbfqwQcf1Jw5c3TLLbfoxBNPTPvaAQAAAAAJ3b5BNSQF440dPjV4vGrs8Km1p1+GSbUr5QV5KVPkte7Ir+oypyoK8mSxULsCAEC2mpEh+nHHHaf3339/1M+vXr16xMc8/vjjk7gqAAAAAMBQ4bCh1p7+aD+5N6lyJRKad/vNqV3JsVpUVepQdSwgL3OpJik0d+XNyH8eAwAAE/C3BAAAAADApOofDKlpSCd5oyfycbPHr0DInNqVgrycROVKLCiPHs8tzlcOtSsAAGACCNEBAAAAAPvFMAx1+gYTm3gmTZM3eLza3TNg2rlmFealbOKZXMFS5rJTuwIAAExHiA4AAAAA2KdgKKzW7v7IBHmHTzva+/TOtk71/PMlNXn86h0ImnKeXJtF1aXOlKqVWnckMK8udcpht5lyHgAAgLHKeIgeCoX09NNP69VXX1VHR4euueYa1dXVqbe3Vxs2bNBRRx2l8vLyTC8TAAAAAGY8XyAYD8mH1q80d/oVDI+0i+f4p8wL83NSeslryxKh+dxih2xWpskBAMDUkdEQvaenR1dccYXeeecdOZ1O+f1+XXjhhZIkp9OpW265Reecc46uu+66TC4TAAAAAGYEwzDU4Q2oIWkTz+T6lbZec2pXLBZpTlF+vI+81u2KbOgZPS5x2k05DwAAQDpkNES//fbb9eGHH+ree+/VokWLtHz58vjnbDabTj/9dL3wwguE6AAAAAAwRoOhsFq6/PEp8shEuTc+Xe4NhEw5jz3HqlkOiw6uLFONO7mf3KWqUofyc6ldAQAAM0NGQ/Rnn31WF110kU444QR1dnYO+/z8+fP15z//OQMrAwAAAICpq28gGNnEMzpJnryJZ0tXv0Ij1q6MX6kzVzVuV2SiPFq5Evu93Jmrd955W0uXLpXNRmAOAABmroyG6L29vaqqqhr188FgUKGQOVMSAAAAADBdGIahPb0D8X7yxg5vJCiPhuUd3oAp57FapLnFjpQp8lgFS43bqaL83FEfy7/VAABAtshoiF5TU6N333131M+/9NJLWrhwYRpXBAAAAADpMRAMaWenPzFFHusn93jV6PGpfzBsynnyc62qKYsE5ImwPNJTPq/EIXuO1ZTzAAAAzFQZDdE/+9nP6vbbb9dxxx2n448/XpJksVgUCAT061//Wi+++KJuvvnmTC4RAAAAACas2z8Yr1lpHBKWt3T7ZZjTuiK3y66aWDhe5kx0lJc5VVGYJ4vFYs6JAAAAslBGQ/RLLrlEH330ka677joVFRVJkr7zne+oq6tLwWBQX/jCF/S5z30uk0sEAAAAgFGFw4Z29/ZHK1diYbk/Xr/S5Rs05Tw2q0XzShyRifJoOF7rdqq6LBKcF+6ldgUAAAD7J6MhusVi0S233KJzzjlHf/vb39TQ0KBwOKyamhp9+tOf1sc+9rFMLg8AAAAA1D8YUnOnX40erxqik+RN0c08mzw+DQTNqV1x2m3xPvLa6GaesePKEodybdSuAAAAZEJGQ/SYY445Rsccc0ymlwEAAAAgS3X5ApGA3BPZxDO+oafHp109/abVrlQU5kXrVhIBeayr3O2yU7sCAAAwBU2JEB0AAAAAJlMobKi126/GaDAe38zT41Vjh089/UFTzpNrs6iq1JkyRR7bxLO6zCGnnX+CAQAATDcZ/RvcKaecss9JC4vFonXr1qVpRQAAAACmK38gFNm80+NTQ9I0eZPHp+ZOvwIhc2pXCvNyIr3kSVPktWWRfvLKEodsVqbJAQAAZpKMhujHHnvssBA9FAqppaVFb775pg466CAdeuihGVodAAAAgKnEMAx1eAORoDzaTd7g8Ub6yTt82tM7YNq55hTlp2ziWRPtKa8tc6rEmUvtCgAAQBbJaIi+evXqUT+3ZcsWXX755Vq1alUaVwQAAAAgk4KhsFq6+tUQ3cQzFpDHNvHsGzCndsVus6qqzKHaeNWKM/pxZKI8P9dmynkAAAAw/U3ZQr5DDjlEX/jCF3T77bfrsccey/RyAAAAAJjEOxBM2rjTG9/As9Hj085Ov4Jhc3bxLHbkxkPx2iGbeM4uyqd2BQAAAGMyZUN0SXK73froo48yvQwAAAAA42AYhtr6BhJT5EkheUOHT+195tSuWCxSZbFD1WUO1Za5knrKnaotc6nYmWvKeQAAAJDdpmyI3tnZqUcffVRz5szJ9FIAAAAADDEYCmtnp18NHp8aO1KnyRs9PvkCIVPOk5djjYTi0SnymjKHat2RwLyq1KG8HGpXAAAAMLkyGqJffPHFI97e29urbdu2aXBwULfddluaVwUAAABAknr7B4dNkcfqV1q6/DKpdUWlzlzVRDftjE+SuyO1KxUFebJSuwIAAIAMymiIbhjD/9ZtsVhUVVWlZcuW6fzzz9fChQszsDIAAABg5guHDe3pHYgG5N54UB7bxNPjDZhyHqtFqixxpEyUx8LyGrdTRfnUrgAAAGDqymiI/tBDD2Xy9AAAAMCMNxAMqbnTr8aOWFDuT9nMcyAYNuU8jlxbPBSPTZRXRyfK55U4ZM+xmnIeAAAAIN2mbCc6AAAAgLHp9g9GQvJoON7kSWzm2dLt1wg/ADoh5QX2eNVKdVkiLK+J1q5YLNSuAAAAYOZJa4j++OOPT+hx55xzjqnrAAAAAKaTcNjQrp7+eCd5Y1JI3tDhU7d/0JTz5FgtmlcaqV0ZWr1SXeZUQR4zOAAAAMg+af1b8A033DDux1gsFkJ0AAAAzHj9gyE1pWzgmegpb+r0K2BS7UpBXk5qSO52qrbMpZoypypL8pVjo3YFAAAASJbWEP3ZZ59N5+kAAACAKcMwDHX6BhPheHQDz0aPT40dPu3q6TftXLMK80bcwLO2zKkyl53aFQAAAGAc0hqiz5s3L52nAwAAANIqFDbU0uWPT5M3eLyJfvIOn3oHgqacJ9dmUVVpcuVKpKe81u1UdalTDrvNlPMAAAAAYGNRAAAAYFx8gWA8JG+Kh+U+NXZ41dzpVzBszi6ehfk5iSny6ER5bXSifG6xQzYr0+QAAABAOmQ8RG9ra9P//M//6L333lNvb6/C4dSuR4vFogceeCBDqwMAAEC2MQxDHd5AfBPPWD95rH6lrXfAtHPNLc5XdVkkHI/0k0e6yWvLnCpx5lK7AgAAAEwBGQ3Rt2zZoosvvlj9/f064IAD9MEHH+jAAw9UT0+Pdu/erZqaGs2ZMyeTSwQAAMAMFAyF1dLVr4YRQvLGDq+8gZAp57HnWFVd6lBtLBx3JypYqkqdys+ldgUAAACY6jIaot9xxx1yOp16/PHHlZ+fr+XLl+v73/++li1bprVr1+pHP/qRbr/99kwuEQAAANNU30BQjUOnyaP1Kzu7/AqZVLtS4syN1qy4VFPmUG2ZK7KJp9up2YX5slK7AgAAAExrGQ3R33zzTV1xxRWqrKxUV1eXpMiPz0rSpz/9ab3xxhu67bbb9PDDD2dwlQAAAJiKDMNQW+9AdHo8MUXe4Il0lbf3BUw5j9UizS12JPrJ3U7VRjvKq8ucKnbkmnIeAAAAAFNTRkP0cDis8vJySVJRUZFsNls8TJekuro6PfrooxlaHQAAADItEAxrZ5dfDR3e+BR5bEPPRo9P/kFzalfyc62pG3hGA/Laskjtij3Hasp5AAAAAEw/GQ3Rq6qq1NzcLEmyWq2qqqrShg0bdMYZZ0iKTKoXFhZmcokAAACYZN3+QTXFAnKPN1rBEjlu7fbLpNYVuV32SDDudsbrV2IfVxTmsYknAAAAgBGlPUTv7u5WcXGxJGnFihV6+umnde2110qSvvjFL2r16tVqamqSYRh67bXXdNlll6V7iQAAADBROGxod29/pJc8FpAnVa90+QZNOY/NalFlSb5qy1xDwvJIDUthPrUrAAAAAMYv7SH6CSecoJUrV2rVqlW67LLLdOaZZ2pwcFC5ubm65JJL5PP59Pe//11Wq1VXXXWVvva1r6V7iQAAABingWBYLR3++CaescqVWD/5QDBsynmcdlu0diUSkte4XZGgvMypeaUO5dqoXQEAAABgrrSH6KeffrrWr1+v9evXy+Vy6bTTTtPZZ5+t448/XhaLRVdddZWuuuqqdC8LAAAA+9DlC8RrViK/e9XQ4dXWXd3q+J+/yzCpdqWiMC8Sksc28Yxt6FnmUnmBndoVAAAAAGmV9hD9jjvuUH9/v9atW6c1a9boySef1OOPPy63262zzjpLq1at0mGHHZbuZQEAAGS9UNjQrp7+yCaeHdHKFU+kgqWhw6ue/qAp58mxWlRV6kiZIk8Oy532jG7bAwAAAAApMvIvlPz8fJ111lk666yz1N3drbVr12rNmjV64IEH9MADD6i2tlZnn322Vq1aperq6kwsEQAAYEbqHwylTJPHeskbO3xq7vQrEDKndqUwLycejFeXOVVb5oqH5HOL85VD7QoAAACAaSLjYz7FxcW64IILdMEFF2j37t168skn9de//lV33nmn7rrrLh1xxBH64x//mOllAgAATAuGYcjjDcS7yGP95I0erxo9Pu3uGTDtXHOK8lVd5lCB+rX0wCrNL3dFu8pdKnXmUrsCAAAAYEbIeIiebPbs2briiit04okn6s4779Szzz6rt99+O9PLAgAAmFKCobBau/sjAbknWr0Smyz3+NQ3YE7tit1mVVWZI6lyJVK/Epsuz8+1KRQKaePGjVq6dKFsNpsp5wUAAACAqWTKhOgtLS1as2aN1qxZow8//FCGYejII4/UqlWrMr00AACAtPMOBJNqV7wpFSw7O/0Khs3ZxbMoP0e1bpdqolUric08XZpTlC+blWlyAAAAANktoyG6x+OJ96Fv3LhRhmFowYIFuuaaa7Rq1SpVVVVlcnkAAACTxjAMtfcF1OjxJlWuRDbwbPT41d5nTu2KxSLNLcpPhOTuWOVK5LjEaTflPAAAAAAwU6U9RPf5fHrmmWe0Zs0abdiwQcFgUBUVFbrkkku0atUqHXbYYeleEgAAwKQYDIW1s9MfCcdjm3gm1a74AiFTzpOXY43UrcSmyKNheXWZU1WlDuXnUrMCAAAAABOV9hB9+fLlGhgYkNPp1KpVq7Rq1Sodf/zxslqt6V4KAADAfusbCEamxzsiQXlDR3RDT49XLV39CplUu1LqzB3WSR4Ly2cV5slK7QoAAAAATIq0h+jLli3TqlWr9IlPfEJ5eXnpPj0AAMC4GIahPb0D0coVbzQgT9SveLwBU85jtUiVJY5ozUpq5UqN26mi/FxTzgMAAAAAGJ+0h+h33313Ws7z+9//Xvfee6/a2tp0yCGH6Ic//KGWLFky4n0fe+wxfe9730u5zW63q76+Ph1LBQAAGTYQDKm506/GeC+5L95V3tTpU/9g2JTzOHJtQ6bInfHp8soSh+w5/GQeAAAAAEw1Gd1YdLI89dRTuvXWW3XTTTfpiCOO0AMPPKDLL79cTz/9tNxu94iPKSgo0NNPPx0/tlj4kWgAAGaSbt+gGjzeREie1E3e0u2XYU7risoL7PENPFPDcqcqCvL4OwYAAAAATDMzMkS/77779PnPf17nn3++JOmmm27S888/r0cffVRf/epXR3yMxWJRRUVFOpcJAABMFA4b2tXTnzJFHgvJGzp86vYPmnIem9WiefHaFWdS7YpLNW6nCvJm5F+vAAAAACBrzbh/5QUCAb377rv62te+Fr/NarVq+fLleuutt0Z9nM/n08knn6xwOKxDDz1U1113nQ466KBxnz8UCk1o3dNZ7DVn42sHwDUA6TUwGFJTpz8SjEcD8saOyHFzp0+BkDnj5C67Ld5FXl3qUG3045oypyqL85VjG712JdveC1wDgOzF+x/IblwDgOw1k97/Y30NMy5E7+zsVCgUGlbb4na7tW3bthEfc8ABB+gnP/mJ6urq1Nvbq9/97ne64IIL9Ne//lVz5swZ1/mzuUc9m187AK4BMIdhGOoLGNrlDWp3X0i7vKHI731B7fKG5PGb000uSaX5Vs0psGm2y6bZBTma47JpdoFNc1w2FeVZk2pX+iO/+jzy9Eke01Yws3ANALIX738gu3ENALJXNr3/Z1yIPhFHHnmkjjzyyJTjM844Q3/84x/17W9/e1zPtXjxYtlsNpNXOLWFQiHV19dn5WsHwDUA4xcKG9rV3Z+YJI//ikyU9/YHTTlPri1Su1IT7SWPTZXXuJ2qKXXKYefr1QxcA4DsxfsfyG5cA4DsNZPe/7HXsi8zLkQvLS2VzWZTR0dHyu0dHR0qLy8f03Pk5uZq0aJFamxsHPf5bTbbtP/imahsfu0AuAYglT8QinaRJ23kGQ3Lmzt9GjSpdqUwP0e1bqdqo33kyYH53GKHbFY28UwXrgFA9uL9D2Q3rgFA9sqm9/+MC9HtdrsOO+wwbdiwQaeeeqokKRwOa8OGDbrwwgvH9ByhUEgffPCBVq5cOZlLBQBg2jIMQx3eQHwTz8YOvxo8XjV2RLrK23oHTDvX3OL81A083a5IUF7mVIkzN6l2BQAAAAAA8824EF2SLrvsMv37v/+7Dj/8cC1ZskQPPPCA/H6/zjvvPEnSd7/7Xc2ePVvXX3+9JOlXv/qVli5dqtraWvX09Ojee+9VS0uLPve5z2XyZQAAkFGDobBauvwpU+QNHV41dPjU5PHJGzBnExl7jjWyeafblRKW17qdqip1Kj83OyYbAAAAAABT04wM0c844wx5PB7deeedamtr06JFi/Tb3/42XufS2toqq9Uav39PT49++MMfqq2tTcXFxTrssMP0xz/+UQceeGCmXgIAAGnRNxBUY3SavCE6Rd4UrV/Z2eVXKGxO7UqJMzd1mrzMqZoyl2rdTs0pypeV2hUAAAAAwBQ1I0N0SbrwwgtHrW956KGHUo6///3v6/vf/346lgUAQFoZhqG2voFIzUo0JG/s8EZ/96nDGzDlPBaLVFnsSKpcifWTR7rKix25ppwHAAAAAIB0m7EhOgAA2SIQDGtnl18NHd74FHksJG/0+OQfNKd2JT/XGp8gHxqWV5U6lJdD7QoAAAAAYOYhRAcAYBro6R+Mh+INSfUrjR6fWrr8Mql1RWUue6KTvCy6iWc0KJ9VmMcmngAAAACArEOIDgDAFBAOG9rTOxDZuDNpijxWv9LpGzTlPFaLNK/Uodoyl6pTwvJIUF6YT+0KAAAAAADJCNEBAEiTgWBITR5/yhR5Y9JmngPBsCnnceTaVOt2RkLyeO2KS7VlTs0rdSjXZt33kwAAAAAAAEmE6AAAmKrbN6iGpJC8oSPycZPHp9aefhkm1a6UF+TFa1bi9SvR4LyigNoVAAAAAADMQogOAMA4hMOGWnv6o3Ur3pRNPBs6vOrpD5pynhyrRVWljqTKlUT9Sk2ZU648/ggHAAAAACAd+Bc4AABD9A+G1BTdwDPWSR7rJ2/2+BUImVO7UpCXE58ij3WS15ZFNvKcW5yvHGpXAAAAAADIOEJ0AEDWMQxDnb5BNUTD8cbkaXKPV7t7Bkw71+yiPNWWuRIheVIFS5nLTu0KAAAAAABTHCE6AGBGCobCau3uT3STe7zRypVIP3nvgDm1K3abVVWlDtW4I5t4xjbwrHE7VV3qlMNuM+U8AAAAAAAgMwjRAQDTli8QjG7emZgib/T41djhVXOnX8GwObt4FuXnqNadNE0eDclr3S7NKcqXzco0OQAAAAAAMxUhOgBgyjIMQ+19gcQGntEp8oZocN7eZ07tisUizSnKj9esRDrKIxPltW6nSpx2U84DAAAAAACmH0J0AEBGDYbCaunyp2ziGatgafT45AuETDmPPccanyKvjobjkX5yl6pKHcrPpXYFAAAAAAAMR4gOAJh0fQNBNXR4I1PkQzbxbOnqV8ik2pVSZ65q3K7UypXo77ML82WldgUAAAAAAIwTIToAYL8ZhqE9Pf1q7h6I9pN7I0F5NCzv8AZMOY/VIs0tdqRMkddGe8pr3E4V5eeach4AAAAAAIAYQnQAwJgEgmE1dyamyGMbejZ0eNXQ0adAaLcp53Hk2uKheGyKPNJT7tK8EofsOVZTzgMAAAAAADAWhOgAgLhu/2AiIPd4I5Ur0ePWbr9Mal1ReYE90ktelrqBZ43bqYqCPFks1K4AAAAAAICpgRAdALJIOGxoV09/vGalwRPZxLPJE5kw7/INmnIem9WieSWORNVKWaJ+pcbtVEEef/wAAAAAAIDpgRQDAGaY/sFQpHYlaYo8Ur3iVVOnX4Fg2JTzOO22eECeH+zVxw6Zr9ryAtW6naoscSjXRu0KAAAAAACY/gjRAWCaMQxDXb7BaOVKdBPPjkRX+a6eftPOVVGYN6SXPLGZp9tll8ViUSgU0saNG7V0aY1sNptp5wYAAAAAAJgKCNEBYAoKhQ21dvuT+slT61d6+4OmnCfXZlFVqTPeT16btIlndZlDTjt/TAAAAAAAgOxGOgIAGeIPhFKqViK/R/rJmzv9CoTMqV0pzMtRjTt1ijxWw1JZ4pDNyiaeAAAAAAAAoyFEB4BJYhiGPN5AYoo83k8emSbf0ztg2rnmFOVHgvLoNHl1dJq8tsypEmeuLBaCcgAAAAAAgIkgRAeA/RAMhdXS1R+vWWmKTpM3eCIf9w2YU7tiz7GqutShWrcrPkVeG50uryp1Kj+XLnIAAAAAAIDJQIgOAPvgHQhGp8gTlSux33d2+RUKG6acp9iRm9RJ7lRtmSs6Ue7UnKJ8WaldAQAAAAAASDtCdABZzzAMtfUNxCtXIvUr3nhfeXtfwJTzWCxSZbFD1WUO1Za5knrKI4F5sTPXlPMAAAAAAADAPIToALLCYCis5k5/JBjv8CaF5ZGg3D8YMuU8eTnW+CR5TZlLNWXRCha3U1WlDuXlULsCAAAAAAAwnRCiA5gxevsHkzbv9MUrWBo6fGrp8suk1hWVuexJQXmsfsWlWrdTFQV51K4AAAAAAADMIIToAKaNcNjQnt4BNSRVrSRv4unxmlO7YrVI80od0YDcFe0nd8b7yQvzqV0BAAAAAADIFoToAKaUgWAoUrvS4VNDhzcekMcmzAeCYVPO47TbkqbInapxu6Ld5E7NK3Uo12Y15TwAAAAAAACY3gjRAaRdt29QDdGalcZoL3mDx6vGDp9ae/plmFS7Ul6QN2yKPNZVXl5gl8VC7QoAAAAAAAD2jhAdgOlCYUO7evrV0OGNT5HHNvFs6PCqpz9oynlyrJZ47UokLI9s4BmbMHflcYkDAAAAAADA/iFhAjAh/YOhpClynxqj1SuNHp+aPX4FQubUrrjsNtW4XaqN165Ew/IypypL8pVD7QoAAAAAAAAmESE6gBEZhqFO32BiE8/kaXKPV7t7Bkw716zCvHjNSm1skjxaw1LmonYFAAAAAAAAmUOIDmSxUNhQS5dfjUkbdzbGuso7fOodMKd2JddmUXWpM6VqpdYdCcyrS51y2G2mnAcAAAAAAAAwGyE6MMP5AsF4SJ7aT+7Vzi6/BkPm7OJZlJ+jWrcrZYo8FprPLXbIZmWaHAAAAAAAANMPITowzRmGofa+wLAp8kZPJCxv6zWvdmVucX58E89IWJ7oKi9x2k07DwAAAAAAADBVEKID08BgKKyWLn/KFHnydLk3EDLlPPYcq6pLHfGJ8lhYXut2qqrUqfxcalcAAAAAAACQXQjRgSmibyAYnSD3JoXlkU08W7r6FQqbU7tS4syNVq24VFPmUG2ZK1K/4nZqdmG+rNSuAAAAAAAAAHGE6ECaGIahtt6BpHA8MlEeO+7wBkw5j9UizS12JFWuOFVbFt3Es8ypYkeuKecBAAAAAAAAsgEhOmCiQDCs5k5ftJ88uolntHKl0eOTf9Cc2pX8XGskII+G47GAvLYsUrtiz7Gach4AAAAAAAAg2xGiA+PU0z8YmSSPVq00JYXlrd1+mdS6IrfLHp0ij1WvRMLy2jKnKgrzZLFQuwIAAAAAAABMNkJ0YIhw2NDu3n41dPiiHeWp1StdvkFTzmOzWjSvxJEyRR6pYIl0lBfk8fYEAAAAAAAAMo2UDlmpfzCk5s7I9HisdiXyu1dNnX4FgmFTzuO02xIT5NFp8thxZYlDuTZqVwAAAAAAAICpjBAdM5JhGOryDaZOkXckusp39fTLMKl2paIwLxKMxzbxjE2TlzlVXmCndgUAAAAAAACYxgjRMW2FwoZau/2RYLwjFpZHesobOnzq7Q+acp4cq0VVpQ7VuF2RoDwlLHfKaedtBAAAAAAAAMxUpH+Y0voHQyl1K7Fe8sYOn5o7/QqEzKldKczLiQfjkX5yVzwkn1ucrxxqVwAAAAAAAICsRIiOjDIMQx5vIF6z0tARC8y9avT4tLtnwLRzzSnKT0yRx6fJI7Urpc5calcAAAAAAAAADDNjQ/Tf//73uvfee9XW1qZDDjlEP/zhD7VkyZJR77927Vr98pe/1M6dOzV//nx95zvf0cqVK9O44pkrGAqrtbs/EpB7vGocspln34A5tSt2m1VVZQ7Vlg3fxLO6zKn8XJsp5wEAAAAAAACQPWZkiP7UU0/p1ltv1U033aQjjjhCDzzwgC6//HI9/fTTcrvdw+7/5ptv6vrrr9d1112nk08+WU8++aS+8Y1v6LHHHtPBBx+cgVcw/fiDYW3Z1aumzv74FHksJN/Z6VcwbM4unsWO3HjNSiwgr4lWr8wpypfVyjQ5AAAAAAAAAPPMyBD9vvvu0+c//3mdf/75kqSbbrpJzz//vB599FF99atfHXb/Bx98UCeeeKKuuOIKSdK3v/1tvfzyy3r44Yd18803p3Xt0004bOjaR97WE2/vkbRnv5/PYpHmFuVHK1dcKRt41pa5VOzM3f9FAwAAAAAAAMAYzbgQPRAI6N1339XXvva1+G1Wq1XLly/XW2+9NeJjNm7cqEsvvTTlthUrVmjdunXjPn8oFBr3Y6azf+3o1BNvt47rMfYcq2pKHfF+8vhUeZlD80qdyssZfRPPbPvvC0wHsfcl708gO3ENALIX738gu3ENALLXTHr/j/U1zLgQvbOzU6FQaFhti9vt1rZt20Z8THt7u8rLy4fdv729fdznr6+vH/djprNuf0jOHIt8wdS6lkK7RXMKcjTbZdOcAptmu2yaXZCjOQU2leZbZY1v4jkQ+eXvVM9OqWdn2l8CAJNk2/UPQCquAUD24v0PZDeuAUD2yqb3/4wL0TNt8eLFstmyawPLxxb06M//3KTDDpqv+eUFqilzqDCf2hUgW4RCIdXX12fl9Q8A1wAgm/H+B7Ib1wAge82k93/stezLjAvRS0tLZbPZ1NHRkXJ7R0fHsGnzmPLy8mFT53u7/97YbLZp/8UzXgfNLtInFzq1dEll1r12AAnZeP0DkMA1AMhevP+B7MY1AMhe2fT+H718epqy2+067LDDtGHDhvht4XBYGzZs0JFHHjniY5YuXapXXnkl5baXX35ZS5cuncylAgAAAAAAAACmuBkXokvSZZddpkceeUR//vOftXXrVv3oRz+S3+/XeeedJ0n67ne/qzvuuCN+/4svvlgvvviifve732nr1q266667tGnTJl144YWZegkAAAAAAAAAgClgxtW5SNIZZ5whj8ejO++8U21tbVq0aJF++9vfxutZWltbZbUmvn9w1FFH6fbbb9d//Md/6Oc//7nmz5+vX//61zr44IMz9RIAAAAAAAAAAFPAjAzRJenCCy8cdZL8oYceGnbbpz/9aX3605+e7GUBAAAAAAAAAKaRGRuip5thGJIiO7pmm9hrzsbXDoBrAJDtuAYA2Yv3P5DduAYA2Wsmvf9jryGW7Y7GYuzrHhiTQCCg+vr6TC8DAAAAAAAAADAOixcvlt1uH/XzhOgmCYfDCgaDslqtslgsmV4OAAAAAAAAAGAvDMNQOBxWTk5Oyh6aQxGiAwAAAAAAAAAwitHjdQAAAAAAAAAAshwhOgAAAAAAAAAAoyBEBwAAAAAAAABgFIToAAAAAAAAAACMghAdAAAAAAAAAIBREKIDAAAAAAAAADAKQnQAAAAAAAAAAEZBiA4AAAAAAAAAwCgI0QEAAAAAAAAAGAUhOgAAAAAAAAAAoyBEBwAAAAAAAABgFIToAAAAAAAAAACMghAdAAAAAAAAAIBREKIDAAAAAAAAADAKQnQAAAAAAAAAAEZBiA4AAAAAAAAAwCgI0QEAAAAAAAAAGAUhOgAAAAAAAAAAoyBEBwAAAAAAAABgFIToAAAAAAAAAACMghAdAAAAAAAAAIBREKIDAAAAWeaiiy5SXV1dppcBAAAATAuE6AAAAAAAAAAAjIIQHQAAAAAAAACAURCiAwAAAAAAAAAwCkJ0AAAAYJIEg0Hdc889OvXUU7V48WKddtppuueee9TU1KS6ujrdcMMN8fuecsopOuWUU9TT06Obb75ZK1eu1KGHHqrHHntMkrRp0ybdfPPNOuuss3T00UdryZIlWrVqlX7zm99ocHBwxPP/61//0oUXXqilS5fquOOO07e//W21trbu12u64YYbVFdXp6amJj344IP61Kc+pcMPP1wnn3yyfvWrXykcDo/43+G+++7T2WefrSVLlujoo4/WRRddpPXr1w+772OPPaa6ujo99thjWr9+vS644AIdeeSROuWUU4ad/95779Xpp5+uJUuW6IwzztBf//pXSVIgENAvfvELnXLKKVq8eLFWrVqlF154Yb9eNwAAALJXTqYXAAAAAMxU3//+9/WXv/xF1dXV+vKXv6xAIKD7779fb7311oj3DwQCuuSSS+Tz+XTKKafIZrPJ7XZLkh555BE999xz+tjHPqaPf/zj6u/v12uvvaY77rhD9fX1uuuuu1Kea8OGDfrKV74ii8WiM844Q7NmzdKGDRv0xS9+UUVFRfv92n72s5/ptdde08knn6wVK1bo2Wef1V133aXBwUFde+218fsZhqFrrrlGzz77rObPn68vf/nL8vl8Wrt2rb7+9a/re9/7ni699NJhz//000/rpZde0kknnaQvfelL6uvrS/n8rbfeqnfeeUcnn3yyrFarnnrqKV1//fUqKirSww8/rI8++kgrV67UwMCA1qxZo2984xt66qmnVFNTs9+vHQAAANmFEB0AAACYBBs2bNBf/vIXLVq0SH/4wx/kcDgkSVdeeaXOPffcER/T1tamuro6/eEPf1B+fn7K56688krdeOONstls8dsMw9D//t//W48++qjeeOMNHX300ZKkcDisH/7whwoGg3r44Yd1zDHHxO//ne98R2vWrNnv1/fuu+/qiSee0KxZsyRJV111lU4//XQ99NBD+sY3viG73S5J+stf/qJnn31Wxx57rO6999747V/72td03nnn6Wc/+5k+8YlPqLq6OuX5X3zxRd17771avnz5iOffunWrnnjiCZWVlUmSzj//fH3uc5/Tddddp4MOOkhPPvmknE6nJGnFihW69tpr9eCDD+oHP/jBfr92AAAAZBfqXAAAAIBJ8MQTT0iSvvGNb8QDdEmaNWuWLr744lEf92//9m/DAnRJqqysTAnQJclisejLX/6ypEhoH/PGG2+oqalJJ510UjxAj93/uuuuG/Y8E3HVVVfFA3RJKisr0yc+8Ql5vV5t3749fvuf//zn+OuKBeix13PppZcqGAzG/1sl+8QnPjFqgC5JX//61+MBuiQtWbJE1dXV6unp0bXXXhsP0CXp9NNPV25urrZs2TKxFwsAAICsxiQ6AAAAMAligW1sOjzZUUcdNeJj8vLyVFdXN+LnAoGAfv/73+uvf/2rtm3bJp/PJ8Mw4p/fs2fPsHMnB+gx8+bN05w5c7Rz586xv5gRHHbYYcNumz17tiSpt7c3ftvmzZvlcDi0ZMmSYfc/7rjjUtabbKT7JzvkkEOG3VZRUaGmpiYtWrQo5XabzaaysrKU/0YAAADAWBGiAwAAAJOgr69PVqtVpaWlwz4X6zkf6XaLxTLi56655ho999xzmj9/vs444wy53W7l5OSop6dHDz74oAKBQPy+sRB7tPOUl5fvd4heUFAw7LacnMg/L0KhUPy2vr4+zZkzZ8TnqKioiN9nqNHWPpbzj/a5YDC41+cEAAAARkKIDgAAAEyCgoIChcNhdXZ2ptSOSFJHR8eIjxktQH/nnXf03HPPacWKFfrNb36TUseyceNGPfjggyn3Lyws3Ot52tvbx/w69ldBQYE8Hs9e1zFS6D3afwsAAAAg3ehEBwAAACZBrG7kzTffHPa5t956a1zP1dTUJEk66aSThvWZ/+tf/xr13CN9bufOndq1a9e4zr8/Fi1aJL/fr3feeWfY51577TVJI1ezAAAAAFMFIToAAAAwCVatWiVJ+vWvf63+/v747W1tbcMmx/elsrJSUmTD0GQffvihfvOb3wy7/9FHH62qqio9//zzKUG6YRj6+c9/nlK3MtnOPfdcSdIdd9yhwcHB+O2tra267777lJOTo7PPPjtt6wEAAADGizoXAAAAYBIsX75cZ511ltasWaNVq1bp1FNPVSAQ0Nq1a7VkyRI999xzY64sWbJkiZYsWaK1a9eqra1NRxxxhFpbW7V+/XqtXLlSf/vb31Lub7Va9eMf/1hf/epXddlll+mMM87QrFmz9Morr6itrU11dXV6//33J+NlD/OZz3xGf//73/Xss8/q7LPP1kknnSS/36+1a9eqq6tLN9xwg6qrq9OyFgAAAGAimEQHAAAAJslPf/pTfetb31I4HNZDDz2kF154QZdccomuuuoqSSN3gY/EZrPpnnvu0fnnn6/GxkY9/PDD+uijj/Td735X//Zv/zbiY5YvX677779fRxxxhJ5++mk98sgjqqys1H//93+ruLjYtNe4LxaLRXfeeaf+/d//XTk5OXr44Yf1xBNP6OCDD9Z//ud/6rLLLkvbWgAAAICJsBiGYWR6EQAAAEA2+dOf/qQf/OAHuvHGG/WlL30p08sBAAAAsBdMogMAAACTpK2tTUNnVnbv3q27775bNptNJ598coZWBgAAAGCs6EQHAAAAJslvfvMbvfDCCzr66KPldrvV2tqq5557Tl6vV9/85jc1d+7cTC8RAAAAwD4QogMAAACT5MQTT9TWrVv1wgsvqKenR3a7XXV1dfrSl76kVatWZXp5kqTNmzdr3bp1+7zfvHnzdN5556VhRQAAAMDUQic6AAAAkMUee+wxfe9739vn/Y499lg99NBDaVgRAAAAMLUQogMAAAAAAAAAMAo2FgUAAAAAAAAAYBR0opskHA4rGAzKarXKYrFkejkAAAAAAAAAgL0wDEPhcFg5OTmyWkefNydEN0kwGFR9fX2mlwEAAAAAAAAAGIfFixfLbreP+nlCdJPEvlOxePFi2Wy2DK8mvUKhkOrr67PytQPgGgBkO64BQPbi/Q9kN64BQPaaSe//2GvZ2xS6RIhumliFi81mm/ZfPBOVza8dANcAINtxDQCyF+9/ILtxDQCy10x6/++rnpuNRQEAAAAAAAAAGAUhOgAAAAAAAAAAoyBEBwAAAAAAAABgFIToAPZL/2BI697brZ1d/kwvBQAAAAAAADAdG4sC2C9X/f5Nrd+yR3abVc9ev1LVZc5MLwkAAAAAAAAwDZPoACaso29A67fskSQFQmE98XZLhlcEAAAAAAAAmIsQHcCEbW/3phw3dHhHuScAAAAAAAAwPRGiA5iwoSH6jnZfhlYCAAAAAAAATA5CdAATNixEZxIdAAAAAAAAMwwhOoAJGxqa7+kdkC8QzNBqAAAAAAAAAPMRogOYsG1twyfPGzqodAEAAAAAAMDMQYgOYELCYWPEwJzNRQEAAAAAADCTEKIDmJDdvf3yD4aG3b6DSXQAAAAAAADMIFkRot9zzz06//zzdeSRR2rZsmW66qqrtG3btpT7DAwM6KabbtJxxx2nI488Ut/85jfV3t6eoRUDU9/QTUVjmEQHAAAAAADATJIVIfprr72mL3/5y3rkkUd03333KRgM6vLLL5fPl5iY/clPfqLnnntO//Ef/6GHHnpIe/bs0dVXX53BVQNT2472kSfOR7sdAAAAAAAAmI5yMr2AdLj33ntTjlevXq1ly5bp3Xff1cc+9jH19vbq0Ucf1e23365ly5ZJioTqZ5xxhjZu3KilS5eO+Vyh0PB6i5ku9pqz8bVns61tvSPevqPDy9dCluEaAGQ3rgFA9uL9D2Q3rgFA9ppJ7/+xvoasCNGH6u2NhH/FxcWSpE2bNmlwcFDLly+P32fhwoWqrKwcd4heX19v6lqnk2x+7dnonW2dI97e2t2vV994S3k2S5pXhEzjGgBkN64BQPbi/Q9kN64BQPbKpvd/1oXo4XBYP/nJT3TUUUfp4IMPliS1t7crNzdXRUVFKfd1u91qa2sb1/MvXrxYNpvNtPVOB6FQSPX19Vn52rOZ5/kXJQ1Ikuw2iwIhI/65suqDdNCsggytDOnGNQDIblwDgOzF+x/IblwDgOw1k97/sdeyL1kXot9000368MMP9d///d+T8vw2m23af/FMVDa/9mwTChtq9CS6z49b4NaLHyY24m30+HXI3OJMLA0ZxDUAyG5cA4DsxfsfyG5cA4DslU3v/6zYWDTm5ptv1vPPP68HHnhAc+bMid9eXl6uwcFB9fT0pNy/o6NDFRUV6V4mMOXt7PRrMGnyfOXBqe+Thg42FwUAAAAAAMDMkBUhumEYuvnmm/XMM8/ogQceUHV1dcrnDz/8cOXm5mrDhg3x27Zt26aWlpZx9aED2WJ7hzfl+IjqEpW57PHjHUM+DwAAAAAAAExXWVHnctNNN2nNmjX6z//8T7lcrnjPeWFhofLz81VYWKjzzz9fq1evVnFxsQoKCnTLLbfoyCOPJEQHRrC9rS/leL7bpVq3Ux5vQBKT6AAAAAAAAJg5siJE/8Mf/iBJuuiii1Juv/XWW3XeeedJkr7//e/LarXqmmuuUSAQ0IoVK3TjjTemfa3AdLAjKSQvzMtReYFd890uvdXYFf08k+gAAAAAAACYGbIiRH///ff3eZ+8vDzdeOONBOfAGGxvT4Tk88tdslgsqnU747e1dPk1EAwpLyc7NpcAAAAAAADAzJUVnegAzJUcoh9Q7pIUqXSJCRtSc6c/7esCAAAAAAAAzEaIDmBcAsGwmjsTdS7zoyF68iS6JDVQ6QIAAAAAAIAZgBAdwLg0enwKG4njA8qd0d9dKffb0c7mogAAAAAAAJj+CNEBjMuO9tQJ8wPKCyRJJU67ih258duZRAcAAAAAAMBMQIgOYFy2Dw3Rk7rQ5ydVuuzoYBIdAAAAAAAA0x8hOoBx2Z40YV7msqvYmZg+r00K1HcwiQ4AAAAAAIAZgBAdwLgk17nMH7KZaPJxc6dfg6Fw2tYFAAAAAAAATAZCdADjklznEutDj0meRA+FDe3s9KdtXQAAAAAAAMBkIEQHMGb+QEit3f3x4wPKh0yiDzmm0gUAAAAAAADTHSE6gDEbGorvbRJdkhrYXBQAAAAAAADTHCE6gDFL7kOXhk+eu112FeTlJO7PJDoAAAAAAACmOUJ0AGO2bWiIPmTy3GKxqDZpc1Em0QEAAAAAADDdEaIDGLPkSfTZRXlyJU2dxyQH60yiAwAAAAAAYLojRAcwZsmh+NAp9PjtSRUvTR6fQmFj0tcFAAAAAAAATBZCdABjtj1pEn1BxcghevLmooMhQy1d/klfFwAAAAAAADBZCNEBjElP/6Da+wLx41En0YfcTi86AAAAAAAApjNCdABjsmPIpqIHlI8WojtTjrfTiw4AAAAAAIBpjBAdwJhsH2OIXlGYJ0euLX7c0E6IDgAAAAAAgOmLEB3AmCSH6BaLVF3mHPF+FotFtUnT6DuocwEAAAAAAMA0RogOYEyS61zmlTiUnzRtPlRyL3oDdS4AAAAAAACYxgjRAYxJ8iT6aFUuMbXliUn0Bo9P4bAxaesCAAAAAAAAJhMhOoB9MgxjXCF68iR6IBjWrp7+SVsbAAAAAAAAMJkI0QHsU6dvUD39wfhxckg+kuROdEnaQaULAAAAAAAApilCdAD7tL29L+X4gIqxT6JLUgObiwIAAAAAAGCaIkQHsE/b21ND8AP2MYk+pyhfeTmJywuT6AAAAAAAAJiuCNEB7FPyJHqO1aKqUsde72+1WlIqXRramUQHAAAAAADA9ESIDmCfdiSF4DVlTuXY9n3pqE2aVmcSHQAAAAAAANMVITqAfdrWngjB55fvvcolfr+kSfQdHV4ZhmH6ugAAAAAAAIDJRogOYK8Mw1BD0iT5AWMM0ZMn0fsHw9rTO2D62gAAAAAAAIDJRogOYK/29A7IFwjFj8c+iZ56vx3tVLoAAAAAAABg+iFEB7BX29pSw+8FY55Ed6YcN3SwuSgAAAAAAACmn6wI0V9//XVdeeWVWrFiherq6rRu3bqUz99www2qq6tL+XX55ZdnaLXA1DJ0U9CxTqJXljiUa7OM+jwAAAAAAADAdJCT6QWkg8/nU11dnc4//3xdffXVI97nxBNP1K233ho/ttvt6VoeMKVtT6phycuxam5R/pgeZ7NaVF3mjE+yM4kOAAAAAACA6SgrQvSVK1dq5cqVe72P3W5XRUVFmlYETB/JIfp8t0tWq2Uv90413+2Kh+hMogMAAAAAAGA6yooQfSxee+01LVu2TEVFRTr++OP17W9/W6WlpeN+nlAotO87zTCx15yNrz0bbG/ri388v9w5rv/PNWWO+McNHV4Fg0FZLGMP4TE9cA0AshvXACB78f4HshvXACB7zaT3/1hfAyG6IlUup512mqqqqtTU1KSf//zn+spXvqL/9//+n2w227ieq76+fpJWOfVl82ufqUKGkTJB7gj2aePGjWN+vM2feGzfQEj/ePVNFeeP7z2F6YNrAJDdxnoNMAxDXQNhlfLnATBj8HcAILtxDQCyVza9/wnRJZ155pnxj2Mbi5566qnx6fTxWLx48biD9+kuFAqpvr4+K1/7TNfc6VMwvDt+fNyhB2jp0qoxP77H1aZ733ojflwwd4GW1o7/JzwwtXENALLbeK4BgWBYn7vnFW1q6dF5R1bqZ59dkqZVApgM/B0AyG5cA4DsNZPe/7HXsi+E6COorq5WaWmpGhoaxh2i22y2af/FM1HZ/NpnqgZPf8rxwlmF4/p/vKCiMOW4sbNfxy7ga2Sm4hoAZLexXAPWv7tHm1p6JEmPvdWia0+rU3WZMx3LAzCJ+DsAkN24BgDZK5ve/9ZML2Aq2rVrl7q6uthoFFlv6Gag88vHF3TMK3EoJ2kj0gY2FwWArPb+7t6U4yaPL0MrAQAAAICxy4pJdK/Xq8bGxvhxc3OzNm/erOLiYhUXF+tXv/qVTj/9dJWXl6upqUk/+9nPVFtbqxNPPDGDqwYyb1tbIvQuyMtRRUHeuB6fY7OqqtShHR2RkCT2OwAgO21L2qxaknZ2+TO0EgAAAAAYu6wI0Tdt2qSLL744fnzrrbdKks4991z96Ec/0gcffKDHH39cvb29mjVrlk444QR961vfkt1uz9SSgSkheRJ9frlTFotlL/ceWa3bFQ/PmUQHgOyW/M1ZSWrt7h/lngAAAAAwdWRFiH7cccfp/fffH/Xz9957bxpXA0wf29sTYccB5QUTeo75bqdeSHo+wzAmFMYDAKa3cNjQtvbUSfTWbibRAQAAAEx9WRGiAxi/QDCs5s5EuHGAe2Ibv9W6XfGPe/uD6vINqtSVnp/yeK+lR/U7u1TitKuiME8VBXmqKMxTfm52bHoBAFNJa0+/+gfDKbft7GISHQAAAMDUR4gOYERNnT6Fwkb8eH65ay/3Ht3QzUh3dHjTEqK/uq1DF/zXKzKM4Z8rzM9JCdUrCvNUnvTxojlFmlOcP+lrBIBssnVP37DbWulEBwAAADANEKIDGNGO9tTe2gMmGKInT6JLUkOHT0fWlE54XWP10CsNIwboUmQivrc/OKybN8ZikX7x+aU658h5k7hCAMguQzcVlaSWLj81XwAAAACmPGumFwBgatpuUoheVeqQNSkb2ZGGzUXDYUMbtnZM+PGGId3y183qHwyZuCoAyG5bR/jGpTcQUk9/MAOrAQAAAICxYxIdwIiSQ/RSZ65KnBOrYMnLsamyxBHvV2/o8Jmyvr35YE+vOryB+PE3TzlQxx5QprbegcSvvtSPu3yDKc/R3jegR/7VpIuXzZ/09QJANhi6qWhMa7dfxY7cNK8GAAAAAMaOEB3AiJInxifahx5/vNsVD9HTMYn+0kepU+jnHDlPCysK9vqYgWBIe3oG9JlfvyRPNIC/54Vt+uKxNcq18UM7ALC/RqvQau3q1yFzitK8GgAAAAAYuymXDIVCIf31r3/V//f//X/6xje+offff1+S1Nvbq7///e9qb2/P8AqB7LA9KeyYaJVLTPLmokO71ifDhq2J68SconwtGMP683Jsqi5z6n+dMD9+284uv/781s7JWCIAZBXvQFCt3f0jfq6lm81FAQAAAExtUypE7+np0Re/+EVdf/31WrNmjdavXy+PxyNJcjqduuWWW/Tggw9meJXAzNc/GFJLUthxgHv/J9FjOn2D6h5SnWKmYCisV7d54sfLF7rHtWHdRcvmqzAv8UM6dz+/VaHwKHZmWh0AANWySURBVDuUAgDGZOg+G8laugjRAQAAAExtUypEv/322/Xhhx/q3nvv1bp162QYieDKZrPp9NNP1wsvvJDBFQLZYWjlygEV+xei1w4J4Rs8kzeNXr+zW70DiU3qlh9YPq7HFztydfHy2vjx9navnqpvNW19AJCNtral9qHnJO043do18oQ6AAAAAEwVUypEf/bZZ3XRRRfphBNOGHFydP78+dq5k2oFYLINrVyZv9+T6M6U4x2TuLnoy1tT+9CXL3SP+zn+1wkHyJFrix//+rmPUr6pBwAYn61JFWFWi7S4qjh+TJ0LAAAAgKluSoXovb29qqqqGvXzwWBQoVAojSsCpo4mj0+vbOtIS7XI9vbUkHt/NxatLnMq+ftiDZPYi/5yUh/6AeUuVZY4xv0c7oI8ffHYmvjxll29enbzHlPWBwDZaFvSJHpNmTPlm7MtTKIDAAAAmOKmVIheU1Ojd999d9TPv/TSS1q4cGEaVwRMDW83demk25/XBb95Rf/2p7cn/Xzb2xNhx6zCPBUkdYRPRH6uTXOL8uPHkzWJ3j8Y0r92dMaPl01gCj3mqx9fILstcYn8VYan0Q3DUGu3n4l4ANNS8iT6gooCzS1O/Jmwq7tfYfaeAAAAADCFTakQ/bOf/aweffRRPfXUU/GgyGKxKBAI6Be/+IVefPFFfeELX8jwKoH0u/ef2+MT6I+9tVPPvT+5U9E7kibR93cKPSa5F72hY3Im0d9s7NRAMBw/nkiVS8yc4nydf3TiJ2M2NnUNq4pJl2AorC/916tadut6fem/Xp1SYdOOdq8u+t3ruvkfHjYHBDCicNhI+ebswgqX5ib9lFAgFFaHN5CJpQEAAADAmEypEP2SSy7ROeeco+uuu06f+tSnJEnf+c53dNRRR+mee+7R5z//eX3uc5/L8CoxEqZjJ08gGNZzW1JD8x8/+Z4CSWGx2bYl1a0sMClEn1+e6EWfrEn0DUNC7mULJh6iS9LXVy6ULWnzu1+t/2i/nm+innu/TRu2RV7bhm0demV7ZsL8kfz06S16eWuH3t4d0E1Pbs70cgBMQS3dfvUPJv7MWlBRoHkl+Sn3aaUXHQAAAMAUNqVCdIvFoltuuUUPP/ywPvOZz+jjH/+4DjnkEH3+85/XQw89pJtuuinTS8QQobChb/5hoy58fI/ueOaDTC9nRtqwrUO9A8GU27a1e3X/y9sn5Xy9/YNq7xuIH0/GJHp734D6hrwmM7z0UaIP/ZA5hXIX5O3X89W4nTr7iMr48YZtHXqjoXMvj5gcz7y3K+X4vZaetK9hJIZhpEznv/Bhm7r9gxlcEYCpaFtb6k8fLSh3aW5x6n4V/CQLAAAAgKls/4qOJ8kxxxyjY445JtPLwBi80dCppzZFAr7/fH6bPnX4XC2pKsnsomaYv727a8Tb73z2I51z5DzNKswf8fMTtWPIpqIHmDWJ7namHDd0eHVYZbEpzy1JfQNBvd3cHT8+4cByU573qpMW6s9v7Ywf//q5j/S7Sz9mynOPRShsDNvU9L3WqRGib2/3poTmgyFD697bnVKDAwDJm4pK0sJZBcq1ps5xsLkoAAAAgKlsSk2iY/pJ3hhMkh7a0JChlcxM4bChZ97bHT9O3uCzbyCo255+3/Rzbh/SV25WiJ48iS5JDSZXury2vSPeGy/tXx96soNmF+pTh82JH6/fskfvtnTv5RHmerOxc1hX8FSZRH+rsWvYbU/Vt6Z/IQCmtORNRYvyc+R22VXkyJHTbovfTp0LAAAAgKlsSk2in3LKKbJYLHu9j8Vi0bp169K0IuxLdZlTJyx066VopcMTb7fof5+5SCVOe4ZXNjO81dSltt5Etcr1nzxYf/pXc3wS+X/eaNaFx9dqaXWJaefckdSHbrFINWXOvdx77GqHTKJvbzd3c9GXP0rUitisFh17QJlpz/2Nkw/U00k/EfCfz23Vr798lGnPvzfJ30SJ2drWp0AwLHtOZr8PurGpa9htL37Yrp7+QRXl56Z/QQCmpG3Jm4rOKoj/Xa+yxKGP9kQ+xyQ6AAAAgKlsSk2iH3vsscN+HX300Zo7d65aW1tVUFCgj30sfTUKGJsLj6+JfzwQDOtP/2rO4Gpmlr8PqXL51OFz9KOzD0u57cYn3lU4bN7GrsnhdmWxQ/m5tr3ce+yc9hzNLkp0lDd0mBuiv5TUzb2kqliFJoa4i6uKtfLgivjxU5ta48HPZDIMY8QQfTBkpOX8+/JW0/B++EAorGc3D18zgOy1dU/yZtUF8Y+Tf5qthUl0AAAAAFPYlJpEX7169aif27Jliy6//HKtWrUqjSvCWJxSVyG3w6oOf1iS9PCrDbp8xQGyWvf+UwWTbWeXX7945gOVOHL1ndPrTAuD08UwjJQ+9COqijW32KG5xQ6tOqJST77dIkl6u6lLj77ZrM8dU23KeZNDdLOqXGJq3S7t7olM1u8wsc7F4w1oc1JP+AkLzelDT3b1KQfqhQ/aJEmGId39/Fbd8fkjTD9Psq1tfaNO7L/X2qNDK4sm9fx74w+EtKW1d8TP/fWdXTr3SHrRAUjegaB29SSmzBfOSvy5Upm0uWgrk+gAAAAAprApNYm+N4cccoi+8IUv6Pbbb8/0UjBEjs2qTy5IVHU0dPj04kftGVxRxLf+8Jb+541m/faf2/V//ro508sZtw/39KUEzZ9M6uX+3qcPkSPpmwI/ffp99fYPygyTGaInby5q5iT6hqQpdMm8PvRkH5tfllIR8/jGnWrymNvrPtTfh0yhJ7ddbc7w5qKbWroVTPoJiKK8xB8n//iwzbSvRwDT29BvBCZPoleWJEL0Pb39GgyF07YuAAAAABiPaROiS5Lb7dZHH32U6WVgBJ9Y4FCuLZHwPbRhR+YWo8hmjP9qSFRN/PH1RjV3Tm7gaba/bUqtcjn9sNnxjytLHPrGyQvjx+19A7pr/f6/Nzq9AXX7E+Hn/EmYRI/Z3TMgXyBoyvO+vDXxTRt7jlVH1Zaa8rxDXX3ygfGPQ2FD9/xj66ScJya5yqXW7dThlcXx40yH6BuHbCr6hUMTwVggGNb6LXvSvCIAU9HWttTqqYUViT8H5pYk6lzChrS7h2l0AAAAAFPTtAnROzs79eijj2rOnDn7vjPSrjTflhLyPrtlz6RP6e7NfS/tSDkeDBn69XOTG3iaLXkKeUGFSwfOKkz5/BUnLlB1WWKK73f/3D4srBivbcMmBs2eRE99vkaTvkaSJ9GPqS2dtOqeEw8q15KqRJD9yL+atWeSQp89vf0pG3eetmi2Dp2bqG95r7VHhmFeF/54JfehzynK08kHOOSyJ/67P1XfmollAZhitrYl/lyxWS2qSfqJpOQ6F0lq7SZEBwAAADA1TakQ/eKLLx7x17nnnquTTjpJW7Zs0TXXXJPpZWIUFx6X2GDUMKQ/vNaYkXXs6u7X2hECvD/9qymjwf547Ozyq35nd/z49MOGf/MoP9emH5x5aPw4GDZ085Pv7VewumNIiG7+JLoz5XhH+/7//2jt9qeE/5NR5RJjsVj0jaRp9EAwrP96cduknOvZzXuU/L/ytENna9HcxDdSunyDKT3D6ZY8ib60ukR5NotOOWRW/Lbn32+Td8CcnzQAMH0lf3O3utShvJzEN9uSJ9ElqaWLzUUBAAAATE1TKkQ3DGPYL0mqqqrSl7/8ZT355JM666yzMrxKjOaY2lIdMicR8v2/15s0EAylfR0Pv9KQ0tUcEwwb+pUJlSfp8Pd3U6tcPnno7BHv98lDZ2vFgYlNNF/4oG2/ajSSu2tzrBZVlTr2cu/xGxqim9GL/vJHQ/rQDzR/U9Fkpy2arYNnJ6pLfv9qozq9AdPPk1zlUurM1dG1pVo0N3Uj0UxVuuzu6VdL0sToEdUlkqRPH574Zs9AMKxnqXQBst62pEn0hRUFKZ8bOonewuaiAAAAAKaonEwvINlDDz2U6SVgP1gsFl14fK1+8PgmSVKHN6C19bt0zpHz0raG/sGQ/jtpAv7AWQUqyMuJ12L8z5vN+sbJB6b8OPlU9LekEH12UZ6OqCoZ8X4Wi0U3rjpUn/rliwpFv3Hw4zXvacVB5SnTfmO1PSnUri5zKtdm7vfZCvNzVV5gV3tfJHRO3jh1ol5K6kMvyMvRknnFe7n3/rNaI9Po3/rjRkmSLxDSfS9t13WfrDPtHN6BoP6ZtDnvKYfMVo7NqkWVqSH6ey09OuWQkb/BMpneGtKHfmR1sdTVrZUHl8tpt8kXiHzzbG19q84+ojLt6wMwNYTDhra3JybRF1Sk/nSTw25TqTNXnb7IXhyt3UyiAwAAAJiaptQkOqa/c46cp4K8xPdmHkzzBqNPbGyRJ2kq+NLl83XdaQfHj0NhQ3et/zCtaxqvTm9Ar233xI8/eegcWa2WUe9/0OxCXbJsfvx4R4dPv/vnjnGds6NvQP/7z/UpNTgHmFzlEpO8uej+TqIbhpHSh37cAWXKMTn4H8mZi+emTNXf//IO9fYP7uUR4/OPD9oUCIbjx6dFfxKhKD835acDNrf2mnbO8UjuQ7dZLfENT/NzbSmVLs+9v8e0zWMBTD8t3X71DyauZQuGTKJL0tykaXQm0QEAAABMVRmdRH/88ccn9LhzzjnH1HXAPAV5OTr/qHl6YEODJOnNxi5t2tmtwyd5OliKBKq/e2l7/LgoP0fnHTVPjlybjq4t1RsNkeDvsbd26hsnH2h637dZ1m3ereQ2mk8etu9J42+depD+snGnOqLfQLhr/Yc676h5ml2Uv9fHDQRDeuDlHbrr2Y/UO6S/erL+n9W6nfH/Fw37OYm+vd2bshHdsknsQ0+WY7Pq6ysX6obH6iVJPf1BPfRKg6466cB9PHJskqtc8nKs+vjBiYqaRXOL1NwZmdbMVJ1Lch/6IXMK5UjaUPSMxXO15p3IN2P6B8N6bkubzlwyN91LBDAFJFe5SMPrXCSpssSh96LXMibRAQAAAExVGQ3Rb7jhhnE/xmKxEKJPcRceXxsP0SXp96826Nbzlkz6eV/Z5tGWXYnJ3C8eWyOnPfIlfu2pB+vCe1+VFJlGv3P9h/r555dO+pom4m/vJgLUovwcHb9g38FwsSNX/3Z6XTzU9QVC+unaLfr5F5aOeH/DMPS3d3fr1rWbRwyyT100S1eceMDEXsA+zE+aRI9MKYaUnzv+6hlJenlrah/6CZPch57svKOq9MtnP4yH+Pe+uF2XLT8gJVCeiGAorPXvJ7rETzyoPP51LEmHzi2Kh+zbO7zyBYIpn59swVBY7zQnNr1dGu1Djzm5bpYcuTb5ByOVLk/VtxKiA1kqeVNRaXidiyRVJm0uysaiAAAAAKaqjIbozz77bCZPj0ly0OxCHb+gTK9si1SSPP5Wi2749CIVO3In9bz3v5yYQrdapIuW1caPTzjQrY/NL9XrOzqja9qpq08+cMQfLc8kXyCoFz9six9/YtHsMfeSf+6Yav3+1UbV74wEnI+9tVNfPr5WR9eWptxv085u/XjNe3o1qTIm5uDZBfrBmYfq4wdX7Mer2LvknwAwjMg0+dANM8fq5aQ+9DKXXXWzC/dyb3PZc6z66scX6KYn35MU2QPgv19r1OUr9u+bD6/v6FSXL1ENc9qQTWWT/1sZhrRlV6+Oqkn9fzyZPtjdFw/IJenIIed22G06+ZAKPVUf6fVfv2WP/IHQfn9zAcD0kzyJXuzIldtlH3af5DqXTt8g1wsAAAAAU1JGO9HnzZs3oV+Y+i5O6uj2D4b06BvNk3q+Jo8vpQLj9MPmqKo00VltsVh0bVI3etiQ7lr/0aSuaSL+8UGbBpK6sD956Ng3jbRZLfrR2Yem3PajJ95VONoNs6enX//2p7e16lf/HBagl7nsuuWcw/XUNSdOaoAuSQuHTCLe/fzWCT1POJzah75soXuv3fGT4YKP1ai8IBEK3f38R/vdAZ78dWyxaNjGoYcO+YZDuitdkvvQpeGT6FKk0iXGPxjS80mT9QDSY2NTl8745Yv6wj0bMlaTsm3IpqIWy/BrdPIkukSlCwAAAICpiY1FMSlOO3S2ZhXmxY8ffqVBhmHs5RH758ENO1J6xC9dPn/YfZYvLNdxB5TFj/+ycac+2tM37H6ZlFzlkpdj1cq68QXaR9eW6dwjE99oqt/ZrYdeadCv1n+ok25/Xn96o1nJ/xtybRZ99eML9Nx3TtKFx9emZVPOQ+cWaUlVom/9ibdbUibKx2rzrh51Jk1sL09TH3oyh92mK1cujB+39wX0wMsNe3nE3hmGoWc274ofH1VTqoqk95EkVZU6VJi0eW+6Q/TkPvSi/BwtGGFvgZPrZik/N/G19NekDWsBpMdP/rpZ77X26NXtHn3nT29P6p/Bo9m6JzGJvqB85J/8qixxpByzuSgAAACAqWjKhehtbW26++679c1vflOXXnqpLr744pRfl1xyybif8/XXX9eVV16pFStWqK6uTuvWrUv5vGEY+uUvf6kVK1ZoyZIluvTSS7Vjxw6TXlF2yrVZ9cVja+LH29q9eumjjr08YuK8A0H98fWm+PGhc4t0bFJYnmzoNPqdz344KWuaiMFQWM9uToToJx5UMaGu6xs+fYicST8Kf+MT7+r2v38gXyCUcr/TD5utZ65dqe+fMflVO8ksFot+/JnDlTyQeONf3tVgKDz6g0awYWgf+sL09aEnu/D42pRvGP3fF7aqp39wL48Y3fu7e9XkSUxhDq1ykSSr1aJD5iZqa95rSfckelf84yOqS0ac/nfl5eikg2fFj9dv2aP+wdCw+wGYHIOhsDY2d8WPX/qoQ//4cPzfrNwffQNB7epJBOILZ428mffc4tRJ9BYm0QEAAABMQVMqRN+yZYvOPPNM3X333WpsbNSrr76qzs5ONTQ06LXXXtOuXbsmNEnl8/lUV1enG2+8ccTP/9d//Zceeugh/ehHP9Ijjzwih8Ohyy+/XAMDA/v7krLal46rkS0pYHvolR2Tcp7H3mxWb3+iQuOyE+aP+CPjknT8AnfKxPKT77Tow929I9433V7d5lFP0us4/bCxV7kkm12Ur6tPOXDUzx9WWaQ/fOV43XPRMSn95Ol0RHWJLvhYdfz4wz19uv+lHeN6jpc+SgRClcX5qnU793LvyZOfa9M3k/57d/sH9bt/bt/LI0b3TNJPIkgjh+hSai/6ll298cqeydbtH0z56Y2hfejJzkjaTNQXCOn599tGvS8Ac21t61MgmPqNyVuf2qxQmq4VkrQ9qQ9dGn0SfXZRfso3VVuZRAcAAAAwBWV0Y9Gh7rjjDjmdTj3++OPKz8/X8uXL9f3vf1/Lli3T2rVr9aMf/Ui33377uJ935cqVWrly5YifMwxDDz74oL7+9a/r1FNPlSTddtttWr58udatW6czzzxzXOcKhbJv2jL2moe+9nJXrj556Cyt3RQJBp95b7eaPd5hU2f7Ixw2dF9S+FrmsuvMw2fv9f/Dt045UC9Hp5gNQ/qPdR/ozguWmramiXp6U6LywmqRTjq4fMJfT5cuq9UfX2tSo8cXv62iIE/f+eRBOvfIebJZLRn/Wr3u1IO0tn6XuvyRqe3/WPeBzlw8W7OL9v31MRgK67WkXvfjF5QpHB7fJLuZzj9qnv7vC1u1Mxr+/PbF7brwuGqVOodvorc3f38vUeWyoNyl+WWOEf8/1c1OhFG+QEjb23s13z353xDZ2Jjapb9kXpFCodCI14CVB7llz7HGg7yn6lt02qLJ7dsHELEpaQo9ZsuuXv3PG4367FFVpp9vpGvAR3tSf0rmAPfI1zOrpNmFedrVExlc2Nnly/ifTwDGbrR/BwDIDlwDgOw1k97/Y30NUypEf/PNN3XFFVeosrJSXV1dkhSfPP/0pz+tN954Q7fddpsefvhh087Z3NystrY2LV++PH5bYWGhjjjiCL311lvjDtHr6+tNW9t0M9JrP84d0Nrox2FD+o8nX9cXDy8cdr+JemvXgLa1J6bdTqnJ1eZ39/7/IEfSkll2vbMnIEl6qn6XTp37umqK01dpMlTYMPTUO4lJ3UXldjV++J4a9+M5r1yar1v/6VcwLJ1xkFPn1rnksLWr/p30/kj/3nxhkUP3vBkJ0b2BkL7736/o2uNL9vm49zsC8ibV08zL9Wrjxo2TtMqx+cyBdv3nvyIhet9AUP/n0Vd14eKxf613+EKq35kInZa4NepryulNrYtZu6Fey6rM++bUaJ5+L3UPgZyuJm3cuDN+PPQasHRWrl5riQRjz7y7S6+9EZbdlt7NX4Fs9MI7I9c8/fSp91RttClvkt6HydeAl99N/JSX1SJ1Nn+ojS0jn7coJ6zYtxDfb9qT8es5gPHL5n8DAeAaAGSzbHr/T6kQPRwOq7w80mtcVFQkm80WD9Mlqa6uTo8++qip52xriwSXbnfqpoRut1vt7eMPGxcvXiybzbbvO84goVBI9fX1I772IwxDD7/3T30U/bHu55sG9eMLlsieY06T0J0P/Cv+cY7VouvO/tiYJpl/UNqpz//mVUmSIenvLTn61cqlpqxpIt5u6pLHn6jyOO/YBVq6dP5+PedSSZ892VDYMJSbhg1DJ2LxEkMv794QD4//2dSvKz9ZrWUL9r5J6D+f2yopMRV9wUlHao6JP+EwEYcvDuup7f/Ujo7I9P/TW/v17+ccO2xj0NE8/EqjpMQ3Ur580uFaOkpdyiGDIX1v/TPxzXT9eW4tXXrQfq1/LH71zhuSIkF6rdupE487StLo14AvWlr02iPvRNYYNNTtqBy1ogaAedreeE2Sb9jtHn9Yb/UV68qVC0w930jXgHu3bJQU+bO/usypjx115KiPP3DzRn3gicTo3nCuli5daur6AEyevf07AMDMxzUAyF4z6f0fey37MqVC9KqqKjU3N0uSrFarqqqqtGHDBp1xxhmSIpPqhYXmTTFPBpvNNu2/eCZqtNd+0bL5uvGJdyVJ7X0BrdvSplVHVO73+ba29emFDxLf6DhzyVxVlo6t0uLYBeX6+MEV+scHkdBy7abd+mCPN6VrOp2e2ZLaF3364XNN+Tqa6l+KNpv043MW69z/fEmx7Q5uenKznvrWiXsN/l/ZlgjQF1S4NK8sM93uyWw2m6497WB9648bJUn+wZDueXG7blx12Jgev27LnvjH5QV5OrrWPeKmnZLkstl0QLlLW6PfnHp/d++kX3cMw9Dbzd3x46NqSoedc+g14NRD58iesyle6fK39/boU4v3/70PYHSGYWhza2IK/OwjKrVhW4faeiM/FfJ/X9imC46tkbtgbN/gG4/ka8D29kSIf2BFwV6vUfNKHfGPW7v7ZbVaR93bBMDUlM3/BgLANQDIZtn0/s/4eGp3dyKUWbFihZ5++un48Re/+EX96U9/0qWXXqpLLrlEjz/+uM466yxTz19REeno7ejoSLm9o6MjPhWP/XPuUfPktCfeUA9t+P/Zu+/wpsr2D+DfjKZ7p4MuRlvaMkrLHi2KbBRBFBBFEfdGxfm6fvqquBU3ii/iAlFAQYYMWYVCGV2UAh1A90j3TjN+f5QmOd0jbUP7/VxXL3pOzkmf0OQ0uZ/7ue8rRrnf9ccuC7aXTxrYrvOfnibM3F29L6mzQ+qwPQn6WtjDPO3g5dgzTTJ7Qkg7m4xW16pxOq1It23YKLanzQ32QICbfqLvl+NpyCquavW80upaHE/VX4OmBbk2G0CvN8TDXvf9uaymSzcYU1phJQorlLrtEG+HVs+xtTDDZH99HfR953JRo7r266URmbKskmqUVOlLPo0Z4Ihnpg/WbZfVqPD5v8ldOgaNRotLCn35p0EuLU909rPXB9ErlGqUVqlaOJqIiIiIiKj79XgQfdKkSXjsscewe/duLF++HB999BFqa+s+/C1btgxPPvkkiouLUVZWhkcffRRPPfWUUX++l5cXXFxcEBkZqdtXXl6O2NhYhIY2v/SY2s7Owgy3hHrqtqMuF+J8TueCfiVVtfjjdIZuO9THoU1BPUOhPo6YEqAP8O1OyEFCVkkLZ3SN5LxyXUYxAMwc4t7tY+hpz80MhIOVvib9p/suIre0usljT18p0mU2A8AkX9OZ7BKLRXjaIFilVGvwxYHWg1WHLuSjVq3Vbbel5ElQP32wPqukGsWVyhaO7ryY9GLBdqiPQ5vOmzNc/3wuq1EhIsl0avIT9UYJmcK/Y0M87LBwlBf8XPUNiX8+fgWXDfqJGFtWSRWqa/XXaV8XmxaOBjwchOW4skpan3wkIiIiIiLqTj0eRJ85cyaOHTuGp59+GnPmzMEvv/yCU6dOQavVQiQS4dFHH8Wff/6JLVu24IknnoBMJmv3z6ioqEBiYiISExMB1DUTTUxMRFZWFkQiEe6++258/fXX2L9/Py5cuIDnn38erq6umDZtmrEfbp+1dHx/wfbPxzuXjf77qXRUGjSWbG8Wer2npg0WbH/aA9no/xhkoQPAjKF9L4juZC3DczMDdNsVSjXe3pHY5LFHk/VBWJEIGN9K/fTuNnOoG4Z76rPEN51MR1pB49rEhvae09fDtzSTYJJf6xMDDUsPGZZv6ArRacW672VSMQLd21b6aNoQN8gMSvPsiM829tCIyMC5bP0ktUgEBLrbQSoR46XZgbr9Ko0WH+y50GVjMJwYBoBBrQTRDTPRASCbQXQiIiIiIjIxPR5E/+ijjxAZGYkPPvgAo0ePxvbt23HvvfciPDwc7777LhISEjr9M86ePYv58+dj/vz5AIBVq1Zh/vz5+OyzzwAADzzwAJYuXYrXXnsNt912GyorK7F27VqYmxu/XmhfFdTPDmMG6Jskbj2TibLq2hbOaJ5ao8X6yMu6bTc7c8we1rHA8whvB0wNdNVt7z2Xi7OZ3ZuNbljKZYCzFQa7tRxs6K1uH+MjCD5vi81CZEpBo+OOGewb0s8Ojtbtn1jrSiKRCCtn6CdnVBotPt1/sdnja9UaHLigr4c+ebAcFmat1xMb2iCIbhg46wrRBpnowz3t29wc2M7CDOH++kmBvSzpQtSlDMs7DXS2hrV5XfubGwJdMW6gk+62HXHZiDYojWVMqfnlgm3fVsq5eDgIg+iZxU2vRCIiIiIiIuopPR5EBwALCwvcdNNN+Oabb3D06FG8/vrrGDBgANavX4/bbrsNs2bNwldffYX09PQO3f+4ceNw4cKFRl/vvvsugLqg14oVK3D06FHEx8fjhx9+wMCBHctspubdNWGA7vsKpRpbozM7dD/7E3ORXqjPUrtrfP8Wm1C2xrD8BlBXSqS7ZJdUCZo1zhzq3mebqUnEIvx3/jAYPvzX/jqLWrW+JEBpdS3iMop126ZUD93QdYNdMLq/ftLoz+hMJOc1nSl+IrUQZdX6+r/T21jOx8XWHM4GEwiJXRhEr65V45xBqaP2lk6aPbyf7vuyahWOJTeeHCEi40gwCKIHeegn20QiEV6aEyQ4dtXO89BqtTC2VINMdHtLMzi1MtnpbC0TrFjJbkMvCSIiIiIiou5kEkF0Q/b29rj99tvx888/4+DBg1i5ciUsLS3x2WefYcaMGbj99tt7eojUQbOGukNuo/8g/VPklQ59eF9n0HRSJhVjyVifTo1rmKe9oAb1vsQ8xDao/9xVDMt4AMCMoa3Xwu7NQrwdsHh0801Go1ILoTF4ykxsQ9mTnlCXja4vT6PRAp80Uypo7zn9SgSxqC5btK0/w7CkS1cG0c9llwpqtre1Hnq96UFuMJPoZ0dY0oWoa5RU1iLTIAA9pMGKlRBvB9wUrJ/UirpciH2JeTC2lHxhU9HWJofFYhHc7fV10bNLmIlORERERESmxeSC6Ibc3Nxw//33491338XUqVOh1WoRGxvb08OiDpJJxbh9jD7gnZRXjuOphe26j8TsUkSm6rNY54d4wNmm82V3nprmL9jurmx0w3roLrbmCPV2bOHovuH5WYGwt2y6yejRFH09dKlYhDEDnBqdbyom+Dpjkp8+U35HXLagzAIAaLVawUTK6AFOrWZsGhpikGWalFsuyNo3JsN66ED7M9HtrcwEdd73JOQImsMSkXEkZAvLkQ31aNy74PmZgYJJrXd3JUJl5GuHYSZ6a01F6xk2F81iJjoREREREZkYkw2iZ2Vl4dtvv8XNN9+M+fPnY//+/QgNDcWrr77a00OjTrhjnA/EBglpz/4ei/d3n0dsenGbstINs5KBjjcUbWiohz1mGmSBH7iQj1U7E1GpVLVwVueUVNYKJhGmD3GDWNw3S7kYaqnJqGGN9BHeDrC5WuvXVBlmowPAx3uFkzMJWaXIMsi4nDGkfSsRgvrZ6r5XqjWC7E9jijFYmSG3MYdng/rFbTHHoKRLabUKxwwmRIjIOBpO1A1pIoju42yFu8YP0G2n5Fdg06kMo42hvEaFnFL9dW1QK/XQ63kYNBfNYmNRIiIiIiIyMSYVRC8sLMQvv/yCJUuWYOrUqfj444+hUqnw5JNPYt++fdiwYQPuuOOOnh4mdYKHgyWmBekDhZnFVfjqYArmfXkUE9/9F6//dRbHkhVNZtQWVijxZ4y+jvr4QU6Cchad9dQ0YW30NYdTMeOTwzhw3vhL3QFg//lcqA1qk8wc2rHmqL3RkrGNm4z+HZeF8zn6uuKTTLQeuqGRPo6CxrX7EnMFAelG5XzaWA+9XsPnf1eVdDFsPhjq49Chuv0zhrhBajBJtJMlXYiMzrDBsNzGHK62Fk0e98QNfrC10E9Cfrz3IipqjDNpfMkgCx1oeyZ6P4NM9JySamg0xq/VTkRERERE1FE9HkSvrKzEX3/9hQceeACTJ0/Gf//7X2RkZGDZsmXYvHkzdu7ciUceeQReXl49PVQyksdv8BMsJa+XXVKN9ZFXcMfaExj91j48sykG/yTkoEqpBgBsiEpDjUEJiHsmGrf5a1A/Ozxyva9gX0ZRFZb/cBKP/XJGV1LEWAxLudiaSzFhkOkHhbuLRCzCm/OGCvY9s0lYymmCr2nWQ2+oYePaj/Zc0H2/xyCIHuBmCx9nq3bdt6+LjaAZX2J2081LOyO/rAYZRfqs0PaWcqnnYCUT1LDfcy63y8rPEPVVhpnoTZVyqedoLcOj1/vpthXlNfjuSKpRxpCqEK6I8W1jJno/g0z0WrUWiooao4yHiIiIiIjIGHq8FsLEiRNRU1MDKysrzJ07F3PnzsX48eMhFvd4fJ+6SLCXA/Y9cx3+jM7CPwk5gsy5eiVVtdhyJhNbzmTCwkyMyf4uggxeL0dLQTNQY3lhViBCvB3wf9sSBI3NdsRn4/DFfDw3KwB3jusPSSfLrlQp1Th0MV+3PSXQFTIpn/OGQn0ccfsYb2w8mQ4Aghra5lIxRvZ36KGRtc8wT3vMGe6OnfF1kyZHkhQ4nloATwdLQeZ4R57PZhIx/N1skHA1cNawlIMxxDRostvepqKGbhzujsNXn/fFlbWITCnA5MEunRgdEdWrrlUjOU8fwG6qlIuh5ZMG4MfIy7q/dd8eTsUd43yazV5vqxSDMUjEIvg4tS2I3rBMVFZxdafHQkREREREZCw9HrWbMGECPvroIxw7dgyrVq3CxIkTGUDvA/o7W2PFNH/sXBGOI89Pwas3DcHYgU5oKjZdXavBnnO5yCvTZ6UtmzCg04Hs5swc6o69z1yHeycNFIynrEaF1/5KwIKvjyEhq6T5O2iDI0n5qK7VB4VZyqVpDZuM1hszwAnmUkkPjKhjnpk+WPBc+njPRexLFJZy6eikkGFJl8Ts0jb1FmiPmHR9KReRqG4SrKOmD3EXvG53nWVJFyJjSc4rh8qgBMqQVsqdWZhJBH0bKpVqrN6X1OlxpCj05Vx8nKzaPEFsWM4FALLZXJSIiIiIiExIj0erv/76a8yZMwfm5uY9PRTqId5OVrgvbCA2PTQBJ1+ehvduHY6pLWRmW8kkWDTGu0vHZGMuxWtzh2Db42GC2twAEJtejJu/OIp3OtF49J8EfQBVJhXjugBm4zalYZPRehP9rq3SN36utpgf4qnbjrpciK8Opui23ezMGz3P2sowiF5QoUR+mXFLIBhmoge42XaqmauTtQwTDWrZ/5OQCxVLuhAZRcPJ3ZbKudS7JdRTcA3ZeDJdkM3eEYaZ6IPkbctCB4TlXAAImi4TERERERH1tB4PohMZcrYxx+IxPvj+njE48+p0fHnHSMwL8YCtQeBuxVT/JrOTu8IwT3v8+dgkvD53CKxl+sxntUaLbw+nYvrHh7G/QUZxQ+U1KiTnleNYsgJbozPw9cEU7D2nr4ce5ifvVGCyt1sy1gfDPIXBoInXSD10Qyum+QuysA2D3dOC3CDu4MqKhtmmCUZsLqrWaBGbrg/MdbQeuqHZw/rpvi+sUOJ4amGn75OIhOWcrGQS9HduPYAtEYvw0uxA3bZao8V7u893eAwajRaXC/SZ6L6ubWsqCgB2FlLB30JmohMRERERkSlh5I5Mlo25FDcG98ONwf2gVGkQk14Mc6kYI4wQyGsPiViE5ZMGYtYwd/zftgRBFnlmcRXuW38Ks4a6Y8xAJ+SVViO3tBo5pdXIK61Bbmk1Kq42Rm3OzKHGr+3em0jEIrw9fzgWrYlEjUqDQHfbDmdt96T+ztZYNNoLG6LSG93Wmfr+DYPoidmlmBLg2uH7M5SSX47yGv1qi87UQ683c6gbXv3rLNRXy05sj81CmP+1NylCZGoM+4sEutu2ueTZ5MEuCPeX40iSAgCw91wuoi4VYuxAp3aPIbukWlCqrD2Z6CKRCP3sLZB0NZM9q4RBdCIiIiIiMh3MRKdrgkwqxtiBTt0eQDfUz94Sa+4aje/uHg0Pe2Ht1t0JOfjv3+ew5nAq/ozJwvHUQqQqKloNoJtLxZgaxCB6a0Z4O+Cvxyfh/VuDseGB8V1WD7+rPX6DP2QS4WXXxlyKCb4dL09jb2UmeD4mZpd1+L4aik4rEmyHeDt2+j6dbcwFJV12xmejurbl1wlRR2i1WiRml6Kksranh9LlNBqtIBN9qEf7JhpfnB0IkcFl9Z2diR3qr5CiEJaCGeTS9kx0AOhn0Fw0q5jlXIiIiIiIyHQwiE7UTtOHuGHvM9fh/rCBTTZCbQsHKzMEe9lj9e0hkNuwH0BbBLrbYdEYbzhay3p6KB3m6WCJO8b5CPZdF+DS6SapQwxqH5/rZNNbQ4b10G3MpfBrR2mGltwSqq8PX1ajatRklcgY/m9bAmavPoLxq/Z3us63qUsrrBRM2g5pQz10Q0M97AWvy5j0YuyMz2nhjKal5lcItn1d2p6JDkAwIZjNTHQiIiIiIjIhLOdC1AHW5lK8ctMQzA/1xGt/ncWZtGIAgKWZBO72FnCzM4ebnYXBV922u50FXGzNYWHWuaApXbseneKLTafSUXk14DU3uF8rZ7QuqJ8d9iXmAQAuKSpQXas2ynMs+urzGgCCveyNtgJg5lB3WJqdRdXVDPStZzJxU7CHUe6bCKjrOfDT8SsAgKpaNb6PuIRVC4b38Ki6zrkGvRAalnlqi5UzAvB3XDaUqrpyLB/8cx4zhrrBTNL2fAvDILq9pRmc2jnp6WGQiZ5XVgOlStNsk3EiIiIiIqLuxCA6UScM87THlkcnoahCCalEBBtzKUSia7PUCHUPV1sLfHf3aKw9koqRPo6YOdS90/cZZBAw02iBCzllnS59VF6jwsVcfWkYY9RDr2dtLsXsYe7YEp0JADh4MR+K8hquyiCj+SchBxqDaiT/ns+FRjOsww18TV2CwQoUiViEAHfbdt+Hp4Mllk8cgDWHUwEAlwsqsTEqDXdNGNDm+0hVGDQVdbFu99/DfgaZ6FotkFtaDW8nq3bdBxERERERUVdgeg+REThay2BrYcYAOrXJJD851i0fiyem+hvlOdMw67RhVmpHxGUUC4KQxqiHbuiWkfrSEWqNFttjs4x6/9S37YzPFmznltbgrBFLHZkaw3rovi7WHV6J8uj1frC3NNNtr96fJGgu3JpLBkH09tZDB4SZ6EBdo1IiIiIiIiJTwCA6EdE1zsfJCtYyfdAs0QhBdMN66AAQYuSmvhN95XCz02eeb72alU7UWYryGhxPLWi0v77kUW9kOHHWkVIu9eytzPDYFF/dtqJcie+uZqa3pqpWg5zSGt32oHbWQweaCqKzLjoREREREZkGBtGJiK5x4gblG4wRRDesh+7laAkXW+OWWpGIRZgXos9Gj8so6fXNH6l77EnIFayiqLe/lzawVZTXINcgeD3Uw75T93f3hAGCBp/fHUlFXlnrGeFZ5WrBtm8HMtENy7kAQGYxg+hERERERGQaGEQnIuoFhnjos08Ts8ugaSqK2EZarVaQiR7qY9xSLvUWGJR0AYCt0Rld8nOob2lYyqVeQlYpsnphUNawlAsgvBZ0hIWZBM/MCNBtVyrV+Hx/cqvnZZYKy774diAT3cJMImhGml3Mci5ERERERGQaGEQnIuoFDJuLlteokFHU8WBhZnEV8sv0ma3GLuVSL9DdTjDuP6OzOhX8JyqsUCLSoJTL0AYB5f3ne19Jl4Y9EDpTzqXeLaGeCDRY3bIhKk1Q77wpmWX6ILpELIKPU/uD6ADg4aDPRmc5FyIi06XVarH2SCoWrYnE5tNMhCAiot6PQXQiol4gyIjNRRvWQw/1cejwfbVmQag+Gz2zuAonLhV22c+i3m9PQg7UBhMxz88KhNxGn9ncG0u6JBhkonvYW8DRIJO7oyRiEV6YHajbVmm0+OCf8y2ek1WmL+fi42QFmbRjbzH72evromcyE52IyGRtPpOJt3YkIupSIZ7fHIe0gsqeHhIREVGXYhCdiKgXCHS3hUik3+5MEN2wHrqZRGSUzNbmzAvxgNhg3CzpQp2xw6CUi6OVGSb6OmNKgKtu37HkAlTUqJo69Zp1LqtE931nS7kYun6wC8YPctJt74zPQXRaUbPHG2aid6SUSz3DeuzMRCciMk1ZxVV4Y1uCblut0Qr+BhMREfVGDKITEfUCVjIpBjrrA1edaS5qmIk+xMMeFmaSzgytRa52Fgjzd9Ft74rPQXWtuoUziJpWVKHEsRR9KZcZQ9xhJhFj2hA33T6lWoMjSYqeGF6XqFSqkGpQZsWYE14ikQgvzQ4S7Fu16zy02sYllzQaLbINguiDOtBUtF4/B30menFlLaqUvB4QEZkSjUaL5/+IQ1mDSendCTk9NCIiIqLuwSA6EVEvYVjSpaNBdKVKg/hMfWZraBfVQzdkWNKlrEaFved6ruTGmbQi/HT8Ck5fKUSNisG7a8nec7mCUi5zgvsBAML95YLSIl1R0qWnstvP55TBMKY9xMPeqPc/wtsBN179fwSAqEuFOHChcV35rJIqKDX67UHyTmSiGwTR6++biIhMxy8nriAiufGEdGx6ca9s4E1ERFSPQXQiol4iqJ++EWBGURVKqmrbfR/nc0qhVOmjYV1ZD73ejKFusJLps923nOn+ki4F5TV4+rcYLPjqGF798yxu/ToSw/9vDxZ9E4n3d5/HgfN5KKls//8ndZ+dZ/XLyO0t60q5AHWrNOq/B4B/z+cJgu2dUV2rxh3fHcfQ1//Bs7/HNpml3ZXOZQknyxo2UjWG52YEQGpQc+m9XRca/f+lNmg66uva8Ux0w3IuABiQISIyIZcVFXhnp75HhsSwJh+A3WeZjU5ERL0Xg+hERL1Ew3rI5zuQjW5YDx0AQrohE91KJsWsYe667cNJCuSX1XT5zwUArVaLTafSMfXjQ9ganSm4TanSIOpyIb46mILlP5xEyH/3YOYnh/Hy1nj8GZ2JjKLKbg+aUtNKKmtx1CArbsYQN5hJ9G9xpgXpS7oUVCgbNc/tqN9PZ+hKyPxxOgMnLzdfM7wrGDYVtbWQwsvRsoWjO2aA3Bp3jvPRbV/ILcPmBhNdqfnCIHpnMtH7NchEz2ZzUSIik6DWaLHy91hUGZTde+XGILjZmeu2WdKFiIh6MwbRiYh6iaAG9ZA7UtLFMLjoZC2Dj5NVZ4fVJgtCvXTfqzVabI/N6vKfmZpfjju+O4Hn/4hDcRuyzLXaugDiLyfS8NRvMQh77wAmvvsvntgQjT9OZ0BjpOxmar8953JQq25cyqXe1CBXwbYxSrpotVr8eOyyYF93N8Y1bCA8pJ8dRCJRC0d33BNT/WFtsFrkk70XBb0LDIPoDlZmcLKWdfhnudmaC5oNs5wLEZFpWHskFaev6CeLJ/o6Y9mEAZg1VJ8IcfJyYbclQhAREXU3BtGJiHoJdzsLOFiZ6bYTs8vafR/RafoPRyHeDl0WlGtogq8z3O30ZRwaZoUbk1KlwWf7kzBr9RFEphYIbgtws8WP947F50tCsWxC/6uByebvK7ukGttjs/Ds77F48KfTDKT3kF0Gy8ftLKSY5CsX3N7P3lJQ6mSfEYLokSkFSMorF+z7Oy672xrjqtQawWqThitRjEluY44HJ/vqtrNLqvGDwQSCYTmXQXLrTl03pBIx3AyuBSznQkTU8y7klOGjPRd12zbmUnywcATEYhFmDdNPXGu1dRPbREREvRGD6EREvYRIJMIQg2z0c+3MRC+qUOJyQaVuuzuaitaTiEWYF+qh247PLEFSbvsnAVpz8nIh5nx2BB/vvSio/W4uFeP5WQH4+8kwTB7sgrkjPPDGvGHYuSIcsa/PwPp7x+KJG/wwYZAzLMya/tO5LzEXayNSjT5mallJVS2OJOXrtqcPcRc0Eq1nWNLlYm450gsrGx3THusjLzfaV1bdfY1xLykqUGPwHB5q5KaiDd0fPhByG/2S/a8OJKO4UglAmIk+yKXj9dDr9TOoi55dwnIuREQ9qVatwTObYqBU6//mvDZ3CDyvlt8aM8BRsAKJddGJiKi3YhCdiKgXMSzpciG3DCqDDzytaVgnOqQbmooaMizpAgBbjJiNXlJZi5e2xGPhN5FIbpA9HO4vx56nJ+PR6/0EdbTr2VmY4brBLlg5IwAbHhyP+P+biT8fm4RXbgzCzKFugoDt+7svCLL5qevtT8wVlHK5Mdi9yeMMg+hA57LRM4oqmw2Wd1dj3IaTZEP6dV0mOgBYm0vx1DR/3XZptQpfHkhGeY0KuQZL932NEUQ3qIvOTHQiop71+b/Jgh4cUwNdsXCU/j2bVCLGjCH6v7GRKQVsxk5ERL0Sg+hERL2IYRBdqdLgkqKihaP1knLL8NaOc7ptkQgY0Y2Z6AAQ4G4rCAT+FZ3Z6fIoWm1dffWpHx/Chqg0wW1O1jJ8sngEfrx3LPo7t70RoplEjBBvB9wfPghr7hqN/84bqrtNpdHiiQ3RKKnih8fusjM+W/e9rbkUk/zkTR43zNNO0PysM0H0X06kwfCpOdhNHzg+nKRAXlnXZ0+fMwhomElE8HPtfPC6NYvHeAuahq4/dgURBqsAAGCQS8ebitbzNAiiZ5dUs4EvEVEPicsoxpcHknXbDlZmWHXr8EZluwwbxKs0Wuw1Qtk0IiIiU8MgOhFRL9IwG7UtJV22x2Zh3pdHkWJQkmFMfyfYWZi1cFbXWDDSU/d9Vkk1jl8qaOHolmUUVeLeH07iiQ3RUJQLm1wtHOWF/c9ch1tCvTpd933RaG/MHaEvRZNRVIX/bIln4K8blFbX4vBFhW57+hA3mEslTR4rEokw1SAb/URqIUqr2z/ZUV2rxkaDCZkAN1u8Plc/kaLWaLEtpusb4xpmBQ52s22yhI2xmUnEeG5mgG5bqdbglT8TBMcYJRPdoJxLpVLNSSkioh5QXavGM5tioTaYNX5r/jC42lo0Onairxy2FlLd9u6z2Y2OISIiutYxiA7g888/R0BAgOBr1qxZPT0sIqJ283O1gZlEHxRuKYiuVGnwxvYEPLEhGpVKfTNETwdLvLNgWJeOszk3h3hAbBDT3nqmYyVd4jNKcNPnEThwoUGWrNwaGx4Yjw8WjoCjQf3OzhCJRHjnlmHwcbLS7dsRn41fG2S+k/H9m5gnqNE6e3i/Fo4GpgW56r5XabQ4fDG/haObtj02C0UGy9SXTRyACYOc4WEQ+N3SwedtW2m1WsFru6tLuRiaNcwdoQalngwnqCRikeB10FH97C0F21nFrItORNTdPtpzQVAC76bgfrgp2KPJY2VSsaBs2uEkBcprVF0+RiIiou7EIPpV/v7+iIiI0H39+uuvPT0kIqJ2k0nFgkzQxOymm3PmlFRjyXfHse7oZcH+cH85tj8RBj9X264cZrNcbS0Q7u+i294Zn40qgwB/W0SnFeGOtcdRbBDoNJOI8OQNfti5IhwTfJ2NNt56thZm+OKOUMEExpvbz+F8Tvuau1L77DAo5WJjLkW4f9OlXOpN9JXD0kyfqb6vnU1AtVqtoKGonYUU80M9IBaLMD9Uv4riXHYpEtvZ2Lc9cktrUFih1G0P8ei+ILpIJMJLs4OavM3b0dIoGfGG5VwAILuEddGJiLpT1KVCrI24pNt2sTXHf+e1nGBhWNJFqdLgwPm8LhsfERFRT2AQ/SqJRAIXFxfdl5OTU08PiYioQwwDaoZ1k+sdS1bgps+P4PQVYQPMJ6f644flY+FkpAztjjIs6VKhVGPPuZw2n3vqciHu+j4KZdX67KcR3g7Y+WQ4npkRAAuzpkt9GEOwlwNemBWo265RafD4r9GoVDITqyuU16hwyCCTfFqQa6u/XwszCcIMAu0HLuS3q/numbRinM3Uv6YWjfaGlaxu+fqCkQ0a43Zhg9GErBLB9lAP+y77WU0ZO9BJkNVfz9cI9dABoJ+DsFRAdzYXjc8oaXMvCSKi3qiiRoVnf4+FYVW6dxcMb3UF33WDXQQT1bvPtv39GxER0bVA2vohfcOVK1cQFhYGc3NzhISEYOXKlfDwaHq5WkvU6vZlTPYG9Y+5Lz52IlMUaNDkUFFeg9ySSshtzKHVavHtkUv4cM9FQVNEe0szfLQwGFMCXACtBu19KRv7GjA1wAXWMgkqrmagbzmTgZuGu7dyFnDiUiHu//G0oDTNJF9nrFk6EpYySbdco+6Z4IOjyQpdGZnkvHL8318JWNVD5XF6s70J2VCq9AHwmUPd2vQ7viHABXuvZqCXVNXi5KUCjB3Ytonz9cf0WXkiEXDHWG/dzxzobIkRXvaIzagLcP8ZnYVnp/tDKjF+vsLZTGEQfbCrVbf/DV453R//ns8TXEsGOBtnHA4WEsikYt3vN7Oosssfn1arxSt/JWDjyQxIxCJ8sSQEM4a4tX4iUR/HzwG9z9s7ziGtsFK3vXCUJ64fLG/1d2wmBq4PkGPX2bq/sQcu5KGiWtmlCQzXmooaFf7z51kcSSrAwlGeeGFmAMTizvXm6Wm8BhD1Xb3p9d/Wx8AgOoDg4GCsWrUKAwcORH5+Pr788kvceeed2L59O2xs2tcgKz4+votGafr68mMnMiVmFcImmn9HxMDfyQyfnyzBySzhbYMcpHh2oj0cqzIRE9O5Os7GvAaM9ZDhwOW67NMjSQocPH4aDhbNfwiLy63BqqNFMKz8Euouw2MjpLhwrnuvTXcHiBCbJkZhVV0AcNPpDHhIyxDuY9nKmdQeG4/pV1JYSEWwr8pCTEzrjcxcVWqIANTHfjccPgtZSevlUIqq1dgZp898H+lujqL0iyhK1x8zxkWD2KsJ6PnlNfhp70mEupu35eG0S2Si/rG7WUuQcj6hhaO7zvX9LfHvZX2WuKymCDExMUa5bydzEXKuLuI4dzkbMTFdm42+4WwZ/kisy0BXa7R4eXMs7CrlsOiGhq1EvQE/B/QO0Tk1+DVK/zdGbiXGzd61bb62B1pXY9fV7yuVavy45yTGejZuRNoXlSs1ePtIES4W1pUbXBtxGRnZebg/1LbTTe5NAa8B1FVyylXYk1oFP0czTPAy7xWvl96mL73+GUQHcN111+m+DwwMxIgRIzBlyhTs2rULCxcubNd9DR8+HBJJ35ptV6vViI+P75OPncgU+VQo8cbhf3Xb5yqssD6hCGmFwgD64tFeeP2mIJh3MkOoK64B99oW4MD/TgIANFrgktoZy0MGNHnsoYv5ePdYtCCAPjXQBZ8vCYV5DwXAvnAtxNLvo3RZumtjyjF3UjAGOBun3EVfV1GjQsxW/XN82hA3jBsV0ubzR8REIia9Lps7vhAICWn93M//TYZKqw+iPzZjGEIGuwiO8fFXYn3cAdSq637xsSUWWD5rRJvH1VZZ+w/pvg8dIG/T+LvCmwOrEbU6AuU1KohFwG3hwRggb1/yQXMGnI5CTmohAKBKbNmlj/G3k+n4I1FYdqCwWoOTpXZ44ga/Lvu5RL0BPwf0HiVVtXjsnwjBvk9uH4WJ7egl4xekwpen9kN59e/ghUorPBgSbNRxXosKKpS4Z91JXQC93u6USgzydsfK6YN7aGSdx2sAdaXyGhUe+/QIckrrPsf+39wg3DW+fw+Piur1ptd//WNpDYPoTbCzs8OAAQOQlpbW7nMlEsk1/+TpqL782IlMiYudJdztLJBTWg0A2HxGmGFuLhXjv/OHYdFob6P+XGNeAyb5uaCfvQWyS+oew9aYLNw/2bfRcfsTc/HIz9FQqoVlPT5fMtIoDQ47aqKfC56c6o9P9yUBAMpr1Hjqtzj88cgEmEvb/390NrMEhy7mY4CzNW4M7mfs4V5zDiXlosaglMtNwR7teu5NC3LTBdEvKSpxubBK0JC3IaVKg1+j9CnnA+XWuD7ArdESbBc7S0wJcMWeq+Vi9pzLRUWtBnYWZm0eW2tKq2uRVqjPyh7qYd9jf3u9nKyx8cHx+CsmE57iYgyQ2xhtLB4GzUWzS6q77DEeuJCHV7eda/K2NYcvYcm4/nCzYxYlUWv4OeDa99aOeF2gCgDumTgA4YMb979oib2VBOH+Lth/tanov+fzoNaKevQ9WU/LKanG0u+jkJxX3uTtXx1Mhb2lDA9d1/h97rWE1wDqCj9GpgquS+/svICR/Z0Q7OXQc4OiRvrS67/v/jVrQUVFBdLT0+Hi4tL6wUREJiion22T+32crLD5kYlGD6Abm1gswvxQfYPRhKxSXMwtExyz+2wOHv75tCCAfmNwP3xxR88G0Os9cYM/xhnU2o7PLMH7uy+0+XyVWoOd8dlY+M0x3PR5BD745wIe+/UMfjp+pSuGe03ZdVZftsVKJsH1Ae37kD+tQa3r/Ym5LR7/T0IO8sr0b+DvGt+/2Rqmt47SNxitUWmwK771EjPtcT5b+DowbCTcE4Z52uPFWQEY7mrcsjUe9vogem5pNTSGxdeNJD6jBI/9cgZqg/s2zLisqlXjoz1tf80SEV2r4jNKsCVan3QxUG4taJbeHrOG6fvYlFarEJla0OnxXavSCyuxaE2kIIDuZmeOp6cJM89X7TqPDVHtT+Aj6s2KK5X49nCqYJ9SrcFjv55BSVVtM2cRda2ejzKYgPfeew9RUVHIyMjAmTNn8Pjjj0MsFuOmm27q6aEREXVIUL/GgbWpga7Y/ngYhnna98CI2m+BQRAdALYYZNRvj83CY7+e0ZXNAID5IR5YvTgEZl3QyLEjJGIRVt8eCidrmW7f9xGXWg3YFlcq8c2hFFz3wUE8+ssZnLxcJLj9/V3nkV9W08zZvV+lUoV/r2a4AcCUQNd2Ny0LcLOFp0Gm877EvBaOBn6MvKz73komwW2jvZo9dkqAKxys9JnnDVeCdFZClrCp6FCPa+P13F6Gmei1ai0U5cZ9zqcXVmL5DycFjYiXjPXGT/eNwzBP/fXz99MZOJdVatSf3dtUKdVdMslBRN3ns3+TBNsfLgyGpaxjWYXTh7hBYjDRvPuscSeTrxUp+eVYtCZS0KTV28kSvz80ESum+eO1m4YIjv/P1nhsj83q7mESmaw1h1NRVqNqtD+9sArP/R4LrZbvPaj7mUakoYfl5OTgmWeewaxZs/DUU0/BwcEBmzZtgpOTU+snExGZoCmB+sxcsQh4bmYAvrt7NOytjFdWoqv5u9kKgll/xWRCo9Fia3QGVmyMFmSP3jbKCx8tCoHURALo9dztLfDhQmEt0Gd/j0V2SeMmiRdzy/DSlniMX7Uf7+46j8ziphspltWo8O6u810y3mvBgfP5qK41WH0wvP3lbUQiEaYF6V8jp68UoahC2eSxCVklgomMBSM9WyzPIpOKcfMID9121KVCpBt8gO4sw4Cuk7UMbnbGb1xqCvo5CEuoNPd66IjiSiXuWRclCMxPCXDBf+cNg0Qswstz9IENrRZ4Z2ciP6g1Y0NUGka8uQdj39mHCzllrZ9ARCYnIasEe8/pJ/inBblhVP+Ofw52sJJhwiD9qp49CbmC92x9QWJ2KRavidSVJQSAQS7W2PTQBPg4WwEA7g0biKem+etu12qBp3+LwYHzLU/sE/UFeWXVWHf0km67v7MVBlx97QB1JRO/j7jU1KlEXcq0og095JNPPkFERATOnj2Lw4cP45NPPoGPj09PD4uIqMPGDHDChwtHYMlYb/z20AQ8NsWv2fITpuyWUH3Gb3ZJNV7+8yye2RQLw89iS8Z64/1bgwVZT6bkhkA33B82ULddVFmLFRtjoFJroNFosT8xF0vXnsCMTw5jQ1SaIEAMABZmYtwxzgeB7voSPZvPZOD0lcJuewymZKdBRpuFmRjXB3Ss9JphSRe1RouDF5v+0PrjMWH5nLsnDGj1vheMFGaqbzFiNnqCQRB9SD87iESm+bzvLMNyLgAEgYjOqK5V48EfTyMlv0K3b7inPb64Y6RuEm6CrzOmBemfHxHJChy8kN/ovvq6k5cL8cqfZ6FUaaAoV+LlrfGcbDAhG6PSsHTtCfx2kiUiqGVf/Jss2H5yaucbKhuWdCmoUOLk5b7zniU2vRi3f3scinL95Hyguy02PTQB/Rr8bVsx1R/3TtK/R1RptHj459M40YdL4BABwJf/Jgs+Ez0zfTC+vFNYsvPdXedxJq2oqdOJugyD6EREvdRto7ywakEwxgy4dlfV3DzCQxAc3xCVBsMYzd0T+uPt+cNNfoLg+VmBCPbSl92IulSIx349gxs+Ooj71p9CRLKi0TmeDpZ4aXYgjr80Fe/cMhxvzR8muP3VPxP6XGZXlVKNfw1Kr9wQ6AorWcd6pI8b6Awbc/25TZV0KapQ4s8YfQB8wiBnDHZrut+AoRFe9hjkYq3b3hKdYZTgolKlQVKePtt3aA/XQ+9KHg0y0bOMkImu0Wix8vdYRBkEc7wcLfH9PaNhbS58Hr00JxBSg+vK2zsToVILJ7j6suJKJVZsEK4IOnWlCIcucrLBFOyMz8aLW+IRkazAC5vj+XuhZp3PKcWuszm67SkBLkZp2DdjqBsM53h3G/yM3uxEagHuXHtCUK95hLcDNj44HnKbxivHRCIRXr0pCItGC/up3Lf+FOIyirtjyEQmJ72wEr8a9AgIdLfF3GAPDPWwx//NHarbr9Jo8fgvZ5pdTUrUFRhEJyIik+Via47J/vImb7t30kC8cfNQkw+gA3UlPj5fEioI2v6TkIvLBY3LfIwd4ISv7xyJQ89dj4eu84WDVV1N9dEDnLBgpL5O/LnsUvx6om81GT10MQ9Vtfoa1nM6UMqlnkwqxnWD9Vnshy/kQ6kSBkk3nUpHjcG+ZRMHtOm+RSIRbjXIRr9SUGmUTJnkvHJBH4CebiralWwtzGBr8HrJKu58JvqqXYnYEadfyWBvaYYflo+Fq61Fo2N9XWywdHx/3XZyXjk2nEzv9Bh6A61Wi+f+iENWE6sDPt57kdnoPSyvrBovb40X7Pu/bQmoUambOaNzLikqcLFAiVpOMl2TPm+Qhf7EVP9mjmwfV1sLjO7vqNvefTan1/dOOHQxH8vWRaHcoIbz2IFO+OX+cbr3ck0RiURYtSAYc4brs/fLa1RY9r8oJOWyTBb1Pav3Jwne7z47I0D3eW/JWG/MC9GXTcwqqcYzm2J6/fWFTAeD6EREZNJuGdm4ieND1w3CqzcFXVOlLPo7W2PVguFN3iaTiHHrSC/8/UQYNj08AbOH92uyvvtLs4MEgcUP/rmAAiM3XDRlO+L1mWzmUjGmBLi2cHTrphrURS+rUSHqkj5DWa3R4qfj+kkKD3sLQR311swP9RRk4RmjwWjDpqJDmmgg3JsY1kVvqo9Ae6w7egnfHdHXzpRJxVi7bDT8XG2aPefJqf6wtdC/3j7dexGl1bXNHt9X/Bh5RVA/2fB5HpchrK1M3Uur1eKlzfEoqhQ+Ty8pKrD2iPFrx+4+m4M5n0XgpX8LMePTCF3vEro2JOWWYWe8fmIx3F+OkT6OLZzRPrOG6Se6c0qrEduLM6v/ScjBA+tPCcpPXDfYBeuXjxUkUDRHIhbhk8UhmGwwuV9UWYul358wal8VIlOXnFeGLWcydNuhPg6C9+sikQjv3DIcvgYrPg9cyMeaw6ndOk6grkTgJ3sv4rnfY3GloKL1E6hXYBCdiIhM2owhbuhnrw+mPXGDH16cFXhNBdDrzR3hgWUT9NmtLrbmeGb6YBx98QZ8tGgEhnnat3B23fFPTx+s2y6tVuH93Re6bLympLpWjf2J+uDclADXRiU42mtKgCsMFzLsM7j/f8/nIaNIH7hdOqF/uxrXejpYChqr/R2bherazmWCnsvW10O3MBNjkEvzAeDewLB2bFNZz221+2w23vz7nG5bJAI+XRzSaqkrJ2sZnrhBXxu4oEKJrw6kdHgcvUFCVgne3pGo25ZJxVh792jIDF4bH++9yEBqD/n9VAb2N9OU8It/k43aoDevtBovbI6D8mq2YFphJVZsjMGNn0fg3/O5XJFwDfjiQLKgRN4KI2Wh1zOsiw703pIuf8Vk4tFfzkBpsBpj5lA3fHv3KFjKJG2+H3OpBN8sHSnI4M8trcGda08gr9Q4fUGITN3Hey8Kel89NzOg0Wc+a3MpvrpzFCzM9O89PtxzoVt7CWi1Wjz7eyxW70/C76czsGhNJPLK+DrtCxhEJyIik2ZhJsGP947FQ9cNwnd3j8bKGY3fTF1L/u/moVh/71j8sHwMjr5wA56c6g8X28Z1Mptz94T+CDCoy/3bqXRE92BTnepaNdILK7s8YHLoYj4qlfog9Ozh7i0c3TaO1jKM7q8PpO43CPz8GHlZt18mFWPxaO92379hg9HSahX2N1F3vT3OGTQVDXC3M9lmusbi4aAPomd3MPh3+kohVmyMEQSKXp4T1OZSQMsmDoC3k34c/zt6qc9mBVbUqPDEr9GCQNErNwZhapAb7hjno9t3PqcMOwyyW42FgfmWpRdWCiaLZBIxHpw8SLddVavG2zvONXVqu2m1Wry0JV5Q97leYnYp7v3hFBatiRSs7iHTkpJfju2xWbrtib7OGG3kHjqeDpYYYdAPZtfZnF43ubIhKg1P/RYj6A9xS6gnvrxjJMylbQ+g17OSSfH9PWMEK83SCiux9PsTrPtMvd7ZzBLsNFh1GuYnx0Tfpst6Brjb4r/z9P2i1BotntgQDUU3rdD9dF8S/jYoEZhbWoPHf4lmabM+gEF0IiIyef5utnhpdhCmD3Hr6aF0mkgkwnWDXXB9gKugw3xbSSVivDlvqGDfa3/1TJPR46kFCHvvX4S/fwDzvzzapQETwyXnMqkYU4OM81wwXCKaXliFi7nlSM4rx5EkfbPXucEecG6iIVhrZg9zh6WZ/kO04fLU9tJqtYJM9N5eygWoK6FTL7+8plHN+tak5pfj/vWnBHXt7500EPeHD2rhLCFzqQQvzgrSbStVGnzwT99Y/dHQa38lIFWhX648Y4gb7rpaN/7RKb6CjLBP9l00WiNWtUaLlZtiMfiVXXjop1NNBm77Oo2mLiPOsBbzyhmD8eKsQIR4O+j27YzPQURS40bW7fXHaWHGu9xKLLjWAcDJy0VYtCYSy9dFNSpFZaqOJOXj9b/O4lhK5/+PTN2XB5IF2Z5PGjkLvd5Mg2z0tMJKJGb3jhrfWq0WH++9iJe2xAsmaZeM9cFHC0e0a+VaQ/aWZvjxvrGCBuUXc8txzw8n8VdMJjadSscvJ65g3dFLWHMoBV/8m4SP91zAql2JeHP7ObzyZzye/yMWT/8Wg9X7kpgdS9eMD/cI3189OzOgxeMXjvbGwlH6hJW8sho8tTGmyz8TbYvNwur9SY32R10uFKzWo96JQXQiIqJrzLhBzoKmOvGZJfitm5se7knIwd3/i4KivC4zKjajBIvWROLhn07jssK4dQHrSrnoAzbXD3ZpU43RtmgYjN+XmIufDLLQAeCeNjYUbcjaXIrZBgGEgxfzO5whk1FUhbJqfYBsaC9uKlqvn0EmulYL5LZjObuivAb3rDspqA09e5g7XrkxqIWzmjZnuDtGGSyv3xab1aOrP3rCljMZ2GwwCeRhb4H3bwvWrQpytbXAsgkDdLen5lfgr5ishnfTIV8dSMbmMxlQabT4JyEXC785hiwjliXpDdYdu4wTBpOYYwY44v7wQRCLRfjvvGGCuvWvbTvb7gkpQ1nFVXhzu2HGuwgvhzniwMrJuHtCf0gbrJA5cCEfN34WgSc2RBv9b4MxfXMoBXd9H4X1kVdwx3cn8MW/Sb0ua7reZYXw9TluoBPGG5QfM6bZw4SrfnafNf4qle6mVGnw7O9x+KxBEO2+sIF455ZhRml4L7cxx8/3jYOnwd/B2PRirNgYg+f/iMPLW8/ije3nsGrXeXy45yI++zcZaw6l4n9HL+Hn42nYdCoDW6Mz8cm+i5j7eQTiM66NiSzqu6IuFeLghXzd9owhboJJ4Oa8OW+YYIVuRLICXzRomGxM0WlFePb3WME+w0nkH45dxtbojifNkOljEJ2IiOga9J85QbA2qLX5/j/nu22p76aT6Xj459NNBmJ2J+Rg+ieH8Ob2cyiuNM54Dl3MF2RYtrUUR1v4ulhjoFyf7bU9Ngt/nBY2NBru1XKt+pYYlnRRa7TY1sHAYoJBKRcAGNIHgugeBo1FAbQ5cFqlVOO+9aeQZlB2ZVR/R3yyOKRDwQ2RSISXGwTf39qR2KEAm1arxY64bDyzKQZfH0xBan55u++ju6Xml+OVP8/qtiViET5bEgoHK5nguIeu8xVckz7df7HTy5pPXynCpw0CVRdzy7Hgq2M4n1PazFl9S3JeGd7bfV63bSWT4MOFI3TlnoZ72eNOg3I7qfkV+D6iY01GtVotXtgchzKD6/FT0/zhY28GF1tzvDlvGP5deT1uadBYGai7tk77+BD+szW+XRNiXU2r1eLDfy7g3V3nBfs/3HMRz/0R16kJB1P11cFkQaamsWuhGxoot0aguz7Atesar4teVl2Le384KZhUBICV0wfjlRuN2/Dew8ESP98/DvIOrIQzlFtag4VrjmFHnPEnMMqqa7EzPgeXi7lCiDpOq9Xig3/012CRCFg5o+Us9HqWMgm+vHMkrBq8/ziabPwVRVnFVXjgR+Hnn8em+OKrpSMFf/Ne3ByPs5mcuOqtGEQnIiK6BrnZWWDFNP0H3+LKWnywp2vLTGi1WnxzKAXPb44TLAMPcLMVvHmsVWvxv6OXcN0HB7H2SGqHghAllbXYdDIdd31/Ao/8fFq3XyYRC0qwdJZIJMLUQP39nc8pQ4VB7fWOZqHXm+DrDHc7fTB4SwezU84ZlEMQiSAISvRWHgaNRQEgq6T1ILpao8WTG6MRm16s2zdIbo21d4+GhVn769PWG+njiLkj9Ks/Tl8pancwKDqtCLd+fQyP/XoGW85k4r3d53HDR4cw/eND+PCfC4jPKDG5zNcalRqP/xot6Efw9DT/JmsnO1nLcF/YQN12emEVfj/V8Wys0uparNgY3eSy7JzSaiz8OhLHuuBD8rWkVq3BM5tiBdfYl28MQn9na8Fxz84IgKOVmW7783+TkN2G11NDv0alCUpdhfo44H6D3zkA+Dhb4ZPFIdi1IhzTGlyrVRotfj2RhsnvH8DbO84hpYcnkTQaLd7Yfg5fHGg6a/GP0xlY9r8olFT2ngBhemEltpzJ1G2P7u+ICb5dk4Veb+ZQ/YqspLy6kmnXouySKiz8JhIRBtcdqViEjxaOwBNT/bukX89AuTV+vHcs3OxaDqTLpGLYmkvhZC2Du50FfJys4Gytn+isrtXgsV/P4NN9F432d2Z/Yi6mfXwIT2yMwcq9Bfi/bedQpexcA3Xqmw5dzMfJy/oVfvNDPBHQjve5fq42WLVguG5bqwVWbIw2akPeihoV7lt/SrCidNZQd6ycHoApAa54etpg3f4alQYP/3yafQx6KeOshSYiIqJut3zSQGw6laH7QLohKg23j/FGsJeD0X+WRqPFql2J+O6IMINx6XgfvHHzMCRml+LtHYmITC3Q3VZSVYu3diTip+NX8OKsQMwa5t7ih8yKGhX2JeZie2wWDl3MR6268Qe9yYNdYGth1sTZHTc1yA1rm8jMlNuYN1qK3l4SsQjzQz3xzaEUAMDZzFJcyClr14cDAIJ66IPk1rCS9f63cO72DTPRW/8w9N+/z2HvuVzdtrO1DD8sHwtHa1kLZ7XN8zMD8E9Cji5g+e6u85ga5Npq87jM4iq8v/t8s+VNkvLKkZSXjC8OJMPTwRLTh7hh5lB3jBng2Km6usawaud5wXNvoq8zHrner9nj7wsfhB+OXUbp1dJDn/+bhAUjPTs0gfHan2eRUaQP9M4c6oYrBZU4n1NXU7msRoVl66Lw4cIRmBfi2e777w2+PJCMOIMyDdcNdsEdY30aHedgJcMLswLx4pZ4AEClUo23diTiyztGtvlnpRVUCmq9mkvFgoz3hgLd7bB22RicvlKI93dfEJSbqVFp8N2RS/juyCUM87TDvBGemDvCo9FrviupNVq8tCUOmxpM9NwS6ontsVlQXZ28iUwtwIKvj2LdPWPh42zVbePrKl8dTNE9NqCuFnpXN2ufPdxdUD/4n4Qc+Lk2fx0xRYnZpVi+7iRyDIJytuZSfHPXKEzya7rxobEM8bDD4eenIKu4GlKxCOZSMcwkYsikdV9SsajJ32FFjQorNsZgX6L+b+Kn+5KQnFeODxeO6PDEcnGlEm9sP4et0ZmC/T+dSEPkpUJ8ujgEwzw7voKP+haNRivoNSMVi/DUtPavjpkX4okTlwrx64k0AICiXIknNkTjl/vHdfq9lEajxVO/xSDR4P3QME87fLx4hG6F4+NT/BCXUaJ7vWUUVeHJjdH4YfnYZv9O0rWJmehERETXKDOJGG/crG8yqtXWNf/TGLmhTq1ag+f+iGsUQF8x1R//nTcMErEIwzzt8esD47D27tGCZlgAcKWgEo/8cgaL1kQKMoSBunrnu8/m4LFfz2DUW3uvfuDLazKA7uVoiedaaTLUEaMHOMLesnFg/o6x3h1q/trQrSOFAb6ONBg9Z1DOZYhH3/hwamEmEWTStZY5+33EJfxw7LLB+WJ8f88YowW+vJ2ssHzSAN12WmElfjx2pdnjK2pU+GjPBdzw4cFGAfTmYlaZxVX44dhlLPnuOMa+sx/P/R6LfedyUV3b/dl9e8/lCv4/na1l+HRxSIsfBu0tzfDgZH3j1uySamyMSmv3z94anYE/Df7PvJ0s8eHCEfj94QmY5KfPmq1Va7FiYwy+OZRicln8XS0+o0RQ99Xe0kxQp76hRaO9McKgvuyOuOw2L3fXaLR47o9YwYqE52cFwtfFptVzR/V3wsYHx2P9vWOb7OVwNrMUb+9MxIR392PJt8exMSqtyzO/lSoNntwYLQigi0TAe7cOxyeLQ7D+3rGwtdBPVKbkV+CWr47izDXeCyGzuAp/nNb3TwnxdkC4f9cGgIG61WqGZdN2XWN10SOSFFj0TaQggO5uZ4HfH5nQ5QH0euZSCQbKreHtZAVXOws4WstgbS6FmUTc7Gve2lyKb+8ahYev8xXs/zsuG4vWRHaorNLuszmY9vHhRgH0esl55bjlq6P4+mBKjzS8p44rKK/Byk2xWPRNJDafzui2v6m7E3IEJQsXjfFutJqqrV67aQiG9NP/nTlxqRDv7jrf6efi+/9cECRouNqaY+3dYwQJLWKxCB8vHoFBBte6I0kKfNTFq4Sp+zGITkREdA2b5CfHjQY1wmPSiwU1vTurSqnGwz+dFtT/FImAN+cNxdPTBws+vIlEIkwb4oZ/npqMN+cNFZQPAICTl4sw78ujWLExGrvP1tWFHv3WPjz882nsiMtGdW3jsi/2lmZYMtYbv94/Doeem9LuDO62MJOIcX2Ai2CfVCzCneP7G+X+/d1sEWxQV31rdGa73tAXVSiRVaL/sGv4AaG38zBoqpbdQib6rvhsvLVD3+xQJAI+uz20TU2p2uOxKX5wMgjsf/5vUqPluhqNFptOpWPKhwfx+b/JqGlQzmheiAciXrgBu1aE46lp/ghq5vdZWKHE76czcP+PpzDyv3vx2C9ncCyle8qXZBVX4bk/hI2zPlo0Aq52rWcK3zNpoOD/6IsDKe1a4n+loAKv/pmg25aIRVh9eyhsLcxga2GGdfeMxS2hwompd3edx/9tS+gzQZvqWjWe3hQjyCh+c95QuLXw+6lrMjpUMIHz+raENpXb+qFB49KxA52wvB2lrkQiEa4b7ILtj4fhiztCmyxHpdXWZX2/uCUeo9/eiwd+PIW/47KMXh6iulat+5tTTyoW4bPbQ7F4TF0W/yQ/ObY8MhFejvrrT0GFEku+Pd4ldaW7yzcHUwQT1Cu6IQsdqPv9G5Z0OZtZinSDnhWm7I/TGbhnXZSgD0Cguy22PjYRge6m/7dYLBbhxdmB+GjhCMgMsnHjMkpw8xcRiMsobtP9FJTX4LFfz+Dhn08LylmIRMCiUV5wstDfd61ai/d2n8cd3x1HJptAC1xWVOCjPRew4KujeHlrPMqqTaNUVG5pNRZ/exybz2Qg6nIhVv4ei7v/F9Xlr1OVWiMIMsukYjx5Q8d7NFiYSfDVnSNhY64Pbq+NuIS5n0d0+P3T76fSdatJgbpVWGuXjW5y5ZSdhRnW3DVK0B/mq4MpvaKhMukxiE5ERHSNe/nGIEFn+Hd3nzdKJl9JVS3u/t8J7D+fp9tnJqkLaN09YUCz55lJxLh7wgAcfG4KHpo8SPDBDQD+isnCwz/X1YU2bBhaz1omwYJQT6y7ZwxOvjwNqxYEY6KfvEuXQ04NchNszxzm3mJAqr0WGAT98spq2tXwyLCcBoAmszl7q34GH1Ka+zB++koRnvotBoZJU6/fNAQzDII2xmJnYYanDZYZl1arBGUKjqcWYO4XEXj+jzjkldUIzg31ccCWRydi9e2h8HSwRFA/Ozw1bTB2rQjH4eem4OU5QRjd37HJLPVKpRo74rNxx3cnsHTtCcQ0WNFhTCq1Bk9tjEGxwTXkocmDcH1A23oR2JhL8YhB5qOivAY/Rl5u07m1ag1WbIwRXBeenuaPkT6Oum2ZVIyPF43AI9cLsyvXR17Bo7+c7pGs/e724T8XBHWlbwzuh5sNavY3J9jLAUsMyr0k55Xjh2MtNxlNzS/H+/80aFx624gONekVi0W4KdgDu1aEY9eKcDx8nS88HSwbHVer1mLvuVw8/ms0Rr+1F8/8FoODF/Kg6mSj2vIaFZavO4l/Df6myaRirLlrlKDnAVA3+bn10UmC7P0aVV1d6a8PXnsrH3JKqvHbSX0W+nBP+0aTx11p9jDh9fifBNNuMKrVavHZ/iQ8+3usYLIq3F+O3x+egH72jZ+3puzWUV7Y8OA4yG30E5y5pTVYtCYSf8c13/Bcq9Vie2wWpn9yuNEEkq+LNTY/MhGrFgzDxzPkmDVU+D7qxKVCzPr0MP6KaTprva8oq67FbyfTsPCbY7j+6uT6mbRi/HIiDQu/iezxiYaMokosWhPZqFfBkSQFZn56GP+LuNRlE9RbozORkl+h2142oX+ny3oNkFvj/duCBfvOZZfiju9O4P71p9rV0D3qUiH+szVesO/jRSEtls30d7PFhwtHCPat3BSLpNyyNv9cMm0MohMREV3jPBws8cRUfX3RwgolPtrbueWDeaXVWLwmUtDox0omwffLxrQpWAPUZZG/NCcI+1deh5uCW64tbi4VY85wd3yzdCROvzodHy8OwZRAV6OUU2mL6wNcdNmzYhEaNcvrrLkjPCA1CDq1VtKlpLIWhy7mY/W+JLy3+7zgtiF9KIguyEQvaZyJfllRgQd+PCXI9r4/bCDumWTc35+hJWN94GtQsujn41dw4EIeHvrpFG7/9rhgWTIAeNhbYPXtIdjyyERBMNiQj7MVHpg8CH88MhEn/jMV79wyHNcNdoGZpHGgMiJZgflfHsWDP57ChRzjfyj77N9kRF3WZx2P8HbAyhntK6O0dHx/uNrqG+F9cyilyQmzhlbvSxJMEIwd6NRkDXaRSIQXZgXiv/OGwjCW+09CLu747nivbuZ1PLUA3x/VB75dbM3x1rxhbc4ofm5GABwMVgmt3peEnCZeW0Bd3fBnf48VrBJ6aU5Qp0skiUQiBPWzw4uzA3Hk+Sn4/eEJWDrep9HqJQCoUKqxJToT96w7iXHv7Mdrf53F6StF7Q5il1TWYunaE4K+HVYyCX64Z0yjSdR6Lrbm2PjA+EYB4Pd2n8dLW+JR28mgflul5Jfjk70X8dXBZJRUdWyC/JtDKVAajLc7aqEbCvayh4dBcKy9jZm7U61agxc3x+PjvRcF+28b5YX/3TPG6H1Zusuo/k7487FJgpUg1bUaPP5rdJMNR/PKqvHwz6fxxIZoFBpcUyViER653hc7ngzX/U2zNRfjiyUh+OC2YEEWbll1XV32FRujO/zcvRZpNFocTVbg6d9iMObtfXhhc7zg/XS98zllmP/l0TavCDC2y4oKLF5zHFcKms44r1Sq8ebf53DbN8eMHgSuUanx6T59EoK1TNJiz5X2mDO8H96aP0zwXASAfYm5mPHJYby5/RyKK1t+n5BWUImHfjolWL3zzPTBuLGVzzMAMHt4P8FEf4VSjYd+Oo1SE1l5QJ0j0l5r0+gmSq1WIyYmBiEhIZBIOtak41rVlx87EfEaYCpqVGrM/vQIUhV1GR1iEbD9iTAM7UD97MuKCiz9/oSgqZ+jlRn+d88YhDYTBGyLM2lFeOvvcziTVgygLqt9sr8L5o7wwLQhboLllz0hPqMEm89kIMxPjmlDmg6qdMb960/pGg5ZmIlx8uVpsLUwg1KlQWJ2KWLSi3VflxQVTd6Hm505TvxnmtHH1hldeQ349nAK3tmpn0RIeGMmrK8+TworlFjw1VFcNvjwN3uYO768Y2SHsmTb49/zubj3h1MtHmMlk+DR631xf/igDjdwK62uxYHzedgVn4M953LQMBlMJALmh3jiqWn+Ha4hauhYigJ3rj2hy+q3NZdix5PhHQqarj92Ga9v05dlWTl9MJ6Y2vwy7eOpBVjy3XHdz7azkGL3U5MFEylN2ZOQgyc3RgsCvYPk1lh/71h4O137jSANldeoMOvTw4Jr8//uGY0bAtt3vfr1RJogu27uCA98viS00XHfHErBu7v0r78wPzl+um+sIPhqzNd/rVqDiCQF/orJxJ5zuYIa7A35OFlhXogH5oV4wM+15TJf+WU1uOv7E7qmtEDd8+uHe8c2O7FlSKPR4r1/zmPNoVTB/jA/Ob5aOhJ2XRBUVak12JeYi5+OX8HRZH3gX25T1yT21pFebb7O5ZVWI/z9A7rJxqB+dtj5ZFi3BtEB4I3tCVh39LJu+8R/prZ5xZdGo4UW6PIGfeU1Kjz6yxkcvpgv2L9iqj+emta9Ew9dpaJGhad+ixHUeAbqVrR8eNsIWJiJ8WdMJt7Yfk6wIgmoq2//wcJgQSZuw2tAWkElnvotWvder56ngyU+WjQC4wc5oztoNFoUVipRo9KgplaNGpUGSpWmblulRk2tBkq1/vv6/WYSMfrZW8Dd3hIe9haQ25i3+bV2WVGBzWcysPl0hqAMX0MyiVgwqWVhJsani0Mxa5jxV9A1Jym3DHeuPSFYNTdIbo235g/DJ/suNgr6m0lEeHyKPx653tcoCS4/Rl7Ga3/p3yOsmOqPp6cP7vT9Gsorq8bHey7it1PpaBj1dLAyw4qp/lg6vj/MGqyYLa2uxYKvjgmy828e4YHVt4e0+Rqg1mhxz7ooHEnSrzydPsQNa5aO6vL3qN2pN8UB2vpYGEQ3kt705GmvvvzYiYjXAFNy6GI+lv0vSrc9ur8jfn94Qrs+9J3NLME966KgKNdnaPSzt8BP941tNVDRFlqtFkeTC1BUqUS4vxwOVrLWT+oldp/NxsM/n9FtXx/gguLKWpzLKhV8mGrJvZMG4rW5Q7pqiB3SldeA7bFZeGJDtG573zOT4edqi+paNe5cewKnr+g/5I30ccCvD4zvcMC6PbRaLZZ+f0IQ3KonEgELR3nh2RkBbaoh3lbJeWX4eO9F7IxvnMEpFYuweIw3nrjBv91LodUaLZLzyhGdVoRP9l1Ebqn+A/XnS0IblbloqxqVGlM+OKgLJNhaSBHx/A2wbyLbuLhSidmrjwhWG3x150jMGd56xhdQN0F33w8nUWQQ8JHbmGPdPWMw3Kv9E4kNaTRa5JfXILO4CplFVcgqrkJmcf2/1ahSqiCViCEVi2AmEUMqEcFMXPevVCKGmVgEieFtEjHc7MwxzMMewzzt4eVo2abr9Iub47DRoCTH7WO88e6twS2c0TS1RotbvjqKuIwS3b5fHxiHib76JokXc8tw02cRumuTrbkUu5+e3Kj8Sle9/iuVKuxLzMO2mEwcvJAvKKnR0FAPO8wP8cTcER6Nnv9ZxVVYuvaEboIZqGuS+9N949q9qufXE2l49a+zgtIG/q42+N89Y4w2YZNXWo0NUenYEJUmaGTZ0EgfB7w5bxiGebb+/H7r73NYG6FfvfDN0pGYNaxtry1jirpUiEVrInXbj03xxdiBziiuVKK4shbFlbUoqlSipKru37p9ShRX1aKkqhZmYjEC3G0xzNMOwzztMdzTHgHutjCXdu55V12rxiVFBVLyy/H1wRTBaiKpWIR3FgzHotHenfoZpkaj0eKDPRfw9cEUwf5gL3u42JgLyvgBdf8Pj07xw+NT/BoFUJu6BqjUGnx1MAWr9ycJXi8iEfDQZF88M31wl6w01Gq1iMsowdboTPwdly2o395RUrEIbnYW6GdvgX4OlnX/6r4s4Wwjw9FkBf44ndFktnk9ByszzA/xxG2jvOBia4771p/E2Uz9c00kAl6cFYgHJw/q8smahKwS3PV9lGCFQYCbLX66fyxcbS2g0WjxS1Qa3t2ZiIoGk5kBbrZ477bgTvWcqVSqMPn9g7rfj4OVGY48P6XLVnmcyyrFWzvO4VhK4/dsg+TW+M+cIEwNcoVIJIJKrcG9608JJtJCvB2w8cH2v78sqlDips8jBCV7WksouNb0pjgAg+jdrDc9edqrLz92IuI1wNQ89NMp/JOgzy56c95QTPKT67JvlFczbZraLq1W4euDwrILvi7W+Om+ca1mg1LralRqjH17f7uXNA+SWyPE2wGT/OSYF+IBqcS0qvF15TXg9JVC3Pq1Pujy471jEeYnx2O/nhGUAxjgbIUtj04SNLTsaueySnHj50cE2U3jBznhlRuHtCmw1VHxGSX4cM8FHGqQKQnUlUVaNnEAHr7Ot9n/i7yyasSk1a14iE4rRlxGcaMPyQCwZKw3Vi1of4DW0MaoNLy4RZ/x/PgUPzw7U1gaRqvV4pGfz2C3QY3kjgSHU/PLsWxdFNIL9R9WrWQSXB/gAnOpBDKJGDKpwdfVbfOrX/X7a2o1yCquQsbVIHlWcTWyS6oES7qNzc5CqgsKDvW0xzAPOwxwthZkqzVc/eDlaIndT03u8Aqe2PRizP/qqO756+9qg50rwmEmEaNWrcGCr44hPlMfZH//1mAsGtM4kNgd7wGKKpTYeTYbf8VkIcqgwWlDIhEwfqAz5od6YNawfiiqUOLOtScEAYx+9hb4+f5x8HWx6dBYDl/Mx6O/nBH8nZTbyPBA+CAE9bNDUD87uBiUMmoLrVaL46mF+Pn4FfyTkNPihIEhsQi4c1x/PDsjoMnJKaCuJ0HYe//qVmoEuNli14rwHsmEVGu0GPfOfqMENuuZSUQY7GZbNyHlVfcaCnS3bTLYVVihREp+OZLzypGSV173fX45MoqqGmWpAnXlJb5eOgqTB3df7fjutuVMBl7cHN/iRP6Qfnb4YGFwsysbW7oGRKcV4enfYgQrxoC6ia9XbxqCUB+HTk+CAHUNqf+MzsKfMZnNruTrCRKxCFMCXHDbKC9MCXQVPNZKpQpPbojRrVCst2SsN96cN6xRdrSxxKQX4+7vT6C0Wn8NG+Zph5/uHQfHBu8bsoqr8MqfZwV9JIC6a8/ySQOxcsZgWMna/zfo64MpglKF/5kTiAcn+7ZwRudptVrsT8zDOzsTBZOq9Sb5OeOVG4fgt5Pp+OHYZd1+D3sL/Pn4JLjadiwp4mxmCW79+phuJZBIBPzvnjGY0sY+M6auN8UBGETvZr3pydNeffmxExGvAaYmo6gS0z4+JCht0FEjvB3wwz1jGr2ppo575c94/Hw8rdnbnaxlCPF20H2N8HJoNjhiKrryGpBVXIWJ7/6r237v1uFIzivHd0f0WZWOVmbY+ugkDJB3vpxJe319MAWf7L2I/s5WeHZmAGYMceu25f4nUgvw4Z4LTWa+2ZhLcV/YQNw1oT+uFFQgOq0Y0enFiEkrblMTM39XG2x7PAyWss79PmvVGkz7+JCu3qqVTIIjz0+Bs40+yLghKg0vGQTaB8mt8feTYR36YJ5fVoP71p8UZFhfq6xlEgz1sMdQz7rA7Af/XED+1WX3IhGw8YHxGNfJsggvbYnDhih9ZvsrNwbh/vBB+Gx/kqAe9JQAF/zvnjFNPre7+z1AZnEVtsVk4a+YTEF5loZkEjEszMSCQFF/Zyv8fN+4TmeNX8gpw/J1Uc2Wa5DbmCOon+3VoHrdv74uNk2WDNh6JhM/Hb/SqKlfPQszMeaN8MTtY71xLKUAX/ybjKoGzXOdrGV4YVYAFo7ybhQcX7UrUVCG5os7QnFTcMdWlxjDy1vj8cuJ5v8GGoNELIK/qw2Ge9pDIhbpAudF7Wi47mZnjv/dM6ZDJfGuNaevFOKhn04LViACdRMUT97gj4ev920xoNvaNaCiRoW3dpwTXGvqySRiDPeyx6j+jhjp44hR/R3bPAlVUF6DHfHZ+DM6s1HpmJ4W6G6L20Z5YV6IZ4uPR63RYtXORMFKEaCuVNSXd46EvaVx3/9FXSrEvT+cFEwCjvRxwLrlY5v9WVqtFttis/DG9nOCzHUA8HayxLsLgjHJT97kuQ3VqjUorFBixieHdQklbnbmOPTclG5ZRQgASpUGPx+/gtX7kxoltYhEEEyoWckk2PzIRAT161wvoi1nMvDMpljdtp2FFNufCDNKGb6WlNeoUFypRHWtGtW1GlTVqpv4vu6rSqlBtUoNqViE2cP6tXmlVm+KAzCI3s1605OnvfryYyciXgNM0ef7k/BRg4ZY7RXuL8c3S0fp6k+TcWQVV2H+l0eRV1YDmVSMYR52CPF2RIiPA0K8HODt1LayDqakK68BKrUGAa/u1i0H93O1EQSbzKVi/PrAeIzq3/Fa/Z1Vq9Z0WcZYa7RaLQ5ezMeH/1xo1NC0I2zNpZgc4IJXbxzS7rIwzWn44fHByYPwnzlBAIDkvHLc9PkR3aSfmUSErY9O6lQmf0WNCo//egYHLjTO1DcmRyszeDpawsPeEnaWZlCpNajVaKFSa6BSawXfqzQaqDRa1Krr9tWoNMgoqmxU476t7g8biFdu6nxZp8IKJW746KCu7rGNuRSf3xGKB9af0mVD21uaYc/Tk5utXd2T7wEu5JThz5hMbIvJanVyaLCbDX6+b5zRSizllVbjvvWnBNn6LZFJxPBztdEF1lMVFfgzOrPZuu8D5dZYOr4/bhvpJZhIzSyuwlt/n2uyMWeItwP+O2+YroRRYYUSYe/9q/sZfq42+OepyV1eV7wlyXllmPv50UYTAUBd8NvB0gz2VmZwsDSDo5UM9lZ1/zpYmqG0uhbxmSVIyCxFWRsaFXeESASE+7vg3QXD+9QKvIyiSty//pRuYirYyx4f3DYCAe6tl/Fr6zXgn4QcvLg5rtXJDB8nK4zq76j7Guxmq3vOVinV2JuYi7+iM3HoYstlnkb3d8SsYe5wtpHBXCq5uupIol+BZCY22C+GuVndiqWqWjWyS6qQXVKN7OJq5JRUIaukGjkl1cgqqUJ2cXWTz19HKzPMu1quZaiHXbvey/10/Ar+b1uCoPSNn6sN1hmxVFREkgL3/3hSkGQzfpATvl82pk3v9QvKa/Dm3+fwV0xWo9umBrrC2lyKSmVdQLZSqUJVrQZVShUqlWpU1apRpVQ3+ft6a/4wLB3fv3MPrgOKK5VYvT8JP0VeaXJcIhHw7V2jMd1IPZL+b1uCIMM90N0Wvz88oUtK2KTml+PdXeexLzG3Q+8z7CykONJM+b2GelMcgEH0btabnjzt1ZcfOxHxGmCKqmvVuOWrY0jMbn9QzUomweIx3nhpdlCX1KukurIuuSU1cLe36BX/x119DZi4an+TGZ8iEfDVHSMxu411s3szjUaLXWdz8NHeC0jNb9tSdrEIGOxmi1AfR4R6OyDUxwG+LjZGL/Og1mgx45NDSLk6LnOpGEeenwJ7KzPc8uUxnDO4Tr08JwgPTB7U6Z+pUmvwv6OXsCehrjlljUoNpbqufJXSoLlccwEYqVgEd3sLeDhYwsvBEh5XvzwdLeHpULe/I5nyhqqUapzPKcXZrFIkZJbgbFYJLuSUtVo2xt/VBtufCDNa1t7Px6/glT/P6rYbZuKtvj0E80I8mz3fFN4DaDRanE4rwp/RmdgRn92oGeJwT3usv3es0cs9VSpV+OCfC9gZny3oJdBRYlFd47m7xg/ARF/nFl+Lhy/m4/+2JTQqSyASAUvG+uC5GQFYG5GKLw/oa1639rvsLlnFVYjLKIaVTAoHKzM4WMrgYG0GG5m0TdcfjUaLK4WVVwPqJYjPLMHZzBLBqoPWWJiJMUhuA19XG/i52MDX1Rq+LjYYKLfutoxYU1OlVOPvuCzYWkgxLcitzaXj2nMNyCutxn+2xmNfYl6LxxmyMZci1McBjlYy7E/MbbL8WL1BLta4JcQT80I8O9QQuy20Wi1Kq1TILq0LqOeWVsPNzgKT/OSdek936GI+HmtQKsrZWoZv7x7d6USB/Ym5eOSXM1Cq9AH06wa7YM1do9r9fP/3fC5e3npW0MOko7ydLLH/met79L1wSn45Vu0836iszkuzA/HQdcYrMVOr1uCO744LVg86W8vw6BQ/3DnOxyjXnZLKWqzen4QfIy+3uSxYc46+eEOjHihNMYX3AMbCIHo3601Pnvbqy4+diHgNMFVl1bWISFKgRqXR1fs1zMAR/muwXyLuVV3jqet19TXg1q+PCRqI1qsvPUF6KrUGW6IzsXpfUqPMXFdbc4T6OCDE2xGhPg4Y7mnfbStNdsRl47Ff9U11l03oDzOJWLCEPdxfjvXLx3br9Uej0UKp1veHUKo1kIhEcLE175FsXaVKg4u5ZUjIqg8MliIxu1RXS9XGXIqND443as19tUaLeV9GCBrc1Zs11B1fLx3ZYkalqb0HUKo0OJKUj79isnD6ShFG9nfEO7cM67KGdfUKK5RIzC69+lWGxOxSJOeVt6lptNzGHHeM9cbtY33alf1co1Lj+4hL+Hx/4xIvjlZmUKo0uoDjILk19j5zXY9moXclrVaLtKuB9bOZpTibWYLE7FKIRMAgFxv4utjA18Uafq5133s6WPK9jpF05BqQVVyF01eKcPpKEc6kFSEhq1SQhd0echtz3DzCA7eEemKYZ/sywE3NhZwy3PvDScHfb5lUjI8Wjuhwk+8dcdlYsTFaEFSdMcQNn98R2uF69GXVtXh/9wX8dPxKh84H6ib81iwdhRlD3Tt8H8Z0NFmBj/ZcwIWcMtwfPghPTfM3+nMpr6wacz+PaDTp2s/eAk9O9cdto7w6tLKxVq3BL8ev4NP9SY0mkdtKLAIszSSwtzTD3Vd767SFqb0H6AwG0btZb3rytFdffuxExGsAUV/X1deAJzZEY3uscPnwPRMH4PW5Q67pD8tdqUalxs74bKQVVMHfzQYh3g7oZ2/RY/9fGo0WN34eoVsdIxWLBB/onaxl2L0i3GilNnoTlVqDlPwKpBVWIqifLbwcjZ9dGZ1WhFu+OibY52Qtw56nJ0Nu03J9Yr4HaF6tWoOU/HIkZpfifHYZzl0NsNc31hw30Al3TeiPGUPcO5WJmVVchbd3JGJHfHazx3y0cARuHeXV4Z9B1BxjXAMqlSrEZZTUBdWvFOF0WlGLwUBrmQQzh7ljfognJvo6m1zD9c7IK6vGAz+eRmx6sWD/yumD8fgNfu36O77lTAae/T1WUNJj7ggPfLxohFHK0EVdKsQXB5KRml8OK5kEljIpLM3EsJJJYWkmgaVMUrf/6veWZnXbFmYSBHs5tKlcUHfTarVd+l7pbGYJHvrpdJMlyPo7W+GZ6YMxN9ijTZN8Wq0WBy7k4e0dibrVfoYm+jrjtlFesJJJYWEmhqVZ3f+9pUwCC6kEFjIxLMzqvjeTiDr0uHvTe4C2PhYWOiUiIiIik+XhIAysTh/ihldvYgC9JeZSCW4JNZ2AmVgswjPTB+OBH08BQKNlxh/cFswAejOkEjEC3G27NNgQ6uOIxaO98dspfeO/t+cPazWATi0zk4gR6G6HQHc7IFS/X1FeA4lIZLSm3R4OlvjyzpFYkqTAa9vONirp5ONkhXkhPddMlKg1VjIpxg9yxvirzZK1Wi1SFRX6oPqVIijKaxDq44h5IR6YPsSt0yW1TJWrrQU2PjAeK3+Pwc54fe+Dj/ZexC8n0mAlq1s9aiapW00qk4hhdvVfc6kYZhIRZFIxVGottsZkCspzLRrthVULgo22ImXsQCf8OHCsUe7LVHT1e8thnvbYv/I6/HoiDV8eSEaBQbPWKwWVWLExBl8dSMEzMwa32Kz+fE4p3vo7ERHJika3DZJb4z9zgjA1yJXvlbtA77zyEBEREVGvMGOIO747nAqNFgj1ccBnt4f22pIEvdm0IFeM8LJHbIawEeM9EwdgapBxGndRx700JxAp+eU4nVaEJ6b4sddAF+qqyYkwfzl2r5iM/x29hM/2J+kaiq6cMbhXZepS7ycSia6W4LHBotHePT2cbmcpk+CLJSPxgfMFfH1Q39cgp7TjdcjvntAf/zd3KMsYmQALMwnuDRuIxWO88cOxy/jmUArKDHo6XMgtw0M/ncYIL3s8OzMAYX5yXTBcUV6Dj/ZcxG8n0xo1DbW3NMOKqf5YOr5/r+i5ZKoYRCciIiIikzWqvyO2PxGGtIJKTA1y4weDa5RIJMLKGQG4+39Run0BbrZ4cXZgD46K6jlYyfDHIxO7fCk7dS2ZVIyHr/PF/BBPbI/NQn9nK5OpOUxEbScWi/DCrEAMcLbCy1vPdqpR5EOTB+HF2YG8tpsYa3MpHpvih6Xj+uPbIyn4X8RlQX+L2IwS3PV9FMYNdMJT0wYjJr0YXx5IFjSfBepK5N01oT9WTPWHg5VxG2hTYwyiExEREZFJG+phj6EexmumSD0j3F+OBaGe2BKdCTc7c3x+RygszK7tGpq9DYMsvYO7vQUemMzGy0TXusVjfBDs5YA/YzJRWlWra4Rdq9Y3xK5VaVGj1qD26nb97RKxCMsmDMD94QN5bTdh9lZmeG5mIO6ZOBBfHUzGL8fTBE2pT1wqxJLvjjd57rQgV7w0Jwi+LjbdNdw+j0F0IiIiIiLqciKRCB8uHIFHp/jBxdYc9pZmPT0kIiIikxbUzw5B/ex6ehjUxVxszfH63KG4P3wQPt+fhN9PZ0DdzAqEQHdbvHrTEEzyk3fzKIlBdCIiIiIi6hZisQh+rsyYIiIiImrI08ES794ajIeu88Uney9iW2yW7ja5jQwrZwRg0Whv9gfqIQyiExEREREREREREZmAgXJrfLYkFI9c74s/TmfA1dYcd4zzga0FV/H1JAbRiYiIiIiIiIiIiExIUD87vHrTkJ4eBl0l7ukBEBERERERERERERGZKgbRDfzyyy+44YYbMHz4cCxcuBBxcXE9PSQiIiIiIiIiIiIi6kEMol+1c+dOrFq1Co899hi2bt2KwMBA3HfffSgoKOjpoRERERERERERERFRD2EQ/ap169Zh0aJFuPXWW+Hn54c33ngDFhYW2Lx5c08PjYiIiIiIiIiIiIh6CBuLAlAqlUhISMBDDz2k2ycWizFx4kRER0e3677UarWxh2fy6h9zX3zsRMRrAFFfx2sAUd/F1z9R38ZrAFHf1Zte/219DAyiAygqKoJarYazs7Ngv7OzM1JTU9t0H1qtFgAQHx9v9PFdK/ryYyciXgOI+jpeA4j6Lr7+ifo2XgOI+q7e9Pqvj+02h+VcjESj0fT0EIiIiIiIiIiIiIionVqL7TITHYCjoyMkEkmjJqIFBQWQy+Vtug+pVIrhw4dDLBZDJBJ1xTCJiIiIiIiIiIiIyEi0Wi00Gg2k0pbD5AyiA5DJZBg6dCgiIyMxbdo0AHWzD5GRkVi6dGmb7kMsFkMmk3XlMImIiIiIiIiIiIiomzGIftXy5cvxwgsvYNiwYQgODsb69etRVVWFBQsW9PTQiIiIiIiIiIiIiKiHMIh+1Zw5c1BYWIjPPvsM+fn5CAoKwtq1a9tczoWIiIiIiIiIiIiIeh+RtrXWo0REREREREREREREfZS4pwdARERERERERERERGSqGEQnIiIiIiIiIiIiImoGg+hERERERERERERERM1gEJ2IiIiIiIiIiIiIqBkMohMRERERERERERERNYNBdCIiIiIiIiIiIiKiZjCITkRERERERERERETUDAbRiYiIiIiIiIiIiIiawSA6EREREREREREREVEzGEQnIiIiIiIiIiIiImoGg+hERERERERERERERM1gEJ2IiIiIiIiIiIiIqBkMohMRERERERERERERNYNBdCIiIiIiIiIiIiKiZjCITkRERERERERERETUDAbRiYiIiIiIiIiIiIiawSA6EREREREREREREVEzGEQnIiIiIiIiIiIiImoGg+hERERERH3IXXfdhYCAgA6du2XLFgQEBGDLli1GHhURERERkeliEJ2IiIiIiIiIiIiIqBkMohMRERERERERERERNYNBdCIiIiIiIiIiIiKiZkh7egBERERERKR36tQp3HnnnViwYAFWrVrV6PaCggJMnjwZw4cPx8aNG3H27Fls2bIFUVFRyM7ORm1tLfr374+5c+di+fLlMDMz65Zxnz59GmvWrEFMTAyqqqrg6emJOXPm4IEHHoClpaXg2ISEBKxZswZxcXFQKBSwsbGBp6cnpk2bhkceeUR33OXLl7FmzRqcOHECeXl5sLKygru7O8aNG4f//Oc/EIlE3fLYiIiIiKhvYyY6EREREZEJGTVqFDw9PbFnzx7U1NQ0uv3vv/+GSqXCvHnzAACbNm3C3r17MXjwYCxevBi33XYbtFotPvroIzzzzDPdMuZdu3bhrrvuQlRUFKZOnYply5bB0tISX375JZYtWyZ4HImJibj99ttx+PBhjBo1CsuXL8fMmTNhaWmJTZs26Y7Lzc3FwoULsX37dgQFBeGee+7B3Llz4eLigg0bNkCtVnfLYyMiIiIiYiY6EREREZEJEYlEuPnmm/H1119j//79mDNnjuD2v/76C2ZmZpg9ezYA4OGHH8brr78OiUSiO0ar1eLll1/G5s2bcfr0aYwaNarLxlteXo5XX30VEokEGzduRGBgIADgmWeewcqVK7Fz506sXbsWjz32mG78SqUSX375JaZNmya4r6KiIt33e/bsQWlpKf7zn/9g2bJlguOKi4shlfKjDBERERF1D2aiExERERGZmPos823btgn2p6SkICEhAddddx0cHBwAAB4eHoIAOlAXiL/zzjsBAJGRkV061n379qGsrAy33nqrLoAOAGKxGM899xykUim2bt3a6DwLC4tG+xwdHdt0XP1jJyIiIiLqDgyiExERERGZmIEDByI4OBgREREoLCzU7a8PqtcH2QFAqVRi3bp1uO222zBy5EgEBgYiICAACxYsAADk5eV16VgTExMBAGPHjm10m4eHB7y8vJCeno7y8nIAwOzZsyEWi/H444/jpZdewt9//43c3NxG506ZMgVWVlZ488038dRTT2Hz5s1IT0/v0sdCRERERNQUroEkIiIiIjJB8+bNQ1xcHHbt2oU777wTWq0W27dvh729Pa6//nrdcU8++SQOHDiAAQMGYM6cOXB2doZUKkVpaSl+/PFHKJXKLh1nfXBcLpc3eburqysuX76MiooK2NjYYMSIEfjpp5/wzTff4O+//8aWLVsAAMOHD8ezzz6L8ePHAwC8vLzw22+/4YsvvsChQ4ewa9cuAMCgQYPw5JNP6srZEBERERF1NQbRiYiIiIhM0Jw5c/Duu+9i27ZtuPPOO3Hy5ElkZmZi8eLFkMlkAIC4uDgcOHAAYWFh+PbbbwVlXWJiYvDjjz92+ThtbGwAAAqFosnb8/PzAQDW1ta6faNHj8batWtRXV2N2NhYHDhwAL/++iseeugh/P333/D29gYADB48GJ999hlqa2uRkJCAw4cP46effsLTTz8NV1fXLq31TkRERERUj+VciIiIiIhMkJOTE8LCwhATE4MrV67oSrncfPPNumPqy5tcf/31jeqinzp1qlvGGRQUBACIiopqdFt2djbS09Ph7e2tC7YbsrCwwLhx4/Diiy/ioYceQnV1NY4ePdroODMzM4SEhODJJ5/Eyy+/DK1Wi4MHDxr9sRARERERNYVBdCIiIiIiE1Vf+/z333/H7t274eXlJci+9vDwAACcPn1acF5SUhK+/fbbbhnjtGnTYGtriy1btiApKUm3X6vV4sMPP4RKpcItt9yi2x8dHY2amppG91NQUAAAMDc3BwCcPXtWVyqmpeOIiIiIiLoay7kQEREREZmoG264Aba2tvjhhx9QW1uLu+66CyKRSHd7cHAwgoODsWvXLuTn52PEiBHIzs7Gv//+i+uuuw7//PNPl4/RxsYG//3vf7Fy5UosWrQIs2fPhpOTE44dO4aEhAQEBwfj/vvv1x3/3Xff4cSJExgzZgy8vLwgk8lw7tw5REZGwtvbG9OnTwcA/PXXX/jtt98wZswYXSZ7cnIyDh8+DAcHB13jVCIiIiKirsYgOhERERGRiTI3N8esWbPw+++/AxCWcgEAiUSCNWvW4MMPP8SRI0cQHx+P/v374/nnn8fkyZO7JYgOALNnz4aLiwvWrFmDvXv3oqqqCp6ennj00UfxwAMPCLLGlyxZAltbW8TGxiIqKgparRYeHh54+OGHsWzZMl3Zl5tuugk1NTWIjo5GXFwclEol3N3dsWTJEtx33326LHwiIiIioq4m0mq12p4eBBERERERERERERGRKWJNdCIiIiIiIiIiIiKiZjCITkRERERERERERETUDNZEJyIiIiLqozIyMrB169ZWj7O1tcU999zT9QMiIiIiIjJBJlcT/ZdffsH333+P/Px8BAYG4tVXX0VwcHCzx//www/YsGEDsrOz4ejoiJkzZ2LlypW65kVqtRqff/45tm3bBoVCAVdXV9xyyy149NFHIRKJAAAvvvhiow8PYWFh+P7777vugRIRERER9bATJ07g7rvvbvU4T09P/Pvvv90wIiIiIiIi02NSmeg7d+7EqlWr8MYbb2DEiBFYv3497rvvPuzevRvOzs6Njt++fTs++ugjvPPOOwgNDcXly5fx4osvQiQS4aWXXgIAfPfdd9iwYQPee+89+Pn54ezZs3jppZdga2sr+MAQHh6OVatW6bZlMlnXP2AiIiIioh40btw4XLhwoaeHQURERERk0kwqiL5u3TosWrQIt956KwDgjTfewMGDB7F582Y8+OCDjY6Pjo7GyJEjMXfuXACAl5cXbrrpJsTGxgqOmTp1Kq6//nrdMTt27EBcXJzgvmQyGVxcXDo8do1GA5VKBbFYrMtwJyIiIiIiIiIiIiLTpNVqodFoIJVKIRY33z7UZILoSqUSCQkJeOihh3T7xGIxJk6ciOjo6CbPCQ0NxbZt2xAXF4fg4GCkp6fj0KFDmDdvnuCYTZs24dKlSxg4cCDOnz+P06dP48UXXxTcV1RUFCZMmAA7OzuMHz8eTz31FBwdHds8fpVKhfj4+HY+aiIiIiIiIiIiIiLqScOHD2+xMonJBNGLioqgVqsblW1xdnZGampqk+fMnTsXRUVFuOOOO6DVaqFSqXD77bfj4Ycf1h3z4IMPory8HLNnz4ZEIoFarcbTTz+Nm2++WXdMeHg4pk+fDi8vL6Snp+Pjjz/GAw88gN9++w0SiaRN429ppoKIiIiIiIiIiIiITFNrsV2TCaJ3xIkTJ7BmzRq8/vrrCA4ORlpaGt5++218+eWXeOyxxwAAu3bt0tVO9/PzQ2JiIlatWqVrMAoAN954o+4+AwICEBAQgGnTpumy09uivoTL8OHD2xx47y3UajXi4+P75GMnIl4DiPo6XgOI+i6+/on6Nl4DiPqu3vT6r38srZXnNpkguqOjIyQSCQoKCgT7CwoKIJfLmzxn9erVuPnmm7Fw4UIAdQHwyspKvPbaa3jkkUcgFovx/vvv48EHH9QFygMCApCVlYU1a9bogugNeXt7w9HREVeuXGlzEL2eRCK55p88HdWXHzsR8RpA1NfxGkDUd/H1T9S38RpA1Hf1pde/ydQgkclkGDp0KCIjI3X7NBoNIiMjERoa2uQ51dXVjVLt639xWq1Wd0zDmQSJRKK7vSk5OTkoLi7uVKNRIiIiIiIiIiIiIrr2mUwmOgAsX74cL7zwAoYNG4bg4GCsX78eVVVVWLBgAQDg+eefh5ubG1auXAkAmDJlCtatW4chQ4boyrmsXr0aU6ZM0QXTp0yZgm+++QYeHh66ci7r1q3DrbfeCgCoqKjAF198gZkzZ0IulyM9PR0ffPAB+vfvj/Dw8J75jyAiIiIiIiIiIiIik2BSQfQ5c+agsLAQn332GfLz8xEUFIS1a9fqyrlkZ2cLMs8feeQRiEQifPrpp8jNzYWTkxOmTJmCp59+WnfMK6+8gtWrV+ONN95AQUEBXF1dsXjxYl3NdIlEgosXL+LPP/9EWVkZXF1dMWnSJKxYsaLFjqxERERERERERERE17JKpQp5pTXwcLCETGoyRUtMjkjbUl0TajO1Wo2YmBiEhIT0mVpA9fryYyciXgOI+jpeA4j6Lr7+ifo2XgOIrk1qjRbxmSWISMrHkSQFzqQVoVatxaj+jvj9oQkQi1tusAn0rtd/Wx+LSWWiExEREREREREREZHxXCmoQESyAhFJChxLKUBJVW2jY05fKcKlggr4utj0wAhNH4PoRERERERERERERL1EcaUSx1IKcCRJgYjkfKQXVrV6zggvewxwtu6G0V2bGEQnIiIiIiIiIiIiukbVqNQ4c6UYEcn5iEhSIC6zBG0p4D1Ibo0wfzkm+clxQ6ArJG0o5dJXMYhOREREREREREREdI3QarW4kFuGiCQFjiQpEHWpEFW16lbPc7QywyQ/OcL95Qjzd4Gng2U3jLZ3YBCdiIiIiIiIiIiIyITlllYjIklRV9s8WYH8sppWz5FJxRg7wEkXOB/Sz65NjUOpMQbRiYiIiIiIiIiIiExIRY0KJy5drWuepEBSXnmbzhvSz+5qprkcYwY4wcJM0sUj7RsYRCciIiIiIiIiIiLqQWqNFnEZxXUlWpIViE4rQq269cLm/ewtEOYn19U2l9uYd8No+x4G0YmIiIiIiIiIiIi6kVarxZWCShxJViAiKR+RKQUorVa1ep6NuRTjBznrss0Hya0hErFES1djEJ2IiIiIiIiIiIioixVVKHEspQARyfk4kqRARlFVq+dIxCKEejvo6pqP8HaAmUTcDaMlQwyiExERERERERERERlZjUqN05eLcCRZgaPJCsRnlkDbeoUWDHKxRrifHGH+Lhg/yAm2FmZdP1hqEYPoRERERERERERERJ2k1WpxPqdMV9c86lIBqms1rZ7nZC2ryzS/Wtvcw8GyG0ZL7cEgOhEREREREREREVEH5JRU40hSPo4mKxCRXABFeU2r55hLxRg70EnXEDTI3Q5iMeuamzIG0YmIiIiIiIiIiIjaoLxGhROpBTiSpEBEsgLJeeVtOm+ohx3C/OUI93PB6AGOsDCTdPFIyZgYRCciIiIiIiIiIiJqgkqtQWxGSV2meZICZ9KKoNK0Xtjc08FSl2k+0dcZzjbm3TBa6ioMohMRERERERERERGhrq755YJKRCTl40iSApGpBSirVrV6nq25FON9nRHuL0eYnxwD5dYQiViipbdgEJ2IiIiIiIiIiIj6rMIKJY6l1GWaH0lSILO4qtVzpGIRQn0cEObngjB/OUZ42UMqEXfDaKknMIhOREREREREREREfUZ1rRqnrxRdrWuej4SsUmhbr9ACXxdrhPu7IMxPjvG+zrAxZ2i1r+BvmoiIiIiIiIiIiHotjUaL8zlliEiuK9Fy8nIhqms1rZ4nt5Fhkp9cV9u8n71lN4yWTBGD6ERERERERERERNSrZJdU1WWaJylwLEUBRbmy1XPMpWKMHeh0ta65CwLdbSEWs645MYhORERERERERERE17jyGhWOpxQgIlmBI0n5SMmvaPUckQgY5mGPsKvNQEf1d4SFmaQbRkvXGgbRiYiIiIiIiIiI6JqiUmsQm1GsyzaPSS+GStN6YXNPB8u6THN/OSb6yuFkLeuG0dK1jkF0IiIiIiIiIiIiMmlarRaXFBVXM80VOJ5SgLIaVavn2VpIMdHX+WpdcxcMcLaCSMQSLdQ+DKITERERERERERGRySkor8HRlAJEJOXjaHIBMourWj1HKhZhpI9jXYkWfzmCPe0hlYi7YbTUmzGITkRERERERERERD2uulaNU5eLcCQ5HxFJCiRklbbpPH9XG0zykyPcX45xg5xhY86QJxkXn1FERERERERERETU7TQaLc5llyIiWYGjyQpEXSpEjUrT6nlyG3OE+TkjzN8FYX5yuNtbdMNoqS9jEJ2IiIiIiIiIiIi6RVZxFSKSFDiSrMCxZAUKKpStnmNhJsbYgc4I96sr0RLobsu65tStGEQnIiIiIiIiIiKiLlFWXYvIlAIcTa4LnKfmV7R6jkgEDPe0v9oMVI5R/R1hLpV0w2iJmsYgOhERERERERERERlFrVqD2PRiHElSICJZgZj0Yqg12lbP83K0RLi/HGF+Lpjo6wxHa1k3jJaobRhEJyIiIiIiIiIiog7RarVIya+oyzRPUuB4agHKa1StnmdnIcVE37pM83B/OXycrFiihUwWg+hERERERERERETUZgXlNYhIViAiqa4haFZJdavnmElECPVx1NU1D/ZygETMoDldGxhEJyIiIiIiIiIiomZV16px8nJhXUPQJAXOZZe26bzBbjYI83NBuL8cYwc6wdqcoUi6NvGZS0RERERERERERDoajRbnskuv1jXPx8nLRVCqNK2e52JrXtcM9Gq2uZudRTeMlqjrMYhORERERERERETUx2UWVyEiKR9HkhQ4llKAwgplq+dYmkkwbpCTLmge4GbLuubUKzGITkRERERERERE1MeUVtciMqUAEUkKRCQrcElR0eo5IhEQ7GmPMH85wvxcMLK/A8ylkm4YLVHPYhCdiIiIiIiIiIiol6tVaxCTXlxXoiUpH7EZJVBrtK2e5+NkdTVoLsdEX2c4WMm6YbREpoVBdCIiIiIiIiIiol5Gq9UiJb/8atBcgeOpBahQqls9z85CiklXy7OE+7nAx9mqG0ZLZNoYRCciIiIiIiIiIuoFFOU1OJqswJEkBY4mK5BdUt3qOWYSEUb1d7xa19wFwz3tIRGzrjmRIQbRiYiIiIiIiIiIrkFVSjWiLhfqGoKezylr03kBbrZ1JVr85Rg30AlWMoYIiVpicq+QX375Bd9//z3y8/MRGBiIV199FcHBwc0e/8MPP2DDhg3Izs6Go6MjZs6ciZUrV8Lc3BwAoFar8fnnn2Pbtm1QKBRwdXXFLbfcgkcffVTXLVir1eKzzz7D77//jtLSUowcORL/93//hwEDBnTHQyYiIiIiIiIiImqVRqNFQlYpjiTnIyJJgVOXi6BUa1o9z9XW/GqmeV1tc1c7i24YLVHvYVJB9J07d2LVqlV44403MGLECKxfvx733Xcfdu/eDWdn50bHb9++HR999BHeeecdhIaG4vLly3jxxRchEonw0ksvAQC+++47bNiwAe+99x78/Pxw9uxZvPTSS7C1tcXdd9+tO+ann37Cu+++Cy8vL6xevRr33Xcfdu7cqQvGExERERERERERdbf0wkpEJNfVNT+WokBRZW2r51jJJBg30Alh/i4I95fD39VGl0xKRO1nUkH0devWYdGiRbj11lsBAG+88QYOHjyIzZs348EHH2x0fHR0NEaOHIm5c+cCALy8vHDTTTchNjZWcMzUqVNx/fXX647ZsWMH4uLiANRlof/444945JFHMG3aNADA+++/j4kTJ2Lfvn248cYb2/UY1OrWGzT0NvWPuS8+diLiNYCor+M1gKjv4uufqG/jNaDrlFbVIjK1EEeTFYhIKcCVgspWzxGLgOGe9pjk54wwPzlCvR0gk4p1t2s0rWerE7VVb3r9t/UxmEwQXalUIiEhAQ899JBun1gsxsSJExEdHd3kOaGhodi2bRvi4uIQHByM9PR0HDp0CPPmzRMcs2nTJly6dAkDBw7E+fPncfr0abz44osAgIyMDOTn52PixIm6c2xtbTFixAhER0e3O4geHx/fruN7k7782ImI1wCivo7XAKK+i69/or6N14DOq9VocbGgFnG5NYjLVSK5sBZtCXm7W0sQ7CbDCDdzDHOVwUYmBlAJlKThXElaVw+bqE+9/k0miF5UVAS1Wt2obIuzszNSU1ObPGfu3LkoKirCHXfcAa1WC5VKhdtvvx0PP/yw7pgHH3wQ5eXlmD17NiQSCdRqNZ5++mncfPPNAID8/Hzdz2n4cxUKRbsfx/DhwyGRSNp93rVMrVYjPj6+Tz52IuI1gKiv4zWAqO/i65+ob+M1oOO0Wi2S8ysQkazA0eQCnLhUiEpl69mw9pZmmOjrhEm+coT5OcPbyaobRkvUWG96/dc/ltaYTBC9I06cOIE1a9bg9ddfR3BwMNLS0vD222/jyy+/xGOPPQYA2LVrl652up+fHxITE7Fq1Spdg1Fjk0gk1/yTp6P68mMnIl4DiPo6XgOI+i6+/on6Nl4D2iavrLquPEtSASKS85FbWtPqOTKJGKP6OyLMX45wfzmGethDImZdczIdfen1bzJBdEdHR0gkEhQUFAj2FxQUQC6XN3nO6tWrcfPNN2PhwoUAgICAAFRWVuK1117DI488ArFYjPfffx8PPvigrixLQEAAsrKysGbNGtxyyy1wcXHR/RxXV1fBzw0MDOyKh0pERERERERERL1YlVKNE5cKEJGkQESyAudzytp0XqC7LcL85Ajzl2PsQCdYyUwmdEfUp5nMK1Emk2Ho0KGIjIzUNfjUaDSIjIzE0qVLmzynuroaYrFYsK9+9kOr1eqOadh9WCKR6G738vKCi4sLIiMjERQUBAAoLy9HbGwslixZYrwHSEREREREREREvZJao8XZzBJEJCsQkaTA6StFUKpbr2zuZmeOMD8XhPvLMdHPGa62Ft0wWiJqL5MJogPA8uXL8cILL2DYsGEIDg7G+vXrUVVVhQULFgAAnn/+ebi5uWHlypUAgClTpmDdunUYMmSIrpzL6tWrMWXKFF0wfcqUKfjmm2/g4eGhK+eybt063HrrrQAAkUiEu+++G19//TX69+8PLy8vrF69Gq6urrpgPhERERERERERkaH0wkocSVIgIjkfx1IKUFxZ2+o5VjIJxg9yRphfXYkWP1ebRsmfRGR6TCqIPmfOHBQWFuKzzz5Dfn4+goKCsHbtWl05l+zsbEHm+SOPPAKRSIRPP/0Uubm5cHJywpQpU/D000/rjnnllVewevVqvPHGG7qSLYsXL9bVTAeABx54AFVVVXjttddQWlqKUaNGYe3atTA3N+++B09ERERERERERCarpLIWkamKq4FzBa4UVLZ6jlgEjPB2QLifHJP85Aj1cYRMKm71PCIyLSJtfV0T6hS1Wo2YmBiEhIT0mYL69fryYyciXgOI+jpeA4j6Lr7+ifq2vnANUKo0OJNWhIgkBY4kKxCfUQxNG6JoA5ytEOYvR5ifCyb4OsPe0qzrB0vUjXrT67+tj8WkMtGJiIiIiIiIiIh6glarRVJeeV2meVI+TlwqRKVS3ep5jlZmmOgn12WbeztZdcNoiag7MYhORERERERERER9Ul5pta4ZaESyAnllNa2eI5OIMXqAI8L85Qj3c8FQDzuIxaxrTtSbMYhORERERERERER9QqVShROXCuuC5kkKXMgta9N5Qf3sEO5fl2k+doATLGXXdgkLImofBtGJiIiIiIiIiKhXUmu0iM8sQURSPo4kKXAmrQi16tYLm7vbWdRlmvvLMdFXDhdb824YLRGZKgbRiYiIiIiIiIio10grqMSR5HxEJClwLKUAJVW1rZ5jLZNggq8zJvnVBc59XWwgErFECxHVYRCdiIiIiIiIiIiuWcWVShxLKcCRJAWOJiuQVljZ6jkSsQgjvOwR5u+CcH85QrwdYCYRd8NoiehaxCA6ERERERERERFdM2pUapy5UoyIq9nm8Zkl0LReoQWD5NaY5CdHmL8cE3ydYWdh1vWDJaJegUF0IiIiIiIiIiIyWVqtFhdyy+qagSYrcCK1EFW16lbPc7Qy05VnmeQnh5ejVTeMloh6IwbRiYiIiIiIiIjIpOSWVuuC5hHJCuSX1bR6jkwqxpgBjgjzqyvRMqSfHcRi1jUnos5jEJ2IiIiIiIiIiHpURY0KJy4VICKpABHJ+biYW96m84b0s0O4f12JljEDnGBhJunikRJRX8QgOhERERERERERdSu1Rou4jGJEJClwJFmB6LQi1KpbL2zez94CYVfrmk/yk0NuY94NoyWivo5BdCIiIiIiIiIi6lJarRZXCirryrMkKXAsRYHSalWr59mYSzF+kLMu23yQ3BoiEUu0EFH3YhCdiIiIiIiIiIiMrrhSiaPJdeVZjiQpkFFU1eo5ErEIId4OCLvaEHSEtwPMJOJuGC0RUfMYRCciIiIiIiIiok6rUalx+kqRriFofGYJtK1XaMEgF2uE+8kR5u+CcYOcYGdh1vWDJSJqBwbRiYiIiIiIiIio3bRaLc7nlOFYSiGOJCsQdakA1bWaVs9zspZhkp8c4X5yTPKXw9PBshtGS0TUcQyiExERERERERFRm+SUVCMiWYEjF/Nw6Hw+imtyWz3HXCrG2IFOCPOrawY6pJ8dxGLWNSeiaweD6ERERERERERE1KTyGhVOpBbgyNUSLcl55W06b6iHHcL85Qj3c8HoAY6wMJN08UiJiLoOg+hERERERERERAQAUKk1iMssqatrnqTAmbQiqDStFzb3dLCsyzT3l2OSrzOcbcy7YbRERN2DQXQiIiIiIiIioj5Kq9XickElIpLycSRJgcjUApRVq1o9z8ZciiBnCW4cNQiTB7tioNwaIhFLtBBR79ThILparcbu3btx4sQJFBQU4Mknn0RAQADKysoQGRmJkSNHQi6XG3OsRERERERERETUSUUVShxNqcs0P5KkQGZxVavnSMUihPo41DUE9ZdjWD9bnI2PQ0hIf0gkLNVCRL1bh4LopaWluP/++xEXFwcrKytUVVVh6dKlAAArKyu89dZbmD9/Pp555hmjDpaIiIiIiIiIiNqnulaN01eKcCRJgaPJCpzNKoG29Qot8HWxRri/C8L85Bg3yAm2Fma629RqdReOmIjItHQoiP7hhx8iKSkJ33//PYKCgjBx4kTdbRKJBDNnzsShQ4cYRCciIiIiIiIi6mYajRbnc8oQkVxXouXk5UJU12paPc/ZWoZJfnKE+csR5ieHh4NlN4yWiMj0dSiIvn//ftx1112YNGkSioqKGt0+YMAAbN26tdODIyIiIiIiIiKi1mWXVOkyzY8mK6AoV7Z6jrlUjLEDnRDuL0eYnwsC3W0hFrOuORFRQx0KopeVlcHLy6vZ21UqFZf1EBERERERERF1kfIaFY6nFCAiWYEjSflIya9o9RyRCBjqYYcwPxeE+8sxqr8jLMxYz5yIqDUdCqL7+PggISGh2duPHj0KX1/fDg+KiIiIiIiIiIj0VGoNYjOKEZFUgIjkfESnFUOlab2wuaeDZV2mub8cE33lcLKWdcNoiYh6lw4F0W+77TZ8+OGHGDduHMaPHw8AEIlEUCqV+PLLL3HkyBG8+eabRh0oEREREREREVFfodVqcUlRcTXTXIHjKQUoq1G1ep6tuRQTfJ2vBs5dMMDZCiIRS7QQEXVGh4Loy5YtQ3JyMp555hnY2dkBAJ599lkUFxdDpVJh8eLFWLhwoVEHSkRERERERETUmxWU1+BoSgGOJikQkaxAZnFVq+dIxSKM9HGsawbqL0ewpz2kEnE3jJaIqO/oUBBdJBLhrbfewvz58/HPP//gypUr0Gg08PHxwezZszFmzBhjj5OIiIiIiIiIqFeprlXj1OUiHEnOR0SSAglZpW06z8/VBmF+coT7yzFukDNszDsU3iEiojbq1FV29OjRGD16tLHGQkRERERERETUa2k0WpzLLsXR5LpM86hLhahRaVo9T25jjjA/Z4T5u2CSnzP62Vt2w2iJiKgepyqJiIiIiIiIiLpIVnEVIpIUOJKswLFkBQoqlK2eY2EmxtiBzgj3qyvREuhuy7rmREQ9qENB9BtuuKHVi7dIJMK+ffs6NCgiIiIiIiIiomtRWXUtjqcWIiIpH0eSFUjNr2j1HJEIGO5pjzA/OcL85BjZ3xEWZpJuGC0REbVFh4LoY8eObRREV6vVyMrKwpkzZ+Dv748hQ4YYZYBERERERERERKaqVq1BbHoxjlxtBhqTXgy1RtvqeV6Olgj3lyPMzwUTfZ3haC3rhtESEVFHdCiI/u677zZ72/nz53Hfffdh7ty5HR4UEREREREREZEp0mq1SFVU1JVoSVLgeGoBymtUrZ5nZyHFRF85JvnLEe4nR39nK5ZoISK6Rhi9JnpgYCAWL16MDz/8EFu2bDH23RMRERERERERdauC8hpEJCsQkaTA0WQFskqqWz3HTCJCqI+jrq75cE97SCXibhgtEREZW5c0FnV2dkZycnJX3DURERERERERUZeqrlXj5OVCXbb5uezSNp032M0Gk/zkCPeXY9xAZ1ibd0nYhYiIupnRr+ZFRUXYvHkz3N3djX3XRERERERERERGp9FocS679Gpd83ycvFwEpUrT6nkutua6ZqBh/nK42Vl0w2iJiKi7dSiIfvfddze5v6ysDKmpqaitrcX777/fqYEREREREREREXWVzOIqRCTl40iSAsdSClBYoWz1HEszCcYOdKprCOovR4CbLeuaExH1AR0Komu1jbtMi0QieHl5YcKE/2/vzqOjqtP8j3+qshKyVwUwhkVSAWQJBhUhqaioraJEQQZU2qUzeFwQOcfBX+PSmgZROD3iCLbT4tIYaZXmiOMILjMiiglEcAECuFUBYQdTlZ2Qrer+/ghWdwZihZi93q9zPIfcuk/luer9HuqTb547XlOnTlVycnKrm3rjjTf06quvqri4WMOGDdPjjz+u1NTUZs9/7bXX9NZbb+no0aOKi4vTNddco7lz5yosLEySdMUVV+jw4cOn1c2YMUM5OTmSpNtvv11bt25t8vrNN9+sBQsWtPo6AAAAAABA11BRU6+CPW5tOjXbfK/rhN8ak0lKPTdG9hSr7LYEjRkYq7DgoA7oFgDQlbQqRF+5cmVb9+HzwQcfaNGiRZo/f75Gjx6t3NxczZw5Ux999JEsFstp569du1ZLlizR008/rbS0NBUVFenhhx+WyWTSI488Ikl6++235fF4fDUOh0PZ2dm69tprm7zX9OnTNWfOHN/XvXr1aqerBAAAAAAA7ane49X2g2WNI1ocxdpxqFwe7+mbAv+v/vG9ZLclKDPFqvRki2IjQjugWwBAV9blnnCxYsUKTZ8+XVOnTpUkzZ8/X5999pnWrFmju++++7Tzt23bpjFjxigrK0uSlJSUpEmTJmnHjh2+c+Lj45vUvPTSSxowYIDGjh3b5Hh4eLgSEhLa+pIAAAAAAEA7MwxDe4qrlO9wKd/p0hd7S1RV2+C3Ljo8WBmnZppn2hI0wBLRAd0CALqTFoXo7777bqvefPLkyWd1fl1dnXbv3q177rnHd8xsNis9PV3btm07Y01aWpree+89FRYWKjU1VQcPHtTGjRt14403Nvs93nvvPWVnZ582t2zt2rV67733lJCQoAkTJmjWrFlnvRv9n3e8B4qfrzkQrx0AawAQ6FgDgMDF/Y+uwFVVq8173Mp3urVpj1vHymv81oQEmTRmQKwykq2y2ywaeW6Mgsz/yAf4f7plWAOAwNWT7v+WXkOLQvSHH374rBswmUxnHaKXlpbK4/GcNrbFYrFo7969Z6zJyspSaWmpZsyYIcMw1NDQoFtuuUX33nvvGc9fv369KisrNWXKlCbHJ02apMTERPXp00c//PCDnnnmGe3bt09//vOfz+oadu7ceVbn9ySBfO0AWAOAQMcaAAQu7n90pNoGQ9+66lR4vFaFx+tUVO5/p7kkDYgOVmrfUI3uG6bzE0LUK9gsqVKGu1I73e3bc0/HGgAErkC6/1sUon/yySft3UerbdmyRcuXL1dOTo5SU1N14MABPfXUU3rhhRd0//33n3b+mjVrdOmll6pv375Njt98882+Pw8dOlQJCQn63e9+pwMHDmjAgAEt7mfUqFEKCgqsh4x4PB7t3LkzIK8dAGsAEOhYA4DAxf2PjuD1Gtp9tEKbnG7lO136+kCZ6hq8fuv6RIUpPdkiu82ijGSL+kSHd0C3gYU1AAhcPen+//la/GlRiH7uuef+6oZaIi4uTkFBQXK7m/4Y2O12y2q1nrFm6dKluuGGGzRt2jRJjQF4dXW1nnjiCd13330ym82+cw8fPqzNmzfr+eef99vL6NGjJUn79+8/qxA9KCio2//P01qBfO0AWAOAQMcaAAQu7n+0tYMl1drkdCnP6dJmp0ul1fV+a3qFBGnc4HjZUxJkt1k1pG/kaSNc0T5YA4DAFUj3f5d6sGhoaKhGjBihgoICXXXVVZIkr9ergoIC3XbbbWesqampaRKUS/L9xzOMpk/dfuedd2SxWHT55Zf77eW7776TJB40CgAAAABAOyo/Wa+CPW7lO4uV73CpyF3tt8ZskkYlxSrz1ANBxwyIU2iw2W8dAACt0eoQvbi4WG+//ba+/fZbVVZWyutt+utUJpNJubm5Z/2+2dnZmjdvnkaOHKnU1FTl5ubq5MmTuummmyRJv//979W3b1/NnTtXkjRhwgStWLFCw4cP941zWbp0qSZMmNDkJyFer1fvvPOOJk+erODgppd94MABrV27VpdddpliY2P1ww8/aNGiRbr44os1bNiws74GAAAAAABwZnUNXm0/WKZ8R7HynC7tOFgmr+G/bqAlQnabVZkpVo0fbFVMREj7NwsAgFoZon///fe64447VFNTo/POO08//vijbDabKioqdPz4cQ0YMED9+vVrVUPXXXedSkpKtGzZMhUXF+v888/XK6+84hvncvTo0SY7z++77z6ZTCY999xzOn78uOLj4zVhwgQ9+OCDTd538+bNOnLkiKZOnXra9wwJCVFBQYFef/11VVdX65xzztHVV1+tWbNmteoaAAAAAABAI8Mw5PypSnkOl/KdLm3Z69aJOo/fupheIcqwWWS3JSgzxar+8REd0C0AAKdrVYi+ZMkSRURE6N1331V4eLjS09P16KOPavz48frwww/1xz/+Uc8880yrm7rtttuaHd+ycuXKJl8HBwdr9uzZmj179i++p91u1w8//HDG18455xz97W9/a12zAAAAAACgieLK2sa55g6XNjldOlZR47cmNMisCwfGyZ5ild1m1chzYxRkZq45AKDztSpE/+abb3TXXXcpMTFRZWVlkv4xf3zixIn6+uuv9ac//YlgGgAAAACAAHCyzqMt+9zKP7Xb/PtjlS2qG9YvSvZTc83HnheviNAu9eg2AAAktTJE93q9vvEq0dHRCgoK8oXpkjR06FCtWbOmTRoEAAAAAABdi8draPeR8sYRLQ6Xvt5fqjqP129d3+gw2W0JsqdYlGGzqk9UeAd0CwDAr9OqED0pKUmHDh2SJJnNZiUlJamgoEDXXXedpMad6lFRUW3XJQAAAAAA6FQHS6pPzTUv1uY9bpVV1/utiQgN0rjBFt8DQW19ImUyMaIFANC9tDhELy8vV0xMjKTG+eIfffSR7+Gdt956qxYvXqyDBw/KMAxt3bpV2dnZ7dMxAAAAAABod+Un61Wwx+V7IOh+d7XfGrNJGt0/tnFEi82qtAFxCg02d0C3AAC0nxaH6BkZGbrsssuUlZWl7OxsXX/99aqvr1dISIjuvPNOVVdX63//939lNps1a9Ys3XPPPe3ZNwAAAAAAaEN1DV59c6DUN9e88FCZvIb/ukGWiFMPA03Q+GSLYnqFtH+zAAB0oBaH6Ndcc402bNigDRs2qHfv3vrNb36jG264QePGjZPJZNKsWbM0a9as9uwVAAAAAAC0EcMw5Pip6tRc82Jt2Vei6jqP37rYiBBlJFtPBedW9Y+P6IBuAQDoPC0O0ZcsWaKamhqtX79e69at09q1a/Xuu+/KYrFo0qRJysrK0ogRI9qzVwAAAAAA8Cv8VFGjTadGtGxyunS8otZvTWiQWRcNipM9xapMW4JGJEbLbGauOQAgcJzVg0XDw8M1adIkTZo0SeXl5frwww+1bt065ebmKjc3VwMHDtQNN9ygrKws9e/fv716BgAAAAAALVBd16At+0oaR7Q4XPrheGWL6ob1i1JmilX2lASNHRSvXqFB7dwpAABd11mF6P8sJiZGt9xyi2655RYdP35ca9eu1fvvv69ly5bp+eef1+jRo7Vq1aq27BUAAAAAAPwCj9fQrsPlyne6lOco1jf7y1Tn8fqt6xcd3rjTPMWq9GSrEqLCOqBbAAC6h1aH6P+sb9++uuuuu5SZmally5bpk08+0Y4dO9rirQEAAAAAwC844K5WnrNY+Q6XNu9xq/xkvd+a3qFBGjfY4gvOkxMiZTIxogUAgDP51SH6kSNHtG7dOq1bt04Oh0OGYSgtLU1ZWVlt0R8AAAAAAPgn5dX12rzHpTxn44iWAyXVfmuCzCaNToqRPSVBdptVaQNiFRJk7oBuAQDo/loVopeUlPjmoW/fvl2GYWjw4MGaM2eOsrKylJSU1NZ9AgAAAAAQkGobPPpmf5nyT+0233m4XF7Df9151t6y26yyp1g1Ptmi6PCQ9m8WAIAeqMUhenV1tT7++GOtW7dOBQUFamhoUEJCgu68805lZWVpxIgR7dknAAAAAAABwTAM/Xi8SnmOYuU7Xdqyt0Qn6z1+6+IiQpRhs/qC86S4iA7oFgCAnq/FIXp6erpqa2sVERGhrKwsZWVlady4cTKb+fUvAAAAAAB+jeMVNcp3uJTvbPynuLLWb01osFkXD4qT3ZagzBSrhp8TLbOZueYAALS1Fofo48ePV1ZWlq688kqFhfGUbgAAAAAAWutEbYO27itRnsOlfGexfjxe1aK64edEKzPFqgybVWPPi1d4SFA7dwoAAFocov/lL39pzz4AAAAAAOixPF5DhYfKlO9ofCDotgOlqvf4H2x+Tky4bzxLhs0qaySb2gAA6GiterAoAAAAAAD4ZfvdJxp3mjtc2rzHpYqaBr81kWHBGjfYIrvNIntKgpITestkYkQLAACdiRAdAAAAAIA2UFZdp01Ot/KdjQ8EPVhy0m9NkNmkC/rHym6zKjPFqtH9YxUSxLPHAADoSgjRAQAAAABohdoGj77eX+p7IOjOw+Uy/E9o0eCE3o0jWmxWjUu2KDo8pP2bBQAArUaIDgAAAABACxiGoe+PVfpC8637SnSy3uO3Lr53qDJsVmXarMpIserc2F4d0C0AAGgrhOgAAAAAADTjWHmN8p0u5TuKle90y1VV67cmNNissYPiZU9p3G0+/Jxomc3MNQcAoLsiRAcAAAAA4JQTtQ36Yq/7VHDukuOnqhbVjUiMlj3Fqkxbgi4aFKfwkKB27hQAAHQUQnQAAAAAQMBq8HhVeLi8cUSLw6VvDpSqwet/sHliTHjjTvOUBGUkW2SJDOuAbgEAQGcgRAcAAAAABAzDMLTfXa28UyNaNu9xq7KmwW9dVFiwxiVblHlqRMt51t4ymRjRAgBAICBEBwAAAAD0aKUn6rRpT+NO8zyHS4fLTvqtCTKblNY/tnFES4pVo5NiFRxk7oBuAQBAV0OIDgAAAADoUWobPPq6qPTUbnOXdh0pl+F/QouSE3orMyVBdptVlwyOV1R4SPs3CwAAujxCdAAAAABAt+b1Gvr+WKXyncXKc7j0ZVGJauq9fussvUOVYbM2zja3WZUY26sDugUAAN0NIToAAAAAoNs5Vl6jPEex8p0ubXK65Kqq81sTFmzW2PPiZT8VnJ/fL1pmM3PNAQDALyNEBwAAAAB0eVW1Dfpij1v5TpfynS45f6ryW2MySSMSo2W3JSgzxaoLB8YpPCSoA7oFAAA9CSE6AAAAAKDLafB4teNQufIdLuU7i7XtQJkavP4Hm58b28u30zzDZlV879AO6BYAAPRkhOgAAAAAgE5nGIb2uU4o3+lSnsOlL/a4VVnb4LcuKixY45Mtykyxyp6SoEGWCJlMjGgBAABthxAdAAAAANApSk7UaZPTdWq3uUuHy076rQk2m5Q2IFZ2W4LsKVaNTopRcJC5A7oFAACBihAdAAAAANAhauo9+qqoVHnOYuU7XNp9pKJFdbY+kbLbrMpMseqSwRZFhvFRFgAAdBz+5gEAAAAAaBder6HvjlX4dppv3Vei2gav3zprZJjsNosyTs02PyemVwd0CwAAcGaE6AAAAACANnOk7KTyHS7lOV3a7HTJfaLOb014iFljz7Mo81RoPqxfFHPNAQBAl0GIDgAAAABotcqaBn2536V8R7HynC7tLT7ht8ZkkkadG6MMm1WZNqvGDIxTeEhQB3QLAABw9gjRAQAAAAAtVu/xasfBMuX9WKz/2eGWY80n8ngNv3VJcb2UmWKV3Zag9GSL4nqHdkC3AAAAvx4hOgAAAACgWYZhaK/rROOIFodLX+x1q6q2wW9dVHiw0pMtsqckKNNm1UBLBCNaAABAt9QlQ/Q33nhDr776qoqLizVs2DA9/vjjSk1Nbfb81157TW+99ZaOHj2quLg4XXPNNZo7d67CwsIkSVdccYUOHz58Wt2MGTOUk5MjSaqtrdXixYv1wQcfqK6uTna7XTk5ObJare1zkQAAAADQRbmrarVpj1v5jmLlO1w6Ul7jtybYbNKYgXG+ueajzo1RcJC5A7oFAABoX10uRP/ggw+0aNEizZ8/X6NHj1Zubq5mzpypjz76SBaL5bTz165dqyVLlujpp59WWlqaioqK9PDDD8tkMumRRx6RJL399tvyeDy+GofDoezsbF177bW+Y08//bQ2btyo5557TlFRUXryySc1e/ZsrVq1qv0vGgAAAAA6UU29R18Wlfh2m397tKJFdUnRwbpq5Lm6dEiCLjnPot5hXe4jJgAAwK/W5f6Gs2LFCk2fPl1Tp06VJM2fP1+fffaZ1qxZo7vvvvu087dt26YxY8YoKytLkpSUlKRJkyZpx44dvnPi4+Ob1Lz00ksaMGCAxo4dK0mqrKzUmjVr9Mwzz2j8+PGSGkP16667Ttu3b9cFF1zQ4v7/OawPFD9fcyBeOwDWACDQsQYA3ZPXa+i7Y5XKd7q0yenWl/tLVdfg9VtnjQyV3WZVhs2iSwbFqni/Q6NGDVFQUONDQVkLgMDB3wGAwNWT7v+WXkOXCtHr6uq0e/du3XPPPb5jZrNZ6enp2rZt2xlr0tLS9N5776mwsFCpqak6ePCgNm7cqBtvvLHZ7/Hee+8pOzvbN49v165dqq+vV3p6uu+85ORkJSYmnnWIvnPnzhaf29ME8rUDYA0AAh1rAND1FVd7tON4rQqP12nn8VpV1Pl/GGhokDQiIVSpfcM0um+oBkQHy2TySipW8f5iSdz/QKBjDQACVyDd/10qRC8tLZXH4zltbIvFYtHevXvPWJOVlaXS0lLNmDFDhmGooaFBt9xyi+69994znr9+/XpVVlZqypQpvmMul0shISGKjo4+7fsWFxef1TWMGjXKtwsjUHg8Hu3cuTMgrx0AawAQ6FgDgK6rsqZeX+wt0SanW/l7XNrnqvZbYzJJoxJjlGGzKMNm0ZgBcQoLPvNcc+5/ILCxBgCBqyfd/z9fiz9dKkRvjS1btmj58uXKyclRamqqDhw4oKeeekovvPCC7r///tPOX7NmjS699FL17du3XfoJCgrq9v/ztFYgXzsA1gAg0LEGAJ2v3uPV9oNlynO4tMnp0vaDZfJ4/e827x/fS3ZbgjJTrEpPtig2IvSsvi/3PxDYWAOAwBVI93+XCtHj4uIUFBQkt9vd5Ljb7ZbVaj1jzdKlS3XDDTdo2rRpkqShQ4equrpaTzzxhO677z6Zzf/YNXH48GFt3rxZzz//fJP3sFqtqq+vV0VFRZPd6G63WwkJCW11eQAAAADQZgzD0J7iE8p3FCvf6dIXe0tUVdvgty46PFjpyVbZU6zKTLFqoKV3B3QLAADQfXWpED00NFQjRoxQQUGBrrrqKkmS1+tVQUGBbrvttjPW1NTUNAnKJfl+AmIYTXddvPPOO7JYLLr88subHB85cqRCQkJUUFCga665RpK0d+9eHTly5KzmoQMAAABAe3JV1WqT06V8h0v5TpeOltf4rQkJMmnMgDhlplhlT0nQqHNjFGQ2dUC3AAAAPUOXCtElKTs7W/PmzdPIkSOVmpqq3NxcnTx5UjfddJMk6fe//7369u2ruXPnSpImTJigFStWaPjw4b5xLkuXLtWECROa/DqB1+vVO++8o8mTJys4uOllR0VFaerUqVq8eLFiYmIUGRmphQsXKi0tjRAdAAAAQKepqfdo674S5TtdynO49N3RihbVDekb6RvRMva8ePUO63If/QAAALqNLvc3qeuuu04lJSVatmyZiouLdf755+uVV17xjXM5evRok53n9913n0wmk5577jkdP35c8fHxmjBhgh588MEm77t582YdOXJEU6dOPeP3ffTRR2U2mzVnzhzV1dXJbrcrJyen/S4UAAAAAP4Pr9fQ7iMVynMWK9/h0lf7S1XX4PVblxAVpkxb44gWu82qPtHhHdAtAABAYDAZ/3fmCVrF4/Fo+/btuuCCCwJmoP7PAvnaAbAGAIGONQD49Q6VVivf4VKe06XNTpdKq+v91vQKCdIlg+Nlt1mVmZKgIX0jZTJ17IgW7n8gsLEGAIGrJ93/Lb2WLrcTHQAAAAB6svKT9SrY41a+s1ibnG7tc53wW2M2SaOSYn27zdMGxCosuHt/aAUAAOguCNEBAAAAoB3Ve7zadqBM+Y5i5Tld2nGwTN4W/D7wQEuEMmxWZdqsSk+2KiYipP2bBQAAwGkI0QEAAACgDRmGIedPVcpzuLTJ6dIXe906UefxWxfTK0QZNovvgaD94yM6oFsAAAD4Q4gOAAAAAL9ScWWtNjldvuD8WEWN35qQIJMuHBinzJQE2W1WjTw3RkHmjp1rDgAAAP8I0QEAAADgLJ2s82hrUUnjiBaHS98fq2xR3bB+UbKfmms+9rx4RYTykQwAAKCr429sAAAAAOCHx2to95Fy5Tlcyne49PX+UtV5vH7r+kSFyZ5iVWaKVRk2q/pEhXdAtwAAAGhLhOgAAAAAcAYHS6qV72wMzTftcamsut5vTURokMYNtshuawzObX0iZTIxogUAAKA7I0QHAAAAAEnlJ+tVsKdxrnm+06X97mq/NWaTlJoUq8wUq+w2q9IGxCk02NwB3QIAAKCjEKIDAAAACEh1DV5tO1Cq/FMPBC08VCav4b9ukCVC9lOh+fjBVsVEhLR/swAAAOg0hOgAAAAAAoJhGHL8VHVqrnmxtuwrUXWdx29dbESIMpKtvuC8f3xEB3QLAACAroIQHQAAAECP9VNljTad2mm+yenS8YpavzWhQWZdNChOGafmmo9IjFGQmbnmAAAAgYoQHQAAAECPUV3XoC37ShofBup06ftjlS2qG9YvqnGueUqCxg6KV6/QoHbuFAAAAN0FIToAAACAbsvjNbTrcPmpuebF+mZ/meo8Xr91faPDZLclKDPFqgybVQlRYR3QLQAAALojQnQAAAAA3coBd7XynMXa5HRpk9Ot8pP1fmt6hwZp3GCL7CmNI1qSEyJlMjGiBQAAAP4RogMAAADo0sqr67V5j0t5TpfyHS4dKKn2W2M2SaP7xyrT1jiiJW1ArEKCzB3QLQAAAHoaQnQAAAAAXUpdg1df7y9VvrNY+Q6Xdh4ul9fwX3eetbfsNqvsKVaNG2xRTK+Q9m8WAAAAPR4hOgAAAIBOZRiGfjxepTxHsfKdLm3ZW6KT9R6/dXERIUq3WU/tNrcqKS6iA7oFAABAoCFEBwAAANDhfqqoUZ7DpU1Ol/KdLv1UWeu3JjTYrIsHxfkeCDr8nGiZzcw1BwAAQPsiRAcAAADQ7qrrGrRlb4nyHC7lO4v14/GqFtWdf060MlOsstusunhQvHqFBrVzpwAAAEBThOgAAAAA2pzHa6jwUJk2OV3Kc7j0zYFS1Xv8DzY/JybcN9c8w2aVNTKsA7oFAAAAmkeIDgAAAKBN7HefaNxp7nBp8x6XKmoa/NZEhgVr3OD4U8F5gpITestkYkQLAAAAug5CdAAAAACtUlZdp8173L4RLQdLTvqtCTKbdEH/WNltVmWmWDW6f6xCgswd0C0AAADQOoToAAAAAFqktsGjr/eXKt/R+DDQnYfLZfif0KLB1t6yn5prPi7ZoujwkPZvFgAAAGgjhOgAAAAAzsgwDP1wvFL5jsa55lv3lehkvcdvXXzvUGXYrMq0WZWRYtW5sb06oFsAAACgfRCiAwAAAPA5XlFzaq55sfKdbrmqav3WhAabNXZQvG+3+fBzomU2M9ccAAAAPQMhOgAAABDgauo9enHjHr1feFSOn6paVDMiMfrUw0CtunhQvMJDgtq5SwAAAKBzEKIDAAAAAe4/P9ujZZ84fvGcxJjwxp3mKQnKSLbIEhnWQd0BAAAAnYsQHQAAAAhwFSfrTzsWGRascYMtykxp3G0+2NpbJhMjWgAAABB4CNEBAACAAHf/BJvcJ+pUeqJOFw2KU2aKVaOTYhUcZO7s1gAAAIBOR4gOAAAABLiEqDA9f2taZ7cBAAAAdElsLQEAAAAAAAAAoBmE6AAAAAAAAAAANIMQHQAAAAAAAACAZhCiAwAAAAAAAADQDEJ0AAAAAAAAAACaEdzZDfQUhmFIkjweTyd30vF+vuZAvHYArAFAoGMNAAIX9z8Q2FgDgMDVk+7/n6/h52y3OSbD3xlokbq6Ou3cubOz2wAAAAAAAAAAnIVRo0YpNDS02dcJ0duI1+tVQ0ODzGazTCZTZ7cDAAAAAAAAAPgFhmHI6/UqODhYZnPzk88J0QEAAAAAAAAAaAYPFgUAAAAAAAAAoBmE6AAAAAAAAAAANIMQHQAAAAAAAACAZhCiAwAAAAAAAADQDEJ0AAAAAAAAAACaQYgOAAAAAAAAAEAzCNEBAAAAAAAAAGgGIToAAAAAAAAAAM0gRMev9sYbb+iKK67QqFGjNG3aNBUWFnZ2SwDa2PLlyzV16lSlpaVp/PjxmjVrlvbu3dvknNraWs2fP1+XXHKJ0tLS9MADD8jlcnVSxwDa00svvaShQ4fqqaee8h1jDQB6ruPHj+uhhx7SJZdcotTUVGVlZWnnzp2+1w3D0NKlS2W325Wamqrf/e53Kioq6ryGAbQZj8ej5557TldccYVSU1N11VVX6YUXXpBhGL5zWAOAnuPLL7/UvffeK7vdrqFDh2r9+vVNXm/J/V5WVqa5c+dqzJgxuuiii/Too4/qxIkTHXgV7YMQHb/KBx98oEWLFun+++/Xf/3Xf2nYsGGaOXOm3G53Z7cGoA1t3bpVv/3tb7V69WqtWLFCDQ0Nmjlzpqqrq33nPP300/r000/13HPPaeXKlfrpp580e/bsTuwaQHsoLCzUqlWrNHTo0CbHWQOAnqm8vFy33nqrQkJC9PLLL+v999/XvHnzFBMT4zvn5Zdf1sqVK/XHP/5Rq1evVq9evTRz5kzV1tZ2YucA2sLLL7+st956S0888YQ++OADPfTQQ3rllVe0cuXKJuewBgA9Q3V1tYYOHaqcnJwzvt6S+/2hhx6S0+nUihUr9OKLL+qrr77SE0880VGX0H4M4Ff4l3/5F2P+/Pm+rz0ej2G3243ly5d3YlcA2pvb7TaGDBlibN261TAMw6ioqDBGjBhhfPjhh75znE6nMWTIEGPbtm2d1CWAtlZVVWVcffXVxqZNm4zbbrvNWLhwoWEYrAFAT/bv//7vxq233trs616v18jIyDBeeeUV37GKigpj5MiRxrp16zqiRQDt6O677zYeeeSRJsdmz55tzJ071zAM1gCgJxsyZIjx8ccf+75uyf3+82eAwsJC3zkbN240hg4dahw7dqzjmm8H7ERHq9XV1Wn37t1KT0/3HTObzUpPT9e2bds6sTMA7a2yslKSfLvQdu3apfr6+ibrQXJyshITE7V9+/bOaBFAO1iwYIEuu+yyJve6xBoA9GQbNmzQyJEjNWfOHI0fP16TJ0/W6tWrfa8fOnRIxcXFTe7/qKgojR49ms8EQA+QlpamL774Qvv27ZMkff/99/r666916aWXSmINAAJJS+73bdu2KTo6WqNGjfKdk56eLrPZ3O3HPwd3dgPovkpLS+XxeGSxWJoct1gsp81KBtBzeL1ePf300xozZoyGDBkiSXK5XAoJCVF0dHSTcy0Wi4qLizujTQBt7P3339e3336rt99++7TXWAOAnuvgwYN66623lJ2drXvvvVc7d+7UwoULFRISoilTpvju8TN9JuC5CED3d/fdd6uqqkoTJ05UUFCQPB6PHnzwQd1www2SxBoABJCW3O8ul0vx8fFNXg8ODlZMTEy3/1xAiA4AOCvz58+Xw+HQm2++2dmtAOggR48e1VNPPaW//vWvCgsL6+x2AHQgwzA0cuRI/du//Zskafjw4XI4HFq1apWmTJnSyd0BaG8ffvih1q5dqyVLlshms+m7777TokWL1KdPH9YAAAGFcS5otbi4OAUFBZ32EFG32y2r1dpJXQFoTwsWLNBnn32m3Nxc9evXz3fcarWqvr5eFRUVTc53u91KSEjo6DYBtLHdu3fL7Xbrpptu0vDhwzV8+HBt3bpVK1eu1PDhw1kDgB4sISFBycnJTY4NHjxYR44c8b0uic8EQA/1pz/9SXfffbeuv/56DR06VJMnT9add96p5cuXS2INAAJJS+53q9WqkpKSJq83NDSovLy8238uIERHq4WGhmrEiBEqKCjwHfN6vSooKFBaWlondgagrRmGoQULFujjjz9Wbm6u+vfv3+T1kSNHKiQkpMl6sHfvXh05ckQXXHBBB3cLoK2NGzdOa9eu1bvvvuv7Z+TIkcrKyvL9mTUA6JnGjBnjm4X8s6KiIp177rmSpKSkJCUkJDS5/6uqqrRjxw4+EwA9QE1NjUwmU5NjQUFBMgxDEmsAEEhacr+npaWpoqJCu3bt8p3zxRdfyOv1KjU1tcN7bkuMc8Gvkp2drXnz5mnkyJFKTU1Vbm6uTp48qZtuuqmzWwPQhubPn69169bpP//zP9W7d2/fLLOoqCiFh4crKipKU6dO1eLFixUTE6PIyEgtXLhQaWlpBGhADxAZGel7BsLPIiIiFBsb6zvOGgD0THfeeaduvfVWvfjii5o4caIKCwu1evVqLViwQJJkMpl0xx136C9/+YsGDhyopKQkLV26VH369NFVV13Vyd0D+LUmTJigF198UYmJib5xLitWrNDUqVMlsQYAPc2JEyd04MAB39eHDh3Sd999p5iYGCUmJvq935OTk5WZmanHH39c8+fPV319vZ588kldf/316tu3b2ddVpswGT//+BBopb/97W969dVXVVxcrPPPP19/+MMfNHr06M5uC0AbGjp06BmPL1q0yPdDs9raWi1evFjvv/++6urqZLfblZOT0+1/ZQvAmd1+++0aNmyYHnvsMUmsAUBP9umnn+rZZ59VUVGRkpKSlJ2drenTp/teNwxDy5Yt0+rVq1VRUaELL7xQOTk5Ou+88zqxawBtoaqqSkuXLtX69evldrvVp08fXX/99br//vsVGhoqiTUA6Em2bNmiO+6447TjU6ZM0eLFi1t0v5eVlenJJ5/Uhg0bZDabdfXVV+sPf/iDevfu3ZGX0uYI0QEAAAAAAAAAaAYz0QEAAAAAAAAAaAYhOgAAAAAAAAAAzSBEBwAAAAAAAACgGYToAAAAAAAAAAA0gxAdAAAAAAAAAIBmEKIDAAAAAAAAANAMQnQAAAAAAAAAAJpBiA4AAAAAAAAAQDMI0QEAAAAAAAAAaEZwZzcAAAAAoPWqq6v1+uuv63/+539UVFSk+vp6xcfHKykpSRdeeKGmTZumAQMGSJKuuOIKSdKGDRs6s2UAAACgWyFEBwAAALqpqqoqzZgxQz/88IMGDhyorKwsxcXFqbS0VIWFhXrppZc0YMAAX4gOAAAA4OwRogMAAADdVG5urn744QdNmzZNTz75pEwmU5PXDx48qLq6uk7qDgAAAOgZCNEBAACAbmr79u2SpN/+9renBeiS1L9/f0nSoUOHdOWVV/qODx061Pfn2bNn64EHHvB9/eWXX+rVV1/Vtm3bdOLECSUmJmrixIm699571atXL995W7Zs0R133KHZs2dr/PjxWrp0qXbt2qWgoCCNHz9eDz30kAYOHNikn6KiIi1fvlxbtmzRTz/9pIiICPXr10+XXHKJHn300TNeAwAAANDZCNEBAACAbio2NlaStG/fPp1//vnNnhcdHa3Zs2crNzdXknTnnXf6Xhs7dqzvz2+++aYWLFig6OhoTZgwQfHx8dq1a5defPFFbdmyRa+//rpCQ0ObvPf27du1fPlyZWZm6vbbb5fD4dDHH3+sr776SqtXr/YF+cePH9e0adN08uRJXXbZZbruuut08uRJFRUV6a233tK8efMUHMzHEwAAAHQ9JsMwjM5uAgAAAMDZ++STTzRr1iz17t1bN998szIyMjRixAjFxcWd8fxferCo0+nUjTfeKJvNptdee63Je7z00ktasmSJ5s2bp3/913+V9I+d6JI0f/583XLLLb7zV61apZycHE2YMEEvvviiJGnlypVauHChHn300SYhviSVlZX5fiAAAAAAdDXmzm4AAAAAQOtceeWVevjhh2UYhv76179q5syZGjdunH7zm99owYIFKioqavF7rVq1Sg0NDXr88cdPC+HvuusuxcfHa926dafVDRo0SNOnT29ybPr06Ro0aJA+++wzlZSUNHktPDz8tPcgQAcAAEBXxu9LAgAAAN1Ydna2pk2bpry8PG3btk27du1SYWGh3njjDb399tv6j//4jybz0JuzY8cOSVJeXp4KCgpOez04OFj79u077fiYMWNkNjfdm2M2mzVmzBgVFRXp+++/V3p6uiZMmKBnn31WCxYsUEFBgTIzMzV27FjfuBcAAACgqyJEBwAAALq5yMhITZw4URMnTpQkVVZW6tlnn9Wbb76pxx57TJmZmafNMv+/ysvLJck3fqWlrFbrGY9bLBZfL5KUlJSkv//97/rzn/+sjRs36sMPP5QkDR48WHPmzPH1DgAAAHQ1hOgAAABADxMVFaUnnnhCGzdu1OHDh/Xjjz9q5MiRv1gTGRkpSfr66699f24Jl8t1xuNut9vXy8+GDBmiZcuWqb6+Xrt379bnn3+ulStX6sEHH1SfPn104YUXtvj7AgAAAB2FmegAAABAD2QymdSrV68mx8xmszwezxnPT01NlfSPsS4t9c0338jr9TY55vV69c0338hkMmnYsGGn1YSEhOiCCy7QnDlz9Nhjj8kwDH322Wdn9X0BAACAjkKIDgAAAHRTq1atUmFh4RlfW79+vfbs2aPo6GgNGTJEkhQTE6PS0lLV1taedv6MGTMUHBysJ598UkeOHDnt9YqKCn377benHS8qKtLq1aubHFu9erWKiop0+eWXKz4+XpK0a9cuVVVVnVb/8471sLAwP1cLAAAAdA7GuQAAAADd1Oeff66cnBwNHDhQY8aMUZ8+fVRdXa3vvvtOX331lcxms3Jycnzz0MeNG6ddu3bprrvu0kUXXaSQkBBdfPHFuvjiizVkyBDl5OToj3/8o6699lpddtll6t+/v06cOKFDhw5p69atmjJlihYsWNCkB7vdroULF2rjxo1KSUmRw+HQp59+qri4OD322GO+8/77v/9bf//733XxxRerf//+ioyMlNPp1Oeff67Y2FjddNNNHfrvDgAAAGgpk2EYRmc3AQAAAODs7d27Vxs2bNDmzZu1f/9+FRcXS5L69u2rCy+8ULfddluTWegnTpzQ4sWL9emnn6qkpEQej0ezZ8/WAw884DunsLBQr732mr788kuVlpYqMjJSiYmJysjI0OTJk5WcnCxJ2rJli+644w7Nnj1b48eP19KlS7Vr1y6ZzWaNHz9e/+///T8NHDjQ9747duzQmjVrtG3bNh07dkx1dXXq16+f7Ha7Zs6cqcTExA76twYAAACcHUJ0AAAAAGftn0P0fw7hAQAAgJ6GmegAAAAAAAAAADSDEB0AAAAAAAAAgGYQogMAAAAAAAAA0AxmogMAAAAAAAAA0Ax2ogMAAAAAAAAA0AxCdAAAAAAAAAAAmkGIDgAAAAAAAABAMwjRAQAAAAAAAABoBiE6AAAAAAAAAADNIEQHAAAAAAAAAKAZhOgAAAAAAAAAADSDEB0AAAAAAAAAgGb8f9dL1SQw8SszAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Get the TensorBoard event file for the training run\n", - "log_dirs = !find pretraining_demo/evo2/dev -name \"events.out.tfevents*\"\n", - "tf_event_file = log_dirs[0]\n", - "\n", - "# Extract data from your event file\n", - "df = tensorboard_to_dataframe(tf_event_file)\n", - "# You can uncomment and modify this to plot multiple metrics once you see what's available\n", - "plot_multiple_training_metrics(df, [\"reduced_train_loss\", \"lr\", \"grad_norm\", \"val_loss\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now you have a checkpoint that you can try out in place of the converted evo2 checkpoint in the BRCA-1 tutorial \n", - "(the path is displayed in the next code cell). To test your checkpoint, please supply the following path to the saved \n", - "checkpoint produced by this notebook as the `--ckpt-dir {checkpoint_path}`\n", - "argument to the `predict_evo2` command in the zero shot BRCA tutorial. For the 1b checkpoint you should see AUC above\n", - "0.73 if you successfully fine-tuned the checkpoint for your hardware, or to check that your hardware works with the \n", - "converted checkpoint from hugging face as is.\n", - "\n", - "In our experience running this notebook for up to an hour on a single GPU is not sufficient to recover BF16 accuracy. We\n", - "have more details about what did work in the Next Steps section below." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'pretraining_demo/default--val_loss=0.8664-epoch=0-consumed_samples=800.0-last'" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "final_ckpt_paths = !ls -d pretraining_demo/evo2/checkpoints/*-last\n", - "final_ckpt_path = final_ckpt_paths[-1]\n", - "final_ckpt_path" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Next steps\n", - "On a small number of devices, or with the small demo fasta we provided in this tutorial, it's possible you are not at the needed\n", - "1.08 loss level to get good downstream accuracy out of this checkpoint. You can try increasing the `MAX_STEPS` parameter in the training cell,\n", - "or running a larger cluster with more GPUs. The following loss curve was generated with a global batch size of 256 at 8192 context or approximately\n", - "2 million tokens per step. With that configuration we see a good loss of 1.08 after approximately 100 steps. The following figure shows our\n", - "learning rate across the first 500 steps of fine-tuning with a global batch size of 256. Later on in this notebook we also show the slurm script\n", - "to replicate this on your cluster.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "jupyter": { - "source_hidden": true - } - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAB2YAAAK0CAYAAADYjV3sAAAJ7GlDQ1BJQ0MgUHJvZmlsZQAAWIXtmWdQFNkWgG93Tw6EmSGnIec0MDCAwJCzBMmigEPODFHEhCyuwIoiIgLKgogCLhiWIGsABDGwKCigou4gi4CyLq4iKig76Nvaem+r3o/398291X2+PvdW9617uqq/qgZAuj48JSUBZgOQmJTO9XGypQcGBdOxcwABBCAMABAO56Sl2Hh5ufMZ/BX/vb0bA9BavKe7dq9/jv/XRoiITOPw4wr/mOSkcNMBgHh8Vs5KT+EzjOIzjctfFJ8l1zj6K+us8ZavzP4yx9fHjs++AOBIW74wIWyNo78wJWGNOTHhEQBIZ/Pn6/zruWtN197B3Z3uZ8RgGHk70O2SE5K5dC9uclRsQqQO3d6EaWCgQ18fnh6bGRnqz/hr4brAHjgAd36nAz9gBBj8bgS8+Tk6sAPJIIF/cPnsxT8ngygQy89EAh1+xh6YACYw4Pe1q/UgHKTzRzP5o6HAHzD+5xX9s6VHZqevRbvklK3c2OiYdLoNv0KRdJckjp4OnWHAYAKwVu//rOPfuaKTALD7+TWZ+Dvn5AzAOQ0AyLV/55QNARCiAtCTwMngZn7NrZUOoL+8RzQgBeSBMtDgbxsDmAILwOZvlCvwBL4gCGwGHBADEvkblQVywW5QAIrAAXAYVIIaUA8aQQs4DzrAJdADroPb4C4YBROAB6bBS7AA3oFlCIKwEBmiQlKQAqQKaUMMiAVZQQ6QO+QDBUFhUDSUBGVAudAeqAgqhSqhWqgROgddhHqgm9Aw9BCahOagP6CPMAKTYBosB6vB+jALtoHdYF94ExwNp8I5cD68H66A6+AzcDvcA9+GR2Ee/BJeRABCRMQRRUQXYSF2iCcSjEQhXGQHUoiUI3VIC9KFDCD3EB4yj3xAYVBUFB2li7JAOaP8UBxUKmoHqhhViTqNakf1oe6hJlELqM9oMloWrY02R7ugA9HR6Cx0Aboc3YBuQ/ejR9HT6HcYDEYco44xxThjgjBxmG2YYswxTCumGzOMmcIsYrFYKaw21hLriQ3HpmMLsEexZ7BXsSPYaex7HBGngGPgHHHBuCRcHq4c14S7ghvBzeCW8SJ4Vbw53hMfgd+KL8HX47vwd/DT+GWCKEGdYEnwJcQRdhMqCC2EfsJjwhsikahENCN6E2OJu4gVxLPEG8RJ4gcShaRFsiOFkDJI+0mnSN2kh6Q3ZDJZjcwmB5PTyfvJjeRr5Kfk90JUIT0hF6EIoZ1CVULtQiNCr4TxwqrCNsKbhXOEy4UvCN8RnhfBi6iJ2ImEi+wQqRK5KDIusihKFTUU9RRNFC0WbRK9KTpLwVLUKA6UCEo+5QTlGmWKilCVqXZUDnUPtZ7aT52mYWjqNBdaHK2I9gNtiLYgRhEzFvMXyxarErssxhNHxNXEXcQTxEvEz4uPiX+UkJOwkYiU2CfRIjEisSQpI8mWjJQslGyVHJX8KEWXcpCKlzoo1SH1RBolrSXtLZ0lfVy6X3pehiZjIcORKZQ5L/NIFpbVkvWR3SZ7QnZQdlFOXs5JLkXuqNw1uXl5cXm2fJx8mfwV+TkFqoKVQqxCmcJVhRd0MboNPYFeQe+jLyjKKjorZijWKg4pLiupK/kp5Sm1Kj1RJiizlKOUy5R7lRdUFFQ8VHJVmlUeqeJVWaoxqkdUB1SX1NTVAtT2qnWozapLqruo56g3qz/WIGtYa6Rq1Gnc18RosjTjNY9p3tWCtZhaMVpVWne0YW0T7VjtY9rDOmgdM50knTqdcV2Sro1upm6z7qSeuJ67Xp5eh94rfRX9YP2D+gP6nw2YBgkG9QYThhRDV8M8wy7DPxhaDA6jinHfiGzkaLTTqNPotbG2caTxceMHTCrTg7mX2cv8ZGJqwjVpMZkzVTENM602HWfRWF6sYtYNM7SZrdlOs0tmH8xNzNPNz5v/bqFrEW/RZDG7Tn1d5Lr6dVOWSpbhlrWWPCu6VZjV91Y8a0XrcOs662dsZXYEu4E9Y6NpE2dzxuaVrYEt17bNdsnO3G67Xbc9Yu9kX2g/5EBx8HOodHjqqOQY7djsuODEdNrm1O2MdnZzPug87iLnwnFpdFlwNXXd7trnRnLb4Fbp9sxdy53r3uUBe7h6HPJ4vF51fdL6Dk/g6eJ5yPOJl7pXqtdP3hhvL+8q7+c+hj65PgMbqBtCNzRteOdr61viO+Gn4Zfh1+sv7B/i3+i/FGAfUBrAC9QP3B54O0g6KDaoMxgb7B/cELy40WHj4Y3TIcyQgpCxTeqbsjfd3Cy9OWHz5VDh0PDQC2HosICwprCVcM/wuvDFLS5bqrcscOw4RzgvI9gRZRFzkZaRpZEzUZZRpVGz0ZbRh6LnYqxjymPmY+1iK2NfxznH1cQtxXvGn4pfTQhIaE3EJYYlXkyiJMUn9SXLJ2cnD6dopxSk8FLNUw+nLnDduA1pUNqmtM50Gv8jO5ihkfFNxmSmVWZV5vss/6wL2aLZSdmDW7W27ts6k+OYc3IbahtnW2+uYu7u3MntNttrd0A7tuzo3am8M3/n9C6nXad3E3bH7/45zyCvNO/tnoA9Xfly+bvyp75x+qa5QKiAWzC+12Jvzbeob2O/HdpntO/ovs+FEYW3igyKyotWijnFt74z/K7iu9X9UfuHSkxKjh/AHEg6MHbQ+uDpUtHSnNKpQx6H2svoZYVlbw+HHr5Zblxec4RwJOMIr8K9ovOoytEDR1cqYypHq2yrWqtlq/dVLx2LODZynH28pUaupqjm4/ex3z+odaptr1OrKz+BOZF54nm9f/3ASdbJxgbphqKGT6eSTvFO+5zuazRtbGySbSpphpszmufOhJy5+4P9D50tui21reKtRWfB2YyzL86FnRs773a+9wLrQsuPqj9Wt1HbCtuh9q3tCx0xHbzOoM7hi64Xe7ssutp+0vvp1CXFS1WXxS6XXCFcyb+yejXn6mJ3Svd8T3TPVG9o78S1wGv3+7z7hvrd+m9cd7x+bcBm4OoNyxuXbprfvHiLdavjtsnt9kHmYNvPzJ/bhkyG2u+Y3um8a3a3a3jd8JUR65Gee/b3rt93uX97dP3o8Jjf2IPxkHHeg4gHsw8THr5+lPloeWLXY/TjwiciT8qfyj6t+0Xzl1aeCe/ypP3k4LMNzyamOFMvf037dWU6/zn5efmMwkzjLGP20pzj3N0XG19Mv0x5uTxf8Jvob9WvNF79+Dv798GFwIXp19zXq38Uv5F6c+qt8dveRa/Fp+8S3y0vFb6Xen/6A+vDwMeAjzPLWSvYlYpPmp+6Prt9fryauLoqcAGBCwhcQOACAhcQuIDABQQuIHABgQsIXEDgAgIXELiAwAX+f10gLcro618LiGTPV4FfVlffqAGALQPgU8nq6nLt6uqnOgCQCQC6M/4EhAgnTns+0HwAAACKZVhJZk1NACoAAAAIAAQBGgAFAAAAAQAAAD4BGwAFAAAAAQAAAEYBKAADAAAAAQACAACHaQAEAAAAAQAAAE4AAAAAAAAAkAAAAAEAAACQAAAAAQADkoYABwAAABIAAAB4oAIABAAAAAEAAAdmoAMABAAAAAEAAAK0AAAAAEFTQ0lJAAAAU2NyZWVuc2hvdOcBy3sAAAAJcEhZcwAAFiUAABYlAUlSJPAAAAHXaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjY5MjwvZXhpZjpQaXhlbFlEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj4xODk0PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Cgn3WtQAAAAcaURPVAAAAAIAAAAAAAABWgAAACgAAAFaAAABWgAAcxuTUmXkAABAAElEQVR4AezdB7gcVd0/8F8ooYQOCTUhQGhBwfJXIFYUQRGEF0MSmoIdpQmhqBggdIgFUARp0hOMojQBRVEkIKAomlhoIaEmoSahJED+czbMZHfv3rb37uYm+czz4J6ZOefM7Gdm877P873nnF7zsi1sBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINAwgV6C2YbZ6pgAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIlAcGsF4EAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQINFhDMNhhY9wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBDMegcIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQYAHBbIOBdU+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAHBrHeAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRYQzDYYWPcECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAQzHoHCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0GABwWyDgXVPgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABwax3gAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAg0WEMw2GFj3BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQEMx6BwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBgAcFsg4F1T4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAcGsd4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQINFhDMNhhY9wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBDMegcIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQYAHBbIOBdU+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAHBrHeAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRYQzDYYWPcECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAQzHoHCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0GABwWyDgXVPgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABwax3gAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAg0WEMw2GFj3BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQEMx6BwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBgAcFsg4F1T4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAcGsd4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQINFhDMNhhY9wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBDMegcIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQYAHBbIOBdU+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAHBrHeAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRYQzDYYWPcECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAQzHoHCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0GABwWyDgXVPgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABwax3gAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAg0WEMw2GFj3BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQEMx6BwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBgAcFsg4F1T4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAcGsd4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQINFhDMNhhY9wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBDMegcIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQYAHBbIOBdU+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAHBrHeAAAECBAgQIECAAAECdQjMnDUrjj/hpKLliiusECefdEKxrzBf4IyzvhfPPjut4Bh55OGx3rrrFvtLcuGhhx+OG268OWZn79Iuu3wq3vuedy8yHN8+blS89trrxf2edOKo6NOnT7GvQIAAAQIECBAgQIAAAQItBQSzLU0cIUCAAAECBAgQIECAQLsCzz//Quyy2+5FvZVXWil+e+vNxb7CfIER++wfkx9/vOC44rJLYtNBg4r9JbXw+uuvx257fDZefvnlEkGvXr3i52Ovjg02WH+RIPn4Tp+K2bNnF/d6y803xGqrrlrsKxAgQIAAAQIECBAgQIBASwHBbEsTRwgQIECAAAECBAgQINCugGC2XaJSBcFsbae7JtwdRx51TMXJg7/+tdhv330qjvXUHcFsT30y7osAAQIECBAgQIAAgZ4sIJjtyU/HvREgQIAAAQIECBAg0GMFBLMdezTNDmavv/GmuPTSy0o3t9xyy8XYq6/o2I02udarr75aGjE7K5vGON/GXnVFDBy4Yb7boz8Fsz368bg5AgQIECBAgAABAgR6qIBgtoc+GLdFgAABAgQIECBAgEDPFhDMduz5NDuYPffH58VVV48t3dzyyy8fd9x+W8dudCHUmjhp0vw1ZrMpgT/1yZ1jyPbbLYS7qO+Sgtn63LQiQIAAAQIECBAgQGDJFhDMLtnP37cnQIAAAQIECBAgQKBOAcFsx+CaHcwef+JJcettvy3dXE8PZjsm2DNrCWZ75nNxVwQIECBAgAABAgQI9GwBwWzPfj7ujgABAgQIECBAgACBHirw4osvxic//Zni7lZeaaX47a03F/sK8wX23vdz8djkyQXHFZddEpsOGlTsd3fh6wcfGn974O+lbgWz3a27oD/B7AILJQIECBAgQIAAAQIECHRUQDDbUSn1CBAgQIAAAQIECBBomsDTTz8Ts2bPX3tzlZVXjrXXXrt07bfeeivu/PNd8a+Jk2K53r3jHVsNju2227bmfc3Opof997//E//Kpot99ZVXY7PNNo0tNt8s1ltvvejVq1fNNrUOPvX00/GPfzwYDz38SPTfYP3YYostYvOsrzlz5sRHP75T0aS1YHby5Mdj7htzi3oDN9wwll122WI/L8ycOTOeefbZfDdSf+uss06x31rh1ddei0ceeSQeeujhmDJlaqy//vqxefY9Nx20SaRgsq1t7ty5pe+VptRN5htvNDBz2qz0ucwyy7TVtOLcSy+9HA/+85/xz3/9K1ZZZZXYMjPaavCWpet/4UtfjUn//ndRvxHBbHovXs78Zs2cFYccfkT2XZ4uXS+9IxddeH5x7VRYd511Y6WV+hTHuvquTZ8+PR559LF4NPtv2vRp2TvSPzbeeKPMYPN2/VObN996s7iXTTbeOJZaaqliPxVau7907n//eyjuve++7LozYu1+/WKzTQfFu961Tc33K9Xvzq2eYHbevHnx1FNPxX/++7/Sf+k5bLH55qX3dbVVV+3w7b3wwgvxwN//EU888WQ8/8LzseGADUt9DNpk4+idPfO2tvTO33vf/aW2Tz/zdPRdq2+pbfrNpN+cjQABAgQIECBAgAABAo0UEMw2UlffBAgQIECAAAECBAjUJXDUMd/OAtg/l9p+6IMfjLPOODVefvnlOOSwI+K///tf0ee2739/nP2DMcV+Krz55ptx0SU/i8svv7Ii9Morvefd74rRJx4fa625Zn6o5mcKG48/cXTc85d7W5x/1zbbxHe/c2wMG7FfcY3Wgtk9hw6PFO7m27hrrsyCpAH5bvF5829ujdEnn1Lsf2LHj8dJ2X22tqWA6fIrr4qfXXZFpHL1ttqqq8Whh3wjdvnUztWnSvu3//4PccaZY0qBZnWFdbIg/OSTTsiC762qT1Xsp0D0J+f/NK68+ppIoVv5lvo47tvHxs8uvyLu/+vfilONCGbP+dF5cfU189eVLS7USuGUk06Mj39sh+JsPe/a66+/Hr/69Q1xVXbNadOmFX2VFzbYYIMYffx3Y3AWULe2dSTcLL+/7bfbLn7wvTMjTaN94kmnxF/ubflupkD8jNNOibXWWqu1y3bL8Y7ce/mFHn3ssRh90qlZIPvf8sNF+TO77hqHHfqN6NNnQWhenHy7kP4Y4oorr87WEL4mXnn11erTsdxyy8U+e4+Iz+2/b6xQ448Sbrvtd/GTC34aTz/zTIu26cCOH/9YHPKNg4o/BKlZyUECBAgQIECAAAECBAh0QUAw2wU8TQkQIECAAAECBAgQaIxAeRi11eDBcf5555ZC2b//4x8VF/z8/vvFQV/7SnHsyWw03ndHnVgxQrM4WVZYY4014szTT2k1ePz3f/4bx37rO/FsK6Fb6mqF5VeI1+e8HimcTFszg9n/ZiMOUzCXwq72tlNPHh0f2+GjRbXXshG2Z5z1vfjNLbcWx2oV0ojZkUd8M/bYfbdapyNN5fyt73y3NHKxZoXsYBqZvMIKK8Qrr7xSVOnJwWxH3rXf/+GOGPP9H2bh6PPFd2qtsPTSS2dB6lnx/vf9v5pVOhJulv8WNho4MI49ZmR857jjY8Zzz9XsMx1Mo7KvueryTo0Mb7WzVk505N7zpldfMy7Ov+DCmDN3Tn6o5ue62Qjx40cdF+/aZuua508+9fS48ab2pwtPofjVV/ysYvRs9R8+1LxAdjCFu5dceEFsko2+tREgQIAAAQIECBAgQKC7BQSz3S2qPwIECBAgQIAAAQIEuixQHkb1y6ZoTaNcb7n1tqLfNN1rCkRPP/Xk+OhHPlw6nvbTtLnlI/LWzEbFvu+97ylNr/ufbKTtgw/+s+hj0CabxOU/u7jF1LFp9OnwvferGOWaGqUpegduOCCbLviJePGlF4t+8kKzgtmZs2bFPvt+LqbPmJFfuvSZrr9pNpXtjBnPxZSpU0vHPviBIdlo49MqArofnH1ujLv250XbFERt+/73xfrZFM9Tn3gi7r33/iJAS1MB/3zcNdGvX9+ifl4YdcLouO23v8t3S5+p/qBs/djns6lm8+mEKypkOz05mO3Iu/aviRPjS185qOJrrbfuuqXpi6dNy6Y1fuTRYhR1qpSmyP3ZxRdWPIO8cUfCzfLfwtJLLZ2l3fNHhac+0u9glZVXqfk+jjnz9EjPv1FbR+49XTtNPX7UMd+quI30nmyeTfWcRsE//viUitHWq2ZTGo+7+opYbbXVKtpU95OmA99m63dm/21d+q3+PZvaOB8J+61jjordP7PgDwqee+75+OywEZH+KCHfNtt009J7n0bepmea/tghbTvv9Ik4MRvpbCNAgAABAgQIECBAgEAjBASzjVDVJwECBAgQIECAAAECXRIoD6PKO0oh7G67fjq22/b9kdZkXS6brjSfsnTctePjB2efU1Tf8WMfi28de1TF1Ki/vv6GOO2Ms4o6o08YFTt9YsdiPxUuu+LK0vS8+cE06jEfOZqvTXtftkZlGnX6xJNP5tWaNmL2pJNPjZt+c0tx3bSObAqi0vfI7y+N+L3gpxdG+n4pUM63FD6l8Dpf1zSF06edclL0779BXiUmTfp3HPbNIyMFwGnbY/fPxLFHjyzOp8I///mv+PLXvl5xLE1Fe9TIbxbrm6b1P884a0zcd/9fK+o1IpjNL5BGke76mf/Ld0trvN5x+4JAvzhRVqjnXSuNFH7gH7HfviOy93HXWHXVBcYPPPD3OGLkMfHqawum2v3+mDNjyPbblV11frEj4Wat+1s9Cy0PO/TgSFMbp2undYyPPOqYSCPG8y1Ny3vy6BPy3W7/7Mi9p5HSI7I/Iiif7jlNI338d79TjGadOvWJOPyIkRX3/sksHD2hKhz9/g/OjmvH/6L4HsccdWT83x67F/tpCvNfXvereDxbZ3nkEYcXx1Phjj/+KY799nHFsVo299zzl/jFL3+VTXM+qjTKu6isQIAAAQIECBAgQIAAgW4UEMx2I6auCBAgQIAAAQIECBDoHoFaYdR+++wdB2frP9ba0rS6e+41opgyd+DADeOySy4qTUtaXf/rBx8af8vCs7Sl0XYX/ORHRZU0gm+PPfeqWL/ym4cdGsOHDS3q5IXp06fHbnt8Nt9tSjD71789EN845LDimqnwkx+dE+/ORhR3ZPvil78WEydNKlVNI2Uvv/Ti2DAbBVy9XXTxpdk6vZeWDqdg+g+/u7UI0tLBr2Sh7INZOJtvH/nwh0rrmub75Z9Dh+8TT2QjcfNtUQhm23rX0vdIa7yusEL2RwHZNM21trPP+VFcM+7a4tSXv/SF+OKBBxT7eaEj4Wb1byGN9EzTcK+TTftbvqUpftNUv/mWpmW++MLz891u/+zIvZ/74/Oy9WAXrP2b1iz+6fk/bjFKPU0ZPnTY3hVrJf/kx+fGu9+1TXHfhx5+ZNx7333F/i/Hj4s0UrkjW1rnOE2lnG9p7eNdP71LvuuTAAECBAgQIECAAAECTRMQzDaN2oUIECBAgAABAgQIEOioQHUYteGAAaU1M9PUrbW2P981IUYefWxx6qgjj4jP7rlHsV9eSEFRCozSlkYe/uam64vT92YjYQ89/IhiP02FfF0WAPXOpl6t3lI4t8tuC0bsNWMq4/POvyAuv+Kq4lY+/KEPZiHdqcV+W4U0evFjn/hkUSWNXDzlpBOL/fJCGnF74Be/XBxK65VulK1vmrY5c+bERz++U7G2bjqWAt7NNts0FVtsI/bZPyY//nhxvKcHs+29a8UXaaPwx2yE5jFlIzRTCJjCwOqtI+Fm9W+hes3gvM80anbEvvvnu9G3b9+44VcLRpgWJ7qp0JF7//yBX4r/ZlOI51tr957On3TKaXHTzb/Jq5aC7BRo59sPzzk3xo5bMAX3J3b8eMk0/YFBe9uf7vxzHH3st4tqm2y8cXx/zBmx9tprF8cUCBAgQIAAAQIECBAg0AwBwWwzlF2DAAECBAgQIECAAIFOCVSHUdVrRlZ3Vj39cFojcv3116uuVtpPa6j+9KKLi3O33/abYrrja38+Pr7/wwXTIbe13uTCCGa/eeTRcfc99xT33lbQVVR6u/DPf02ML391wYjjfUYMj4997KNvn638eOmll0tT4+ZHy9crffjhR2K/zx+Yn4q+a60VN/z6l8V+dWFRC2bbe9eqv19a2/if//pXPPzwo9n6so/Ew48+Go8++ljMensq6FT/ve95T/z43B9WN42OhJvVv4XWnvmLL70Un9xlwbqqaR3XP/6hcg3gFjfQhQPt3Xty2SEL8F/Pgvx8++Pvf1tzFHs6Xz3dcPUo7H88+GAc9I1DK/4gIP1hRZpq+1Of2jkG9O+fX6bFZ5qWe/iIfUtrH+cney/bO3baacfSyNl3bbN1ftgnAQIECBAgQIAAAQIEGiogmG0or84JECBAgAABAgQIEKhHoKNhVN73qBNGx22/rS+EuvqKy2LjjTcqdZXWjb3uV7/Ou43P779fHPS1rxT75YWFEczutvueMX3GjOI22hqpWlR6u/CrX98Qp5+5YH3d6vNt7R+Zrdm512f3LFX57e9uj+8ef2JR/Z3veEdceMH8EcjFwbLCohbMthZ8ln2lUjGtZ3v99TfGr7J1i8vXUK2ul/Zbm1a4vXAzte3ob2H27NmloDe1SVsKHv90R32/ifk9tP2/7d3741OmxPC99ys6WW3V1eKWmxeMTi9OvF2oHqW9/nrrxS9+vmAa5FStfLR7dfsUfu+7z4iaa/mmun//x4OltZNff/316qaRRknvNfSz2Zq1n4k0dbeNAAECBAgQIECAAAECjRIQzDZKVr8ECBAgQIAAAQIECNQt0NEwKr9A9Zqn+fGOfF479qpitN0RI4+JCXffXTQ77JBvxN7ZyNJaW7OD2blz58aHPvrxilu58frrYq1suuWObGmNzbTWZj3bsUcflY1MnD8a88qrr4kf/fgnRTcf+sAH4qwzTyv2qwuLYzB7222/i9GnnBpvvPFGxdddMVtzdtCgTWLVVVaNO++6qzi3JAaz1dOCp/Bz3DVXFibVhaeefjr2HLrgt7bMMsvEnXfcHr169aqomvo9M/sDiieefLLieL6zw0c+EseP+k4sv/zy+aHi84knnowzzhoT993/1+JYeWHTQYPilJNPLP49KD+nTIAAAQIECBAgQIAAge4QEMx2h6I+CBAgQIAAAQIECBDoVoHOBrOjTz41bv7NLcU97LnH7rHa6qsV+20VDvjc/sUasqeedmZcf+ONRfX999s3vnHQV4v98kKzg9l07c/839CK0ZmXXHhBDB68ZflttVq+8aab4+RTTy/Ob7/dtrHlllsU+20Vdv7EJ2LDDQeUqtxy621xwuiTi+qthY55hcUtmE0jhk848eR48603S18xBYg7fWLHbLTl7tnI2C0jrYOcRmd+7esH5wRL5IjZp59+Jv5v6LDCYKWVVorf3XpzsV9dqJ5qe+DADWPsVbX/kCBNk5zWlR7/i+uykPX+mDdvXkV3af3Zk048vuJY+U4anTt+/C/id7f/vmKq5VQnTYl8yUU/jZVW6lPeRJkAAQIECBAgQIAAAQLdIiCY7RZGnRAgQIAAAQIECBAg0J0CnQ1mr7zq6vjReecXtzDquO/ELtm6k53dfnbZFXH+Ty8smn1y553ihFHHFfvlhY4Gs0OH7V0xuq+16Ydv/s2tMfrkU4pL1AqXDj9iZNzzl3uLOune0j12ZJs06d/xhS8vCJl3/fQucdy3j+1I04o6Dz74z/jKQd8ojq3dr1/8+rrxxX51YXELZnfZbY94/vnnS18zreN6xumnxnbbvr/iawtmoxSWpumOX3nllcLmd7f+ptXAM01FnqYkz7cdP/6xOHn0Cfluq5+PPz6ltGb07b//Q0Wd1n5n5ZXSWspjx10bl195Vbz55vygPZ0/7NCDY+/hC0Ll8jbKBAgQIECAAAECBAgQ6IqAYLYretoSIECAAAECBAgQINAQgc4GsxPuvieOGHl0cS8f/ciH4/RTF4zqLE60U7j1tt/G8SeeVNRaY/XVS6HjsssuWxzLCx0NZqunWf7h98e0CPJSn9Xhcq1g9uxzfhTXZEFSvrW3vmteL32++tpr8bEddy5GF/Zda63SGp69s3CxM9v06dNjtz0+W9Hk8p9dEpttOqjiWL7TzGB21qzZsePOn8ovXRq9+ttbboo+fVof/diZd23q1CdirxH7FP2naXNPO3XB+5KfEMzOl/jSVw6Kf02cmLPEMUcdWRpZXBwoKxx51DFx14S7iyNf++qXI41m7+h2ymlnxA033lRUb+taRaW3C7+55dY48aQFfxTR0VC4uh/7BAgQIECAAAECBAgQaE9AMNuekPMECBAgQIAAAQIECDRdoDNhWbq5mbNmxbDh+8QLL75Y3OupJ4+Oj+3w0WK/I4W0zuVew/YppqlNbY45amQWJn2mRfOLLr40Lrrk0uL4ytlUrb+tMVXrcd89IX73+98X9fbJ1qw9NFu7tny7449/iuNGnVCxZmmtYPb+bG3Mgw/7ZnnTaG108GtZEPvqq6/G6lm4nG+HHn5k3HvfffluDP3snjHyiMOL/Y4U0rSxe2XW5Wt8pql8R58wqkXzFLR9+7hR8frrrxfnrrjskkhreTZq+9gnPlkxSvPMbETrhz/0wVYv15l37c4/3xVHHfOtoq/tt9sufvC9M4v9vHDJpZeVRnHm+61N95xGlM6ePTuvFrfcfEOstuqqxX4qdPT+Uj+pv3zrvWzv+NMdv8t3u/3zEzvvUvrd5R3fctP1sdpqldOHX37FVXHe+RfkVaJ//w3iZxdf2CIonzhpUqQQN5+SeOmllo7LLr2otF5v3jit5/unO/8cO3z0Iy3WnU11rvvV9aX1Y/P61etD//FPd8a2739fzbVnH58yJYbvvV/eNNpbN7moqECAAAECBAgQIECAAIFOCghmOwmmOgECBAgQIECAAAECjRfoaBhVfifVU6Euv/zy8YUDPh97jxgW5SNeUwhz2eVXxtbvfGfssftu5V2UyiedclrcdPNviuMpJEpB6s477RirrLJK/Cdbn/Kqa8ZG9dSprQWzF1x4cVz6s8uK/pZbbrk44vBDI4V6KQi+K1sr8+qx4yqmUk2VawWz6XgKcNPamPnWq1ev0sjCtMZpv3594+WXX87W3fxrnPeTCyKt0/m9s87Iq8aTTz4Z++x/QEVQ+uldPhWHfOOgilAt9THu2vEx47nn4lvHHFW0zwtpPd+0rm/5ttuun44DPr9/rL/eejFl6tS47bbfxSXZ907rgZZvjQ5m9//8F+Khhx8uLvm+//feOOKbh8VGAwfG3LlzS8fL34fOvGuPTZ4ce+/7uaLvZH/BeT+Krbd+Z+nYs9Omxbk/Oq/i+aQTi2Mwm4LM9FvKt/SefWDI9vlu6TOFqZ8/8EvxyKOPFsc332yz+E42hfagTTaOOXPmxN33/CXSb658yuN99xmRvZNfL9qkwo+zqcqvyKYsf9c225R+P5tttmlxPo2UPugbh1Q89/Oz5/KubbYu1Ukj6tOI3HXWXju+mf320n0uvfTSpXPpnRjz/R/Gr6+/oejvK1/6YnzhwM8X+woECBAgQIAAAQIECBDoLgHBbHdJ6ocAAQIECBAgQIAAgW4T6ExYVn7RI0YeExPuXjAdajqXAtoNBwzIRumtmAVJU+O5LGxM2xprrBG/uPaaWGGFFUr7+f88+dRTMWzEvi2C0nQ+harloz/zNumztWC21ijc8nZ5edCgTeLJJ57Kphx+tXSotWA2haUpFCsfaZn3kYLjmTNnFiMP0/E0pXOa2jnfrrz6mvjRj3+S75Y+U0i1wfrrl4Ldp556Op5+5pkiUL3kwgti8OAtK+qnsHX4PvtFmtq3emvLKNVtdDB77c/Hx/d/eE71bcWA/v1jxowZMeq736nw6My7ltYh3WPPvWJ61k/5lgK/ZXsvW9Mj1Vscg9nq39oyyywTad3iY48eWU5Tmsr4K1/7RvE+5SdXXHHF0m+pfG3XdG69ddeNq6+8rGJk67333R+HffPIivc6vetbbrFF6fcy8V+TKka5p9/SpRf9tPQHGen3sv/nDqwYTZ/WBt4sC4hXyka5/+Mf/4hXspHl+bZi9u/BZZdeXBrdmx/zSYAAAQIECBAgQIAAge4SEMx2l6R+CBAgQIAAAQIECBDoNoHOhGXlF01h5Tnn/jh+fcON5YdrllM4c/ppp2QjV7dtcf4Pd/wxTspGhJYHNtWV0vS9abRdqpu21oLZdC6NLk2jTFvb3v++98Vpp4yOrx50cDz8yCOlaq0Fs+nkpH//p3R/aQRnW1v6jl8/6GsxfNjQoloKVcdl4eUFP70o0nTH7W1fPPCA+PKXvtCi2qOPPRbHfOs7rYaRqUEaHZl8f3b5FUX7RgezKThPo1pTIF5r23v4sDjs0IOLU51916qnMy46eruQphD+0hcPjDR1bpqiN22LYzCbRox/57vHv/2t53+kEdpjr1rwrPOTyez0M8cUfxSRH6/+TCOPR33n27HBButXnEojsM8997y48667Ko7X2km/w4svuqAUxKfzaZrzSy75Wfz8F7+smCq8VtullloqTjz+u6XR6rXOO0aAAAECBAgQIECAAIGuCghmuyqoPQECBAgQIECAAAEC3S7Q2bCs+gbuy0bYnZNNKfvoo49VjKRL9VJwk0b27bvv3rHWmmtWNy32J09+PE4YfXL893//qxipt+4668QXv3BAqY8rrrw6fvyT80tt2gpmUxiaRqmmKYvLtxSc7pFNQZymEk4jDo86+ltF+NRWMJv6SKHwxVngdN2vr4+XXnqpvNvSNK0f/MCQ0hTMaWrhWlua1njM98+Ovz3wQItRwClcTNO9HnjA57KRhQumjK3uJ00he+rpZ0QK3tL95NsqK68cu2ZTGx/01S/H/x56KL745a/lpxo+YjZd6KWXXo4TTzqlxejpdC59r/Lpnet5137/hzviez84uyJoTNMjv/td25Smyk3TJp9y2hlxw403pUsulsFseqe/l00B/IvrflX6jul/0sjrO26/rWLq8Pxkmh47jWRO6ylX/0FAGr2+f/Z7HD5sr0jhaGtb+l1fetkV8e/sDxPykeV53fT7+b/dP1Oagrh8XeX8fBrdfcGFF8Vf//q3itGz+fk0tfg3Dvpqxbq2+TmfBAgQIECAAAECBAgQ6C4BwWx3SeqHAAECBAgQIECAAIEeJ5BGTz762OR4LPtvjTVWj4032qg0XW9nbjSNwk3h7Izpz8U73/mOWHfddTrTvKJummY4rX/65JNPx1prrRmDt9wyVl11lYo69eykKXofeviRUkCbvuNGGw2sGY7V6jsFbFOyKZ7TSN0UbqW2/TfYoM2ArLqfFMqmdUST8yabbFJaP7StgK26faP2X3jhhXjkkUfjqWxq5jVWX730vTbccEC3XC69W6nvtM7qeuutW5pWt3cWtC9pW/r+D2fvXto22mhgDNxwwzbfnfx9S4F9nz59YvPNN2vzDyRSv9Vb6iMFrel3mQLxNE11GmWbptHuyDZ9+vSs7UOl6cBTu9R+5eyPCWwECBAgQIAAAQIECBBotIBgttHC+idAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYIkXEMwu8a8AAAIECBAgQIAAAQIECCwcgbHjfl6a2rYrVx9z5umx0kp9utLFEtE2rXP81FO1193tCEAaFTzquG93pKo6BAgQIECAAAECBAgQINCKgGC2FRiHCRAgQIAAAQIECBAgQKCxAmOyNUrH/+KXXbrILTffEKutumqX+lgSGu+z3+ezab0fq/urpimyr77ysrrba0iAAAECBAgQIECAAAECEYJZbwEBAgQIECBAgAABAgQILBSBG2+6Of5y731duva3jjkqVlxxxS71sSQ0PvfH58W0adPr/qr9+vWNQ77x9brba0iAAAECBAgQIECAAAECglnvAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBBouYMRsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV1AMLukvwG+PwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRcQzDac2AUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjSBQSzS/ob4PsTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBwAcFsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV1AMLukvwG+PwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRcQzDac2AUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjSBQSzS/ob4PsTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBwAcFsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV1AMLukvwG+PwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRcQzDac2AUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjSBQSzS/ob4PsTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBwAcFsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV1AMLukvwG+PwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRcQzDac2AUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjSBQSzS/ob4PsTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBwAcFsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV1AMLukvwG+PwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECDRcQzDac2AUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjSBQSzS/ob4PsTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBwAcFsw4ldgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBJV2g/WB2XkbUqw0m5/l4P1r/gfh9+H34ffh9tCbg3wf/Pvj3obVfR4Tfh9+H34ffR2sC/n3w74N/H1r7dfi/n/598O+Dfx/8+9CagH8f/Pvg34fWfh3+/wf/Pvj3YSH8+9B+MNv6T9YZAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOiAgGC2A0iqECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoCsCgtmu6GlLgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBDggIZjuApAoBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgS6IiCY7YqetgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOiAgGC2A0iqECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoCsCgtmu6GlLgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBDgi0H8zOy3rp1UZPzrft0wadUwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQILGICdeaj7Qezi5iD2yVAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEBPE2hqMDtnzpx4+eWZ0bt371hllZW7xeKFF16MyZOnxAvPvxhrrrVmrLfeOtG375rd0rdOCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0B0CTQlmn3762bj//r/HnX+6O1599dUYvNUWcdBBB3bp/h944MH45S9vihezYLZi69Ur3rXNO2L3PXaJtdZao+KUHQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECCwMgYYGs08/9Uycc+6FMWvmrIrv1pVgds6cuTFu3HVx71/+WupzpZVXikGDNorVVls1pk59Mh55+LHS8TWzUPboow+NFVdcoeLadggQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBsgYYGsykoveTiq4rv9NJLL8fcuXO7NGL2rbfeip/85NJSALvXsN1j++3fV/SfCvfe+7e44vJxpWMf+egHYujQz1Sct0OAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFmCzQ0mK3+Muec/dN46KFHuhTMpj5fffW1ePHFl2LdddeuvkRp/+yzL4iHH3o0BgzYII46+pCadRbGwWeenl5x2cOvmlaxP2r4wBjcv0/FMTsECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECCz6AotkMNse+9ixv4y7/vyX6NNnxTj9jOPbq96084LZplG7EAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIEeJdB+MDsvu99ebdxzJ87XHDHbifY176JG+3OzdW3/99+H54+YPSobMdtN99/R69eslx0sD2YffnZO/Oh3L1ZUHTygT4waNrDimB0CBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBHqQQI18suLuWjnffjBb0UvXdmoGs13rskXr//z7f3FetgbtvGwt2mHD/y8+9KHtWtRZWAfKg9lb/jk7bnlwdotbGTtyqxbHyg9MmjrbdMflIMoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFgGBxSKYnTNnTkyf/lzcffd9pf/mzpkbH93hg7HHHrvEUkst1WMeQ0eC2bbWmR0/YVpMeuIVo2p7zBN1IwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQ6JrDIB7PHHnNizJ79SvFt11xzjdhv/2ExaNBGxbFGFGbNeiXmvD6nU13PyQLjfPvR7S/Gw8+0bH/4J9eKzdZdLq9WfN7095fjpgdmlvbbG1VbNFIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKBHCCzywezhh3073nzzzQJzhRVXiPe9792x444fidVXX6043t2FFMzOmtlyKuKOXufwq6bVrDpond5x8Mdb3nd5/YN3XC0Grd27ZnsHCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBoX2Cddfu2X6kbayzywWyaxvjll2eW/ps08b8xYcJ9MXPmzOjTZ8X40pc/17CRs2n0a2eD2fIRs+VBa/XzPO/A9SsOlY+WTSe23GDF+NaeG1TUsUOAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQMcFevdetuOVu6HmIh/MVhuk8PPssy+IKY9PjVVXWzWOO+7IWH75llMDV7drxn75GrNtBbPl68ymdWXHT5je4vZMZ9yCxAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECPVZgsQtmk/S0aTPipNFnldD33W+v2G67/9cjHkBrwezgAX1i0pQF0yKn/VHDBpbuecSYiTXvvTy8rVnBQQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEeoxA+8HsvOxee7Vxv504f87ZP42HHnokBm+1RRx00IHzO+1E+5p30Ur7Y445MV6Z/UrssMOHYs/P7lqzaelgK+2LBl09X3QU0Vowm0LW0eMmFzXzYLa10bKpYl6naKRAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEDjBerMD9sPZrvx1msGs93Yf97Vm2++GSOPHBVvvPFG7LLLJ+JTu+yYn1qon60Fs2la4tHXTq4YNVvrRqtH1ho1W0vJMQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQI9T6DHBrPPP/9CPPPMtNhii01jqaWWKuRmzHg+fv7zX8c++3w2Vl11leJ4eeGBBx6MSy6+qnTooK9/IQYP3rz89EIrtxXMtjU6Nt3w0CF9Y9ITr1SEt4LZhfYoXZgAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBApwQaFszOmzcv5s59o+Jmzjvv4njk4cdiyy03iy99+XPFuaWXXiqWXnrpYv/pp56J0077YaQ+3r/te2P//YcV584958L43/8ejpVXXrk0RfHmmw/KyisV5x944J9xzdW/iFdffTXWWXftOOqoQ6J372WL8wuzUCuYzacknjR1dsV0xuX32Vqd/Hh5XWUCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBHqeQMOC2eeeez5OOP6MDn3jd737nfHFL+5X1L399j/Fr667qbTfp8+KcfoZxxfnnn762bjg/J9F6j/f+q3dN1ZffbV49tnp8eILL5YOL7/88jHyqINj7excT9lqBbNpJOzQIf1KtzhizMTiVlPomrbBG6wYg/v3Kf2X9qunPDZqNqnYCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPRsgR4ZzKaA9fRsxGxaI/YDH9g2Ruy9Z4Xi3LlzI4W3d999Xzz/3AsV59K0x9tv/77Y5dOfiFVWWbni3MLeaS+YTaNmUwjb1lY95bFgti0t5wgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0DIGGBbNd/XqzZs2OadNmxMYbb9hmVynEfSEbJfv663NirbXWiH791opll+0ZUxdX33itYLazwWr1lMemM65Wtk+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECg5wn02GC251F1/Y5qBbNjR27V6Y6rpzMunw65VmcdGYlbq51jBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAh0j0D7wey87EK92riY8237lNF1VzBbPWo2XaK1cLZ66uP8dlL9+e3mr2+bH/dJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEAbAnXmo+0Hs21c06nOCVQHs4PW6R0n77dp5zp5u3atwLU6nK1Vp9bFqtvVquMYAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1Cwhm67frdMvqYPaTW/eJA3Ya2Ol+8ga1gtfykHXEmIl51XY/rVXbLpEKBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBOoWEMzWTdf5huXB7C3/nF3qoCvBbOqger3ZdCyfpnj8hOlpt9hS+Jq2SVPmX7s48XahnvVuq/uwT4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBASwHBbEuThh0pD2bzi6yz7vy1XvP9ej5rhbPV/ZSPpE3n0mjbSU+8UhHSjho+MAb3nx/eVre3T4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBA/QKC2frtOt2yUcFsupG2wtnqUDa/8eqpkAWzuYxPAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAt0rIJjtXs82e2tkMJsu3Fo429oUxZOmzo7R4yYX92yd2YJCgQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEC3Cghmu5Wz7c4aHcymq1eHs62Nls3vdMSYiXkxBLMFhQIBAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBbhVoP5idl12vVxvXdL5tnzK6ZgSz5aNg2wtl061VB7mtja4t+xqKBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBJZcgTrz0faD2SWXtNu/eTOC2XTTeTjbkZC1Opi1zmy3P3YdEiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEAjBbBNfgmYFs535SnmIm7fpyCjbvK5PAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQ6JiCY7ZhTt9RaFIJZ68x2y6PWCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIEKAcFsBUdjd3piMJu+cfV0xh2ZArmxUnonQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsHgJCGab+DwXlWDWOrNNfClcigABAgQIECBAgAABAgQIECBAgAABAgQIECBAYIkQEMw28TH31GC2ep1ZwWwTXwqXIkCAAAECBAgQIECAAAECBAgQIECAAAECBAgQWCIEBLNNfMyLSjBrndkmvhQuRYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsEQItB/MzsscerVh4XzbPmV0PTWYTbc4YszE4k4FswWFAgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFKgTrz0faD2crL2OuCQE8OZkdfOzkmTZldfDvTGRcUCgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgS6LCCY7TJhxzvoycHs+AnTYvyE6cWXEcwWFAoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEuiwgmO0yYcc76MnB7KSps2P0uMnFlzGdcUGhQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKDLAoLZLhN2vIOeHMymb1E9nfHYkVtVfLkU3o6/e3qMGjaw4rgdAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTaFhDMtu3TrWcXtWC2ejrjFNymTTBbYvA/BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBDosIJjtMFXXK/b0YLat6YzL16CtDmy7LqMHAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAou3gGC2ic+3pweziaJ6OuMUwpamMJ4wvZASzBYUCgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQ6JNB+MDsv66dXG30537ZPGd2iEMyWj4xNt55C2NHjJpd9i4jBA/qYzrhCxA4BAgQIECBAgAABAgQIECBAgAABAgQIECBAgMASI1BnPtp+MLvECDb+iy4KwWz1dMatqYwduVVrpxwnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKBKQDBbBdLI3UUhmE3fv3o641ompjOupeIYAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgdoCgtnaLg05uqgEsx0ZNTt0SN8YOqRfQ5x0SoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGBxExDMNvGJLirBbCIZMWZihUxaV3bSlNnFMevMFhQKBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBNoVEMy2S9R9FRalYLZ8OuM8hC0/llSsM9t974aeCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFm8BwWwTn++iFMyWT2ecrydbHczmx5tI6FIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFkkBwWwTH9uiFMwmljSdcflasuVhbTqfj6RNZRsBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAq0LCGZbt+n2M4taMJuC2MH9+1Q4lK89K5itoLFDgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoFUBwWyrNN1/YlELZmsJmM64lopjBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBNoWEMy27dOtZxeHYLZ6OmPrzHbrK6IzAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBxVRAMNvEB7s4BrOmM27iC+RSBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECi6yAYLaJj25xCGYTV/V0xmNHbtVERZciQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsOgJCGab+MwW12DWdMZNfIlcigABAgQIECBAgAABAgQIECBAgAABAgQIECBAYJEUaD+YnZd9r15tfDfn2/Ypo1tcgtnxE6bF+AnTi28mmC0oFAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBBZ3gTrz0faD2cUdronfb3EJZidNnR2jx00u5ASzBYUCAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgZoCgtmaLI05uLgGs4MH9IlRwwY2Bk2vBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBBYDAcFsEx/i4hLMJrIRYyYWcoLZgkKBAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQE0BwWxNlsYcFMw2xlWvBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBHq6gGC2iU9ocQpmR187OSZNmV3ojR25VVFWIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUkAwW+nR0L3FOZgdNXxgDO7fp6F+OidAgAABAgQIECBAgAABAgQIECBAgAABAgQIECCwqAoIZpv45BanYHb8hGkxfsL0Qk8wW1AoECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEGgh0H4wOy9r06tFuwUHnG/bZ4FULE7B7KSps2P0uMnFtxs6pG8MHdKv2FcgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsFgK1JmPth/MLpZaC+dLLc7B7OABfWLUsIELB9ZVCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPRwAcFsEx/Q4hTMJrYRYyYWeoLZgkKBAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQAsBwWwLksYdWNyC2dHXTo5JU2YXYGNHblWU6y2kKZIH9+9Tb3PtCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPRIAcFsEx+LYLZt7PETpsWkJ14xJXLbTM4SIECAAAECBAgQIECAAAECBAgQIECAAAECBAgsggKC2SY+tMUtmE1B6vgJ0wvBOtSyWgAAMttJREFUUcMHdmm0a95fd4y8LW5KgQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEAPEBDMNvEhLG7BbJp2ePS4yYVgHsymgDVtQ4f0K851pJCvWZv305E26hAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBYFAQEs018Sot7MDt0SN+SZj6KNu13NJwtD3kHD+hjOuMmvpcuRYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0HiB9oPZedlN9GrjRpxv26eMbnELZtNXy0e5pnIKVNM2acrs0mf6n46GrPk0xnlDo2ZzCZ8ECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQI9SqDOfLT9YLZHfctF+2YW92C2tafTkXB29LWT6wp0W7um4wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgR6koBgtolPY3EMZqsD1bY42xoFWz7yNu+jrfp5HZ8ECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFgUBwWwTn9KSFMymUbLlUxon5tZGzlZPY5w/ktbq5+d9EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFhUBASzTXxSi2Mw21qoOnbkVlHrXK2wtVa9/LEYNZtL+CRAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFiUBQSzTXx6i2MwO2nq7Bg9bnKF4tAhfWPokH6lY+2dT5VqTWOcd1gryM3P+SRAgAABAgQIECBAgAABAgQIECBAgAABAgQIECCwqAgIZpv4pJaEYLY8lM1pa42ILR8J21Ywm/pIo29tBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBBZlAcFsE5/e4hjMJr7yYLW1EHX0tZNbrDlba7rjFOxOeuKVirrlIW4TH5dLESBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOg2gfaD2XnZtXq1cT3n2/Ypo1tcg9k8dK01Wrbs60deLz+WpikevMGK2Vq00/NDkULYtJVPj2w64xKJ/yFAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOgJAnXmo+0Hsz3hyy0m97A4B7PpEY0aNrDNJ1VrvdnqBvmI2+oQNz9eXd8+AQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgUVBQDDbxKe0OAezQ7fvG4P792lXs9Z6s3mj8hG3gtlcxScBAgQIECBAgAABAgQIECBAgAABAgQIECBAgMDiICCYbeJTXFyD2c4SVoeuefu2glnrzOZKPgkQIECAAAECBAgQIECAAAECBAgQIECAAAECBBZFAcFsE5+aYHY+dmtTGpdPV1w9slYw28QX1aUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgS6XUAw2+2krXcomF1gUx3ODh7Qp2KN2urz5aNpF/SiRIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGDREBDMNvE5CWYrscunNK4OXgWzlVb2CBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFm0BwWwTn59gtiV2Hs7Wmqp4xJiJRYPqEbXFCQUCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECi4BA+8HsvOxb9Grjmzjftk8ZnWC2DOPtYj4ytnx92byWYDaX8EmAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBjBOrMR9sPZqu+4axZs+O1116LPn1WjBVWWKHqbH27L730cjz88GMxc+bMWGftfrH+BuvFyiuv1GZnL7zwYrz55ptt1llmmWVjtdVWabNOM08KZmtrp3B2cP8+LU7mo2nzE7XC2/ycTwIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQI9WaBDwWwKQCdPnhJ33XVv/PX+v8dbb70Vu++xS+y440e69N3uvvu+uO22O2LG9BkV/Syz7DLxqU/uGB/f8cOx9NJLV5zLd0aPPiumT6tsl5/LPzcZtFEcfvjX8t2F/imY7dwjEMx2zkttAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBnivQbjB7zz33x7XX/jrmzplT8S26EsymYPe6626KO/7w51Kfq6yycgzeaouYN29ePPro5CJw3X7I+2KffYZWXDffOfaYE2P27FeyaYR7tTrTcgpmDzvsq3mThf4pmO3cI6gOZmutQ9u5HtUmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsHAE2g1m77zznvj97X8q7m7GjOdK5a4EsxMm3BvXXP2LWG655WL/zw2PrbcenOWr8xeyTaNzf54FwXfd9ZfSdVKwOmjTjYvrp0IKcA877NsxLwt4v/rVA+Id79yy4nxP3RHMdu7JjJ8wLcZPmF40EswWFAoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQKLmEC7wWz19znk4GNKh7oSzKYObrzh1thmm3dE/wHrV18i5s6dGyeecGaktWc//JEhsddeu1fUefXV1+Loo44vHRt51MGx4Yb9K8731B3BbOeeTFp7dvS4yUWjoUP6xtAh/Yp9BQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQKLisBCC2bbA7rk4qvigQcejE032yQOPfQrFdWff+6FOP7400vHThx9bKyxxuoV53vqjmC2c09GMNs5L7UJECBAgAABAgQIECBAgAABAgQIECBAgAABAgR6rkCPDWavvPLn8ZdsfduNNx4Y3zzioArBqVOfjDPPOKd07Ps/ODmWXXbZivM9dUcw2/knM2LMxKLR4AF9YtSwgcW+AgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFFRaD9YHZe9lXmL/9a+k4tpjKuOt/ii9d5/sQTz4oZ02fEDjt8KPb87K4V3f73vw/Hj869MJZffvk466wTS/c3e/Yr2f5ysfTSS1fUjTqvX3TS1fZFRxGC2TKMDhYFsx2EUo0AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA5AnXmh+0Hs1W33yKYrTrfHbvlUxUf+IV94z3v2bqi2zTFcZrquM9KfWLrrQdnUx7/M17L1p1daqmlYs211oh3v3vr2GmnHWK55XpXtOvOnTlz5na6u+efe7FFmzXWXK3FsbYO9O69aIwObus7dObc6Gsnx6Qps4smY0duVZQVCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECCwqAj0ymL3ooiviH3//VylkPe64I2OZZZap8Lzrrr/E2Gt+WTrWq1evWKvvmrH8csvFM89Oj7lz5pSOr7rqKnH4N78Wa621ZkXb7tqZNeuVmDVzQWDYXf12pJ8lKZz94S0z4n9Pv16wnHfg+kVZgQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEC9Ap0dQFnvdfJ2PS6YffDBiXHhTy8v3d+Xvvy52GabliMkZ86cFePHXx8rr7xSfPKTH4+VspGzaXvrrbfijj/8OW644dZ44403SuvTpnA2hbfdvS2sYDaFsr0bOBK4u5262t/1978Q19+/YKTxUZ9ZNzZfb/mudqs9AQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAEi6w0korNlWgRwWzL788M84845x46aWXY/MtNo2DD/5SXRi/ve2OuP7635TafvVrB8Q73rFlXf2016iz0xl3x1TG6Z6WpBGz4ydMi/ETphePYtTwgTG4//wgvjioQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKCHC/SYYHZONgXxD39wfkyd+mSstvpqcfTRh5RGxNbj9+abb8aRR3w30ueuu+0cO+/8sXq66fY2zzy9IGDMO19n3b550WcNgUlTZ8focZOLM0OH9I2hQ/oV+woECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFgWBHhHMpimIL7jgspg08T+x7LLLxje/eVD0H9C1tURPPvl78ewz0+K9/+9dccABe/eIZyGY7fxjqA5mBw/oE6OGDex8R1oQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQWIgCPSKYHXvNL+Ouu/4S2WKwccDnR5TC1K6aHH/86fH8cy/EBz6wbYzYe8+udtct7QWz9TGOGDOxaCiYLSgUCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEFiGB9oPZedm36bXgGx1y8DGlnd332CV23PEjEVXnF9R8u9TO+V+MvyHuuOPPpcp7fna32GGHD1Z20U77WtefPfuVOPaYE0v9DB+xZ3zwg9tW9lm+V0f/5c1rXb/ifNmOYLYMoxNFwWwnsFQlQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBorECd+WL7wWzVbbcIZqvOl+8+//wL8Uw2nfAWW2waSy21VPmpUvm6626K39/+p1L5U7vsGLvs8okWdaoPzJjxfPzut3fEnp/dNXr37l19urQ/dmw2AvfP80fgfutbh8d6661Ts16zDwpm6xMffe3kmDRldtF47MitinJ5IU17PLh/n/JDygQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgR6hECbwWxa+/WNN96suNEjjziutL/rbjtno1s/VJxbZpmlK8LXp596Jk477Ycxb968eP+274399x9W1E2FX//6N6WANZU/8tEPxK677hRz5syNN+a+kQ1CTTHzgm2ZZZaJVVddpXTghz88Px55+LFYZ921Y889d42NNx4Yyy03P6CdO3du3HjjbfH739+ZjeSdV+p36NDPLOhoIZcEs/U9gI4Es+MnTCuFsoLZ+oy1IkCAAAECBAgQIECAAAECBAgQIECAAAECBAgQaKxAm8HsPffcH1dd+fMO3cFew/aID394+6Lu7dlI2F9lI2LT1qfPinH6GccX5+688564dtx1xX4aTZtC4Na29TdYL4499rDS6UcffTwuuvCKmDlzZmm/V9Z2g/XXjWWWXSaefPLpmPP6nNLxjTbaMA459CuxbHa8p2yC2fqeRApdx0+YXjQeNXxgi5GxqU7ahg7pV9RTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBTBBoWzD777PQ4PRsx+8Ybb8QHPrBtjNh7z+I7l09hXBxso1AezKZqr776Wvw2m8743nv/Fi+9+FJFyxVWXCF2/fRO8cEPbVcxgrei0kLaEczWB9+RYDaNqh28wYqC2fqItSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIEGiwQJvBbFevPWvW7Jg2bUY23fCGXe2qZvs0TfKzz06LF194KeZmAfC62fTGa665RvTq1atm/YV9UDBb3xNIa8eOHje5aDx0SN8WAeyIMRNj8IA+MWrYwKKeAgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGeItDQYLanfMmech+C2fqeRHUwWx3A5uerj9d3Na0IECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIdL+AYLb7TVvtUTDbKk27J9KI2HyrDmDLpzoeO3KrvJpPAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAj1GoP1gdl52r23NDOx82z5lj1owW4bRyWJaQ3bSlNlFq/IAVjBbsCgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAg0WqDOfLT9YLbRN74E9S+Yrf9htxXMlp8bNXxgDO7fp/4LaUmAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgAQKC2QagttalYLY1mfaPl4+KTbXLA9jyaY7Lj7ffqxoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEmiMgmG2Oc+kqgtn6sSdNnR2jx00uOsgD2NaOFxUVCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPQAAcFsEx+CYLZ+7OoAduiQvjF0SL+oHkmbH6//SloSIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQ6H4BwWz3m7bao2C2VZoOnSifsnjwgD4xatjAFsFsfrxDHapEgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoEkCgtkmQafLCGa7hl0rmC0/lnoXzHbNWGsCBAgQIECAAAECBAgQIECAAAECBAgQIECAAIHGCAhmG+Nas1fBbE2WDh8cfe3kmDRldlF/7MitojqYTSfTcRsBAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBniTQfjA7L7vdXm3csvNt+5TRCWbLMOooVgezaXRseVCbdymYzSV8EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIdLtAnflo+8Fst9/pktuhYLZrz378hGnZmrLTi05aC2ZHDR8Yg/v3KeopECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFjYAoLZJj4BwWzXsCdNnR2jx01utxPBbLtEKhAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECDRZQDDbRHDBbNewBbNd89OaAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBg4QkIZptoL5jtOvaIMRPb7WTokL4xdEi/duupQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKBZAoLZZkln1xHMdh27I8FsWnt21LCBXb+YHggQIECAAAECBAgQIECAAAECBAgQIECAAAECBAh0k4BgtpsgO9KNYLYjSm3XGX3t5Jg0ZXablQSzbfI4SYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgsBAEBLNNRBfMdh27tWA2hbHlge3YkVt1/WJ6IECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBNAu0Hs/OyK/Vq42rOt+1TRieYLcOoszh+wrQYP2F6ResUyqZNMFvBYocAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKARAnXmo+0Hs4242SW0T8Fs1x/8pKmzY/S4yRUdDR3SNyY98UpFMDtq+MAY3P/twDZrk5crGtohQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0CQBwWyToNNlBLNdx24tmE09l4+kLQ9mR4yZGKY27rq9HggQIECAAAECBAgQIECAAAECBAgQIECAAAECBOoXEMzWb9fploLZTpPVbJCC1vIthbBpKx9JmwezeZCb75cq+h8CBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECTRYQzDYRXDDbPdijr51cMW1xGg2bB7D5FdL0xkOH9MtG0c5fkzatQztq2MD8tE8CBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECTRUQzDaRWzDbPdjlwWweuFYHs/nx8tG1Rs12j79eCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEOi8gmO28Wd0tBLN101U0LA9m85GxqUJ5CJuC2cEbrFix7mwe1lZ0ZocAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAEwQEs01Azi8hmM0luvZZPjq2PJgtD2zTFdK58ROmV1zMqNkKDjsECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQJNEmg/mJ2X3UmvNu7G+bZ9yugEs2UYXSiWB7Npfdl8qw5m8+Pln0bNlmsoEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIdFqgzny0/WC203eiQWsCgtnWZDp3vCvBbLpSeZhb68qp/8H9+9Q65RgBAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBugQEs3Wx1ddIMFufW61WaT3Z8mmMU53xE6a1mLq4Vtta0xmnMHb83dNj0pTZpSbthbe1+nWMAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQGsCgtnWZBpwXDDbfahp2uLBG6yYhbP9ik7LR9IWB2sU8umMU/3Sf0+8UgSyefVa4W1+zicBAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBzgoIZjsr1oX6gtku4FU1TcHs0O37Vkw53Fowm0bWTqoKX1M4m4+Oreq6tCuYraXiGAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQL0Cgtl65epoJ5itA62VJmna4vLRsqlaa8Fsmpa4tXOtdB+C2dZkHCdAgAABAgQIECBAgAABAgQIECBAgAABAgQIEKhHQDBbj1qdbQSzdcJ1ollae7Z8y6ctTseqz1XXKx9BK5gt11EmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoqoBgtquCnWgvmO0EVp1V0xTH5QFrmsY4H1lbfS5dIgW3aUrktI0eN7n0mf6nvF1xUIEAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAnQLtB7Pzsp57tdG78237lNEJZsswGlSsDl/TNMb5lk9nnIexg/v3yU+1mOpYMFvQKBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECJQL1JmPth/Mll9EuUsCgtku8XWocXkwWytcTeFseSCbd5qHtvl++RTI+TGfBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBOoVEMzWK1dHO8FsHWidbDJ+wrQYP2F6qVWtYLat7srXoBXMtiXlHAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQGcFBLOdFetCfcFsF/A62LR85Gv5NMYdaS6Y7YiSOgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAvUICGbrUauzjWC2TrhONMuD2c6Olk2XKJ8GOe13NthNbWwECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEagkIZmupNOiYYLZBsGXdCmbLMBQJECBAgAABAgQIECBAgAABAgQIECBAgAABAgR6jIBgtomPQjDbHOw0JXE9o12NmG3O83EVAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCSKCCYbeJTF8w2BzuNmh3cv0+nL1YdzI4aPrCufjp9YQ0IECBAgAABAgQIECBAgAABAgQIECBAgAABAgQWe4H2g9l5mUGvNhycb9unjE4wW4bRA4vjJ0yL8ROmF3cmmC0oFAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBHKBOvPR9oPZ/AI+uywgmO0yYUM7EMw2lFfnBAgQIECAAAECBAgQIECAAAECBAgQIECAAIElWkAw28THL5htInYdl0pTII8eN7loacRsQaFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQRQHBbBcBO9NcMNsZrebXrQ5mhw7pG0OH9Gv+jbgiAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAYicgmG3iIxXMNhG7jksJZutA04QAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKBDAoLZDjF1TyXBbPc4NrKXEWMmFt0PHtAnRg0bWOwrECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEKhXQDBbr1wd7QSzdaA1uYlgtsngLkeAAAECBAgQIECAAAECBAgQIECAAAECBAgQWEIEBLNNfNCC2SZi13kpwWydcJoRIECAAAECBAgQIECAAAECBAgQIECAAAECBAi0KdB+MDsva9+rjT6cb9vn/7N3/zGelHcdwJ/ljiNhOUpVrrXAuaGAx55AbdWmh7aiaCjBmOD17qyCYqnmEgRR0tSkWcr9A1QSWzSNpa2JtbXcdelJamOtYkkpS1GSVvRWI9Be7miB3eMq9wOSPbh1Z+kM852bne/s9zsz+/3xWnL5zo/n+Twzr4f/3nmeSdEJZlMYPXq4Y9feML3vaPJ0996yMTl2QIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQCB0mI+2D2bZViYgmK2MsrZCgtnaaBUmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECAy1gGC2wekXzDaI3eFQgtkO4XQjQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAoFBDMFvJUe1MwW61nHdWywezE1rEwfs5oHUOpSYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgMEQCgtkGJ1sw2yB2h0NNTs2EyanZpLdgNqFwQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0IWAYLYLvOV2FcwuV6z59oLZ5s2NSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAYBgHBbIOzLJhtELvDoab3Hw07du5Nem/edGbYvGldcu6AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQCcCgtlO1DrsI5jtEK7BbmWD2aidb882ODGGIkCAAAECBAgQIECAAAECBAgQIECAAAECBAj0uUD7YHZ+4Q1HCt7S/WKfFJ1gNoXRo4dlg9kdu/aGze84Uzjbo/PosQgQIECAAAECBAgQIECAAAECBAgQIECAAAECtQl0mI+2D2Zre+LhKyyY7Y8533bXnuRBx9ePhoktY8l5fBC1sc1xrOGXAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgnYBgtp1QhfcFsxVi1liqXTA7OTUTJqdmw1KhbY2PpjQBAgQIECBAgAABAgQIECBAgAABAgQIECBAgECfCghmG5w4wWyD2F0MVTaYjYaY2DpmO+MurHUlQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECAyLgGC2wZkWzDaI3cVQ0fdjp/cdTSrce8vG5Dg6SN+3araFxgkBAgQIECBAgAABAgQIECBAgAABAgQIECBAgMASAoLZJWDquCyYrUO1+prp4DWqng1m0ytq8+5X/0QqEiBAgAABAgQIECBAgAABAgQIECBAgAABAgQI9LuAYLbBGRTMNojdxVBFwez0/qNhx869LdVtZ9zC4YQAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQCBHQDCbg1LXJcFsXbLV1s0Gs+ngdXJqJkxOzbYMaDvjFg4nBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECOQLtg9n5hV4jOT3jS+4X+8ROC7+C2RRGDx9mw9d2wWz0Ktntjnv49TwaAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBANwId5qPtg9luHkrfFgHBbAtHz54UBbPZ78vGL5EOb+Nreb9R7ehv86Z1ebddI0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQGFABwWyDEyuYbRC7i6Gy35HdvOnMxSA1ez09RJntjLNbJMd103UcEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIDKaAYLbBeRXMNojdxVDZADYOULMrabNDLLVqNqo3+chsmN53NNtl8Tyun3vTRQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgYEQEMw2OI2C2QaxuxiqbDAbBaqTU7PJSHmrZtuFuXHnpULd+L5fAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB/hZoNJidm5sLhw4dDmvWrAmnn762ErkXXjgUnnzyu+Hw4cPhjW9YF846+01h7drTKqlddRHBbNWi9dVLf0s2DlzT16KRozB1x869yUPE7eIL2YA3vh61y66ezfaN2/olQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAYDIFGgtlnnnkuPPbYt8NDX38kvPTSS2F844awfft1XQk+8si/h69+9cFwYPZAS53VJ68O777i8vDLl78zrFq1quXeSp8IZld6BsqPXzaEzX43Nr3yNW+1bHw/e08wW35utCRAgAABAgQIECBAgAABAgQIECBAgAABAgQI9KNArcHsM99/Ntz9F58MRw4fabHpJpg9fvx42L37y+HBr31jsWa08jaqNz8/H77znb1hdubVoPYdm342vPe9m1vGXekTwexKz0D58bOBa7Zn/F3Y7KrYdMCarXHvLRtbyrS739LYCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAQF8LtA9m5xfeb6TgHQvu79//vfDXn/5c0jnadvjYsWOtK2YL+i92zNyfmvq38Pm/uy+ccsop4Zprt4aLLxoPIye9+oCvvPJK+MKu+8PDDz+62PWmm/4gnHfeuR0/f974ycvEB5nniy/n/Qpm81R691p21Wz6SeOVr9G1dLt0MLvU9bhONphN14zb+CVAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOgxgXb54BL32wezFb7n3R+7JzzxxFOtwWwH9f/hS/8ULrnkp8I56886oXcU/N724Y+EKAR+57s2hfe859dPaLNSFwSzKyXf2bjZ7YbTVdKrX/MC1mgl7eTUbNIlXmGbXFg4yNYXzKZ1HBMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEBkugL4PZdlMQrdL91rceD+df8OZw442/3655Y/cFs41RVzZQNnSNCqdXxUbnedsZj599akswmxe6ZvvlhbdRfX8ECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL9LzCQwexnP/uF8Og3HwvnnjsWbv7j7T0zS4LZnpmK0g+SDU+jjnkBanbb4ul9R1vGSK+wjW9ka2cD37idXwIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgf4XGMhg9rbb/iwcmD0QLvulXwhXX31Vz8ySYLZnpmJZD5JdNZsXzGbbpAfIax/fzwa6E1vG4lt+CRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEBkhg4ILZg8//INx66x2LU3Td7/1WeOtbL+6Z6RLM9sxULOtBsitby6x+TQ9QFMxmA9282ulajgkQIECAAAECBAgQIECAAAECBAgQIECAAAECBPpTYOCC2U996m/Df3z7v8KP/tiPhA996E/C6tWra5mZI0deDEcOt25XW8tAmaJr1pwcTls7mrnqtG6B3Y8eCLsfPRguXPh27J9efXbucNfe/b+51z9z4wW516OLt3/x6fDfT7+Y3I9qR2P4I0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQqFcgyt2a/BuoYPbxx/eET97zmUW/699/bbjkko21Wa5UMFvbCyncVuCPPjcTrrh4NFxxUX4w/pcP/F948tm5ljpF7aOGX/nPo+Erj78W8N9w+RnhvDesaanhhAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoHqBN/74mdUXLajYPpidX+g9UlBhGffv/tg94YknngrjGzeE7duve7XoMvrnPsUP+x86dDh85M67wwsvHAo/ueH8cMMN11daPzv23NyxEP0L7Z4/1TFvhe1yV79GyX3T6X3qFYb6MNrSOPq3edO6XIfslsdRo6JtjKP72T7t2kd9/BEgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECKygQLt8cIn77YPZCt8pN5itoP7c3Fz46J//Vdi//3vhjNefET7wgT8Ma9eeVkHlakv4xmy1nr1WLRuyRs/X7pux2T7j60fDxJaxXns1z0OAAAECBAgQIECAAAECBAgQIECAAAECBAgQINClQN8Hs8ePHw+f+MTfhOk9/xNOPvnkcPPN28M568/qkqWe7oLZelx7qeqOXXvD9L5XtyYuG7Juu2tP8gpl+yQdHBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPSFQN8Hs/d+/ovh4YcfXdhueST87u9sC2/7mbf0LLxgtmenprIHS6+ALbstcTrMjR6k3Srbyh5WIQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgcYE+jqYve++L4UHv/aNRayrf+PXwmWX/XxjcJ0MJJjtRK2/+qSD2YmtY2H8nNG2L5ANZsv2a1tYAwIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgZ4R6Nlg9uDBH4Rnn50JGzacH0466aQTwHbv/nL41we+vnj93VdeHq688ldOaNNrFwSzvTYj9TxPHLSWXfk6OTUTJqdmk4cRzCYUDggQIECAAAECBAgQIECAAAECBAgQIECAAAECAyNQWzA7Pz8fjh17uQXq4x//dHjqye+GCy+8IFz//muTe6tWnRRWrVqVnD/z/WfD7bd/NEQ1fu7tbwvXXLMluRcd3H//P4Z/+ecHF6+96xcvDVdd9athbu5YeHlhvPmF/9J/q1evDq973enpSyt2LJhdMfpGB45WzUb/Nm9aV2rc9CrbqEPZLZBLFdeIAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgJwRqC2aff/5g+PCtd5Z6ybf89EXhfe/77aTtAwsrYf9+YUVs9Dc6emq4485bk3sPPfTNsGvn7uQ8Wk17/Pjx5Dx7cNbZbwof/OBN2csrci6YXRH2xgftNpgdXz8aJraMNf7cBiRAgAABAgQIECBAgAABAgQIECBAgAABAgQIEKhPoH0wGy1AHSl4gCXudxPMPvfcbLhjYcXsyy+/HC699O1h229enTxAegvj5GLBQdtgdonnT0p2ez8pFIJgNoXhsEVg2117kvMomB0/+9TF79OW+UZt0tEBAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBA/QId5oftg9n6Hz13hCNHjoaZmQPh3HN/Ivd+P14UzPbjrDXzzPF3afNGi7Y2jv7Kbo2cV8M1AgQIECBAgAABAgQIECBAgAABAgQIECBAgACBlRXo2WB2ZVnqGV0wW4/rIFQtCmbT7+f7s2kNxwQIECBAgAABAgQIECBAgAABAgQIECBAgACB/hEQzDY4V4LZBrH7bKjou7Q7du4t9dQTW8cWtzku1VgjAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBnhAQzDY4DYLZBrH7eKgopI3+RX+TU7MnvIlg9gQSFwgQIECAAAECBAgQIECAAAECBAgQIECAAAECPS8gmG1wigSzDWIP0FCTUzMtAe34+tEwsWVsgN7QqxAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEBl9AMNvgHAtmG8QesKG23bUneSPBbELhgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQNwKC2QanSjDbIPaADbVj194wve/V7Y2jV7Od8YBNsNchQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEBl6gfTA7v2AwUuDgfrFPik4wm8JwuCyB7HbGgtll8WlMgAABAgQIECBAgAABAgQIECBAgAABAgQIEKhOoMN8tH0wW90jDn0lwezQ/y/QMcD0/qNhx869SX/bGScUDggQIECAAAECBAgQIECAAAECBAgQIECAAAECfSEgmG1wmgSzDWIP4FC+MzuAk+qVCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgaEREMw2ONWC2QaxB3Ao35kdwEn1SgQIECBAgAABAgQIECBAgAABAgQIECBAgMDQCAhmG5xqwWyD2AM4lO/MDuCkeiUCBAgQIECAAAECBAgQIECAAAECBAgQIEBgaAQEsw1OtWC2QewBHMp3ZgdwUr0SAQIECBAgQIAAAQIECBAgQIAAAQIECBAgMDQCgtkGp1ow2yD2gA7lO7MDOrFeiwABAgQIECBAgAABAgQIECBAgAABAgQIEBh4AcFsg1MsmG0Qe0CH8p3ZAZ1Yr0WAAAECBAgQIECAAAECBAgQIECAAAECBAgMvED7YHZ+wWCkwMH9Yp8UnWA2heGwI4HsdsYTW8fC+Dmjy6oV1Vhun2UNoDEBAgQIECBAgAABAgQIECBAgAABAgQIECBAYJAFOsxH2wezg4zW8LsJZhsGH8DhssHs+PrRMLFlrPSbTk7NhOmnX1xWn9LFNSRAgAABAgQIECBAgAABAgQIECBAgAABAgQIEFhSQDC7JE31NwSz1ZsOY8X0d2aj97/3lo2lGQSzpak0JECAAAECBAgQIECAAAECBAgQIECAAAECBAhUKiCYrZSzuJhgttjH3XIC3XxnNu67nDC33FNpRYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUCQgmC3SqfieYLZi0CEtl93OeDnfmY1X2y6nz5Aye20CBAgQIECAAAECBAgQIECAAAECBAgQIECAQKUCgtlKOYuLCWaLfdwtJ5ANZst+ZzbdTzBbzlorAgQIECBAgAABAgQIECBAgAABAgQIECBAgEBVAoLZqiRL1BHMlkDSpJRAvPI1blxma+Lo+7KTU7OLXcqGuXF9vwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAt0JCGa781tWb8Hssrg0LhCIvxUbNymzAjbdRzAby/klQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECDQj0D6YnV94kJGCh3G/2CdFJ5hNYTjsSiC9LXFUaPOmMxf+rSus2ckq28KCbhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEhlGgw3y0fTA7jJg1vbNgtibYISybDWbbrYDNto/IyqyyHUJar0yAAAECBAgQIECAAAECBAgQIECAAAECBAgQqEVAMFsLa35RwWy+i6udCaS3Jo4qFH1nNv192Xg0wWws4ZcAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUL+AYLZ+42QEwWxC4aACgWwwWxS05gWz7VbZVvCIShAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPxQQDDb4P8KgtkGsYdgqOz2xEXfmc1+XzbiEcwOwf8kXpEAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoGQHBbINTIZhtEHsIhsoGs0VBa14wGxEVbX88BIRekQABAgQIECBAgAABAgQIECBAgAABAgQIECDQmIBgtjHqEASzDWIPyVDZ7Yzzgta8bYxjnqLtj+M2fgkQIECAAAECBAgQIECAAAECBAgQIECAAAECBLoXEMx2b1i6gmC2NJWGJQWywWzedsaC2ZKYmhEgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEahRoH8zOL4w+UvAE7hf7pOgEsykMh5UIZLczjopmV80utY1x1LZo++Povj8CBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGMQIf5aPtgNjOO084FBLOd2+m5tEC7VbPpYDYKYqf3HU2KCWYTCgcECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgVoFBLO18rYWF8y2ejirRiBv1Wz87djsNsbRVsfTT7/YEs5mV9hW81SqECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIpAUEs2mNmo8FszUDD3H57KrZeCVsNpiNAtvJR2Zbgtk4xB1iPq9OgAABAgQIECBAgAABAgQIECBAgAABAgQIEKhdQDBbO/FrAwhmX7NwVK1A3qrZpVbHZtsKZqudC9UIECBAgAABAgQIECBAgAABAgQIECBAgAABAnkCgtk8lZquCWZrglV2USC7OjbLEq+izQaz8fVse+cECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQLVCfw/AAAA///qV9JAAABAAElEQVTsvXvMZdlZ5rfLl7bdZTOMTXcwuMeN5wKUGQRiNE63uYTEAgQGLKem3UghAg0JEEGGgImZaNIOLSRMcBIPEyE0wOSPDIrbU5qLGZTJEBOEx2UP8gSUyCV5gKHSbtN2V9sG3NVu+lb53lP9fPV8b62173ufy/fbUn1r77Xe9b7v+q2199nnPLXPOXPtaGvYViHwiYev3BTnC1952011VEBgLIF73/mRatfzd9/WnL/79k272537C2eb++65s9qPBghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgekEziDMTofY1wPCbF9S2I0lcOHiI82Fizf/B4Dw58Ls/e+53Fx68OpxmHe/9bXH++xAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjMT6BbmI3nac+0BKa9nY+hQ5g1GOwuSuDSx6428c9FWhdfszB731vubM7dcXbRnHAOAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6CwEh9tFuYPQg6uzEIhNndmIfTloVEWn2NcYw/6u5/4PIxCr7O+BgFOxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgEQIIs4tgLTtFmC1zoXZ9AlmYjQwQZ9efByJCAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBwegggzK441wizK8ImVCeB2u/R8rXGnegwgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKDCSDMDkY2vgPC7Hh29FyGQEmc5cnZZVjjFQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgdNNAGF2xflHmF0RNqF6EyiJszw12xsfhhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgFwGE2V6Y5jFCmJ2HI17mJ5DFWYTZ+RnjEQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgdNNAGF2xflHmF0RNqEGEbj0savN/Q9cPu7D1xkfo2AHAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCMxCoFuYvXYU50xLLNrb+Rg6hFmDwe7OEbj/PZebSw9ePc7r3W997fE+OxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAs8RGKmPdguzEJ6NAMLsbChxtACBLMzydcYLQMYlBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACp5YAwuyKU48wuyJsQg0mwNcZD0ZGBwhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQmwDCbG9U0w0RZqczxMOyBO5950eOA+TfmQ3h9twdZ4/b2YEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEOhPAGG2P6vJlgizkxHiYGECta8zvnDxkebCxSsNX2+88ATgHgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgYMlgDC74tQizK4Im1CjCOSvMw4h9sIHrzSXHrx67O/db33t8f6SOzyhuyRdfEMAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEILA2AYTZFYkjzK4Im1CjCGRhtuTk/N23Nefvvr3UNGtd5BIbX588K1acQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgsCUCCLMrgkeYXRE2oUYTyF9nXHLkT83G1xxfeujx5r577iyZjq4LvyHKIsyORkhHCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEdohAtzB77SjbMy0Z097Ox9AhzBoMdneWQB9hVk/N6rdnYzDn/sLZqjg75muJEWZ3domQGAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEDgdBMYqY92C7OnG+uso0eYnRUnzhYiUPo64xBd/XdmI3Stzp+cDV/xG7Wxef2mouNPCMTnXnXrKl+b3JEKzRCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgMgGE2ckI+ztAmO3PCsvtErj3nR85TkBPx/Z5kjY6hWB7/q7bNoKsi7n3veXOQV9LHDko9nEy7EAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBPSWAMLvixCHMrgibUJMISIR1YbT0JO2QIGOE2bavRx4SG1sIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgsG0CCLMrzgDC7IqwCTWJQIiwsZ274+wJPxJsvTLE0/jK4QsXr39lsbf5/hCRVSLwkD4ei30IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgsGsEEGZXnBGE2RVhE2oRAhJM5dyF0wsXH2kVZ91W/WulfA3pU/NFPQQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDYBQIIsyvOAsLsirAJtRiBNtFUbRE8vgb50kOPN2N+Z9b9vPutr11sLDiGAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAmsRQJhdi/RRHITZFWETalEC8ZXG991zZzFGiKrxFcjxLz9h2/d3ZhFmi2iphAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAYI8JdAuz145Gd6ZlhLS38zF0CLMGg91TQSALs32/mth/y5YnZk/FUmGQEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIH9ITBSH+0WZvcHwc5nijC781NEggsQuPedHznhtY/Q6n36PmV7IggHEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGDHCCDMrjghCLMrwibUzhDwp18jqT5CK8LszkwfiUAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIDATAYTZmUD2cYMw24cSNodGIH+dcZcwO9T+0HgxHghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6TAMLsivOKMLsibELtDIEstHb9zuyFi480Fy5eOc7//N23Nefvvv34mB0IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgsI8EEGZXnDWE2RVhE2qnCOSvM277nVmE2Z2aOpKBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgJgIIszOB7OMGYbYPJWwOkUAWZtu+zjgLs11P2B4iL8YEAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIDA4RFAmF1xThFmV4RNqJ0ikL/OuO3ribOIizC7U1NJMhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCIwk0C3MXjvyfKbFO+3tfAwdwqzBYPdUEcjCbJvYeu87P3KCTZvtCUMOIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMAaBEbqo93CbEr+sceuNk888URz9uytzUte8pLUOu1wiO/PfOaPm2eeeaY14Ate8MLm8z//81pt1mxEmF2TNrF2jUB+Erb21GwWZmMcbb9Ju2vjJB8IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUCLQS5gNAfTy5QebD3zgt5t/8+HfbZ599tnmO9/0rc0b3vANJZ+D6sb6vv/+n22uPPJoa6y/+Je+pPmRH/mBVps1GxFm16RNrF0jkJ+ajfyy4FqyKdnt2tjIBwIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQh0EegUZj/0oQ8373nPP2ueevLJE77mEGan+P6Jt/1kc/Xq40dfI3ym+k3LIcz+rb/1/Sfy3uYBwuw26RN7Fwjkp2bz1xRfuPhIc+HilZtSve8tdzbn7jh7Uz0VEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGBfCHQKs+9//4ea33jfbx2P59FHP7XZn0OYHev72rVrR4Lrf9NcO3py9/u//3uar/irX36c3y7vIMzu8uyQ2xoESk/EuuiKMLvGLBADAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBbRDoFGZzUj/8Q2/bVM0hzI71/bnPPdH81z/+9k33t/74DzWvfvUd2dVOHiPM7uS0kNTKBLL46k/N5idqlZqLt6qjhAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAL7RGAvhdlPf+ozzdvf/o4N55+8/yeal7/8z+8Fc4TZvZgmklyBQBZgJbzmeqVy/u7bmvN3365DSghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCCwdwT2Upj92Mc+3vz3P/NzG9j/4//0U80LX/jCvQCPMLsX00SSKxDIX2msp2bvfedHitERZotYqIQABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGCPCHQLs9eORnPmxohu+irj1H7D8rm9Ae03+Q4Xhf4f/ejvN//z3/vF5sUvfnHzsz/7k5v8rl59/Oj4Rc3zn//8kykU+p8wWLrdgiHMGgx2Tz0BF2G7hFm1n3poAIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGD7BEbqi93CbBpaUTxNNmMP+/r+nd/5f5p/8Mu/0px96dnmK7/yXPM7v/P/Nk8c/e7s8573vOYVX/Dy5qu/+iubb/qmb2xe9KJbxqaySD+E2UWw4nRPCeSvLY6nYi9cvFIcDcJsEQuVEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEI7BGBvRRmP/CBf928+3/7xxvMZ86cab7gtlc0L37Ri5pPfPJK89STT27q/9yf+7zmR/6rH2i+4Atesch0PPnkU038G7I99tmrN5m/9GVnb6prq7jllhc28Y8NAvtO4MLFR04IsSG+Xnrw5nMkxjlFmI2vTT53x7DzbN/Zkj8EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK7R2AvhdnPfvax5sKF9zYve9lLm2/5lv+oeenRk7OxPfvss81v/l//qvnVX/0/mqeffrp5zWvu3IizId7OvT322ONNSWidO07JH8JsiQp1+0bg3z78Z827/sWjvdP++e/94t62Mvy13/3Tze63fdXnqYoSAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCGwIvPwVn78qib0UZrsI/fq//M3mve/93zdm3/8D39N8xVd8eVeXwe3bEmY3T8zu2Fc0D4ZHBwg8R+D7fuEPqyy+9Itf3Hz0408ct//SD3zJ8X7fnfd++DPNRx9+ovnxb39l3y7YQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcEoIvPSlt6460oMUZp955pnmx370v22ifOO3f3Pzzd/8H64KtRaM35itkaH+tBLIvzPrHPJXG7/7ra/15l77+rrkMX17BcAIAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQkcJDCbIz9p37qf2g++YlHmq/5a1/VfM/3fFdPHMuaIcwuyxfv+0cgfv/1/gcu35R4iLKx+W/O3veWOwf/Vuy97/zIxs+YvpuO/IEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACMxE4WGH27W9/R/PpT32mef3rX9fc+11vngnXNDcIs9P40fvwCNSE2fN337YZ7IWLV44H3Sauhp9zd1wXc487HO1ImA1/5+++3ZvYhwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKrEugWZq8d5XPmRk4//ENv2xx855u+tXnDG76haVL7Dcvn9ga03+Q7XAzor9hXrz7e/MTbfnJz+JZ739x87de+Tk03lyP8n3DS1d+MEWYNBrsQeI6AxFMHEkJqCK3+NG1NmA1RNrYszLroG0/g3nfPnRs7/kAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBSQS69MFKe7cwm7IqiqfJRoef/vRnmk8cfZ3wl33ZX26e97znqbpa9vH96KOfbv7PX//N5s3/8RubW265pejr3e/+x80H/tW/PhKUzzR/+2//SPNFX/SFRbu1KxFm1yZOvH0gUPqd2ZIwW3vqNX5H9tJDj98kvOr3ZcWA35kVCUoIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAS2QaBVmH322Webp59+5kReP/ajf2dz/MZv/+bmG7/x647bXvCC558QXx/+o080P/3T72quXbvW/PXXfU3z3d99z7Ft7Iz1/a53/ULzB7//h80XvvLfa9785jc2r3nNnc2LXnRdoH3qqaeaf/7P/2XzG7/x/qMnba813/AfvL45f/47TsTd5gHC7DbpE3tXCfiTrcoxno6NzZ+YbRNm4yuPs/CahdnaE7ebQPyBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAgsTaBVmP/ShDze/8g//Ua8U/sY9b2q+/uvvOrZ93/t+q/mn/+TXNsdnz97avONn3n7cFjtjff+7f/f/Nb/0i/9r89nPfnbj78zRk7iv+uJXNi944Quaj3/84ebJP3tyU/8lX/Lq5of/y/+8eeFR/a5sCLO7MhPksUsESsJsiKy5vvZ1xPoq5Cy85idxa8LuLrEgFwhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6XwGLC7Cc/eaV5x9ETs08//XTz+te/rrn3u958guJYYTacfO5zTzS/fvR1xr/92/938yd//Ccn/L7k1pc0b/y2b2q+9uv+/RNP8J4w2tIBwuyWwBN25wlkEVVPv0p0jQF0CbNZePW+bf2nwAnxOP7p921VTvFJXwhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6TQKswO3XIjz12tXnkkUePvm741VNdFfvH1yR/8pOPNH/8mT9pnjoSgF959PXGr3jFy49+WvZM0X7blQiz254B4u8qARdmXYB1cdXrNQ7/umJvz0/byl6Cr46nlp5f+Jrb/9T86A8BCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMDuEFhUmN2dYe5GJgizuzEPZLF7BFxI9SdfXbCNrLPw6cKst+d6jTh/3bHqx5YIs2PJ0Q8CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcPoIIMyuOOcIsyvCJtReERgrzGbhVsJrTZj1p2rnAJSFWcWfwzc+IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQOCwC3cLstaMBt30zMO3tfGy9IMwaDHYhkAhIZHVxU3Uy9baoy8KonrbN/dR/TmHWxWT5z/mpnhICEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEDIjBSH+0WZg+I0baHgjC77Rkg/i4TkJjq4mZ+8rWtLcYm4TULtj7u/HXI3jZkP+cWfT2/Ib6whQAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhA4PAJIMyuOMcIsyvCJtTeEdATqC6cZvHThc/cpgGHzf0PXNbhTaX7uKlxQEUp/ly+B6SBKQQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAntCAGF2xYlCmF0RNqH2kkA86erCrMRaDcaFTz1hqzaV8dTspQev6nDzFG0+vu+eO4/bx+4gzI4lRz8IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwOkkgDC74rwjzK4Im1B7SSDEzvN3336cexZm9RuyYdD2dcXHDo528hO0+rpjtxmzXxKGPb8xPukDAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIDA4RJAmF1xbhFmV4RNqL0kEELsuTvOHueehVmJqrn+uENhJ57AzSKqP5Vb6NKrKvuMTgizvdBhBAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhA4lQQQZlecdoTZFWET6mAI+JOxEmZLXyNcG3BJmPWvRK7166r3vGSLMCsSlBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCGQCCLOZyILHCLMLwsX1wRJwAXSoMCuhND9hKz9ToHle8qN4OqaEAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAiLQLcxeOzI9I/NCSXs7H0OGMGsw2IVATwIugEpQ9bpwk39HVq5dKPU+8iO7oWUWetV/ql/5oYQABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGCHCYzUR7uF2R0e876lhjC7bzNGvrtAIP+Wa4itFy5eOU5N4qsLr2r0ryzOfqb8zmztq5QRZkWeEgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgUwAYTYTWfAYYXZBuLg+WAJZUM0DlTBbsnPxNbe7aJt9dh0jzHYRoh0CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIFMAGE2E1nwGGF2Qbi4PmgCWVT1wUpgLYmlLszmdvVzX333sy/144lZkaCEAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAgE0CYzUQWPEaYXRAurg+aQO03XWPQEl+zTRZJc7uetB0DriYU55hjfNMHAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBwySAMLvivCLMrgibUAdHoPSUahZC/Xdms/CahdncdwiwmjAbPiQUD/GHLQQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAodPAGF2xTlGmF0RNqEOkkAWZ7P46oJpbgsgLtxOEWbdTwaNMJuJcAwBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEAQQJhdcR0gzK4Im1AHS6BNfHXhtiSQet8AVLLpAw5htg8lbCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEHAC3cLstSPzM94l7dPezsdwIcwaDHYhMIGABNYsrPrXFee2CKd+Cn3fW+5szt1xVoe9So9R6lCKW7KjDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgT0lMFIf7RZm95THLqaNMLuLs0JO+0ggxNELH7zS3HfPnTelH0+z1r6m2J+ojY5jhNnsIycwxmf2wTEEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKHRwBhdsU5RZhdETahDp5AiLOlp13jqdhzr7q1OX/37TcxyE+7ln6H9qZOqSILsyECX3rw6rEVwuwxCnYgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhAwAgizBmPpXYTZpQnjHwJNE+Jr/OsjzOYna0tibwixlx56/PjpXIRZVhkEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAJjCCDMjqE2sg/C7EhwdIPAAAIhrsZWepo26uOrjrW5MCvBNT/xqnrZ5t+pjaduL1y8Ipejvh75uDM7EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIHCwBhNkVpxZhdkXYhIJAhUAWVt/91tduLCXYZmE222e3YX//A5ePq3P/4wZ2IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQONUEEGZXnH6E2RVhEwoCFQJZaA0hNZ6y1VOvejJW3SXY6tjLsD1/120nhNkxv1vrPtmHAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEDgMAkgzK44rwizK8ImFAQqBEKE9SdcQ1y99OD1rz+OLi7MZtvsEmE2E+EYAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBGoFuYfbaUdczte5H9bS38zF0CLMGg10IbIlAl9gaaenriPX7srVU4+nY+C1bF3p5YrZGi3oIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwIEQGKmPdguzB8JnF4aBMLsLs0AOEGiatq8nDj5ThFl/4hbWEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQEAEEGZFYoUSYXYFyISAQA8CXcKsxNVsF4KtPx0rAdft1LdHGphAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBwigggzK442QizK8ImFARaCNz/nssnflc2m0pcrQmu6o8wm8lxDAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQI0AwmyNzAL1CLMLQMUlBEYQkLCqriHExnbpwauqauK3Yi9cvHLi+Pzdtx8fh4/77rlzc1wTcI+N0078zm38Ni0bBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACp4cAwuyKc40wuyJsQkGghUAIo/kriaPOhdgQa7NQ68Ksux8izIage/6u2xBmHSD7EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIFTQABhdsVJRphdETahINBCwIXZeDI2BFevK3V991tfW6re1OUncGu2Fy4+shF/9RXIVYc0QAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcHAEEGZXnFKE2RVhEwoCHQTiKVf9lqxM/clX1amsia3R3keYlSgb9hKDY58NAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACB00GgW5i9dgTiTAsM2tv5GDqEWYPBLgS2TKD0lcJZYFWKXUJq7pdFXBdlw2cWhBWHEgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgT0gMFIf7RZm92Ds+5Iiwuy+zBR5ngYC8dXF5+44e2KoWWBV4xRhNouy4RNhVmQpIQABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKnhwDC7IpzjTC7ImxCQWAEgdrvzHb9JmwWdGVf84cwO2Jy6AIBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQ2HMCCLMrTiDC7IqwCQWBkQRKvzObv5o4u85PxUqYLflS3y6fsqOEAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEDgMAggzK44jwizK8ImFARGEshPv/Z5urUkzMbTshcuXqlmkYXZ8HH+7tur9rWGiBNb/lrmmj31EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIbIcAwuyK3BFmV4RNKAiMJJCF2a7fl40wJWH2wgevNJcevC6ahk0IvH6sp2qjLbbwEeLqUIF1bL/rUfkLAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIDAWgQQZtcifRQHYXZF2ISCwEgC+Xdhs4BaclsSZu9/4PKxaYiy515164knaLPfEITP33XbKGE2Ao152vY4QXYgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhBYnADC7OKIbwRAmL3Bgj0I7CqBLMzmrxwu5Z37xFO2/jXGIcyG6OpibRZm4/do+zydm+OHKHzpoceb++65MzdxDAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwA4RQJhdcTIQZleETSgITCCgrzPu8/uyESYLs/lri0Nwja8odmHWRVj17xvPhxa5xoYwu8HAHwhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIDAzhJAmF1xahBmV4RNKAhMICBh1sXTNncSVms2JWHWRVj/KuQ+T+h6nHjSNrah/dwH+xCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCCxPAGF2ecbHERBmj1GwA4GdJiChdS5hVl9bLBE1Bl8TZmXbF5B8Isz2JYYdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGA7BBBmV+SOMLsibEJBYAIBCbN9RVLZ10JKNJWIGnYuzOoJ3ajvGzNsPe6QftGXDQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgXUJIMyuyBthdkXYhILARAIhlg753VYXXXNoCbMuwIaN6r2vC7buJ0TY+J1a3/wrkBFmnQz7EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIHdI4Awu+KcIMyuCJtQEJhIIETP83ff3tuLi6veyYXWKcJsSSh2Ybbv1y57buxDAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCCwHoFuYfbaUTJnWhKivZ2PoUOYNRjsQuDACNSEWRdMszAbT7nGdv8Dlzel/uhJWh1LgK3Vh50LwOpHCQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwAIERuqj3cLsArmeVpcIs6d15hn3aSDQR5iVwCoeIczGVxRfuHhFVZsyfy2x+mVh1oVehNkTCDmAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCOwcAYTZFacEYXZF2ISCwMoEXCT10C6yhgjrT8f2FWYl+rqviKF6xcvCreopIQABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQ2D4BhNkV5wBhdkXYhILAygRqwqyLpVmYja85vvTQ482lB6+eyNafftXTsmGAMHsCEwcQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgb0igDC74nQhzK4Im1AQWJnAGGE2BNgsykbaNWHW67PIG/2ycBt1bBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCOwGAYTZFecBYXZF2ISCwMoESsKsC6lKJ3/9sOpzqSdt3d79+ZO06ru0MBticPzTdv7u27VLCQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAcBhNkOQHM2I8zOSRNfENgtAiVhNr6qOIuXLrS2jSBE1hBBL1y8csJMgm1JmC3FO9F54kGOubQQPDFdukMAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEdooAwuyK04EwuyJsQkFgZQJZtIzwJaG0JOCWUh0jzPoTtSWfU+ty7gizU4nSHwIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABE4TgW5h9toRjjMtSGhv52PoEGYNBrsQODACcwuzIbKWfn9WYmgWSQPn2sJsSXg+sGllOBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBmwmM1Ee7hdmbQ1EzkgDC7EhwdIPAHhAoCbMSUT39kqDq7V378ln7SmR91XGXnzHtOSbC7BiK9IEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIHTSgBhdsWZR5hdETahILAygfg92PsfuHwiakkkLQm40SlEzksPPV58Stad6qnYLJLKphRTbVPLHBNhdipR+kMAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMBpIoAwu+JsI8yuCJtQEFiZQBZmJaDmNLKd2ocIs+fvuu0mEVh+9EStjucqS3nXxjhXTPxAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6JAMLsirOJMLsibEJBYGUCWbisiZbZTmmGMHvujrNVwVV2UYbthYtXvOp4fylhtvSkb22Mx8mwAwEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwDEBhNljFMvvIMwuz5gIENgWgSy41r7mN9sp3/gK4lqbbFSGIHrpwas6PFHW4p4wGnGAMDsCGl0gAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAkYAYdZgLL2LMLs0YfxDYHsEsqjaJpDm32qNrPXbsKW2/IRsmzC71FOsCLPbW1tEhgAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgcMggDC74jwizK4Im1AQ2AIBF1XbvlLY7SJNF1Pvf8/lE0/Dhijb9yuOs68SghCQw9/QLeel/hKUdUwJAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAmUC3MHvtqOOZcudNLe3tfAwdwqzBYBcCB0jABdc2wTKLnP50bamtTZgNUTc2/2rjWux46vXSQ483991z56bPkD85L/WtxVI7JQQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEDg4AiM1Ee7hdmDI7W9ASHMbo89kSGwBoE5hNn8lcgSPt23j2WIMBs+/Olc99O1X4uv/Lr60w4BCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQOO0EEGZXXAEIsyvCJhQEtkBA4mWX+JmfPvWvPXZhtu1JWg0vbOIpWH9i1v3Jzn8jdoyYqrHJn8pSLLVRQgACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcIMAwuwNFovvIcwujpgAENgqAQmuLqiWEnKRNNqzUCoR1P3Id/YXNrFduHjluKkklspnGJXajzsXdlwszs1DfeX+HEMAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAETgsBhNkVZxphdkXYhILAFghIPHVBtZSGC52lp2vlxwXbLObKbwijsd3/wOVNGX9y/Ny3FPO4c2HH883NCLOZCMcQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgTIBhNkyl0VqEWYXwYpTCOwMAQmqXWKlC51ZRI3BhJAa2/m7b9+U8cf7HFce7YR4m9uy8Kq81C+3q75WZmHX7brG6rbsQwACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAROMwGE2RVnH2F2RdiEgsAWCEgA9SddS2m4kFoSZqM9/vUVZiOGf1WxC681UbUrR8+75iNsSvl7X/YhAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhC4TgBhdsWVgDC7ImxCQWALBPoKs5GahNTSE6chyp674+xNI1AfNbgAq9hqk/BaE1VLcdU3l9m3tyPMOo1h+7V5HuYFawhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACENgXAt3C7LWjoZxpGc6A9ieffLL50z/9bHPLLbc0n/d5L7vudED/YhbP9X/ssavNE0880Zw9e2vzkpe85IbpTP5vOEx7Xf7NHGHWYLALgQMkECLopYceb+67587O0UlklYDa2eHIIAukfYRZxcn+vW9uy8c5rrePFWYRJa9/ZXUI8CUR3hmzDwEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjtGoEsfrLR3C7MzjPPhhz/ZfPjDv9u8/7c+2Hzuc59rzr32y5of/MHvnez5mWeeaS5ffrD5wAd+u/k3R/6fffbZ5jvf9K3NG97wDZN9L+EAYXYJqviEwO4QCGE2Nv8K4lp2EjunCLMuisqf4sUTsSF+Xrh4RVUnyiHCbE3cDYdD/HgCCLPXn5oe8uSy82MfAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAE9o/AosLsw3/0iebn/t4vNo999rETZOYQZj/0oQ8373nPP2ueOnoK1zeEWafBPgQgsCaBocJs5Nbn6VqNIX8tsQuzuU19vAwR9dKDV4+r+orCLsxmH2OF2RCSh4z9OOkD2Qlh+v4HjhgcCeg8MXsgk8owIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQh0EFhUmP3Yxz7e/INf/pXjFP7kT/60eeqpp2Z5Yvb97/9Q8xvv+61j348++qnNPsLsMRJ2IACBlQmEONr3q2lDmDz3qlt7PV2rYUjM07GLerlNNipDxI2vWXZh1vvLLpfZb/jxp3DHCLPy2VcYzjkdwrGEdBfXD2FcjAECEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAATqBBYVZnPYn/u7f7/5vd/7g1mE2ez7h3/obZsqhNlMhmMIQGAtAiE4xtbnCcgQ5mLr87XHG8OjPxI0dezCZm6TjcoQACOveEpTWx9RNfudU5jtIwwr10Mr9RQywuyhzSzjgQAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQJ4AwW2czewu/MTs7UhxCYKcIhIjZR5SNpMM2tr72G+OjPxL04tiFWbVL8K091er9XZiNfOJfFor1ZKf8h5CYn7wt5SH7UimfHr9kd6h1wVkCOcLsoc4y44IABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcDMBhNmbmSxWgzC7GFocQ2DvCIQ4N1SUjUHGVyDH1xH3ETUlgLr4p/4CFk+tRi4ScvNTrPLh9hc+eOXEVyKPFWbD59C+ymPpcuz89MnLmfaZxz4+sYEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIHdJ4Awu+IcIcyuCJtQEDhQAhJWxwp6LgoGovDjvzub/Wb7EFKVgxCXxFV/Mjfs3MZ9ZiFYPrddRo4hnI8Rz7tydzaZd1df2iEAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGB/CXQLs9eOBnemZYAD2ou/MTugfzGL5/pXf2N2Jv/F2FHZ5d86IswaDHYhAIFRBCRq+lOwQxz51+jW+rlYWhJhS3XZl4uP0VYTZndVmIwxnr/r+u/y5rFNOc78d3X8U8ZIXwhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACB0+gSx+stHcLszOSKwqzM/mvCrMz+c9uHnvs8eaxz17/jcjctuTxLbe8sLnlRbcsGQLfEIDADhP46B890fzsex9uvuOvff7Rvz8/KtPv+4U/bO33pV/84ubHv/2VG5ts+0s/8CXNez/8maN/f3zs48e/45XNl37Ri4+PleNxxdFO9NP2s7/6cPPRjz+xOfRYat+FMsadxzVHXpld+HQ2c8TABwQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQj8NKX3trPcCYrhNmRILcpzI5MmW4QgMABEPi3D/9Z865/8WjzI9/yBc1feeWLRo0o+oeftu3bvvplze994skTdhEv4v7a7/5p82u/89nj7jkX5XhscLTjNjm+t3mfbe7/F//Lx0/kPFcu4TdvP/+9X5yrOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEVCLz8FZ+/QpQbIRBmb7AYtPfkk0818W/IVnrC9qUvOzvERbN5YvboqVk2CEDg9BKIrwn2rwYeSkJfhxz94qt04yt7Y7v/gcubsvZHX5/s/cPWv/o4jnN7tslfcyy/YbcLm/KfO6/8NcYa65S5lA9KCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQ2H0CCLMrzhG/MbsibEJB4IAJxO+f3nfPnaNHKIEwC4/5t2M9gNuqv9qnCrOl31mNGOfuGPYfV5TP1HIpYVZ+c36ZX27nGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABA6DAMLsivOIMLsibEJB4IAJhMB3/u7bJ42w5iM/zRpBXJSN4yzM5vaSAOk2pRguTqq/10XctTYJ1CXBeEoOpXGHv22Nc8pY6AsBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAALDCSDMDmc2ugfC7Gh0dIQABIzAkk+TShRVOBdUVdclzErYlH2U8pP7ysbFSQmY6iObtUrFn1OYrY07xuRjX2uMxIEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIH1CSDMrsgcYXZF2ISCAARGEwhhNbb47dnS1wlnkTELmGOEWfnIwvDav7/qY1NOGxgT/+RxuTuEWafBPgQgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBwyXQLcxeOxr8mRYAA9p/7u/+/eb3fu8PmnOv/bLmB3/we687rfT/9Kc/03ziE480X/alf7l53vOfV0/guf4//ENv29h855u+tXnDG77hhn3F/7HB0u3HgZoGYdZgsAsBCOwsgRAnS4KsJ6ynSqMuC5jepj6yaRMo4wnZCxevqMumXFu09PyU84mERh6UxGq52taTwYpPCQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAgMJjNQXu4XZgXnI/Nq1a81TTz2tw0358z//y80f/P4fNl/+5X+l+b7/7D89bnv+kfD6/Oc///j44T/6RPPTP/2uJnz89dd9TfPd333PcVvsPPvss83TTz9zou7HfvTvbI7f+O3f3HzjN37dcdsLXvD85nnPaxF2jy2X30GYXZ4xESAAgXUIuPiaBUxvUzayceFTbW2l+rXZzNmW85vrid0SE+WNMCsSlBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEDpvAYsLspz716ea/e/vP9KL3VV/9V5u/+Tf/k2Pb973vt5p/+k9+bXN89uytzTt+5u3HbbHzoQ99uPmVf/iPTtTVDv7GPW9qvv7r76o1r1qPMLsqboJBAAILEnCh0cVT/ypgDy+bLHy6TW1/zadm85OtcwizeczB4tKDV4+HKzbHFexAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIDAQRLYSWH2k5+80rzj6InZp59+unn961/X3Ptdbz4BH2H2BA4OIAABCKxOwAVMFxZrwmwkGCJnFin7JO7+wz5iXPjglea+e+7s032QjQvO0XEJYTaE5vsfuHycVx7fcQM7EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcFAEFhNmp1J67LGrzSOPPNq85jWvnupqZ/rzxOzOTAWJQAACEwm4MBuuJGC2Ca9hk/uFKOlPj4avtt+a9f6KGX3m2Eqi8hxP63rOkWfk7QIwwuwcs7fbPmJtdf1u826PgOwgAAEIQAACEIAABCAAAQhAAAIQgAAEIACBOQjsrDA7x+B2zQfC7K7NCPlAAAJjCZTExvA1VJjNT4+Gj5KAG/V5m1uYLeW+bWE2cjp/9+156BzvGQGE2T2bMNKFAAQgAAEIQAACEIAABCAAAQhAAAIQgMBCBBBmFwJbcoswW6JCHQQgsI8ExgizJRE218XTsiFElp5ezZzmEE3d51LCbOnp2Bo/z0cMxMTb2N8vAjHfS3z19n5RIFsIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAYXbFNYAwuyJsQkEAAosSyMKiRNJc70lkEVZf4as+OlYf1es4l4qZ68ceu4AqH3PEcL8aYx5b6elfCbORC+KsZmQ/y5hvhNn9nDuyhgAEIAABCEAAAhCAAAQgAAEIQAACEIDAnAS6hdlrR+HOtISkvZ2PoUOYNRjsQgACe00gP10qAdNFyDzAmjArX/Khfi5Mqs7LsWJl+C393mcp97ExPE/3K39Dhdnwp77um/39IBBrIK/v/cicLCEAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEigZH6aLcwW4xG5RgCCLNjqNEHAhDYRQISU5WbRCcXIdWmMguzEhpDKI1/pd9SdQEz7C9cvCJ3jZ4+Pa7ouRO5X3ro8RNPMObxyJVy1PHQMovL8pfjiZ/7zzbRpv5ut/Q+T3tOJ4wwO50hHiAAAQhAAAIQgAAEIAABCEAAAhCAAAQgcAgEEGZXnEWE2RVhEwoCEFiUQBYcJSy2CbMhpF568OpxXn1Exohz4YNXmvN33bZ5ytX9TxFmQ+BVzpFQSQSN+rYcI5dov253+6bMf7Jf+cv1not8ZBvVy4eOlyyVQym/JeMemm+tldJ/Pji0sTIeCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAoE4AYbbOZvYWhNnZkeIQAhDYEoEszIZYGF8PfP8Dl48zykJsPh4jMPoTtBGo9NusxwlUdiQ2urCb/aqr26hOpYvEUVcaj2KpjwTOWr3sosw23laK5e1z7SuHNg5zxTpkPwizhzy7jA0CEIAABCAAAQhAAAIQgAAEIAABCEAAAv0JIMz2ZzXZEmF2MkIcQAACO0JgjDCbUx8jLmYBVUJn9t127D6UQxZZ1b8mSObxyz5K+Yx9CZuxH5vyzf29z3XLm/uqXuUYUVp9+5bOZY14ffPaJzvNdW0t7dNYtpFr8Cv9JvQ2ciEmBCAAAQhAAAIQgAAEIAABCEAAAhCAAASmEkCYnUpwQH+E2QGwMIUABHaagMQmJRnCYmz+G7BR58eyVSmRUsd9yprQ2aevbFxsjLq2PGtiWh6/fKuUiOkicLSpPvcvCbO5b+TiXwUtX4o5d5lZl3KcO+Yh+tNc19bSIY55zjHF+br0Wp8zX3xBAAIQODQC8TrGf5A5tFllPBCAAAQgAAEIQAACEIDANgkgzK5IH2F2RdiEggAEFiUgsUlBQnQ696pbTwixIbz6VxvLVuUYYTbHHSMWZmFW+ZTKmpiWRcvcV2PL4qoEpj7jyH2zMKsYHnvOD09LnJS/x6ztByN+U7VpfK6H8Ktx7Vs/51roG3NuO7Fbk9vcY8AfBCAAgX0ncAivJ/s+B+QPAQhAAAIQgAAEIAABCBwWAYTZFecTYXZF2ISCAAQWJ+DCXUmYDTHFbXJCY8QWCTXyVRNO1V4q23Iq2ZfyzMJsTTTNsdyXt5XGkYXZ/GTvksJsHp+4lGKqLZcIs9eJOEuf/8xr7uND4C92Q9bd3BzxBwEIQOC0EziE15PTPoenafzxXiH+aeM/CYoEJQQgAAEIQAACEIDALhHoFmavHaV7piVl2tv5GDqEWYPBLgQgsPcEXFiMwWRxMkSoLC76oMeKVB43C5pdHx5mYdfz0X5pHGpTmccVwpE/Hay82nJta4s43h7HXcKsPoia4wOoHDvix6ZxXT9q/xuMzt9126n/+kOJi0FrTYGx61xon73daNU6XJPbboycLCAAAQjsDoF4PYmvMubrjHdnTsikTqB0j87arfOiBQIQgAAEIAABCEBgIoGR+mi3MDsxL7rfIIAwe4MFexCAwP4TyB985BEtJczmuC7wdn142CXMhvAYW9dvuUow0pgjB6+TgFmqU5+2trDJ7SFyuvibv8Y5xn7poceb++65UyFGleFnjt8GjvwR1OJ3l2/wXJNH8PdzY9Ri2GIn55bX+hbTIjQEIACBU0eA/2h16qZ8rwec3yesee+11+BIHgIQgAAEIAABCEBgVQIIsyviRphdETahIACBxQm0iZwSJvOHI0pK7ToeUmafLj51iYEu9pRihgAU4qYLs6UPdLJoGmKo14Xv6NcmpLaNI/q7v+C1LWE2YjuPPnOntdHHNsZ6yJuvuTUFxlg/fm7sG2Pnxjrat9kjXwhA4JAIxP0K34BxSDN62GPJ99el+/jDJsDoIAABCEAAAhCAAAT2gQDC7IqzhDC7ImxCQQACqxBw8cQDSoCqtU8RWrJPfeCi+jbfslGuYeuiY+Qdmz8xKv/qI9FRxxpr6YOgJYXZPE6NbaoY54JwjDH8lcbW9rVwyiXnKGa7VMZ8amsbk2yGls5uLR5ao3ntDs19m/a+Dtfits3xEhsCEIDArhKI67HudXY1R/KCgAj4/UPUsXZFhhICEIAABCAAAQhAYJcIIMyuOBsIsyvCJhQEILAaAReeFFQfgkigU73KKUKLRCf5kvjkH8TUxMmcTxYd4zjbyL/i5fgaa+YQY8yir//+a7b3nGsxfIyZofzlfJV3nzKPXWPL+eTY2bf78XFlu20fi5nymMJOPnLpMbq45b5jjzVfS4xnbE5D+vn6Ub9dXkfK8VDKWD9L/CeFQ+HDOCBw2ggcojDLde5wV7HfK8codS97uCNmZBCAAAQgAAEIQAAC+0gAYXbFWUOYXRE2oSAAgdUISATygPoQpCSwhJ3avU/f/RwvfMXW9pSrfLtIFnUh9ri/OM45Z3Ert2ss7id8Z2E2+8m5eHv2pRj+YVMW+dTmfiKPIZt8qI+LYW1tslfpY3Mfat+V0vOMnKawq43JueU5q/WZWq81usR4pubWp7/yd9t9HYuPYV/24/qDMLsvs0WeEFiWgO5HdB+ybLT1vMfrf/wMBdvhEfD7rhjdWvdeh0dyOyPiHmQ73IkKAQhAAAIQgMD6BBBmV2SOMLsibEJBAAKrEshCikQUfaCXk5n6AZ9/6JIF0IhV8+/9wk6iYeQfvy0bH9LlnLOvoWONOLGJyfWjEJIfqYrJtRyykKj8w6fGlvNVvD6lfIRt/iCrLXb27X7yuGW7Cx+85DHVaDhWQgAAQABJREFUclXOY0pnEf19zsb469NHa2vKWugTZymbzCziLDE3S+W/z35j7YQoizC7z7NI7hCYj4BeT/I9wXwRtuMJYXY73NeImu8hDm3trsFwmzHimuPfMLTNXIgNAQhAAAIQOGQCu/CZ3CHz7TM2hNk+lGayQZidCSRuIACBnSTgIpdElCwwKvGpglH+0EV+VdY+hPF+2UY3JTnnnKuPM+K50Ob+lYtKt4s6fdipdjErtSmHWmzPOY9L/rtK9xG2iql+ObbnKxuVzqFmF/62/bSK5xm55zFrPFPKHCOvgym+a301V0uMpxZzrvp8XsjvPo5Fue9TGfxj40PRfZq1cq5xTY8Nkb3Mh9p+BHRNHntv0S/K+la7cA+y/qgPP2K+l40RH9raPeRZjPm78MErW39/cMiMGdtwAtxPDWd2CD1i3rmHPoSZZAxtBOI+n/f9bYSWb+sWZq8dJXGmJRHa2/kYOoRZg8EuBCBwcAT8wxCJT17nA66JdW7Tti/hqc1GObiNi2Q1oSfnnO1ybI/j/j1u7LtdHOc4zkQfhIZdbGqrxXb7sR9AuY+Imced25VT2PqW7bKfsI2x78IHL3m+xrLz8ef9HKPGLfebcqx1ssR4puTVp29eP+qzj2NR7vtUxnotnbP7NAZyvU4gziWefmY1TCWga/IhXYN1/5Xvy6ayov/2CWhucybMdSaym8e78v5gN+mQ1bYIxLqMDZFuWzOwflzmfH3mRNwOgbjPX1OY1bml0R7UdXWkPtotzIoW5WQCCLOTEeIAAhDYcQIShPwDkCxMxRCmilOK04Yjx4ibgPsfuHzcpSY+ZLv8YaSPJ7fV8sp2kUSO4/nog1Alq7H0qS/Fkp+2suZbfbraa3alfMKXvjpa/bZR+lxG/FKuU/LKcxy+NJdT/Hb11bjmHo/HjTlc4iZeuXus2F9yLDnWaT3WOQ7rw1gBu3KdPQya848iXh/24cMIv6/xe7v5iaznUa/NhzKe9cjtfiTNbc6Uuc5EdvNY9yHM127Oz2nNivup0zfzMef858bTN++nccRrr/X8WQ+v90fPel472k7j4tvGmBFmt0GdmBCAwNoE4kM8/4ra/OIb+UwVp+IG4sLFK61Dy+JC/rDGhdDsyHPOfrwt+/APMN1n9hFtbfnk8emGJdeLY44re8+ha7/LR1u+7jv7KY1dNmPy9FhT930u5WvOnDKziJHXjOLOWWpcJfZzxYkYc7KKvPL6zrnOHS/7P+3H4r/kujntjNccv+aT82Z+6nFtnyqqxuug3yvNn+U8HvV6Et4OZS3p3NA91Dyk8LILBEr3XZHXoazdXWC8ZA46N5ecrzmu30sywPfuEYh1uQv/oXj3yBxuRjHnsS3xn5APlxoj20cCsdbX/E8I/r4ieC35er8v84Ewu+JMIcyuCJtQEIDAzhDIL76R2NQX4NIHLyF4uVibxYW46fD2tg/kPGf3k+NmkS3H0CRku6jPvjyOhEv1F6/cR2PI9qpX/z5lbczqm2OXxhS27kd9lb+OZZPr1T6mDPZD3jzl8Shmzin8jr1ZLa0Hn2fF7FNGvn2EgDyuPJ4+sfrYxByOWWdtvvM6DlaXHrz+9WHRb6mxtOV0mtp0Xo5do6eJ1T6MVdefuc/TfRj70jnOIarO4SPGGdf82Pq8PmwMB/7RdSG6Hco1mHNj4CLoYd73HqWHq0kmmtvshOtgJrKbx7oPXHK+Yo0Meb+wm6TIak0Cuq4cymvgmuz2NVbMOWL8vs4eeQ8hEGs9trVeF/19RcRd8vU+/O/DhjC74iwhzK4Im1AQgMDOENCbbE9o6hubLD6F7/CZY3kcvalSHm03AX7D4CJF9pHFyVJeES/bKYdanNo4sn/5dT/hu21siu1lza/bxL7HcS5u5zaq93ycodfLdmwZfocIqHnMiptzGupXfqL0saq+xk3ttbJvHnlcfg7UfI+pj3keO5ZSvJx3+D73qlt7/2eKkk/qhhHQuTvnvA7LYBnrWFtLiVbLZDyPV11/Dm0+56EzzUu8Rk992jXOtzmuz31fG8aMOF+X8+vjGJ+70Efnhu6hdiGnfc8hzonzd9229Wut5jbzPJS1m8d1aMe6D1lyvmKNrPUB9KHNz2kdj64rS67L08p2V8cdr2mxTb3X2zjhDwR2mMDaa12v80LCdZWvMtZaWKVEmF0FM0EgAIEdIxAv9v7UW6Q3x4eR/qKuD9dyLH+h15sq4WnLIfuRbfbh/sNv/hBTsZSfjlX6GPzD81r87D/8huDgv50bvmvxFDeXeVy1/rV85S/np3rn5LG8XrZjy2AWQl7fD1v65Bq5xJhrPLpy9bHK1udZdbmM3LKQFOOLresNYo6ptbvpPNMfZzeX/5x3aW3PuV5mQnEwbjL/ueZ1FwDF2PpeF2r5ls7Jmu2u1PucHtJ8bptvrIULH7zSeS3uyjNeW+a4psU8D/lPSV15ebtf66N+jnzd/7b2dW6MfW3fVt67HLfvPcrSY9Dc5jiHsnbzuA7tWO8zlpyvWKu78J8IDm3uDnk8sWbis4w+7+EOmcNpGltci5jv0zTj/ce6j+8J20a35v1bfl8ReS35et827l1q44nZFWcDYXZF2ISCAAR2hoDezCihuW5y3a8+XMsv9h7L7SOXtg+qa7b5A5+SD32ooPFGWbvhcFvPtVYfvnJbfLiQhVn3FX26tjyuofnKf/ajes1PHDtbr5ft2DK4DBl3XiuK6znJxutk16es8SitG/mLPqUP2TXvXbnkmLW5VLwxpceYy7/7jJyCkfgrx66xy45yOIES/+FedrNHjG2qMDuHj7Xp+LV2rvN07THMFS+v7yk85KvtOt6Vt65tU/JQjMgntqlrXP681FhVN0e+8rXNUufGkHuGbea7D7HFdMp5Mcc485qVT+4fRGJ3S10XI8Ml5yvW6j4Ks8En/6fN3Z3Nw8pM1zdeMw5rXttGo/fd235Na8uRtu0QiPuMJe65tzOaG5/NrbHW/XVe4z2U9xYaz5gSYXYMtZF9EGZHgqMbBCCw1wT0ZkaDmOtNjfzmN++6kY54Hsvro63t5kO+wy422dbqr1td/5ttorZ2w5FtFcdz9TGEr9yWv+41bHKfqGvb8gdZyiP3qeUru+xH9Z6P55/nTvZDS7/Jq+WeffbJVTaef/bTdpx5ybYtx4hZ+k0b51ZbT+FfOStWm61shpYeYyybHNPHF21i5PVzxcqxd/l4rQ/inLPz32U2fXOLsWk99e2T7WLNl/7DRLbbpWO//gw9d9Zad2vx8mtWxJzy2iNfU9aUXrOm5CF2Mc9Dvi1C/fqUGqts58hXvrZZ6twYel4slfMunm8x90M+eNRryBL3HEO45zWrvoeydjWeQyx97pY8N2Ot7tt6iGvEHN/UcIjrZo0x6foWsaa89q+R6y6+nqwx7jlj6B4tfO76fM85bnz1IxD3kF3fYNbP025Y6Z54jfs3P7c0+n17PVbec5bdwuy1o3BnWkLS3s7H0CHMGgx2IQCBU0PA32jHoOd68ZXf7E83FwKsmwx/U9X1hn+Kj9w38qjd1Gdb2bXlmttKwmxbTHHxspaH28R+l11uV3/x1pzleh2PLd2v5rvLl/dxW+UadWLtdW7btV/j0ZZjxMzxSjexWis5hxyzLVbu2/fY2eVc+/pwuzw+P6c1B2E/RyyPu+v74lKb67ny9/mUzyXWjXyvXcYamsowGJ0mYXbp8cbajn/ahghA6jOkzGt8yrVEvqacIzq3p+Sh8cc1fylhNr+e+LVZ8fex9HFNvTbMMf5YD7v0JFzkM1QE0mv1HGt6ClOfW/ez7bw8F/bLBHRtjdYl56t0n13OaHdq9ZqxC9er3aEyLpMx11td3yLilNf+cRkP6xXXwEMSjYaNfh5rvxbt+nzPM2K8DCEQ14NdWhdjrmk+Xt03rTEmvZZ5/EN5b7EZ00h9tFuYdWLsTyKAMDsJH50hAIE9JeA3tzGEuV5844U9/uUPdHM83WT4m6quHPr4qH1okPvGmGtvpHUjFDaxhV2+Ycm55j6RR/4N3/Clccd+1+ZsauMKH3lsOYb7yTFjbLl/W6zcv+3Y/eacav28j9sopzwPtTn0vnk/z5Xaazl6Th7P6+VDeepYZZ6DvH5kN6XM46qNp2+MPD7POcdyLn3976ud5nIK32DbJShm/sFrSsxd4q3zeOp4+nDcpXFHLvncGcJg6fFOyW0M5xyvdv3s41u+hvDMfnXOTclDPuM6MYcf+fNSY1XdUnHkf61S19aItwuvKbEe8r3sWixKcSKf0jd3lGxV50ynnBvyN7bMa1Z+DmXtajyHWOq6GGNbar50T7CU/6XmRWy2eW4tNTb3u8a1MNbAkP8IozWjPP09iup2pYxch/6nml3JfZfy0PkWOeVzLtp8W+K1e43zwMfA/jACcb+T18UwD/NaD72m5ei6f1vj2ubnlvLYt9dj5T1niTA7J80OXwizHYBohgAEDpLAUm9owm/8yzfEOV682OffYe268cg3DXHzFZv/lmvNRyl+7X+ujolT+9Bpk6D96XvDmPOtjStcl/LVm9vsx1LZ7EY+zi8q841Y+ChtilFqizrPK/vs0yfbxAe17jPax3x4qxvd7L/G2GP6/Hm9+3Ib1eeYfXmof59y7hh5fD6uvN69rU+u+2rj454y5mDbJczm+QxmU2LuEnOtrdp44prTdX2J8cR8LPVU4ly88lh8DUWMGoNS/OgbW+21a9PY80/MQX6dzmtuSG49w54wyyymXBeVe+06fiJw5UDrMprHvLa428hnynjcV97XWFW/VBz5X6v0cS299vqMKdbnLv3m5Zjz35luc53kc138t5mTcqBsJ5Dnbuq1sRTN36ss4b8Uc446vWZMed2ZI4+lfcQ4u+5Zp+QQ8z9UuPQ1E7F3+VqiXHfhdW3KPG27r1+LMss1XuuWPg+2zXep+MFN21LXkV07xyKf+JffZ4lDn1Jreo1rW8zRhYtXTqS1RtwTAXfwAGF2xUlBmF0RNqEgAIFTTUA3GIIQN9UuCna9sc03DdE/bnr8RqLmQzdsit12s1GKE/3acvU3C4pRKvMbiZJN1OUcauMK2zw2t81+Ytz+JG+eg/AXm38wkuftukX3m2Dv18Zb/qLM+Xpb5Bpv3HP+fQQc9+N5eX0tR7f3+WvL1flFDPcRx7VY0TZ2mztG9udjymN3LmPz3/V+ecxT5jB8dT35lPkHnyGc45o0h4C3xLyIZWk8cT2Lrc95HWOMbRfHuXltOrpe5fzya8WQdaQ14efiJsCIP6X1If9yV5oftc1RZhbhc+zYlLu//g3NUetySh7RV6/JQ+Z2SK4aq/osFUf+1yp9XEuvvT5jivW5S8Ks+PQ9R7QOfax9+3qfOfZL57r8bisnxadsJ6B1J6sl5muua69yXKtU3odyDa5xW/paqPulIfdyYu85L7E23f/YfV2Ll35dizh97p3HjmPb/fxa5Pd64qv8ljofY811vXdTDlPKQ5rHNecmPg/0dTFlDqb2jbUSG8LsVJLb7Y8wuyJ/hNkVYRMKAhA41QTyBzNx4zxEZMs3d3HzFZsLs21vemo39HlScpzwGXUeJ9/4ld4gZr9xnPuVbKIu+2sbV87XY2Q/0ebjqMX3N7fOLdu35ZX7uc/sR8d5jag+yojl4rjqhr4JzXkpRumNXImfbrLbcnVfeX4inrcr/pSyFCP8tc1PVzznlPPN8XzNdfndx/a8DjSGPmtatl5q7dT61+L15az56Wvvua2xr/GV8ovc45/Os7Z8Yo3mtdlmv3Rb5J3/80jOz88r5VNbB2pXqb5Tzmv5ijWYPwSVf9mU5kdtc5Q5Xvjsy8Lja71HXebtdl37Wpdh14dxxC29/rifMeNpy9PHKrs+Y46c+pxT8rl2mcfVtfZq7OfMO9ZnVx5zxmvz1bamgoU2X4+Zadj0WdfyNWdZOtflf+5zRH4p5yGQ526JNeTrewn/85C42cu+5n3zSNprlr4WiuOQa4H6eOa7unZ0Le7zWu3jGbq/xuvi0JzmtPdrkbMUX481ZC15v7b9uG+OLd87bypn/BNrO17L/fV8Rveruspz4/M2ZyK6Hizlf2iukc9UEd/X+xLr2cckfl4X+0vHzfF27RhhdsUZQZhdETahIACBU00g35xlGF1vqHL/+MAsNhca23xIDIk+bR+2jYlTu6GJWL71vWH0XKN/241RztdjlPz4jZ7n5vvimH27Tex7LG8r9ZNPt8v7OV9vj1gu5Edb2zx6X9+vjb80ljyvblPzo1gab/ah9rY5lU3fssQ7+iqHvn5kl3POnHM85yIfUYaf2HZZENgk2PIns3DTsXy1zmv9azHzPHguvu/9azHcfu19jb80nsi975vZOAdra29bYyrF9XO9dN3oM0d+zpW4leLW6uTL8wrbnNvSbHO8yKEPi7DzTeOJuik5a12Gnz55xFotXdv8/MuMw/eUzX27n644MbalP0z0fIbu+xxG3641HvaxLfnBZdf1JXJYMv5mgM/98Xn3tdnGzfvI15TzQz7GlKVzXX58PKrbRrnmfG5jfGNi5vUVPkrzNfX64mu15H9I7mvO49DXjCHjWMJ2LJuua+HUXDX/Q+ZefTz2kP7eb+l9z7XrtXpKLrEed+lbHqaMJffN1yJ/LXO+6rcEZ53vS/hW3lHGePoIs8Ek/jPoLs95nhufNx/z1H3FWcr/0Pwin77vZWu+/b5p6Wub+OVcll7rOd6uHSPMrjgjCLMrwiYUBCBw6gn4TUaG0fXin2/K44M7F2XDX5sP3VCHXdeHfp5n3OTF7xh6rHyDlHOLGKWt7w2jxw8/beOKdrf3GF4vP7ku6vOm8eUbtfAdmwuksnUfuV+0dTEPG5+jOO7a+vh0H13zlDlnVjW2kUfcgJe4lFhETjmW5zl0vxbD8x3iM/srcXY2tTjhZ+obkyF5z2Ub6yS2/PRj9l9a+9mmdCx2tf5qz31rnLNdnr9anNxvrWONrzSeIWtGfuY8l8YwyLyzD89PObtNiYO3x77H6GOf+/uxfHlepWvj1Dges7RfYjFmrWo8EWNKzv76U7rm5TGEfUns9Hycce4/5th9e/+2OJrbNhv3tY195ajYXfMYHPp8cCl/Y8pYn215RA4lYX5MrK4+fq74OZLXg+eb2xTD+6tu6dLzz7G2kU/OIY7XnM9S/F2sK62h0nzVroV9xzT02tvmN64la/2HCc/bz722/LbZNoaNrs1Ljk/rrLS2arzUx9uXzNHjDN33XIeMcWicuM4u6X9oPnPaO8Pw63Od26J9ifsdne9LM47xdN3f6LzMLOJ4l7a15ybGvsTcD2WqtTI2F5/fiL30mlO+eZxj889+9vUYYXbFmUOYXRE2oSAAgVNPoPbCH2C6XvzzTUrclLsQ5jfpJdB+c9h1g5M/RIoPaccKsznPrnFG7h6/a1xt9u5HHzS3zYG4ydaZRVtwi3lwFqX8cr/oW7KLet/65Ob2fXxGLvrwNK8h9xX7vi5KYwibmL/sJ3jFmyj/quUaw/ARW591cN2y+28t1+jpY+r2dN0iz0Mp1z42Wn+l/n1zWduujWXOpbT+fL1lex2LS62/n1/qE2XJ3tu1n8fQt5/6x/ru+mAzYoToHv9pRVvXBwmy6zP+rjXj5+CYNa5c5igz7+zTx6Kxt9nktjjOMdxnyb6tTr6cm/NU36HrRv36lKV40c9z6uMnbDQe2Y9l43PTZ+xhX8rXr42lduU5psxjlY+2OOozlotiLFkqR8Xo4h/2S/6nH1+fNW4xzyVhXmOYs/S16XPdxi23KZ8utrKbs/T8s18fT25b83jN+VxzXFNildaQ7m3lN86V+E9sU84FXx9T12fkrHt+5bhU6XlHjNq1Yqn4Q/2OYeNrYKnx6TUzr6228amP20xdO+5rzn1nuGSOsR6X9D8nk6G+nKH6aj2W2oa8rsQ1rOs9T8TU+b4041jbbU/BlsYrFmKzK+VauWpuYty7wEL5jM3F70FjTEPWc9gP3UrX0/CxdNyhea5t3y3MXjtK6UxLWrS38zF0CLMGg10IQAACCxPINxoers/Ni250vJ/2u97QeeyuG418gxI34S4C51zdt/JRmft2xc6+usYVcXK+kV++GZafbKs8vZRt9qHc8zyoXj5qMTI32avMflVfK7veHCl/5afjmj/ZRXttDGETc+TiWfDKwqxya/PT541gLVevbxuXj8n7tO3neSjNWx5XjuM55bbgN9fY28YxtM1zLvWNceSnaDObYJfH677y+Z37t+WgNeX+Svt5bsJG53TJPtdFDl0fbJZiyE/b+MNG66s0Hvnt8uEcu2yV11Klcpb/GFft9UJjl63KrjHkddFlL7+lUvm6D+fpffL69LYp+7V4Q9ap4ms8Oh6bs89NaW3Kv8raue75OGP1m1K6b/fTFkdrp83GfW1jXzl67LZ5FIc2G/c1dN/XZ4mb2peK7/lmNn6O5DZft7nNfa6Rt8fzc8vrY9/Hk9vWPK6dz2vmsGuxSmvI11jkK5spa8rXR/Y/lElcG6aIxEPied7Rr3StGOJvadsxbDS/kduUOW4bmzgOmXu9BmS/uzgHznDIGPPY2o71mhQ2S81TW/yl20rzrXGW2oasg77nxZh1OoZLxKm9Lvpact9Dxuv9lt4v5at5mzO25iZ8bpuFn4tjc3EfMaalrhvhO7bSORT1Y/OPvju1jdRHu4XZnRrlfieDMLvf80f2EIDA/hEovfj3veHwG6888q6bB7/J6bopLOXo8Ur9S7nFuLq+Btn9xn6+ia3dnHu/nG/kV/PjHNyH72s+Sn7DLtfnHEssol/XHHm/yCE2Fzg2FelPaS5kIgaKq2O1R94usMou2j0X2UcZfWIr9fM+Yuh1m47P/fFYXj9mP8+H+1AeXte2n9dHnlv1zSzzeHzcuS36xlYT/yKH+FdrVw5zlnk87jsY6n8vZ9Y+NvnwOvcT+5lvtnVuuW/fuaz5qM1ljhNj1Hhzm44zB9VH2RYnjz+fv/Kbubj/2Bfr2O+yDZslN+cdcxSbX7d8jG7rOXXNrbioT5e97EqlcnBuztP7uI3XT92vxRszrsxmbM7iorH5vKlOpdZxaa27n7G5KE4u3be3tcURnzYb97WN/dJ6aOMvDkuNyfMpxVB7qW1ufoolv77mNLdqi1LcSm2yWyNvxdK5ouM4x/366OORzdAyGE29X4g1Neb6MzTXqfbBM7Y1/nNbaQ1lRlqfU9aUzmex0RrW8ZAyfPXJJTjGf7SLbayQm/PObIbkvYZtXzaei6+BPly9b999cRzCT31yjKVyzHGGHDvD6LdEjjoPl/I/ZLxL2JbmWxwz36EMon+fa4DnoNhLjbX0uuhznOMOOXdy3yWPp85Nn9zyPcaSc9MnH5+nsbnkMS09v6V5irH2zT/yXeOepA//OW0QZuek2eELYbYDEM0QgAAEZiZQevEv3YCWwvpNcW7v80Ze/bts8w1Rn1jy7bYxrrhRKX3Frdv5vt/QRX2fm6LMNPrEGFw81Ji7xhYxdQOYx9TmQ23RP/eLuti6xuL9IofY/AO8TUX643G9yTlqfXld2Ea9M6rZud9SXsohz0PU+5jcTxcLt+3azzEiR+em/Lr8RHuJUekDz7yOtGZKPrwt2pVvrs9952QUvmtbHkvY6dzNbzSybWnNqK4UL/PNYxSb6Bt8Yhs6l+5j48D+5HjWdLwb/bvs2mKU5lXO8/jz2pTfNobhy/102Sr2EmVpPdR+bzria3w5lzZmpX5d9tm/jj1f5+Y8ZRtl1zpw2yH7tXhjxpWZjs15iB/lX8rX/TjjIXxqtu7bbdrGrD5z5+Lxp+7n187w12dMbTZTctL8ho8SN7UvFd9z1/ypztdciZuuqaW2kg/VLVX6NSdiRP7+mubjGZtDjLXPB+s1/8pxai6xLmIr3TPVYg+tj1xjy/cmQ/30sc9rbxP3aP6ctdbZ2HNB7D0frWGv67sfOddyiVgSY30NjolXynvq+uk7xrF2bWxqPn0N1LjW+vat9xh958L7eJxdnAOdI8pzCY56TYoYpdcsxd7XsjTf4tjW1me80b9r3eXzXbH7+B9iozh5Hau+5ivb1+zWrs9rP+LPzc7X/hL+hzLzfMaei3m+l57f0jkU4+6bf4x5yfueoXMwlz3C7Fwke/hBmO0BCRMIQAACMxLINxvhuu8Lf+kGb0z/oTfgefil/qXcYlxZmO26uco3R6VYOR+/CYy2uOl1MTjq3E+OEXlmMSHs3S7nncerG+3S/Eb82LKP67U3/uZ4+WnjG5Y39hT3Rs31PWeiuF4XVjFuF2Zll8fmvsPGP9CJNrHN/TJD99N3zXuf2n7mFnaeY41RyV9m1NbX44Yv2eZ6cVU8b/e2HNvb1HeJ0vMJ/21x8/qWrftQXSnXtjHmttp5WfLrdZ6L18d+W27RrvF1rU+PET43fR+8/oFx7Ouc2DTYnzxGrRmZyG9Xnu6ny1a+lyg9j/Af3GLz64rGKLYbg8If2RWaTlyL1Z7tI5euN8aer3PzevmPMsfwtin7tXieU1//WjOy71q7svOyNDdtY1f+Od/sZ0wunpfvZ9/eVovjfXKu3n/b+/m1M/Kp8V9jTJrfyKPETfnWcox+vkXOY4Q0H6v8eT557YeNrr3K0fuNvS+QjzFlHkOsVb8++njG+I8+wUHj7vJRmgvPse+cluI48xhnbF3X5JKftrpYm0v+vrLHLq2vaHfWsqldg9xfad/PNbWPnQPNYykXnxvFUTkmnmLJh8oxvtR3yVL5ltjU4qqP2kt9w2bMtU0+c4w+/HIf+YpyjuuJ+5tjP6+9JXL0GEv4n4PDWB+1+dZa0TXI/ZfWqrdrX779mqY2L2WnuqUY63qY/ave48f+Nl7TlUOf0tel7DVvOp5aZjZ9535q3Fp/z2dsLu5DcbrWqOzGlKVzKPz0zT/mueubtsbkte0+CLMrzgDC7IqwCQUBCEDgOQL5Rm3IC7/fhArokP7Rx/+3t3zksnaTkm+W1S+PKeqVl/uq9Q/7fCOm/tHWtpXeMDin7MfzCb/RnoXZuHF2cTf7KMUMrnkMnnfb2MPO84p4sfkHeJuK9Kd0g59zUNw8R3GT6zFll+sipPP0FNQn6nLcGEMt/8zTfQ7Zz/MQfof+Z4CIF37y76dGfdsbgcwzWNTEdPnJjCJGbNG3xLg0v9d7zPO3lI9yrUXI487z7Gsi+2iLl9ti7HlOunIrrQdfg225Ra7Kocuu6xypzZv8i4vbee5d8X0OumwVa4kyjyfmJ9dpjD6+yCXy9jVfuybkfhqH/Oo45iTXqU2l5+bcvF62UdZycpsx+7V44atrjXu8EpsxOZf8tLH09ef55nE5Y897zH4pR/mpjdnzmTMXxZ2rdJ7yuc0xeT4lbrr+1XLUGKKMeYutS7yImPne1Odv4yT8HF03ZKc81Bal1q23RZ/40Mzv6dyP9597P6/bYDbkNakrH/n387CtT9jnuXDO4tfmo9bmzN0mxhwxc1y36buvXPuOt6/fbCeuuT6OFdttxq4njcfjjJ0D+cq5eJ4eR/tj4imWfKgc40t9lyyVb2bTFlN9ZFPqGzZT1naO0Ydf13xqfSrvbZf+eqJc5s4xX3vm9q+8t1HmNaIc9Pqbxx7taov94K8t3qP6f5bRWupad7I79mOvw6qbo9RY87mmesWIfCMnfy3tGoP6rlmW1r7PzRy55BiZ3RwxhvjwfMbmkuc74i85v6VzKGL2zT/697UNv/uyIcyuOFMIsyvCJhQEIACB5wjkG46+Nxt+s+Mw+/aPuH3/p3ktVu3Go2SvvHJb7Q1TvjGq2fnYYz+/Ycjt+Sa4lE+uiz5+w599RIzcJ8ab3yjkXGpjymOIePFm3z9IDF8Rw+vE2ONkjtEWcXO+Uee2MbdZWIw8smjtsZxLHkP4c+HF+9XWkdv02c8xlY+PK/zUuEdb5hJ1scnX9aOb/+bYN1vcqNE85XP/hkV5by5OZe8n/zNA2CjPmn3U13h5nxrvUl/FLM1Z5iVbj+X72b60fmu5hR/1b+Oe5z1iXO97/ffaYr+WZx6/22W/bXm6n7ZcN4kt+GfInOXxRd5+faiNI/fTcNy+z7xFP9nFvvd3ntGmzW1UN0dZixe+2+Y9x/bxqC3nHPy6BJES47brn8+755vzybkox7YyfPgHh7LNvlUfZS1O7uO5ev9t7ztP5VLjv8aY8vrM3JRvjbvGEGXk2+e+M3xmfzkP+VU+ykP1Ueqa6m3y63VhKz+xv9SW52voa1JXXvKvcXfZB4M8bvmIvmLV5afUnvm6Td/8vE9pX7nO5a8UI+oUp9Su2G4zlpv7UKzaua/2WilfOZfS9d19jImnWO4n9nPs3L6tY+U7JD/1Uc6lvnGN6vukVPjLr205Rp+5yH2Un0qtTx1vuyxdF+bOMceY238fhn3utfr4yTa1+Y71mP/Dkfr6WnU2Xh+2ujZ0rbtSDksw9td8f53K8SN2bP55SB7bxmDLf5y9UuliLbu+ZY6xbQ6ez9hc8nwHiyXWmxh7zqqLsm/+0b+vrfvf9X2E2RVnCGF2RdiEggAEIPAcAd0IC0jfmw2/YVXfKP3m1evzfsSNf/mNYbaL41qs2g1l6SZKeWVfqve4uX8tjvfRfuapepU5XimfHD9usFw0KOWT/cQ85if8op8LvLW5zmOIflmYrdX5fOZxiEEpt+CSxyB7lWGTc1NblM6lzc77xP5cN7B5vMonj6vGPfdXnpFfnw9bchz1z6Xi127+s70fa0xeN8d+nq++cXK/Ui4ab24r8Qrb2PwNtnLJ81Pzu3Fw9Kdkn8/JfD1Q3yh9fmp2efyRa+lc9fNSMdx/1GmcsV/KvSaoZT+1XMPvkpvnobHUxlHi5tfG2jUh+/PxlK5PbSzy+pNtrleMWk5qH1vW4oW/rjXuMUtscs5hE+uotpbCXx8/Htfn3fPNfnIu7qO0r/7uU3Zq07GXtTieZ9hrvr3v1P1Y13GN0Rb/uUlb6RqgNi9zntFWG1NeOyVW7nvMfs7HY/g81HL0mOGrr13003Uk9nMeURdb6by/3nLj/PG+ir8GO+Wh0nlFXbAc8pokP7VS/n2Oara6BufzQD6in1jVfNTq5bvW3ie/Wl+v1xz6OvH2ufadSfapsWSbzDX3Kx1rPN42dg7cl+eS8wz//v5mTLzs0/P32F6/zX2xKY01xlJ6jVQfzzuPre/1LXyU4mSOpfw8vvz4vVOcC37cx0f2ueSxX4sVZ84cS9eeOf0r564y1ou+zaHLdkh7XiPqG2PsEmZLbHwNy3cXL9kpdpS6Dnrd1H0/50p5yr9i57XlfWSrMsbQ955MfaaWOb/w18V6aMwcY27/U/Npm5Oa79J6W/I1PzNUXn1Y+jmmdan++14izK44gwizK8ImFAQgAAEjULv5NJObdqfeqMTNQ/zrc2NaihUJ1W6MSva6GcttpRuXbFOLcxOU5ypqN1UlP34TpXavC5dxM+YfXJRy7uoTPvKbptpNXvalvHxcURcfHriAlf25vbOK/Ev9fB26fewrh5yb22UutfjeJ/Zz3rm973FeN8on51yLl8cfdn0EWeWX46g+l4rfxac0x+FL51L2O+U4s9N8d/nsM2bNQ/ZVGn+wKT2pHdepnGPNr+Jk++CW57iNpedXi5VjlOZM8628VLr/qHPm2W8tfvTLftrGFPZLbDlfjSXXaxx53YR9fhq/NI7sz8cSvsOvfyipeG6n/Rq3vEZkH2UpJ28fs5/zcB9t+btd7NfYeM4xttjaPjAs+amt4TyPnm/mWPOxSSj98Ry0ltwk+/a2WpzM2XP1/mP3M4vsp2+8nGf4qY0pc6jZ5VyGHOd8fBw+T+HT11qO4bbuI9tljmGbz2vvE+2x+T3NpuLoT6lN6ynHWYKd8lDpDKIu8ptTmNV66DMW5RI5+H/UkA/lnNtV31aW2Po9dJ/82vyrTbnO5U9+c6k4uT6OxSfb1M6F4F5735V9hP+xY/Pz1nPRvIfv2CJ/P3fGxMs+r3u+/ld8vG7b+zU2kVeMpfRUv/dR/j42rfm+/MKf9w+fef77+Mrs55hPjW+JssSxzzj75pJ5RL85/ffNozS/ffu22eU1ItsYY37v5G1xz6c1qvooS9eGLl5rMfa14nlmBmrL9fn88nHHGGrXYbebc9/HI79drGXXpyzNb/QTnz4+5rQp5TMml7XWW4y9lLOY9Jkrz7Vt/cnnPpXdwuy1o+GcaRkS7e18DB3CrMFgFwIQgMCKBPxGoO9Ni7/4K1V94KTjrrLvjann5z5r8bK938zktvChTTfJ+ea1LxP5yf1VX8rX81G716mvl7V88psC7yPfnptzcdscv9RXOdT8ldaHYtTeuNf65Dw9pnxGqZxUV7OL9vDpH9TlvvIxpMz5u888N6UbZs83j7lvHjlO9Iv5c7EofOc30GHjQrvmPPpnn2NzC1+1LbMr8an1zfllu5ov5+19amujdl54X9/PecV6yHW13HKsml2Nm4+tNl9uE3m7Xfbr68HHGPvZTy3X3G/ocTDxD/C9f85XOdQ4ZvsYXxZm5cPj5Pnztrxuos2Zum3OK9p0vcg8vZ9svG7qflu8EoNavBobz1mxvC77y3Oj9lKfbOvrtJRPyYf8q8w+S3OYfee5z3Gyz4g1hK1yq5Ul/9m2T7zSupSfPKao13zKpsRKbWPLHKNtjks5Kq4zamPhdtE3xpRfL+UzyvAVm4tLm4qjP5Grv65GfVv+bXnJ55Qyjy3ixZz7/cGUHDRXfdaBcsnx8rmV2/uMX75lG8x9jH3yU9+2UuMNm7a11+ajT5vHyfZaT9mmxC3mOv7pvU72lX2ofczY3Jfnkucm2qb+54C8ZpR3lHPNtfucul9jE37FJzP3Popf45r7yt7L8Ke1o/oSxy5fylc+wj776fKhvmuUJY4R11n2zSPGns+lzEO+1mSg1/El1n6NX4wzX2c1duVRYuNcvN3r5Uel26kuyrY+btd338fq66O2vsVd/jVuHasMu7jmtf0HRdnOWfp45LeWo9qHlGvNS9+cSvn4PE7xMyc3zyOvIW+L/a417mNeKsec0+DjkfpotzA7OBM61AggzNbIUA8BCEBgWQJ+I9D1oq9M/MVfdX37yr5v6fl5n/ymUm3Z3u1ym/rUSu9bs8n1+aZd7TU+ullWe1eOspNflbW40a5xZJuSrzy3ua+Ow2/Nn8YUNnmLm0UXRXXzWBq3x5KfHFP1eSw1u7DPOeS+8jmkzPHcZ27Lbw5qzIfED9sawyw65fErn+gf//zDhppPtxmaZ7bP68XZZdt8nNnmsZXWUGlM2a+OlUvuU/KrPlGWxpTnWdy9X+xnu1qsbCd/mYnGoDh5LFGv8zD2c39vi3ZtJT/KQTZzlcGz5jvnq/Hm/MSxxC1s/cP7Uqwcp8/YSn5y/PAju7xuPIZsvG7qfls88eoTo+ZHOftcqK7kt8Qm7Ep9sq2v01I+WheluFGX/cku93PfETM2f03L9iW/Q9gqj1LZd032iedzlGPlMdVss132M+S4FKNtjktrRPHynNU+FC3NlXyUyogZefq1Q3bBfAlhNnKMbehrcB5bKfc2hhpXrXTGXetAtnldql4xfL5V11WWxunC+RifpZie6xRuJd9e53G8PvY1lmxTyqdr3WQfitU1l7JTmc9bzyVfr8J3rnN7+Wwrc3+3FR+v2+Z+G5vIS2NxBrmP8vdzx9e895Wtl7LNbErz3+VL+cr/HPMpX1FGrkOvc97f92scw6ZrnO5H+8ErnxtiKxuVY/yr79DSc8j5DfWV7UtrRDaxHkuvg9EeeXhe6uP5eXsbr7zm5Kutj2z6lnmtuO8c38fgfPL5pdgxztJT8WofWkau+s8tnov7yePxtloft+mz7/Pn9s7O65feL+UzJpc838p7Lm7yF2XbPEV7V8w85i778LkvG8LsijOFMLsibEJBAAIQSATixiO22odVm0b7U7p5WPIGwG92lUbtBivn5m9eo2/Jl3zmshYj2/lx6SYu5+D2ysf5qc7tYr92ox9tedxRp03xc26l8eUbO9mor3yFb9UpTowh9w97f7MWY/APseUv5696+VaZY0Z9iUvJTrZReg4aY+RQeyov+vgWtvFmSF83nOfM5zOPLedbYjb2w4g87tKc+Dhi33PNbXGc84s6MYv9KVtmU5v3WozcP8bic5FZh5/cp+bbc8l9vK3Uv5RD5lhjmOewNIaIme00j7k+x8l5hC+Pkft7W9hqy0yivouL+g4pFaeWR4l1+Fc/xVJuefzBJzb/8D4zi/YcJ+r8OhLHeSv5yfGjj+w8Rh9f2WbosccLvj4e8erj0/24vcblY67NY/TLa0++5EfHJVv3W8qn5EP+8lpRfZS5n/uOmLE5N52Hm4ajP6Uxea6yG1o6U/UNv6XXpD7xhjCo2WZWymtMWYrh4/B5CP+12JmT+8h5leYq2/hxnCOx+T2O2iNOftrWz6k8vra85FP59bFVH5Xqq+NYpzmHGkP1qZVD/LhtHkee04iXz6daDqrP8x39S2OX/ZjSxxD98zjG+Cz1yXFi/fhai7h5jYUfX2fyG1xqgkCOoz5RDl0T2Zfnkuc35ibP19B47jN4xObX46H+Ng4W+tM1Vo3F11Puo9TcRv2izXnL1kv58/7R7j5k38Uu95ljPhU7yjhv+34u4f1K+3lduk1m4W21/Rh75pOvM+o7xr/6Di01v9Ev5zfUl9u38XO70n5pXYSd59c37xpj91XKYUid55LzLK15+c65lXIK37XrsPz0KWM+JMjKvrbO2uYu5maOLY9dPksM1LZkWcqnlEvYtV1jSn4i75KvqeNpm6c+MXOuS+Q4dYxj+yPMjiU3oh/C7AhodIEABCAwE4G4GYh/fcWgfPPQ9UZwapr5ZiP8td1w+I1ztgtfsfkb901F+lO7wU1mNx2Wcm3jE/bxwYqz9/w9QFdOtX5ikOet5K/2hkT1fhOfxxpx8huFqHPBw8cT+2Ljuaku28ax8vC2kr37c9sYc/4gS3y0Ntpu0sOvj1EMnb3qPG6JlURg7xt9nLH76LPv4xYXr8s+SrlmmzjO+UeduMV+bYvY2mI/3pDGJvEgz6dyVp+u0semvs6zNL4csxZD/tTe5Vd2nlPUyU+tXv1UepyoK40h6vOcaN3kOHmeauNX/xw/Yqkt9rWV/NRyVZ8xpcfJY8ljFeuIU2tzf2EXPmPz61RpHM4l4sRaHvM6kuctYoe//HRd5OD+89ij35StxCd/6N92LVTs7Ef1USrnEnNd/9y+xCbafV5l7/OhulintXyUi2xV1uzV7v2ybWkduH34KOVZWl+K17fMTDMjj9snXvbneeQx1WxzDu5j6H4tRm2Oa7FLfkrXs8jPmZXyzedkxIzNzxv1C9vavYZs8nrPnGUX687vO6K+Zqs+ucyxShxrDLOvfJwZt+Xmtr4u87mlGG2+ZONlaZy5bqhP9x/7OVcfR7adcpzjxPz4WiutsYhXyifWdqk+7HOcqNM2lJXPb/jwmH5+qT7HHhov+4x7S38tH+pP416izGzy+eZj0TUq91Fe4hfH3s/rZeul2ypGngPZ5/xUr9J9KW721eVDvkpl+J9r/nJeOZ5Y5PrSsXxpzLJxHqqLMtt529z7fq2bM25tHfbJP9iW+vvcet5ta2YNxjlXz8fjZ765n49PnGKcsfW5t94YFv44q9ycc4p2rddsG8elHEt2XXXOxW2dndcvvV/KJ+cSXOK+qm0uaqzn4uYc8vqJuRzyHjCPubQWPN4+7SPMrjhbCLMrwiYUBCAAgQKBuCFwcbBgclyVb/Lyzc6x4Uw7pRujtpsivzlps4v0Yiybf+lD9rFjyjdWEaPtDV+MLQuzpfGGn66cav0UP89b+Mx8cv5qFydfIyVb/0Ak/EfsWl7R7mOKeVO8aCttpTG4D/Up2UVb2GYBRDG1bmr+8gejihX9fdy1/m6jmOFDcWN/jhtp8da85xhxrK2Uq9pyKb9e7zG8Xvs+NtVFqXFmn13+3If2w4efQ10+87qVn1zmXHwsyj/3ieO89sS4Vp99eBy15Vyi3u08nxzH26JfbfyK4X7DPjZfr9dryn5yLNlOKX0+xVL+8li8PXNQW+6jsfm48zhKvvJ1JHKKGFmwlX/l7HFUF/2yv8jB35T//+x9B5wtRZl98QiSRCSKgoBgQBEFA7JijgQTYlp2XRXz6oquqxgRxTUndP2ra47Ioq5hd3UNiwoqCmJEUURAlCgGcv736Xln3pkzX1V33zTzeFW/30xVV33f+c53qrpv9625d8ifPuPWUU7+pn/pDQvGdxz2oyZnnUP0u77oQ3G7ud7YPtIRa9jnlxg+D+jP2dIHtXJ1e+SHorppnJw2itkCjPDL8+f5SyjX0sdpx9pzYz9qzQnHOdtJ5AV8lFwMcIGuqjnsc7FdJ9h6PujLzRXGWOCnr+OIiaLnKW0x1rUx6zGjHNxG8fucn7SP1oNj83ylT9/a56qEE/FAHOfC2NFccSyqfb6ja8JQTI/j+WK8CxM+ev/smNGxx4Gu/vqCNePrz9cRtfV+xvQ47Eddmku1Y9uxNKbODXHJjf5qz75SrZjw9Y3ZoXilWOOO+dpXbq4D15P7KIdobWM8d63PxfB+xlB+7NPatcf1yLE4z+rXp00c6tDHp2RDvJzNkDi6xlVr1cPjDMF33yHHyqFr/obgas7ww7z6a3AOD7lHz8+qia7zEm/Nz+PpXPjYkGPlAj9dwxrfefoa83Fg0X9Urj4PwPSifDFW8tE5cJwhx8zLfZyLj0/rOOLj8wFd8Hpauo/ytUC+08jL5wl89bW9a648Z8+X3Eepr2v+N+wXf3Bh+t6pf01nXnBlWrFWSjvdbP205202SQ/dY/P2eBTcvj51Y7avUhOwqxuzExCxQlQFqgJVgTEUwA1l9AmWCNJvPke9wYywoz6/WYFNKabenJTscrHQP/QNFGK5Nl03b5Huo94Iemxw8hsz17JrnPoBGz+qi8fzm0jmnssH/GiDdp/iMeETYUR2tPUNEPij6EOmYuawWqfmF2xzvrRxDOru86Fx6Tu0Riz86Fzl5mBIPM8BvJhHxNFzcxvXrYTlvnqM3PTBynP1hxnn5esW2BEXva5E4+Tk+NTY9WM//VC7L8c8B/SX+JTGXB+NgbZuPOiYvz5EXEu6EIs19HBMjmk9JBder+Cf09t5U9tSHPfh3KkPYiK+x3VN3Ad+sIneUNaHcseB3zglykmvY33jOY5yyukEG50r+kTaYMy5uMb0x1xiTPPQMV1vpfNAzwGN7blG8bieENftyQV1lL+Od7VVK+VIP89PedFG6xJXziPtHZv9qMfNi1g5PpHm8Ik0yGH0sYWNnn+Igdxcd/S7Hfrg37UxCzvH09eyHH/4oXTN6ZzV3G+fM85TKb76l9qOHelLf42HPvLI5VrCIqbWik9fv174elb/Pu2Ia9dcQCOd21HiIEa0wRFhUVeMkS/1cHuOez+Ocz6RLfp8LaAPXEpzEM0Z/LpKDlPxgKFadGFOc9y1UW19DjjmuSi/3LUwtxY9Bu28X2PktMtpD1/lzDwUs0+bnEb19xjEYz9w9bo9JI5iUUPXw/Fpx/jTqlV7xIjigmv7s/IbjPQPW3O8fO0CV++Tcn7kEF23lJvil+bC89O4iqf9Q9seQ/nomPYzRt/x3HlFnKjWdcdxcEDRtYxjfY2L/GCDMgnNfO3PIc/9jjTS8Wm0c3ycC3UpzYXOp3J1LB0btU0+9Mcc6rNMaa5yOed8cL7hvGfR94vYx/q3512Rjvyvs9M5F13FrrSi2Zldd+25n1tscaP01Aduk7Zr6mmVujE7LWUD3LoxG4hSu6oCVYGqwDJWQG9WSjc1k0ghuuEoxeQN/jRunLryca56c9zly3Hy5zHr3A0Wx1HrvOA4iu/4iutjJZ09V8TTwth+s6k2Glv7S23PMYfhdsCELYo+UIInit4A4xj9vomL/q7CvN3OtY3i5nJxrKHHHpv+pfmljdbRnOcwIv0Vy9s53dyu69jXm2vqWmBc1wPwIy7ul8u7FF81ia5P7stcPQefB+db4qociI8aMVBcC/Q5PvpyXHO6wEcLOPIrrbVf21EM1cJz8dg6Tr0dk3glzfr4UCOfG8ZFXj7GXGHjmzjA02uS4tAvqpFHnw2AKCd/88z1jOI5jtqQs84Dx6k7j1FHdhxXLrmYwITGqhv9OT849rmmDTn5OGN7XNij6DlDDPS7PfpY1I59fWtfR5obMTx2Vzy3Jw5qziP7SvPUFYcYXXWOD3L1Tw0CyzmiL4fRxxZ56Fqixpo7cPwNUcRl8XGuI46jzq01jGksHHshJ++Pjh2LXLQ/0iXC8j7FwFgOx9ctbLlecnMFG3JFu6soF+rjcXP8urA5HnEtYTL+kDwQK1ob3kdOXlNXx4k4RPkQr5QXbbRW/dnv5xL6OTdoe04RR9h5oa7sJ6bjqRa0XYo60oa5RnOAfKLXMnJHXvrao/36R0jsd12oVxSbPuSHY9gBFz857WGneQ5dP/BHUU7KYW50+G/Fg7drN4Sn6kg/x0e/vjbQbjjz/h7OgXlyLUTjROda4LHXOqfEjdae+5Vs9bx0/Nycq51r3JVDxC3q0xgY59yV1jxx1Jd+HFN/zZ3jpVp9aaf5RnPLGNEYMWjD41HqEr5rMAr+UJ8cH+fCuSppQBvn4Fg+Psqx88b86vVf59vx3ZfjUW6+lkq5/OWya9OhH/1N+tMl1xCyrfGJ2XXXWTG3ObvOWmnzG6+bDn30LdPG66+9wG5SB3VjdlJK9sCpG7M9RKomVYGqQFVgGSnAm5XSjcKk6PpNBHBzN+0Y40NT6WYDdtMozrXEMxd/yA2WYzB39kfz4xxVJ/cv8XccxmRN35JddNNI/1zdl6PbAY/xuH7Rh/z1ARp9pQJNozeG6cMYPGZd0oE21IzHk6qj2DrvQ+K4rlG+voahGd4c8I0fjRutVR3v2/ZcnZ/OPTAx7m8wuA/sPO/cXJXsNHakv/siLopr4zn6uONoPsphDn3uN2yAqw+CHB/CNacLsViDR4TLcdS+jtBHHx9zDWCrudLPtSFf71fNPJb7EBsxUXJYjjNnPZeTb8wivp8vjEs/r4nfZQc/2hJjlHjw9VyJhxq6+CeBOe6aoV/ni3asNSfnThusgdy1mevDzx34OhfPiWvB+8HJ8RgHuKV8iAm7ocXz15jEKvGijdaem465PtPIC/H1DwpyfMAl93qtawT8Szzd1uNxnP3UmMeqT67tXImp9o7HdeHzBx9w0Ouzz4vielu1UD+PH3F0LD9WbI5FOL5uYct8ozFi0YbHudo145zBXjlq/jmsUr9rBtsSJnPrmwdjK2f0QVNi0SZXayzFiebF80Eueo5FPrm4Gos24IK50bWr/Dx+33iuBee7tA7Iada1c2J85uq5YNznwY+hod+70k+vpYzlc8M1G8Wmj84T/HM+1B5+o84nY6JWTspBbYa0FQ9+rh3z6oPpOmIOu/CBy7nuE2MUG+cADM3LeXsMnUMfU19iap/b67FrzTGN51jRnPs5BB56nSIv4o9SewxgENfHlD9jlda+zk+UHzGi2vXpE5s2GtexacN+8I+uHRyP6hI+7EdZ99Aazz9DuSBeiY9yoaaluaANcL0olo+Ncuy8/bzxudIY7ssxrl0eo/Z1HNnQ/v1fPSd97cd/4uF8rZ+YXbfZmMWnZ+99h03be9R5owk2ujdmm+9aTs1ucbbU8bI+IlzdmBUxarMqUBWoCqwGCvBmpXSjMMk0GI+YpRsi3hjPihs5sSbXUeP7TRNxSznTxn1zN5zUiH7k6v1dMZkrcVjrjZ5zog3qHD+18XZfjm4HHOaT4+2x/Jh8SzkxhvviOOJEO84BjydZR3xHjedYOtfk7A8JGiunfUk34vapu/hpfHL3eYm49LEBv5JdFFtz0nHtJ0/2lX19i2IAAEAASURBVPSFjWvQZ91ijlD0zdO2o/nl8dHvedKWsXgc1cqvZJ/TAz7+hmQ0Z+rPHJw3/VxT5ZXzYR66vpEv+5k7Y3sMjqMGhmqP+EM3ZslTuWsMbTsX6OB9ORzkx09hMCaxkau+WeZ50Q614rtmaue2HpO2iI2i8XUMb/J4HJ+71r/JT9cXbXQ9wQ6alfDcnlxQa+7az7ZqzD7Wnj/XMMdROy+uQbXRtmP6PDJGhKt6d8XRmGwD09+Ecz60LdWqqa9l91NbjPlceb48HoUXYxODx6hdT641589+j+95KLa2NT+dI8eLOCqOt50/xyNeHgu2ubyIozbaF7VzmsHWYw/NU+M5FseinDFGXrlx+nsdzVlOb/dlLMbmeJS3xoFd7twnRq7OcQMXjPnrG19DnCO55+KwP+fnPHS903fWtXNlfOaaW1O0Q41zRTXUMW1H+UbxaVeKTX7qjzWkx4hNO7QdL1pzsCsVxSDPkn3XWMRXX+OHxPDzBblH92qaA/ipRl18dZzrGfOPkvvaUc+RGIjr5x/HvOb1WPsZn320cR04Di31niC3bokDP8eKtIp4+B/ijbLWyBu1x+AYcH1M+dPO51z56PxE+RHDa/XjmOKyL8fPOdEeta57+Pt9mNrm2s7P51+5whbXfV77I0zFU36RbdRXype6a4xoHonr65L9qImlfeO0nTfw+16j3Fd5qP7o19xxnNP42utS+ru3nZKux57mfJk7WLFWsxnLT8w2m7LrND/rrbsivfEfdkprr5g3XtwYcX+0e2N2cajaM6ICdWN2ROGqW1WgKlAVWCIFeBPgL/jTosN4wM/dRDA2bzpKN1u0nUbNG7lR4/vNNTn20dp9cz5uhxiwVZ3Zx/hRzVx9zHPP2eX4OZ4eK/fSWuA6UF/Gy/FRW2/7TXgOgzHcH8fK3cddMx8f99j5jhNPsXwOohxVk2jcMSaZq2NH3HXd53RRG/Dz9UDOET7HHKNLF/p5Dr62nYtrTH/vJz5q5I2Se+NPucJO88Qxi3Nhv9bKn9x0HO0SV/j4Gz/Rm1XKkXFyc+DxdB1EOOSLXLpiwxb6eWxioEY81R46+pt9XdqSJ3NVfG87F/DTeYF9FI86cT0wJuwRF8XnRvNqDVb+Up7E1XFtKxeNqTbA09g+ho1Zz1HnmfbOBbj+iWblrnzYH2Eotygu44Nj6c0rjQcfzgX9WasdeXHMa7XFGOyVL2NE+vn80tZj5I6JqX7OJ+er/bpGiMlxz8f113iuFeaSbyQ6LvG7asekfbROonXK3HL2xMvVufz8OsA4ORzvz+kR4SgH4lAX58Fx1LTRvqjtXJSD4+tYhFXqi/KAfQ6TsX3NlWL4PNPX+3MYtHdN9Byjr+YDrf1al8uL/qw9FvvBpbSB4jmNGk/9qDk5RHlzbFI18uB1wjFz2pCzzoH78hg59LGDveebiw8714rxUPPc07jgjFz1us884OOxdAzjXmDv91AaD/aej2N0HUec/P6qTwxfq4gLjfS1En3AcltqifEhxbnDl+ep3ie4ZowRzRfOSfj6PMKH1w76e3yO59aN6+HHxCUOjp27jtE+4uHXla61Rqxc7TFoh/n0sYija6J8dEz7GSNX99GGvmrL9aZxaceaNjhmfn3OA/qjdnyfb80VMVD8fG87m1/kwGPUkc467m3VwMfIReOoBm7fB8t9Rj12HTEPGn9SPDV3co3m/PRzL08v/fhvabKyXrkxu2Ll1xjj/8yu/MTsOs2O7Asevm3adgr/a7ZuzNo0TPOwbsxOU92KXRWoClQFJq8AbiBQRvmakdZx4C+9YSndnACWNx28ARsYamxzco1udPqA+8McfLpyVlzGR1+Jg9rRtu9NIOM5Bvtd+5xdiR+xvFZ9SjfsagcM1TDHB3bgrn+lyD5/wyWH0ZUT1ydwtXT5qe0obec7TjzH0vn2/KI56mMzSo70ya1jXxPkpv3sIxZr56w50wa1xnYs103nwPEVE+2SbcRFeXDtl2LABsXfYGo7m18aH32KTxvUnrOOse06RD4lrsRhHfljTDlSA4/NvHQNwJf2ORz0l4rHia4r6o94qj14uQbRPBNjiC18nB/iuQauq8agPqox7P2NMvLL1SX9VQ/lojFzuN5PvpoDbHKaegzE1zeic3xycdyfds4Tx+AIHXP3V8pNeThWNMduw2PFBDcU1Z86Rfr5G9y0JXZXTZ7qp3y6/Dle8seYvq6r/l3rnvioPX8dK7U1ntsxf/bjnPD8eZ7ApjRGDK/VR7l4PqqhY0TH7k8bX5c5O3JxfsDR9af5M0ZrI5thHkNzKY0pXp+2clV75qJ9aNM+N+72OHa+1NPXauSLPsbytaWa0Jf86OdfPx/50Fdr58wxcEHJzafnxFzpzxq56DXR4ynPPnkTd1K181Nc58ox5qpzwDGvo+uC2/BYtUBfDh92fl3UecLc+UZ913x6rs6FHFFj7qNP6TnfEobi5dq+HqBl1JfzZ7/nhn7o4Zphnfq6hm3uOoaxXIli0haxeU64Zmqj/NCvPFwHH/f4nIvID75Y03qv5PrABoVrH23nrnlhHCXi0a6f71wwZ9D8Jrf5joENj0F36OVjUaySjeaouTNGrh7ip7bUMDdPiEcbtOmrawP9XYV+sAOeXy9UJ9hqTMV27XRMMbQ/aisfHyeOapLjA1/Fgp2eR8TyGKMeKydgYB48Ps91j6F2Pub5eRzGcr9fNM8ghx91hnVnPjHbbs6uSM/Z7xbpVluvbz7jH9aN2fE17I1QN2Z7S1UNqwJVgarAslAAL+y4+cr91dukSeoNm99keCw+DA29uXScUY8noY3fZA25iWf+XTr5zRluMv0BPXcTSG0cg/2ufV87+nfV1KekC3UgluqR40Mb9c3dfKtNFIN9UR3Fd80iv3H6POY48Tx31YhzQ665OMpH/ek3Tq3YwCEH5831o/05LnoNAmZkpziwIT7aKDleGHN89GnReCUc+kQ2pRhY+yj64Eks1Bofxz7P6EPxnOd6F/6OfDlHtHT+4BdxK8XTODy3HVfjRvZdc0q+Xrtfjr/78Ri8HKNvrsDw+SIua80VfV3xorUDPvoGHI5RtK/tWPkrWmPk6fiuF+fPNVH8rjZy9DiM776+Tny8a924P+xVc+bjuDiGXW7c+ZfWRMQhiseYHEPs3JtrEaZzymlKfK+pC/1GnWNq4XzYH3EHl5y988RxiRt0i65R8MvNJ8acF/jqOUT+sEVxe+o2N7r4t3NWPB/rwnJ058Jxz9c1djuuAfTDF0W1jHiBu27qOBc9R8fNsyW08pdy1X60NSaOPa6PwyYqrpfmH8X3tUf93VZxENf5YW3gjxD1WUDXS8SVfc6Z/TluHEetPMldx2mj+g2Z7xymxygdIz/9hKLaUkflp+POlWPg5RvhHNOac5DDUVu0aY82uaHtBetB5xp8UPTccx8cw05tNG+Pp1wci2um5M94Xc+jjg0e/INa1w3xoj7H8GPy9X491nw9ho6pT6ntGGrLde2aq423Iw4eQ68Tem4Ci3PlPowDfH39Yr/XJe4cUx/XHhxRdP0q73aw+aXrgH25OpcTcIGjeeVi5fiojtEcRJx8Xrv8lD811D6PQRvVNsrL/WCP9/8ifv76QTzGYEzF5Jj2aTvy0XG2nQ/89DpF/XQu4Ms1TRzUXVhdnKA7St/3Sn2e+l6jnKeff87T44BjlP/5f7k6/dO//xrDK8vcpiwOVmQ+Mfvyx26fNtt4HTpMrK4bsxOTshuobsx2a1QtqgJVgarAclIAL+x9bzYmwVtvPHhjlcOlbXSjkfOZZP8ktPGbxq6clT/z7/KhHX1x86wPFH4zRzuto5vpKG5k1wdfY2mbN5a84dcxbauOyov+aou22oBz7k0Y+jmO+tMmql37vn4RVt8+jTluPMVCfM6lz3MpjmJM+lz1eeE6cX7sRw70yXFRvrBXXxyjOL7n7+OKoWsVWPDNvQlArrBDiTi7DWIhB8Wc8577jTnUB1gdQ1u5uhZqy7WgfdrO+bqf6oGx3JuXrrHGcg2gU9RHn2jM56wUjzisNQf29a3B1bVyjYjldujP2dJHudHWcdjvGhDDa2jjb8qoTTSei4H1Fr0e9eWicdnumn/aoS7F8TUQrRvVF3h9Y2tcPeeAgaLjOI5s0I/ivHK2Pu/RPNHXMYes0zlWC39rPlwLzgf9pWsTEemvmBjLcWd/zp64Wjs3HYNuueurrxn1c0zP133dHliYh1xxe8UrjeXwtN/XOcc4Fzz2dcN+1JgHPdfBz68jjgc/YnIeeYwxFNXE84zw5rzKvx3HrcmF/b62lBNtotp1VT8fgz80828rQJ+vR+fn+fTVPuLs+kc26Iu015yicfJU/h5PNUKcLkzYsAAfBRv9KNEmIOYy900GnGfl1wKt/KVctB+5+h/B6DjbmBdshHjOHAeOXiNVQ3Kjrda+RnDs60jtc23VnnNFW+XCPtbURXWL+JYwiKU1MPT5zXUD36hPMaK2+0Q2nCuMuRboU61w3FWoUc4O2iGOn+s5+yi+86TeuX5gR/OEfuB3cYZdLgbGUJynx0PeKP7a4R8YQA5YC31Kjneksa5ZYrteXAvez9zpl6s9Z+Ll7H19+lwgLopeK2CjcaK8NB5jgIu/Tuf6/NqlMVwbxAJP5Yi+rtxho3ng2HFwHD1LKh/4oTgvxNdzDFjR68Sc98LXH/TBf67eqq39l649YlNr2vo5gX7PGXH8Gq75aZwSLsZe9JHT01kXXLHSTDZm/X/MNp+Y3X7L9dMLH7kdISda143ZicpZBqsbs2V96mhVoCpQFVhuCuBGwG9+p82RNxNdN2e4mdK/op82L8efhDZ+M9aVs3OAf9fG+bg3nYjpGOiLuEZ2vPGEz9BCfaKbVMWiHfqUl9/I0kdt2FeqPa8h/sphiF+JT2lMuU4inmqLuMDUhxb0dc0PNMi90QX/UYtqCww+lOT6YdN1zqh+sI80LOHDpzTO6xvsoodHjae2sI90dr7ARNGHXfTpcWuQ+aXxHVtdus5r10B9c/PE2L7mumK5PXRS7dw/sne+5Ki8c23HU7su7Tmnyhf+UXznyDiRLccUV3XQftr2rREPRd8oaztW/uI8agzG9hyAhddxXZ/QxO2A6dcdjalt+PucUGe1Q7u0xt0nwtQcmXdk53HVJpo/HYevc1E8zyHCg73bgW/0hhvu+TQvzh0wtB/HuVgY06LzSTztgy36dR3Q3+ee/s6FGnme5DiOpuSC2vn4WO6e2XmpH9rkr/3OmbmrDduOz/WIcR8r4RBPa9dax5R3yc51Iz/34Xwhhq4R9ru9xoePjg/NE/4okV66Nsl9znohT/SRK8ejWnPDuGP63NPG34xFjsoNdh7fY3F8FK3UB7FyxfOBnefkc0ee6uvx3McxmZvychuOuS3jY9zjoI9c3A9jKByfO1r1G3PkG7PI0V/TmLfyWIUSX3vIM5ej+rONOCgen+NRHZ1Lmm80Phfj/Pk4qlsuR7WJeGgfcsYGDDfkXANo43H64GteGk/bjuOxOZfqk2v79Qa+/tqMeLDTOYOdHhO/FNtzizRSf+fGGPBzLI5pzXWRw+nSEXHcl5gaB/qXNtDUNscbXKL7UfVFO8fH11rE07Fw7H6uifv4WvO5QFwUfV1wG51jxUduroGOow1fX5/MVbXVPDxH2ruWwFc/HHtxLNjr8wiw/XoLjAjX4yM3P6egXa5ovm4T+ak9NYjm07GinGGjeWt+GodYOs4+1N879a/p7V88u2mt2pRFf/SJ2YMfuE26044bY3jipW7MTlzSPGDdmM1rU0eqAlWBqsByVAA3LHzgmRU/3kzkbhqVB25Ucm+Cqd1ybfvNWO6mKccf/vpQGtn5Tafb9NE5wohuOCM73nh63D7HxItiqb/qqPnQX23R7sJzexxzXaKtMXDcVchvlLhd2NE4uU4iHrlHcdDXVwvMxaSvJT6/5OKcVQf44Cd33chhav7+gKT4sHMMntfeD765h1vgcB7Rzp1Hjgk7fRjHMYr2tR0rf4GDPoRqHM9T/dD2vHW85MsYbkOdvJ/zqvjajua7pJ3jIy50VB3IRePk2j4Hauf66hh1QJ9z0jH6aE7sQx3Zot95qY45LPh1Fcy7Y6sPtYvmxfOErb8JBHz3hZ2+AaHxvO22OX3oF2mhWtHOOfnc0ifK0a99GjPip+OIXzrXfC7IA37gwj+Kieyi649/0sDx9DyJuCOulyifSKdojhHfN6N8jpWj50mOyoF9zlOP1V77PbaOKQ/tZzuHifFojj0X2CG+ryf0u61z0dh98gcmiuPO9a76TT4+n6ss5lqIqa9D5Od+5Jbr78qja9x5RcceG1xL6z66Nug9Bsb9Xt1jUEfycUz0wwbzoVxorzW1ZV8ulseI1iAxWKu+7Itq5wAbj+c5kyfXAHw0nvZjDIU+c0eLzw8fpx1qx1PbHDf4Rbl1nSeIpesfWmtuwKX+OSyM5zR0LODlCvj7tT9ny37XCv0aMxqHjfJVG9UadiyuO/ujGvF1LiI+HqcPvuJEcdHHueJ4NGduQ1uvnWM0P9AOpWsNwaaUo84Hbf11V3lHeXEe++hEW88RsVGcq/MjF41FzDmEud8YdywdZzvKh2PQ3e81GJ82rCM+nmPEk/5a53JWG217HOStc4i4KLpW0OfHvpFd0kbjU2fXwDdD9dyMOPM+xse6dHN7zFEXF/BXPszHc47WQI6P+xKTNXXiMWrniTnw+Y/8opw9Pnl6P+NHuBz75LfPT1844QIetvUK/cRs82nZffbYPD387psvsJnkQd2YnaSaHVh1Y7ZDoDpcFagKVAWqAvM3KNENlMuDGxV988PHl/txdKM1hDNuvnhjW/LTG0G366NzdJPX52EFsfrgOyceIy7etPeHB46zVh31xjPiPSofvXHWGORQqvvmUcIYMkauuTkaghVpqP6TiKF4Q9rOjQ8lzJ9YQzjmMImFugvfMbjmdJ0Ch/16fjIHjOf6MabF+egY8FD0gVzHwUHf5NX4zlf90C7pWuLkODwmnuvHftp57bFgX9LO8XE+R5uDHqd07BxoC2x9w4T9qFVrHCtnHOt1xjljXEukkftwvcEvxxdjsCu9actYzhe+KBz3GOiP+nydRZrBNxfP17D7u85zLFf9dk6awyqrxRsAOoY258t1Zz/tPV/n5/46b8TQ2u0VD5rx2OMSV3WFbWljFnHVHseeH/q0eFyMwQe89dqDPj8Paev9PufMhXGVY5+c6Ke1Ymh/11os3ZNGaw3Yzl/jRfrxHCvZOabmwzWh/rm2x4evvp5w/t3O5yjnh7jKDcfuiz4U195zhI1rTK3ArzQ38GXxXKK1SVz4OH/XF+NRH+OhVjwcOwfa+PmOfi+uS06TXL/j8dhj+5zSDrVzQJ/7c+1gDEVzph6qrWsIH8dUG8WDbVSUQymWYmkMYuo4+0o18svp7zkRBz4eB/xhr9dSaO8bTMRADR+U6N4kN6fRfOb4t+DNL89DdXNf+qgN+6KaOiivaP5oRwydb/Zp7fY6pm2uT+3znJSb2kEXfXb3mOToeIpBndyG/WqrbZ8THUPbOUf2jOGxHQvHtPUcaevxHJM6az8xiUGO1I39UZ3jAVtw8fOG8R2rz1qDT85f8SIsHfe254C89VxGHih6TYBm+rqd09BjRcfMSeckstMYbksM+g3RIMLyPuJqrXzY71pG2sEWvv7HVVx3xPLa1zbGozydQ7SOPT/qF+HleEW4ylmx0M9PzG60/trpcXtvle5zh5uo+cTb3Ruz+ETvWoW4dbysj0hXN2ZFjNqsClQFqgJVgVAB3nx03UDAGTcf+nATAi7jTr8Z443WpClT0wg3unGM7PSGreSjdsAp2UZxvA8adb2ZpjehqqH2E3dUPoqlMYjbVc9yrUIzlC7dujhzPLd++pyjxJhWreuND13ah7hD50v9ian8dTzC17WCca4515G8ov4chvJg223ZjxqxUfQBve1Y+QvjuTci/PqkfmiX5l814gOtvnHgWNSI/dTE+zmuNW3ZB109vv5xh+uFGK4P54aYXbVjwp5rR7koDsfZ53rruI/Rh3U0F85JtVTNEIdFvxpfbebHG1tqmcuL2jlncPQNNtg6T/CJ3kDKxfP5i45L10LPU3Vi3qg9Hx3TufJ8HC/Kg5pFcdxf47KtmOSifIGvx/Djmony1/PB4zsO4wETuWOOuUbQ5/boQ+zW9jur/kIfHJ0LbKN+XyPMBfYojuP2ntOc18LfjsFR8FG92Y/aeegY2pEW6O/i41xUc/ijOLZzcQzk0ac4LrhG68M1QfzSNV/5eYwcr1xstfc8EUevO11aA8v5RGuQ/P18h7/Oj46z3/EjTm5DX8VDrKg4nmvCufcYzCnCRJ/bg5Neq9UvwnLuJZ7wR9E15PatQfMrWnt4NvR+8PVPdVFXzw3Y1AltxaIP+lncv6QN/VUP9hFP46GPuasP+uHnOcHW7+tgy8K5iWLk/iiL8YmB2tcVcWnjmqCfmnps+qiN9nmbsVU3xWS/6+UcHdc5A8fXOLHd12NhnPmqLbjrayRzoQ19vJ/jqDkfHpP9auvtPrjqo7qinzFKOOqPfFxXjruWGkvHPBY1Ag6xyYvYUU3baMznWuO7fcTH++CjPB2Dx7mcOe61z7nzhg4o+jrtGDhWbsqBtsCJMOgX5Utf1rR1fPbTzrF8nHaoFYtz5P5qzzZteYza1wOvD95PH45Hvj4PHs/njevVY2kMxo1yxpjnDd0cjxgRLsfc5w7NdW/XW26ctt18vbT7TjdO66wobYgSZWU94v5o98asxamHoytQN2ZH1656VgWqAlWBNUUB3riUbiBuKFowV+TjN3CTzNFv3BSbN4baF7X1prDk47FKtlEc74NGXZvvqqPfzCtvYPu4xysdM7dxMEr4kxqDHvgpbUYMicW81We5nJ86vzyHoj7l3tXu8u8a1/WIWBEv9HMdub7QFqXPG5Kw83joY8H5hxI9XKMfsfRNa/a1b841bx7pG1LAUpzcGnA+vAZ4nojFQhseE4MasT+qHRc+XXOk447J+fL+rmPHZE7eTxyO85g58xg183cMaK/rI+LsD9oeT+Pk2h5X47juwNBxzydaa7n8lA955+L5JzzBwddt6VroPMlJOaDteuo4OaLP8UqaEEPPJY+jY7T32rUBHz9Xwcv7ovPc9Yvi+7pAPN0IUB+3BXe3Rx9099xL/Rhj8TmLcGiLWvlpv7ZdU4xxLqOcMN6F62sDPijOf6531e+cn647z9m5eD4aE/i5+6zIT/OPNEGfn5ersplraXz0KKbb5o41f9q4Duz3Gr4o0bUhytnngLFz8Zifj0ObaCPNeeTigXOXVpwT2KK4fY6br5k571W/PRfY6+vQKst4TXtOJZ7RGqLmGgdtny/0wV9fB9DHvCM9vA/21MPzViy0UZxDSRvNg3G1L8LTcfq0gYNfyNP5qBl1cBv2R/gan1iuC/XieIRDm2iMfrThcVTTX9cQ+2DPfl9zmgfG/LoX5eT3yIrh3FxTz4X42q+8gcd5cO4aSzmoP33V1tvOUccjf8WHLWOXcByTeWs/2pwn9mssHfNYqh+x1Z54XtPW+3EMf71mlPCcD3RT7sSP9OQYap9jaqs23nYf5w2M9t7u02e464Jj1dC5cyyKxT8qcA0WgK88AA6Kvk5EOToW47fO9ku5Eqs0r+ru8+F+GtfHgKNrwsfhq3mqLXxdS3LP9cOHJcoZY5FuwNN7fWJobuxjrfjoK9nSZ9J13ZidtKIFvLoxWxCnDlUFqgJVgapAqwBvUJbipmDWU8BcEddv4CbJxW8eFZs3htoXtfXmz29s1V7t0N8XXzG0DY38wVnH2eZNpXNjP+18nP19as7XOBh94iw3G+ZNXuPOKXEmUft6w9zonI/CVf39vPRzKYcfYUR90MAxce2D5vpglYtDDV0H9sOv9ICOWCj6MMlrr2OiP7JrAeSX56PcMaYbOHSLzqm+577Hc54+h4ipc0EOrJUv+/rUrhdxvJ9YHOcxareFDUq0FtyW89Y6tD7nL/DzcdqVatdWOXt84Oh4dN3QPHRe3FY5ETOKB4yuDaCuvDU2Y2l8ttWOfax9/er60jxdT/qrjefp2PTR2n10DG3g+2YQdenyjeLn8mBc5pPTDOMo+iYo4rg958P7GQc1Y2lfyR52UU7qj3akC2NFY/Chpmjniq4N2vThU9IcOqHo+eVcnLOOAxuvE9G9lvMFV+2DJr62OG9qx1xZe85dc0Y/rTUH9g/B4XzSl7XzBlfHpW9uXsgtN85YqF0L9Hk8aoox54c+LeTGPrXXsVIM+mrta8jXgtpGOWE8xyUay60rjcO2c2M/a9XPbaGJXovoQ63cHuOen+aFccy/3i+hjyXion2w85hcT9EYcVGTs8+t2pC7xtD42k8/jc8+j6E2uXUPGxTVxvVnDq1h8MuxkY9zYT65fsACx697njuwvY/YAbVFPDwXrhPtZx/wtB/HOoZjFtWa/Eq86IfaNeFYzp/4buf9HPcaGvqcqQ3XI/o0X9XC/TV/+qi94mu7xBn+eh0o4UV8dE0zpvJkn9aOk5sD9cnNH22AUXruox259eFAG+XHPuJFNWKAr96bKAZ9PCdy4zhrtyNWHy7A0LWGY/fzuD6ua8LXUnSt0Hg57rl+8EMpjftYpPccysLnM/ZF+Jqj2k27XTdmp62w4NeNWRGjNqsCVYGqQFUgqwBusvVmJmu4mg/oDRVvLqeRksZxfL8J9XEe8wa064bNb2L74jPOqDX4ofAvOduD5hcf2HA8CY0Rx2Mw1g25po6T0HCSOnFdEhP8uh4AaZurHVOvRb6+c3pQL8TAOeObR+rn5yfGUDSPrvPIObcAzS/4oURvGKAfuUXx8ekdzQG2wFIczQHjLM5F9aMNYuIHOXZdU+iTq6M5Ue0ins5RsSN7Hc+1XUfOWS5WFMcxolj0c1z065uMrgv5RJilPl0HjA17j48+HfdcMM+lN72cL/BQyDsaZ866Lue8Vv2m/6qexS3mEq1VWns+7Nec2Uc8HhNXteQYaj0H1Eb71d7bkTZqAxwU1Z+ckJd/Ikh9aad9aCtPH8MxdG+x5euKaZdbC64xtfV+4qCmjfaxndMllxP9UEe+jOXzS78+uO5LTGKU6ogT7YGj1z1f9+6r45hLPSYmap1nrkfPQe3Rpg45O+K4n9sjp+gPeegXcS6tFfppTa7aF+WMcecHX+8jDrkpFse0zs2/50E8+OZiElf1dRyNVxojltaeSyn/SFdgOXfaORfY+nVCNcC4lsif45oz+kq29GENfp43xpyL2oC33/MRD7XyIRfH8/OVOsHfNUQfC7GJy37WujY0huJHvs4PeG7H2BjLcYQNil6r0KfHGFc+ONbi2LDNccn1Aw/54/qiz3Q+jxhzjEgL5RdhMJ7mWeJNPM+V/aoP+Wkf7XJ1hKvzp35uSztdP2rvbfByDLWhnsyDY4yDYx+jD8aot65t9EeFthjz64vba3wf89xhq3NLe+XJPq0dp8uevpoH+1iDS5+NWebnHNhPPNawQ+G3O/ic0E5raOx/YBPl6Fi5ucxxdX/loG2P7Xg+Dl/VWnn5mo7WueI5R9U5FwPxnaP6RZi5eyb1Ay5LCZ82s6jrxuwsVF4Zo27MzlDsGqoqUBWoCqzGCuBmRx+UVuNUOqnzZix3w9QJ0MPAb9zURW8atd/bvHHr4umx+uJ7vKHHWDO4+ecDA/31xrmLO31KNfLDA8+aVnL6LrUOXJc5HqPMua4Z4OqbHaUx5aB20YOp8vJzJrLvOo8cg1zAPTfG3HycD568NsEOff5GI+0Yi7X6oU/1o43WiD/OOeVrANrqGySqNePq/LAvsuNY31pxmbf2KU4uXs6evsT1eeM4cV0X+tGub604ug61n1g6jj5dC1gvujEYrZ8od/KO8mWuGodcWNOfx1GNuNHrh9pG8TFODmrreYBDpJf6RDYRtvqwnePGcdSuv+sS8YvmiJiRPccYD7XOuY5rW+PoXGr+2q++vuZ0DG3nqbHcVo/dD2Pk4/NLP9eU/Vq7LzHVpqsdcXMf5+JrRHWDtpEu7kOunoPGpg36cnZRLNhrPNrkMGDvOaIPJVorzNfx2D/nOfdb/ckDI5Fv7o9C6KdYGoNt1Yt9rNVXc3UetGfN2DhWTXGs8XxM/WDrRfnQNuLCMffHsdszL+cCW+Do9SOaK9ixRBgYYwzaoXYeOqZt58Ax5eJxqbHqRT/U6otjcPFnXsUkHmxRdAzH4Ij7M7+PiuLr3BDH8YHpvpGG9Ic9CrG9f2507jdsoo0a/+Mg10gxnBtsUfRc1JzUnhxb+0Z3rC/aOm/2w1YxIi1gw+Jri7koBmzRj5il+1XnxBjKATb48edf2ka1c4SNYqqP2zKfPq9DxHUMxSee56r658acQy4HxtM5wFpA0WsM7VBrfO1H2/n4OI+ZG4+9dl26+NNf82Afa8Ys2cCW58JQDRmnjwaRxrkclS+5MRZr58pc+3ABBu278DiOOjdHEV/nofGcu66vCIsc3E8xYeO+Q9dzFz55TLuuG7PTVljw68asiFGbVYGqQFWgKpBVADdB/pCaNV7NB3hD5Tdak0zLbxQVu29cYnTZ044xuuxpN26NG0sUfzDVG+rcw8C4sav/0ing682ZjLL+dM0AT9cNz1f05x4cMeYYeADTN1+cl+MCQx+ulAPGoqIYHKdfNKb8dZz9ffsYC7XPhz54qt0k2/5Q6VpHHNQnGh+Vn847tddYiutrgGOuIftRO1eNp3ZRm3yisa4+rgXlHPH0GCV+ngs5MBaOuRbRjuIRQ31gq8U56RjbwPY3tTmmdRQnwve8oRti6DVAcdGGzZA3p9U/0kbHo3bE29cq9Y380Uc9ME/YGHD+OT/v1ziqnXJkLPdVGx/jsealsTge1ZGm9FU89e3DxXH7+GgMth2H/awd1+2jXNzH86SPYzEmasXQuVQb4mgf2/ThtaZvLPqjJgb7iIVjx9OxaFy5Rr66GcR4qHFO+AYU+vR1HXaqF4616JpXO58X+Dg27d1W84GfxgBG7tnLcyeO4wOTY2h7cXvq7/3uh2PaRmPsc5wcF18j9Hcd2e+14uZi5mJwbogJO9dd9dZY8MEYrrXRZiwxUevcsl+xGMP5wNa5Rzaw0xhcP64H7LSAg74WAptcaEcsHrOOsLEuUPRc1DwjjrDXfmC0uso3PCiG6pHTApgonkuEDTvk6NcIX+OOBT/lhWMU2PW5h5mzXswxwqSta878I2700Ro5le4NONceRzl5rJyP66c8HAP4uU8Xwk/jKw7ajuXjPC5hwEbXII6pLdqlouvR7aiBY7sdNXSsvhxyGgDXX+c0dg6/Dw/PSbF8DDGdi89Hn5huE+lLLV0T9oOLr2/iYMxjaF6lMfhGeaPfi+fO8S582k27rhuz01ZY8OvGrIhRm1WBqkBVoCqQVQA3NkMeMLJAq8EAb4j0Bm0atHM3bn3j8mZTbxZzPDVWH/sczpB+8MNPbmNWb46H4Fbb5a+AP+wo477rW30cjxjen3vIARbPa+L6w6GfF25PP9Zuz36tHUPXvJ6T9NFx90Vu+sYZc3U75zVEI/IYt45iRtw1Tu56oTajtHmdVG3Z53hcV97PY3LsysXzp7/XPlc+XjpmDMWI8tJx4Pl60RhcU9qHtuK6ja9jauj9xNR5YN84tcdxfsSmXjyGnc4j+sHd30h2G9eTeFHt3CIb9pV0Ue65/IgDW9yr8X5N5442rBEz92adxtE1o/lrv2L6hgbHohoYXZ+Mpl+UC3mqRrQvaUob1Irb10f9tV2ac9XO4+KYsTUXnk+MoWPo47jmQFvU1Id97s9+t2M/amDjR+/lojzJX33Z1rVCzhxz7o7j48rVx4iZq+Gr5zS46AaFYkcYzKOLI8ZR9Pzi/PscuB6qrcdRTo5D7pEmHFN/th2HfLyf9lozJ+2L2oqV8ynxVk0ifPSpVhoPY8yJ84c+LTlOaoM2eRDPx7uOo/g+N+Cu5xoxXZ8cZ48BO/ImFrTStenHxHa/KG+PhxiwA1891zRPxdV58/7SJin1UH/m5zVt2Q8fx9Yx1YZacBy156y5qd3QtuJGWhPP8yFH76e9zy+w9V6Hdqypae48op3PF177NQfYlfJwfOg46sYsYikfHEela64UgzpEON7nees4NXAbnxfGG5UDYqovjsfR1PkyD+CyaDzyj8bYBz56XXAfj8m1TX/Uvm7AC0XXtM5zjmOEw3vnEg/FQ1zn6L6wiYrnThvFz9nQdpp13ZidprqGXTdmTZB6WBWoClQFqgJrvAK8ofIbrUkLwziO2zcuHsDwEz3AO6bG6ovvGEOPwQ2FN7n0542w3jRzrNY3HAV0zWlWo6w/rhni8OEw1087rXN8YBM9+Di2YkX2Os62v0mifhEfPSc8vj/M0tZxXF/HoXbkOI3a885xn0bsCBMa6eaP86PPEG2AgZ/S9de1ZxzUuha0f0gbD+86355XFMPXi8bjmtI+tpmL2+gbCLAln1yciBNjjFJ7/NwcujYeC3nhtUrf2IlsSvPt9q6Bnwdq36UL9c/lp1jedh4cL/HReWZs5xjhqh/jTKqO5pB6kKPGcr465m2uo3H5R5owFs8NHqNmXLTJN+rDOIrjK6b6wTbKJdIwZ4v+XHEesCP/yIfzw/lyG+XuOPSlj+elvrTpW1M/5uPYjpOzc12B4xsMjOX5uCaMwdj04zHrHI5zgX0pL7enrfNgXK1z3NSGbeDp6zD7tfaYQ7jounFtyNPxEVv9lEvUpj/xIptSH/3VhjlqX67NtV7i7DGArxshOEbRPo/H/HxtwI5j9CEnHqOOYug6Vx/NRfuBg7HSJins++rnugC/T/F84eNYfTl0xVPcKC79fV5o6/20dx0xF3qv4+PwA6afRzqHsNH54jxqH2xK2kT4yCG3Nj0+8LV4bB1ju8TH9SvZEo+1zh37WHN+3Ab4/jrhczOEA+K5BojteZEX6hJ+ND/6no6PO5bni3jgoxy5bjCG4j7Ubm507rfng7h+/65ccpjOX9dXaazEHwzdV7lr23PHWJTbkOcOxR+33b0xe30TYq1CmDpe1kekqxuzIkZtVgWqAlWBqkBVoFGAN1TRzeAkBfIbRWIPiQuufW7YNNYQfHKaZE199QZ4kvgVa/kooA8vZDXK+uOaIQbXjuOXsB2DWKj1AU779bzR/uhhSse1rRw1ToSt4/5wppho09ZxqA3tNT76ShrRZ9zauUMvfXON3MeN09cffPDDa6XzI860tInW3pA1RH5eIw99kwTjOt+RzhEX4kb2HEONteZvrPv6o4beT5yuGLTrW3scxnf/3JzTjueN43EcdQ5bbbStWMxb+9SW49rn7Wi+3SY6zuWOfHS9qK/yob/2wTZaS9RRsSbZdr6MR44aa8g5xnnxHBWvTzviQb9o/Wg+4Bt9ikv91B64OsYcSvFy/KgjfbvqaO5L2iEuil+vGKfE3WM5V/clJmpwyr3J7+sDOF2f9iaXKFedG4z7G+6cK+fLfvLuGqcdufBYdVEuGNcx2rP2NUFdHIP2Wjt3HRul7VyI7/3EBle9t6B9TkPXDDjRXBLfa+CidK2T1ij4FcUvzY1DMC/OkY/jmDbRGPqQr29iqK1jO57qFeXDGKj13NM8HRPzlptj5cb5ZR9w/J6EY157TB+PjjVXHXeuzktth7SJm4tLLNrhWOdL+2lLGz1PMBe+MQs7tUFOPr86h7B3TR0XNsoPx1rcP4qp9h5fx9B2PB/HcYmP59s1D4rvvjrG9eH80O994KfzMIQDYioec82tC9iXNHU/5+I5O5ZyQSyUKGfqg3H30TGMozgv5On3TsrVMcmzxN9j0Mf7Nc4cu8X82O8150f7nVOEr/a92iPuj3ZvzPaKXo36KFA3ZvuoVG2qAlWBqkBVYE1SADdFeENl1Afvvlr5zRf9optQjnmNG8TcG11qy1jRTaDazaLNm9ohec6CV40xeQU410Qedf05Dh9U9I3DLmyeA+SiNfG0j21/oEN/yZ5+rNVf/bSftjqOPs2PNqz5kOh5sZ92itGlEX3GrX2+HM/z9PFpHIOTXitVF8ab9jUJc4WCNyunNReaV6RzaW587VAX1vBFUR11HWtO2k9/1BEnHR/a1jhd2KqNx+HcK57aaG7aX2orVhd+F/dSnD5jygX2jOf9xNK1wDVDH9qwn8eomaf2TbLtc8h4ERfnW+LB6yjxSrZdY84R9rn14/qDs25mwJdz4Tk6pmKVco/4MQbi9SnOBT6lmF2Yyh22yodzQwwdQ5+P0w41bHUDQsdG4ctYzgG4mgPWkR5jnD7e72uuaxxYKCU7H2PsOc/Fv3VNcF1p32KP/JqObIf0Ma7Pj+eEcd/8Zp7EQFzmg/a46xYc+m4EIp6XKL7Pv/voMf1dG7XhGtU+tulHHPZrTRv2RbbU2eeEPtDcN0roAxv3gwZRHOKh1nlkP3zwwz+6Y39Ul/CBrZth9Hct2I9a19iQOVSMqA1t+rz/wPiuC/sVG9rrddCPc/OlX/MOPM/T5xF6+euX8yOvaD661oGuIeJo7Xx0jO0cH4z7udMVj5ioo3w4Tt0Un2tL+2APfroWh3CAv2rAGOiP1gX6yQ1tL56Ta6ex4OtYnhv5uJ/m6GOOSY6aD3iVrjfOg/FKsTx3cncs9pMXavfVMW27nhhzfHJVv1m168bsrJRu4tSN2RmKXUNVBaoCVYGqwGqhAG6o8NPnQW+chHI3brmb0EnEim4Cx8EdxXdW+o7CrfpMXgF9yBh1/fm5Ej2ERQ9Hmo1j6FjpwSfy64ql2OqvcVQX2us4+vyhkXaoaav42o+2jw3hDf9Ri8d1HHL3/lke60M9407j2ktsr6GRbnD6+KjHumYinUtzE9l38dB1rOe38lCMSa9BjdN3/qABCmvMA+cip88ovKmN+k4SX3XtapML7chJ9eMYal8LOF+8z3PR+VesSbadL+fcuSAmc+wTH/54E7rPG+JdeM4R9jltIlvHZx65OaS9jlMXjmk9qWuf45CnxurbVu7w0bXmGnlu0dwDA5ofuNeWCzYk0M+iMdjXVTOWc4AfeXKucznRjrEcK+dHe9auv+J0xSAGa8Xq0o0+zJPHk6rJXfMBNvsZB+sNRTeCMKftudz88ROLrkvOH8dQ67j2R234o/D1IrIp9UXxPc8+/iXOUQxg+nzpnGvMCNu1hz201s0+xUAs3yjRPB0PY77uFQ/tiBf6kW/f+fC4xPUNfvSj5GJijFiuK8bGKdChz/sPnD/nyH7l4HMFHz1vcAwNdT7h07Ux63MGLXRTERwifdxP7XLrFza6hnDshXPi/Xoc8eG4+3fFox/qPrw1b85byQ+4QzjAXmOor+cGWxS1metZ+FvXk2tXGgOKcsExc/Z+rDWew12YwEFxu9L1xjUmD9dEtXAf5l7iPsds7rfy035tE1P7SpzUbhbtujE7C5VXxqgbszMUu4aqClQFqgJVgdVCAdyM4afPg9E4CflNH7H0xpB949aMFd0Ejos91H9W+g7lVe2npwAfNPgwNDQS16/6YS3rGwD6YKd2bEcYHOs65/xBbGgefEBTjo4JLjqO48gG/Si09bz0HHf/obznIg3/7Zwcgdy9f5bHXJOMqbqxb3WsNa/cuuZ69PxGmReda11fvvYYa5QY9I1qxtHYkV3fPs1HfXJaqo23gYUfv5eI9J8Uf+egxxqX+VA/tUOb4+yHr/dhTDFnkQP5RLVywfhQPtDC5yqK09UXraHc9UXP1xwufd3W54Nxu/J2HMR1rBwX7Xeccc5tcie+5uBxIq4+98AhRjSG8QgH/aUCnrkNfJ5LjMtj4lEf5cO5pQ3qnJ/aoF3C6aOZ4rk9uOomDXjq/RZ8mafiTKINjfHj56KvEcyf94E3+nzTSbFUN/DlvPThDmxuIPSxj2w8/tB1iLnCJoTmpHFcE4xF68znnBg5PZw37aMa8VB0zWieHjuaN8edxHrzuIgRrSPGzmmBceo8CV6MN6RmLh7f5wlz4X+g4uczMHxjtmsOwdWvVTn+Ovc5H+ZBXSMsxYnGc9hum8Nx7XJ2jofjHG/oyD/4Uhtia1+ES7toLOqjBtSTNuznMWq30TG2uc54TD7OO8JyG55P3q++OgeqHeOzdl6+psmT9hGuY0zCh/Ecm/1DeMLHORFnFvXgjdlLLrk0XXHFFWmjjTZMG2ywwUQ4Xnfdden3Z5+TTv/tmWmzzTZNO+20Q9pwww2L2H/605/TtddeW7RZZ51106abblK0meVg3Zidpdo1VlWgKlAVqAqsLgrgBjb30DvJHPRGEbilm9Bx4yKW3vyOi1f9qwJ9FeBD2DjrL/eQQw5dDy/kQHutu3xhqw+1fLhUjFKb3DVOxEfHgRfZMI7a6nVEryGMG/mwbxp1iTfiDdVvGhxdG9VtGvFmhcm8SvnoelFeuqa0v9TWudbzW88X9R8lhvp7m3E0ttsMPaaG9BsVG9pEb+I7PuLM4pyItNL5Y76ofZ7gG90T6VoaVSeNO05buQBnKJ/cfA3lFGmaOx85J10xMB+aX4SHuPiJ5knxff1FWGqfa3ue46xhx1JOXXmDn+eEPs5/NKb4sO1bShozB+rgc8v+rnyIQ0704zFqt2GutNHYfXJ1jeCjm2p+jDgek7GnWZMnY0c6+Kcf/Vqm+oOrj0+TP7CZA9p95gZ2WpAzSvTaQjvNMRdDedAPdbTe0K9rCsdefI34sersWIiJvHRD3fFzvNyudBytF14vIz2Us+MSi2vRx6d9TL4en/2Mj3noszELHdyXGKijdUQN1C5qc+583mnLcR7r+mUf6tJ8YDzHp7QW4ceicaN8aZer1Z82ikN+PmeRH/zVl3hddS4G+9XfeegY274mOFc+lxGWx+T8eb/mqVpoP/mwdl7sZ81YPHZ7jEd9tEcdcdE+2Hgc9KE49lzv3Jzqa6v7K34pf+JNs+61MYsN0DPOOCsdf/z300kn/ihhI/URj9w3PfCB9xmb2ymnnJo+9KFPpisuv2IV1lprpbvffY900EEHphUrVqzql9arX/2mdMH5F0rP4uZOO++YDjnkmYsHlqinbswukfA1bFWgKlAVqAosawVw01h66J0Ueb0BA+Y0b8IQK7pxnlQuFacqUFIAD3EofBOkZBuN+YOc2vRd136+AaOvL2z5oMUHU/T1KeSuD2DsU38dZ3/EGWNqS1704Zj7sp9206qj3DTWUP3Ud1Jt12ya195Jce6DwzdLSuvacyfuqOuD60xj5tbAqDHI0WvGmSSu66N5efxRjh0fGLM6JzBXmg/18zxcT9hF90Sai/s45rSPuQ4ZZ1aaMp7Wqgv6VXO1y+mvNmjj+qRv5uXw3C865jWCY6Ne+5z7uPOv86eccv3kj9r1Rh/5RGPj6hfdx1APxuUxuKAwZlc+Ob85lLnfXTY6rloqhrajNaHrDeeSfoIWvsxHcabd5lxSY8RzPZU3xtUWx8RAG8XH53qn91vjj6Ih5hYluh6TtWqSuw7qGqEf6pIevk7ohzWGotqjT48V13HA0b8219ec+jPuKLVqo/q7Hn3OG8wlNj1LczEKxz4+XEc+v+wnBvJYrhuz5Ob66RwxD9Rda8DnkL6ltUgb99W1QZuuOuKt64gxHNvnjHHcjv2lmjEirZyfr50Il3gco090Dvs8wocxVQftR1vHaO/9ONbi8XVM8djvGvv1pY8PNB2HH2Kg5K6LrvUo8898J1F3bsx+73snpqOP/ny6+qqrFsSbxMbsz376i/SBD3w8XXPNNWmrrbdMu+xym3az9dRfnZauvebadOfd75ie9KQnpLXXXntBbBwc+uLD06WXXpZSs4m71qLRuQ5szD7vec/IjM6+u27Mzl7zGrEqUBWoClQFqgJUwG8UoxtD2o5bI1bp66/Gxa/+VYFpK+APLYzX9+FFH6iG+tIeGNEDL8ejGrz96w89l9y579cI4isHt8EDJ4q+kdpXo9ZxAr8irQnLB3seL0XtD/Wz1mdaOXNdlfLx9UIuuqbY16cmns4rebj/qDEch8eIg59oo4Q2Q2vnPg3Oem6Cn2o3lO8Qe6x718rP1dy1KIrDuR/iE+FMoo9ciDUrTRlPa19DufPR7RSj1B4nN4+Z41aKzzHVfNzzRLGAD7y+XN2O/qijsXFyBmau6PnlcRETb1zruR/xiPz8nO16/VKMKIbzV3sfwzHmwuenD26ENU4fePq9lF+/FD/iqHksxXVL5y7ip/xHbTPH0nUiN+d9zmPikx/yQCl94lVxVQP4gadvzMJe7dQfPqMW5a6YrkefuQEWv6Z2VD6j+lEbn2PND9jMo8954hoot9y5UsKlPzk4N9WftqgjzFx89Yv4w8///6hrBj9ff+Ss+F1tzw/2zhu5ed6RH3xH4QA/rA1/zUC/x3EesPHimjIfn6McFu08lxwX2oMHYzknHDsvtfFYGOP5QjusAX0tjmI5R+DqNS6KQ/yIH+xL3+jgHEv4jDPNunNj9tvf/l76xte/Nc/hwgv/2LbH3Zi98sqr0steekS68sor0653vH16+tOf2Oyxzm2x/uxnv0jve+9H0vXXX58e9/hHpb33vsd8fDTQ/7znvTRd33xy9xnPeFLjv8uC8eV6UDdml+vMVF5VgapAVaAqsCYoEN30RTfTk9ACserG7CSUrBhLqUD0sJN7IHSe+sDHMX9AZ3+uRvzor4Jz9uyPHpSVT/RQCF9/UEOf27omeJhD6fsA2RpP+Jfm5tB958v9Jnnsui71A/CkcuNaKK1rz52xR50Xvo6pP3kQG7WvWx0btY04KKOck7mYyn0a60LxyUG1Y9+sas4f4w2ZJ66laehEPn1rz2MpNfU5zunjdswV9nr9Zj/rcXLzmDlujFWqqfmQNZPDIxbHcQ1D0TdPc1w9J+XjY8AcRz/45wrOB97De1xw77MxC2x9/dRcGJfnHY+j6z0xcprRF7Vz1TG0oZfbRDHdbxrH4KHXe183GjPKXbWLxtV/Gm3VcVrxoUmfT3JyjTDPaK1xzGvVEetDj93WcVUD2EIHvd6pPXJBmdQGKGNH2utaisZbIsvkF/Pwa5nmAKrMw+da09BzOWdHHPVDO2evdpxPtWWf2rHtOaC/ZE8/1BoDx+CNouuL+UJD35BtjZtftOFxnzri7brhPOFrBDFz584oHIAZxUC/8/O1A5uoqKach6gv8mVM14H99IleY9yHtqi5/rWP7cjP7WGja4J5EQO1zwts9NOuURz6ezz0wx5F4+ocezwdax1n/KtzYzZd3zCSj6Q+9zkvbinOb8za+CL+mfHvfOf76VOf/EzaYMMN0hFHvDStt956C1yPOeYL6ZvHHp+2u+W26UUveu6Cscubrz1+0b8c1va98IXPSdvvsN2C8QUHmfjzNtMenw+UUt2YFTFqsypQFagKVAWqAjNWwG/CSjd541JDLLyZoW9ojItZ/asCS6GAnzd9Hy79QRDc+/pOI099sM2d+9HDnT9Aug3Gu/5CfBr5KKbmpv1oL6Xm5OKa5fSn/epSM6+Sxn7+IDdfU0Py5XnlMX0NTENj5DuN1zRyn9YbI9SMOrt27J9F7VyGzBPX0hCfaeUELlr8zU8dm0VbdS3pw7WmnGDvn6zgeAmLNl21xhwHj9ebca4f5Mq1xGOce8DXNzFLXEs56dgkuJJjqaY2tIlel3P5dPGNtPLrINdf32uYxiRn1KqX2vTFVaxptJlnhB1dV3VecvpHWJPq0/hLraFrN1QPrENcp7Bpqnm5VrqGMOa2iKvnudrDFj+TvJ5jHXetjaWeG9fQj6mh5+HXBs6pnruOpbn6mqAtcXjMOmfPcdSYT/865Rwe7CNMXROwyRXPE3FwbdQ/8GG+bkvMvrFozzri7Xlifnwtcy6Jw9rnlv2j1hrHeZU9GhgZAABAAElEQVQwPS/46vlawqKv5+LrFHOCovNUwoVtbv4iP80dvphj3WSN5rzLh+sIeFFxfuCFotopBrUilmvG/sH1iPuL3RuzxmTRxqyN9z1885v/LZ3Z/N/auzX/S/aJT3zcIrfTTz8zve2t7277X3zo89K229583uaiP/4pHXbY69vjw199aNpss5vOjy3nRt2YXc6zU7lVBaoCVYGqwA1dAb/pi24mJ6UBYqH4mzeTwq84VYFZKsCHuiHnjD/0gO/EHnxGSF75lPKIHu78wV6xQMUfOmedp3NWeWbNRWNrm9dE9t1Qro3QvqSxv+4g/+hNCerSVQPPv14SPr4GSmu8K8asx3k+lXQchxPxiTGtOMQv1b4ehswTfZeSfym3pRzTOS5p6ucJOENPaus56Bt5Ptb3WGOOiwesUn59OXm+wETJvYnpuCW9S2OOM8lj1RnX2L5/MOV+/mlBzQd8o/OPNn3nV2OqBvraQMxcTPWbVZv3glG8SBddZ321ibDH6aPWSxWf3HU+0TfKeQw9ce+kuhKfta4h9Lkt4up57jwYg3jj1iU8zk20dsaNO0l/aug80Y/7MRZ+cpp5sV9rXYfE1XG0fU447msI/Zhv3/Tya18OD/4RZskePizui9xQog2/SJO+cVpQ+xVdi/rg5TT3ubVwgw81Th9eDOCasp+1rh/2saav56JcYFuaJ2J5Hc0fsaLnuZw9fCI9nKPH95x8nLmzHzkCU691qp3y82smMWZZL8nGrH7i9SkH/13avflfsl7wdcUvbb7q+JKLL0kPf/g+6UEPvu+8ye9+9/v0xjcc2R6/9W1HpHXXXXd+bDk36sbscp6dyq0qUBWoClQFbugK+E1fdGM4KQ0QCyW6WZ1UjIpTFZilAngIRvFNyhwHf0ha6gcf5VM699VuLt8tF+XsNqrBUuSpD5jKBe2uh1m3r8fDFMBa8DfyFcFfdzA2zhpp32ho3gj0mL4GSmtc+S2H9tBry1DOOgfjaD80bmSvXDA+ZJ7oW8/pxcpSG4zom29u6ddurgf1V59JaK0xS9w0bq4NrEn9mwy9ZkAHfzO/xFX1co003yHrO5dz3/5R81G+iOX5KG40jj5uErgvxqLiMWmjeqlNX1ziTKvWedcYylv71b60ntRn0m3quNQaco0wv3H0UF2Jx9rnomQLH7cnzizq5TI3fXIFV7/vyvn5NUPtdN5zc5ObE+qleLD1b3xAX25DSn3R9nWJvlx8jGlxPsgNxTdm8V6I9/V9lmwBg1/j8Pb54X1AEGasLsbROe8CjPJSn9J1DL4orq2vM8zv0DnxuSanHJ9SHrn1Rb2IzbrP/Dg/aI68o/Mg0sM1Y+xZ1UuyMXveeRekI17z5jbHQ19ySLrFLbYJ833rW96dfvvbM9N97nvPdOCBD5+3OfXU09K73vnvaf31109vevPhbf+ll17WHN8orb322vN2y61RN2aX24xUPlWBqkBVoCqwpimgN325G8M1TZOab1VgGgr4Q9lSn2/60FZ6SO7D2x/qVL+lyFNzUy5o5x6a3a4ej6YA1kLXH+Do6w6ijLNGEA8//iaCr4HSGh8t0+l59dFwnOjA55uCfd7gGSdWH19dD0PnCdcnn/s+MdcEG54DJU1pQz30XCyN0X6UWnHHvR5j/lEmsQZ0HUZ5lbjqOeV2+hrqY1GcSfVpPkM2mnV+wMU5O260OUM93DeXm8ekna5HYkacaD/rWjlpbOWt/WhTv77auP+4x9R6qeKTv2tXuk7Rp1RTV7eJ5iJnC9/I3jGndQxNom8AmVa8cXCxjqJzP8LkmovGfB1Gtrm1oddWYsM2979baeMx2Y86wuy7Jpw74vg6j67FffGVp7c9DsZzurmvnw+T4OMxcEx9SvpHftGcwK7r/hWa4MfvD1wr4Az5umvEZi5oaynllvPJ6e3zwjg5e46jds2itcj14bZ98DXWNNpLsjH761+fno58x3vbfF7T/H/ZTTe9SZjbe9/z4fSzn/0i7b7HbukpTzlo3ubkk3+SPviBT6SNNt4o7bbb7dPJJ/80XdH839kVK1akzbfYrPkE7m7pwQ++X7rRjRb+39p5gCVq1I3ZJRK+hq0KVAWqAlWBqsBKBfSmbznciNWJqQrcUBVYbg8+yocPZ5H2/gAb2bqN4izFdSX38AtepYdm5V3boymAtTDLjVmwxFr2N158DUTrdrQMbxhe1Kfrja1ZZEsuiDV0nvqst1nksBxjUNeSprQhf7X1sUldy/W1Z9zrMeYfpeuaw/xKteertn3OE9xPR3b6+jhuvsqpq6339+CFol/xmePiOuiaAIbjRpszyHnIBpOuCcRgiWJHGtN+KWrVg/GdN/tRU9+c/mo7jTa05v9mnQZ+X0w9L+Azrh7RPAA3um7lbGFfmjuMT7tE9zPTjjkKPuav73WXaz6K4/Me2ebmxNcQ8IGXu55gvOv6EWFGawhYXjSuxtH1hv7ctzFcdtllCf/CcvPNN0vbbLO1wxePI94vf8z26fLzf936bb/9dmmrrbYIMVzzvvkC7JRTTk34UOBmm22adtppxxCfnYzjc87xrlr1he2j99oi7bDeH1q3KD9oghKt06456dLAubSBml+l3KI5gl8uFvUiNuucPcdReyzw8j7ieC65803xp91eko3Zk076cfrwhz7Z5lb6KuKPf+zodMIJJ6Wddt4xHXLIM+e1OP74E9JRn/pse7zWWmulLbbcPK1/oxulc5tP4l591VVt/01uskk65PnPTFtssfm83yQbl1xyWbrqyrlYfXGvuurqRabrrTfsa5g3vvFGaajPoqC1oypQFagKVAWqAmuoAnrTtxxuxNbQaahprwEKLLcHH+VTepDE1OgDbO46oTY6nTl7tZl0W69rjt2Vq9vX48kr4PPDNwdGjYQ3G/yNF49R532hutRnXO0Xoo52RC7wrvM0moaRF9+EK12D9XXA9ac/sSc1N8TVN64ZYylrXYfKoy9P+Oe+Vhmvj7M+1zwf5NFnY9bXhK4fzh31yeUEu0lszPqaQ04o0WZwO7AEv1xnUHDeSmupcxg6N8p90m29byxp1iduNA/wi9aoxnVsXe8+NotjzI/fz8wi7tAYQ3jm5gYxfd6By2/0ICe1ufbaa9NXvvKNdMEFf0zb77pXes//XUKztoZthEGjaD1wDHXk23dNqK/G0fWG63Ab56y5TUO0md+xxx6XPnPMF9PfP/Fx6e5336O16/tLY9PnJQdsm97/1n9tDx/z2Eeme997Lw4tqH1++uYLkDe96V3prDN/l+7c/DvOg5t/y1kq4Igf/0PKko+P4fWJX1X9qD03S8d9+u2tSZQfYuXOJZ+T3Ga5x+dxpHefewV/fQWerhXio/Z54Vif+VF+5KV9wGJcj8P1yHhLUS/Jxuxxx52QPn3UZ9NazSdcjzzyddm8jz768+nb3/pO+1XH+Mpjloub/zt7zDFfSDe+8cbpoQ99QNq4+eQsynXXXZeO/b/j0he/+JV0zTXXpFvdaod2cxabt5Mu2Ji95OJVF5dJ41e8qkBVoCpQFagKVAUmr8C7vv7ndNq5c39Y9ZwHbpp23np5fbvG5DOuiFWBpVHgtPOuSu/62p/ng7/9oK3m20vRUD5dXPQ6kbNVG80nZ682k27nuOx8s/XScx6w6aTDVbyBCvj8PHS3jdJD7zj3/DoQKmv+5Z9emr78k1XPpkuxDrPklsEAz/9paD80PZ2rOk9D1SvbH/KJ81NJU64DoERrAf65sXZghF+Mudyux+SlKQ3hiHWMEl3LoGOkr8aadNuvs46fWxd6PsJHnw18rJQTbCMtnAeOI+3R7xxhd9r5V/fGBca0i+vctWZgv/NW6y5ZDstJQ2rXpVmfOSSW20ZrNGcLX19zjleP5xTAOur7nsFQvfm6Q611Tk75xanpc5/9Yjt0k+3ukE5b+y40m7/G5q4nMIzWwzxA04h89Rqott5WX42j+WCt830X+Ova//jHj05n/e7s9PxDnp022GB9h+881jgwftNjb5re+MZ3tH4PecgD0l3vunuI4dd11Tt0kM4Pfujj6Zw/nJtut8tt0qMPWPXvNsVkvgl9JnX9BucdNlsrfebD727xS/nNE5CGr0nMlz4zdM25zjVhdc7ZF9UeOxfL7YjVd364HrjGnDP50q6Ef7NttuTwTOol2Zj98Y9+lt7//o+1Cb75La/JfuXwRz78qXTiiT9Kt73drdNznvPU3oJ89X+PTV/4wv+09s945pPSrrvu0tu3ryE+/Tp0Y3YSn5hdr/l65vqJ2b6zVO2qAlWBqkBVoCqwUIHPnXBh+twJF7Wd+MvKXbbdcKFBPaoKVAUmosAvzr4sve6zZ7dYOM9wvi1lUT4f/afbFKn0sdVrCcGWKk/oDM5eloqP81jTj32tTOO1R2PUeV+84nhO4xMHj9oz/nq7xV7T6SEXoHddi6bD4IaLimth6bVGtY/WAq+l0dioqjHmJDFH5aJ+5MW+odcN+KPAz0vXPLj9JI45dxFWKTfXQedJr6vALV27gRNpEfHxmLTx6wHsfvn7y5b8mkV+qF0T1Uvt2IY9ylJed8FhKeNTC67RLs1oX6qJ5TbRGs3ZwtfXnOOtqcef+MR/tB/8OuBR+7f/QnGIDjm9c9cht9c5Oft3f0hve9vcZtxd7nrn9OULd5unouvoiUf+ar5fG9F60PHoWtTlQ3/1VS6eD+1R0+7yyy5Pr3zl69KOt9o+PfvZB6tJ77bn/P5n7phecujhrf8BBzws3XPvPUMs5Q0D1Tt0kM63v/096XdnnZ12u9Md0j/8wxNkJG4iVt/XhRhhVS/2k/rkt8pjVcvnBPPA98Ng1WfOXW9gbHLxz9Mfzjk33fNv9ky32mmHVQGl5XrnYvlrCyH6zg9z5BrzuDz/NA/2MRbrWe+5dW/MXt9Qkw+cPvc5L265PuKR+6YHPvA+Kdk4E5mvg3F8j/jb3jp3cTn88EPTZpvfdN5cG+/+tw+kX/ziV+lud98jPbH5eHtYAnx83P+fX/CKhHr//R+SHvLQ+4eubWfgv8B43HEBq/9jVsSozapAVaAqUBWoCiyBAvq1Jsvhq0uWQIIasiowEwX0XOPXB80kcCYI+fArjjJmbTdtcZC7TqgNsZYqT/9aJvLpkyttaz09BXyt9PlarqFsNEad91g9nCcH7rVl9qveYq/J93Ku6jxNR9vcV/khGrVHO7q2czwag8+oBV8juFSvDyXO/HrDSa9FfH3hOF/fWOKcG8u9DsK+lB/nnLg6T/41jKVrN3BKa4/4rKk9j3Mch+ISb1q161XSBBxgjzJEm9Zhgr+WYj1G9KmdrrHIrk+fr036RPNROjcmfa0jj9W5vvrqq9MLnv/yNoVXv+Yl6aY3HfbNM7m5KZ3j+nXGPifnNf+y8YILLky7NJ/SfO1nfjf/Fe061349of6OxX6t3bePD/3pq1xK641r/wc/ODl99CNHpQMO2D/d7/73ItygGnG0HPrIWzT7QHPzFn3VL215HuI4Nye09XrIVxm777jH2Jjtk18Ux9ck8tav+tf5i/zRx7nmOObyjOOPTmc2X+3c9XXUGj8XS+eFMYbMD9cd15hzBhaeAfRcU1vGHKsecf+we2PWWC3amLXxPocXXnhROvxVb2hNX3zo89K22948dHvTG9+Zzmr+GuEBD7h3euSj9gttcp1HHPGWdN655yf8VcmTntT9lww5nEn2143ZSapZsaoCVYGqQFWgKjBcAb3pG/LgMTxS9agKVAX4EDfxB58RpQWfvlxg2/VAyPxIJ/ewyfFp1XwYdfwu/m5fj6ejgL7uIMI0Xns0Rt81Pp1sly8qzpPlsDELhfpcX5avkqs3s5L2OI/wM+lNRay93P9jXUo1wQtlOf0P05bQCL/0jV93L10T9doJP33d9NfWSV67/f4hxxH8lnJT07V0vbo0WW78PZ9ZHlO7SdwrEsv5R9i+jumja519tU7pz3/+a3rFy1/bSjGLjVkE4vWga050LvXc037OYRcW7Ribx4rLvlxNX/WJuNCf17kPfvAT6eQf/iQd9qoXpS222JzDY9VDNi7Jm3z6Bl6TN2Z9XnGt+Y8Pvjtd2PzRQNfGLPSlf3SNwnh0TRsyP/RXfM4z8HE+TH1jFoFGKEuyMYu/QHnhP7+y/WqAvz3owLTXXndbRP3qq69JhzYfQ7/qyqvSgY95RLrPff5mkU2p47DDXp8u+uOf0j3vuWd6/BMOKJnObKxuzM5M6hqoKlAVqApUBaoCWQV4k6YPEVnjOlAVqAqMrADPNX1IGhlsAo7g0/chDw+QKKU3rPmQ2Ro2v5bqmpJ7Q7rvmzLkX+vpKMA3C4g+jXWiMfqucfJZU2pohLIcNjlw7ViOG3Vrwlroeh3AOpn0GsF8L5c/CtA5xmvHpDehFX+W7dzrIDh0XRN5rwJbfd2c5mu8Y3dxBLflUPS1RrVaDtxWBw5Ya5O4J9Z50Lwj7Ny5UedPlVvV/v3vz0mvf93b245JbsyWznFeD7rmJGfH/lVZdF/3aIv1cUrzlbsspece2rBmXL2vza03+MAO32z64hcf3n4S+WUvewGhxq5nuTG7++67paccfNDYnIcADMnPcXPXC9rp/LHPa841+3Gt+fA735TwtdR9NmbJoRRLX4sRp3TOkAdr4us1UPFwbuG++5jvXECXQfjzTlNoLMnGLPJ433s/kn7601Pa//+K/wPr5ec//2V6z//7UFprrbUSLoabbnoTN8keX3rpZenQ5kRHedzjD0h7Z75bPAswpYG6MTslYStsVaAqUBWoClQFBijAG8vSjeEAuGpaFagKZBTgA9FyOddw7vfdDMEDHn5Kb1rzWsL0lyrP3JsgQx5omUOtp6MAzwWgT2Od8A0J4Nd5hwqLCzSa9Ibb4ij9eoZci/ohVqu+CkD7WW+SLkXMvnrcUOxyr4PIr+uaqNdn3RjJ9U9CM79/0DeTJ4E/TQzq0qXrNDmsrtiY9yEbX7k89TVfbaL7i9y5UedPlUvpN785I/3q1NPSueedn3540o/bwbvveZd0oxvdaN7wgAP2S+uss057fM4556Vvf/t7zSc+N0v3b76O95prrknf+Ma303+dfHE6b+2d5n3Y2G/3jdN2a5+dzj77D+0PNrS2ufnW6ba32TlttfPu6TVHn7noWoVNzM985kstxB577JZ+dP5G7cYS544czrz65umkcxb+v++bXfubtMXVv063uMU26cEPvl9bk0tUY+Pv29/+bjr99DPS7373+3STm2ySttvuFmnPPe+att9+28glvfiDP0+XNXk84z4bpV3vuEv6618vTm896ofpl3/dIrTH+vxl8+8q/635t5UPfsj908Me9pDQ7vzzL0xf//o3G53OSX+66M9pk002Trfcfrt57el0++brnREXZcjG5Us/+st0+vnXpl1W/Dhdc9EZzTzcLN2yyXXve92juPfET8zetflm1n9ovpkVm/gnnvij9JvTfpv+evHFjU7bpZ13vlW7B4W9rFzBhwh/0uyHRWvhvvfbu90Hc98h+blv7npBO1438G9Hv/nN49O5zbfPXtbsq0EXrAHsqX39F1ct2NR8/E5npK836z1df326zW13TltvvRXh0r0ae/h6eccxP0k3vewX7dcfr7322u235+666y4JaxuFry30O+iuKV1+/mnpdre7ddptt9u36+uEE05KpzV6/+EP56Ztttm61Rwf5Nx4441af+bieNHG7HJ53Z3qxuxFF/2pnVCIuGLFCmrb1tiUxebsOuuu0/zT539Z9L3tH3j/x9OPfvTTtMvtb9v8M+inzPvia5C/9tVj0wGP3j+tt9568/3aOOqoz6bjjzuh+d+4a6WXvOSQdPNgQaj9rNp1Y3ZWStc4VYGqQFWgKlAVyCvAN0P0xi1vXUeqAlWBURVYbuca+PR9Qx4Psfgpbczqgy7fJBlVq3H86ptu46g3G1+eC4g2rdcevqGxXN5omI2yq2cUnLMopevL6pnZ8meNc3ESGyNDMsV817keothwW309du+u12e9PsOX12heU9GnG7Y4Hrf46/bqdN2mXl26jqvRDdF/ktcfXZ/UimuXx6h9rXGszh+VmKu/8IUvp6/+7/8t7LSjN7/lNc1G7dw+BD9MdrNmcwif/Pzwhz6ZTmo2dK9ef+v055st3nDc+K8/SRtc9CNDnDvc5rZ3Sz+5cpdFG7O+GbfFjru1/yOTc0cOG259m3TmBvdYgL3puV9J615xXtu3brN38qxnPTnd+ta3WmDDA2wwfqjhj38H6QUbaI945L7pfs2GoZcjPnh8+m2zmfeQ3TZsNlkfmt785nel3/9lRZg/r6FHH/2f6dvf+m765xf+Y9phh1s6ZPq/ZrPv85//n/aTtWs1+0gbb7RRurjZ9IyKbu66Vve+916LXK677rr0P//ztfTpH62TrrrR1mnLMz66wGaDDTdIj3/8o5qNwjst6OfB/Mbs3XZPW225RfrKV77R8uQ469vf4XbpyU9+Qlp//fXZ1db4muyvfPnr6bvf/UHoByPsgT3taU9M6zZ7ZVr65Kf22i69PsIO140vffErbT7qx/a6666b7nb/h6XPn7oxuxZpNz/QNPDhS2y4suDDk5/4+H+0H85kn9Z7Nn8A8bhG99d97vcL/vft43c+M339a99Md7v7Hu3/WD7mmC+0G8bqi/bmm2/Wxnz+Jy6cf/1GP1+r0EbBeaOfmI2ul3OWs/1d3JjFor3mmmsXMOI/G96/+cuG+91v1T9pXmedtRdsvp7T7F6/rvn4//XN7jn+yuTv//6xC3CAfdgrX998f/tf2r/cOOT5z5xftMcee1z6zDFfbO2f9vQnNjvjd5j3ffvb39P+NQIufvhH0be61Q7zF0Z8RfKXvvS/7V+pYNf+Pve9ZzrwwIfP+y51o27MLvUM1PhVgapAVaAqUBWYu0mDDrN+Y65qXxVY0xTAAxHKcjnXwKfvxix4d72Zrg+6fJMEfrMu9U23WSs+PB7fHOAbU8MRuj34Ju1yeaOhm/Gaa4FrB8py+QTvmjQT0H7Wui9FzDVpTpGrvh577l2bnrw+0w/XUMeb9Gu8469O123qtTpx5twudT3JawFf8zWnaE58rdF+0muauKtrjU8LnnXW2e2n+U78wcltGvvu+6CEzTqWezWfqMRGJQo3RfFJPew9/FezF7HtdjdPt7z9PRZsYNH3xhf/LD3oDus3exjbt58UxKdLj2s+cXvyyT9pTdbb7aC01643W/BHPL4Zx41ZXtPI4bqNbp7+uOUDGaqtX/3w9dMpzTeR8lONt9pph/T85z9rgQ0O8OnNI1771nT1VVe1/B/x8H2aT6du22yGXtp+GhQbidjX+cd/PDjdrvmEqhZuzO61w7Xtv6H8ZfOJ421uc5f04ytWbcrRnuvtFS//1/bfWh7x2pct+mTor371m/Sud/57Gw+fQt5v/we3H8jDvtEnPnFM+2lbfBJz//3nNr632mqLdLObzX1S07WKNmY/fdTn0nHHfS9dtumd0qXNz8seslbastlgxbx/4+vfaj8BC774NCw+FeuFG7P4ECD2nLZvNpb33HOPtOOO2yd8yvf73z8p/fxnv2zd7rz7HdPBB//dAohf//r0dOQ73puwUX6Pe9w1uxYe89hHJuffJ78Fwewgul7Q5IhHbJDe0fBCwf4d/t3oLW95i4RPZP+gOReg2T6PeVL60Hevbm1uvsk16UHbXpD++7+/2h7ftdmoxqeFWe7YfIoZm6Uo2Pt7wxuOTH9oNv83vvHG6VGP2i/dpvmUOMqvfnVawmYrPj2OT3WffMXt5jdm8by023qntBuz+KAncPBNuvdsPo172+YTuldccWU65ZRT07HHHt/OxSbNJ7zvtM/B6bF7r/qkLl+r2mDNL2Cectbc/Tf6ouslbWdZFzdmv/e9E9td7T6EfOF8vVnU//m5/2pdN9pow/T6Nxy2CAabt0ce+b50ySWXNn8NsG771xvnX/DH9p8Hw3iffR+YcCHUgovl+//9Y/N/MYG/oNi2+Wg+PnmLv/LA/6RFwYnx3H96+qK/MlCsWbfrxuysFa/xqgJVgapAVaAqsFgBbGLgf6ksl82ixQxrT1XghqEAHoj6fnXwLDLu2mh1Dn3eQONDH98kcYxZHNeN2VmoPF4MrpO6MTuejjck7z7XlxtSvjWXqsA0FchtPiFm1+szr8/khzdrHY+bCrQZt3b85fIGcZ+86nNUH5Wmb+PrFhGjdeRrjcy6zgvarWn197//w/Sxj366Tbv0P2a5KdoaNht1+zb7F/vs88BF1w7qt/9dNkl/d79Vm1fox4YnNsTwVbiX3nK/tO/et+u1Mct5Vg4X7PBEhlrwCf+jj/588wnV77Rjh7/60LTZZjedt0Pjvc03mf6s+UbTnXbeMT33uU+b33im0deaTyx+/j//u9mo2zb9y4uey+62ft+nvtF87fOP0rpXnt/4rUjPfOaTEzaJX/3pMxbY4QDX0D23uzq9sdmk+5t73j094QmPXmTDD+Lt0mwAP7vZCNZyxRVXpNe8+s3t19n+43Oe2n69rY53bVyeccZZ6S1veXe7ibfrXg9K562784L3gq5s9pLe+54Pp1//+jfpxje+cXrFK1+YNthg4SdeuTGLfaiHNF/FvM8+D1jwAUXM50ebtYONfXyV8cte/s/NV/xuqTTTt5pPC9/pTndovypaB3QtbN58PfarXvViHR70Vc0LHFcelDZm+clU5P2aI16yaA1cfPEljSYbz3/VMJ5lnvugTdMrX/G6Fr30P2bx9d6f++yXEvYFD33J85vN1U0W0DvzzN+lN7/pXWm95pPoezzsGemLJ/6lHcd6Wef8E9uNWXTs1mh20EEHpg03XPiV3cc135b76eZbc1Hute9j0mP3bb7/eGWJrpEcQ83zSPuWoj21jdnzzrug/YfZ+J71e95zz/T4JxwQ5ocd+I997Oh0dvMd5liIKFgM+Jj8gx583/bYf11++RXpq189tvlrhB+mvzR/OaEFf82y/34Pbr8b3L8+We2Wol03ZpdC9RqzKlAVqApUBaoCCxXAA+ox371gwc34Qot6VBWoCkxCAbx5h7JcvsJx6MZsHw340LeUD3e5N90m/UZyHz2qTawAN8+nuTG7HNZinH3trQpUBaoC01Ug9zqIqF2vz7x2kiE2rNpnhe9cwK52U2GS9zLKd5qvC/MJTLAB7viZpB4TpLfGQPm6za0jXWsqTt2YVTVWtUfZmMWm5iGHPLMFGar38cefkI761Gfbr0B+1rMOTnfa6SbzZKLNRsw7/7g8tzGr9//4f5zvaL55FOV5DcedG64s+ITqO5sPy2ET8RXNv5jccsvNOTRfg8PLXnpE8wnFK9LLX/HCBRuN2Jj96fFfaW0f2mxK77ff3Afrok1ArLff/uQ76cvNVwk/s/la5Ts0X/erBftB//LCw9KVV17Zfq3t3nsv/Gpm2H7kw59qP8V7v+bTtPgGVS2RVjr+trf9v3R683+E8cnKxz/tuenUP1yx6BqG/1uKb37Fp2HxCc6HPfyhCpG4MXunO++anvrUv18wxoM//vGiZlP1jS0G/l/sox/9MA511lwLMNSvzcZxV36wKRW/XtAW142drzkpffc730+32Pbm6dBDn8ehRTUxsL4esMt6nRuz7Zp52b+2c/q3zaYqPokblXe96/3p1F/+Ou2974Hpc6fMbbzqxuyGzabuG4IPewIL6+bww9+Y/tj821P8P+VDm39nykK+PNY6d71Um1m1ixuzLQnsleb/Z3GjQn4cn4TFx7nxUf1sWel/+eWXp9/+9qz2rzf4UfTWp4CPCcB3oONj7Vc3G8D4x7/4uPSCf7Jc8O/Cn8i4JF43ZkWM2qwKVAWqAlWBqsASKYAHproxu0Ti17BrlALYjMJXRs76ayNzIuPcnzSX5XA9yb0JpG/M5DSp/bNRgHM0zTnBGxAofMOuPai/qgJVgarAGqJAtBmA1Ls2ZvmHM5Qp2pidxiYW+U7zdYE5TbLG6xl+6sbsJFUdjuWbDrmNBt5/eISu88Lt15TjUTZm9Wt+c3pH15Crr74mnXDCic0n/j7XyvvYpz433evO285LHW3G4XrFc083Zm9yj4PTaefOfdWsXlMubDasDn/VG1rMJzVf0XsX+YreLzdfU4yvYN6u+TTsi+zTsPMkmsab3vjO9ut+fUP1P5tP0uJ/gOIbUF/7ry+f/4Qpr22KgfX2+te/I11wwYVNfdiibze9qvkq5Rc2G7PXN19Z++Sn/G34f1756d/d99gtPeUpByl8ceMSX4P7zy94RfOvOq9J3EBWHRXoyHe8r/3U7K1vvVP6p+c9XYfmN2ajrylWQ+qF/xf77Gc/RYeybV8LL23+bzH2uFiitcCxPrVfL+iD68Y+O/4pffQjR7Vd+hXStGFNDKyvPhuzv2k2wt/ebIjjE8bYWPVPIBMX37aLb9294988OH3jD3NfRYzz5VcnfbNdXxs1Xxf++te/kuaLavpjHb71bUfMj5PvfIc09Bz5/+3dB5gURdrA8XfJWWDJWTGiYjr1MILiiQETiIoed2fWEwVBREURFMRwZu/OUwwoiop4gndmxYDpQIKfAUUkqSBZYMnuV29BDz293TO907M7szP/fh6Y7umu7upfp9p+p6pcX0cbTTH+mDwwGy1bpHYJEJh1YTCKAAIIIIBABgWCCuMZzBKbRiDnBPTlRLoDodmGpPuo/5yXJJnIX9BLoDL5ozMTO5gD23SOUVkeE30BkU1Nh+fAYWMXEECgAgn4BQM0+8kCUM792dlVv8BssnU4aUvz6bw0LsvnQmnyw7IVS8D7g4KgwKzuld+1URbndMUS9M9tKoFZbxDNz1vvK63rF9s+O+fOnSc//7REli5bbgORTk769b9U2rffUaPVLxin9yvnb6ugwKw7CKwV5q4bPNxu4tzzzrR9mzrbe/zxZ0xTxDNtX6n7m1qgQYN2c6mV4nr07C6dOx8RW8wJzHoDZ377/48LWtoalh3320cuusi/tqkT0Dy+27GmH9k/xLbjjDhNHZ/c/XjblLDzvX76WTnztUXXW2+5y04GBX2dZZ9//t+m6eePxbtPOt+pMZssMPvII2Nk1swvTe3iJqaW8QBn1XGf2jyw9t8a5VyIW2GSCe/9wllc7xs3ntlWHhs9VmbM+MJ+rc0OH3zIgXLYYYfEBYedZ5aeX83rbE5aY1b7ptUfHahl165HO5ss8TlnzlzbN2/rvX8vn6/b1o+x3p+Czi/vCiZP/lBeHD/Jfj3ythtts8s6EbTPOi+bnrsEZvWIlNNAYLacoNkMAggggAACSQTcf9QkWZTZCCCAQEKBTP/Qw/tS2cms+8WM8x2fmRPQF1Vl+SJAz0MdMvkjgczpsmUEEMh3Ab9gQKJglePlfYbqffqrRUXy1YJ1ziJJg7uxBUsx4rzkLsvnQimyw6IVTMB73iY61/2uDQKz/ge8rAKzx9WfLl+Yvly3bt0qVapUkeYtmkmrVs1Nf55V5MMPPraZCROYdefaHZg98JRL5fVZRXa2+9gmCsxqs70//fize5UJx0855YS4LieDAmfOvc1ZmZ6bnZv+JC+88LKc98decuihBzmz4j5fNc0c//c/b4p2UXmDqTG6k2l22Bm+/vpb+ftDo20LqQMGXiFt2+6oWazLJArMzjRB0kdNsFSHAQP/Ku3atbHjfv85ferqvFGmlqcGKZ0hbGB27NMviAazNa2uwz1oDWbtszfd54J7G37jQUFK576htYo//OAT0WOg54wz7LZ7ezn99JOkdeuWsUBn2MDs+PET5b3JU5xVJf3UvoXfX7+tCevSBGbd1+z11/e315ZuLGifdV42PXcJzOoRKaeBwGw5QbMZBBBAAAEEkggQmE0CxGwEEKgwAt6Xc07GCcw6EtnxSWA2O44DuUAAgdwU8As+OS+dE+2x9xmqL2zHu/qX1bTuQEeidZVmnhO84FldGjWWdQT8ztugH2Z5r40w14WznXz7dAd5ht9ynTRoUN+XwB0UDVNjtvG8MdKkSWPR/lF/97v9pEaNGna9c76bK/fd97AdjxKY1WaQH3prtXiPbaLA7P2mf9nvTD+z2myv9oeabGjZspntPtJZLmxgVu+piz+fKNqn7UjT5HEdU4PSb9CgtfY3qiZ169aVY449Uho3KjTdXs6Xye9Nka1btprAcBc5xdP3q64rUWD2++9/ME3qbutn9/K/XiAaAAwaJk16Xd54/R0TMK8sf7v7FvvpLBs2MKsBZA0kazBz0LVXOsltV59qvtp0x5nucyG2kYAR7/3CWcx7vugx0EC29nf77ew5djHtLvTCi/pIlQZtZfhz8+zzcOXKVUlrzDqBdq0x27t3T2eTgZ8aiH/y0y12vnbLEnR+eVfw5huTZeLEV+3Xd941LHZtJQrMZtNzl8Cs94iW4TSB2TLEZdUIIIAAAgiUQkALp04zQKVIxqIIIIBA1gkE/bGdTX90Zh1aBjKkL+F7dmpcZs8ePQ904NmWgYPLJhFAIOMCTqDTnRHvS2f3PGfc+wzVNO7asmHW4ayrNJ/OS+OyCPqWJh8sW3EFnOe+swdBz3/vtZFNtcWcvGfLZ1kFZg/67R25ou+Fpp/NmnG7mu7ArPfYJgrMjn/B1Gg0Ac+WrVrI4MFXxeUrzERQ4Mx7vp16SAP59MUHpd3ObaRfv0sTrvqN19+VSZNekypVq8gW0wevM2jt05O7d5PDDz/E1pp1vnc+EwVmi4rWy7WDbraL9jrrdDnyyG21Mp207s8nnnhWpk2dYWtdau1L9xA2MHvL8LtMEHap7c9X+/V1Bg3A6/FuY/r0Tfe54Gwj6NP7nHOW854vzvf6OW/eAtHmrlcsXynVq1eXPlcMlJEvLgwdmJ054//k0UefEnPA5I47tI/Z+HPfvS1nXM8dp1uWoPPLWdb5HPfsBBtIrmcCuyNG3OB8nbDGbDb9jUxgNnbIyn6EwGzZG7MFBBBAAAEEEEAAAQTyTcBbG0L3P5v+6My34+G3v/qygcCsnwzfIYAAAtEFvMEAXWOil87uLfo9Q535YdfhLB/203lRTmA2rBjLpSrgvTbK6pxONX/ZlE77/Rzz5DibpZuGXiONGzfyzV5pa8wOOHqTHHzwASXWla7ArNba7T92WYl7XqLA7Icffmr6AJ0glSpVkmHDB0v9+juVyF+iL4ICZ97z7ez9t8rb/x4rp5kmcY899qjAVTqm2odrnz5nmf5X58u6dUWmr9bG9p82AR00JArMapohN4yQ1at/lQ4d9pDLLj/fdzW6jqFDR8la0//rQQftJ3/+S++45cIEZn80TUOPMk1E63CS6Se3m+kvV4fNmzfLNdcMtbV++/zp7LSfC3YjSf7ze84luxdocPZvdz1k16w1usdOKxCtzbpq1a9y45AR9vtzzukhh5mAuXcoTd++Tlo9d0oTmN2yZYs5ZrfLr+bY7r77rtL3youcVYnzjI194RrJpucugVnXgSnrUQKzZS3M+hFAAAEEEEAAAQQQyD8Bvz+2Ccxm13mgtaO0NktQjZaoudUXEGW17qh5Iz0CCCBQ1gLeYIBuL9lLZydPfs9QZ17YdTjLh/3Ue/b4j5fal9xh07AcAqkIeK+NsjqnU8lbtqWZbZpvffCBR2y2tLbjQb/b3zeLThBRZ3qbMvZ66zJ9O601tT0P1dG4wd0Ma5SmjJ3ArDfglCgwW1RUJFq7U5fZt+PecvHFfeLylmwiKDDrtAbgpO9WOFOmTZspiQLduuwTpnamLqcBUQ2MlmZIFph93TRP/IpppliHoH5m3f3LXnXVJbLrbrvEZcEJzO5lgruXBwR3tYao1hStWq2aCRheE+snd/nyFXKzCSDqcPY5Z6T9XIjLaMCE33NO7wUn7FfHNP9bPa7ZZmcVP/+8REaOuNtO9r3yYpm1rK55rjaxfSX362dqpxYX233RffIbHjDXkjaJrEH/G4YMsNvxW875zv23knN+aU3bkbcNsX0zO8s5n++995GMN30X66DNLe+3397OLAKzMQlGYgIEZmMUjCCAAAIIIIAAAggggECaBPz+2CYwmybcNK1GXzYE9f+Wjk0QmE2HIutAAIGKKuAXDAkbgPJL6ziEXYezfGk+y/q5UJq8sGzuCnjP77I8pyu6ou0786ZRNuDU0QR5zjuvl2mCdVt/sO59K01gturGJdKh0iwZOPCvUrVqVbsaDYo+//zLttlcZ71RA7OPfLC+xA89EgVmdbvupps1GNrrrNOkVq1aTpZk/fr1prnjj2SfffaSVqbJY/fgBM60D9FRo26KzfIGZlsveV52qldPhtw4ILaM38gjjzwls2b+n3TYe0+56KI/+gbi/NLpd8UmQDh48HApMjVs3TVVneW179TbRt4rS5b8IjVr1ZQLLjhP9thjVztb006ePEVemvCKXc+hhx4k5/2xl5M09ukEZvUL7af2zF6nmRrVhXa+Boaff+4l+fTTaXa62wld5aSTjrPj+p9uY8CAm2Tzpk3SomXzUp8LyfYvtqEEI977gC7ao1MjWfTZBFszuXfvHrLLLu1ia9DaqGPHjpeppha51lYeeduN8sOyrbEfgQ67+Q5Ztmy57XdYa6oWFjaMpXVGli5dZgK794iuq1XrFtLnj2fZZqKd+b/99ptMNU1Haz+2WqPc/beMc37pso2bNJJexnvPPXezSTWd1vgeP36iFJvx9rvuXKKZbF2X9onrHcqqewLvdsJOJ68xW2xWVZBgdcxP7OOiIzDrwmAUAQQQQAABBBBAAAEE0iLgF5j1/mo+LRtiJSkLuF82pLwSEiKAAAII+Ap4gwG6UNgfKGlaHb5aVBTXv6x+F3YdumxpBwKzpRVj+VQEvNdGWZ7TqeQv29L8619j5ItZX9psVateTXYzNScX//yLbQJXm9XVobSB2fo/v26DsloLU4Olixb9ZEMtx3Y9Wt55533bxG3UwOyUHwpK/AAwWWBW92WCCUi+++6HNhitNT1bNG8qDQsbyPJlK0RrTGozvBqYveTSP+viscEJnHkDs+6AWLtGlWTd1Cek63Gd5dRTT4il9RuZOfPLbX2SmiBmdVODs0GD+rFAtoaltJ/ZevXqyh4mOLf//vuaefFNGzvHTWtYdu5yuJx44o7AqG5v4YIf5ZFHx8jKFavs5rUWpwb8Fi38yQag9ctd2rczNYf/ZLdlF3L95wRmNXg5f/5CW2u0UaNCu6weTw3+6qBB2wtNYLmasXQPk0yN3TdMzV0dNEBfmnNB0yTbP10m0eAXmD1h/9oyb8oLsswEUHXYyZi0bdvKjBXIN998K5s2brLfa8D+yCM7xQVO9ZyZ8OIkO18Dq7vt1t46HmZqhh9xxI7a4Rp4HWeazN64YaMUmGaz9Rpq1qyJrDFNRuv5pcH0Oia4P/yW6+OOqXN+qVVBpQKbF12uSZPG8tNPi2XDhg1223oMLr3sL3a99ovt/7nPQ/f3ZRaYTTE+mjww684945EECMxG4iMxAgggkDaBtWuL7LqqVatqCkzbfrWYtpWzIgQQQACBpAL6y2L9p0OdOjt+mZ00IQv4ChCY9WXJqi8JzGbV4SAz2wUoE3Mq5IqAN/ik+xUlAKX3bP1HSwe5coZk735QJs6uY6PBzCeeeFZmf/NdXMbcfYOWJjDbuOYGqTR7vGgtPx0qV64s7XZuIz3O6C6t27QUp+Zh1MBscxNQ9Q5hArOa5rvv5soLpklYDZRp87TOoAHSTp0OluNMYFWDou7BCZwlCsx2qL9Cls54RfpffbmpjdnWndx3/O8PjZavv/7Wd577y6YmsHe1WWctU/vVGT7/fJZtDllrl2rtzZuHXevMin2uX79BXjTBxGnTZsiWzVti32sw97g/dJauJlCuQUa/wQnMapO+ut1nn50gC0yA1hnUSvvQ1X5l/dahgVs1e9/UQC7tuaDbCLN/Tl78Pv0Cs/qM3KNFDfPjgA/kww8+kRUrVsYlbdiwgZxySjffJr11H142+6Np3UOnww4RrX3rHlYsXynjTI1ibdbYCWDrfL0W9jZB/xNOODawRnaDhvWlf//LTBD4FZlpalTr8XXSaq127ePWr1Z7UGA2UYsBmSgTE5i1h7N8/iMwWz7ObAUBBBBIJKB/+KxYvu1Xcg0L6xOYTYTFPAQQQKCMBPQ+rPdj/XGM3osZogkQmI3mR2oE8lGAMnE+HvXc3ed0B2ZzV4o9yzYBysTZdkS25UeDVFozr4oJHjVr3sz0k1kvVEa9ATANBB2/by27Lg3YtWvXJmvfQWktxB9//NnURtwozZs1FQ2K+QUZE0G4A2KNNs6Wumu/lJEjhyRdz0sv/Ufeeft9W7u2S5cjbJDQCeJtNkHUxYt/sbU4v/y/b+zmjz/+GDm5+/FxWdGmczX/dWrXLtFHrHtBDSpqEFqPsQa0NZBb2v3U9WmtzwULFtnavbqeMOvQNHpepXIuhN0/974642GekUVF621tbm16uImpTayB2UqmlmuiYZ2p8armGzduNE07N7K1YYOW1+PpuOuyug0NzvoNTuBffxAwwpw/Ouj5OX/+IluztnXrVnE1bP3W4ff3cVBgNlNlYgKzfkeujL4jMFtGsKwWAQQQKIVAph64pcgiiyKAAAI5L8BLqPQeYu9LIF07TRmn15i1IZBrApSJc+2I5vf+uIMBjgTPQUeCz2wWoEyczUen9HnzBsCCAkGlX3PFSeEExOovfl2OOrCNqUHZM2Hmv/9+ntx7zz+kTZtWcs2gvoHLaoB22LA7ZPWq1b7NKwcmZIZ4z0slidKqRFmTOoFZb43s0mzXOQ/daYL2OVNlYgKz7qNTxuMEZssYmNUjgAACIQQy9cANkTUWQQABBPJGgJdQ6T3UBGbT68naEMgHAcrE+XCU82cfCczmz7HOtT2lTJxbR9QbAMvnwGzjeWPk4kv+JPvu2yHhQf63qS37tqkte+RRh0mvXqcmXPZvf3tI5v2wwDYbfNrpJyVcNtdnao3f++/7V6jdLKpcKAtqdYpbNihIGbdQhiYIzGYIPpc3S2A2l48u+4YAAhVFgJdQFeVIkU8EEMhlAV5CpffoEphNrydrQyAfBCgT58NRzp99JDCbP8c61/aUMnFuHVECsyJaU7GurJS6P74mt98x1DQ5WzXhQX7zzcky8eVXRfuOHWRqzFarVs13+enTv5DHHxsr2stov36XSvv27XyXy5cvV636VUaPfirU7q4pri/fyP5xy+ZjYDaoJY1MlYmpMRt3SpbtBIHZsvVl7QgggEAYgUw9cMPkjWUQQACBfBHgJVR6j7Q3MNuhTW25qVe79G6EtSGAQE4JUCbOqcOZ9zvjDczyHMz7U6LCAFAmrjCHKlRG9V40/uOlsWU7tKolPQ9rEpvOh5Gbx82VXxd+Jb9vuzVpDVj1WLJkqdx5xwO2n9LWrVvKsV2PtkHXunXriPZ7qvM/+ugzmTZ1hmj/sGf06C7aDy1D6QS8TfsGBSlLt9ayWTodNWa9fx9rToP2OVNlYgKzZXP++K6VwKwvC18igAAC5SqQqQduue4kG0MAAQSyXICXUOk9QN4/PHkhnV5f1oZALgpQJs7Fo5q/+0RgNn+PfUXfc8rEFf0Ikn+vgP5dUtqAtPYz+8QTz8qqlau8q4tNt2jZXI4//hg58MCOse8YCS+Q74HZRH8fZ6pMnDwwq/XDCxIcZOYn9nHREZh1YTCKAAIIZEggUw/cDO0um0UAAQSyUoCXUOk9LARm0+vJ2hDIBwHKxPlwlPNrHzU46x46tK7tnmQcgawUoEyclYeFTEUQ0L9LenZqLKW9BxcXF8tXX82WuSZIu3r1r7J+wwZpUL++FDZqKC1NUHb33dtHyBVJvX8vBtUezQaptWvX2drSlSoVSKNGhSllybu/ZRqYTTE+mjwwm9Kuk8hPgMCsnwrfIYAAAuUrwEuo8vVmawgggICfAC+h/FRS/87bn1WiPzxT3wopEUAglwQoE+fS0WRfEECgogpQJq6oR458Bwno3yX51nxzkEU2fa/HxRk0aF7awLmTtqJ8egOzPQ9rHHheZqpMTGC2HM8mArPliM2mEEAAgQCBTD1wA7LD1wgggEBeCvASKr2HncBsej1ZGwL5IECZOB+OMvuIAALZLkCZONuPEPkrrQCB2dKKsXxZCBCYLQvVCrxOArMV+OCRdQQQyBkBXkLlzKFkRxBAoAIL8BIqvQePwGx6PVkbAvkgQJk4H44y+4gAAtkuQJk4248Q+UMAgYoo4P37mBqzFfEopjHPBGbTiMmqEEAAgRQFeAmVIhzJEEAAgTQK8BIqjZhmVaX5wzO9W2ZtCCBQUQUoE1fUI0e+EUAglwQoE+fS0WRfEEAgWwS033l33/OJmm/OVJmYpozL8WwhMFuO2GwKAQQQCBDI1AM3IDt8jQACCOSlAC+h0nvYCcym15O1IZAPApSJ8+Eos48IIJDtApSJs/0IkT8EEMh1gUyViQnMluOZ5ReYLcfNsykEEEAAAQQQQAABBBDIQYE5SzbJg2+tiu1Zt461pdu+tWPTjCCAAAIIIIAAAggggAACCCCAQLBAw8L6Uq1a1eAF0jiHwGwaMZOtisBsMiHmI4AAAggggAACCCCAQGkFCMyWVozlEUAAAQQQQAABBBBAAAEEENghkF2B2WKTsYIdmSsxxvzEPi6wtWuLZNPGTaLVoxkQQAABBBBAAAEEEEAAgXQIaGB2zi87/sbYtUlV2bVptXSsmnUggAACCCCAAAIIIIAAAgggkPMCKQVmU4yPUmO2nE8np++Act4sm0MAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAY9ASoFZzzrCThKYDSuVpuUIzKYJktUggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEFGAwGxEwGxO7gRmtRNhPdAMCCCAAALlK6DNyeu9WIfyfOCW716yNQQQQCC7BSgTZ/fxIXcIIJD7ApSJc/8Ys4cIIJD9ApSJs/8YkUMEEMhtgUyViakxW87nFQ/ccgZncwgggIBHIFMPXE82mEQAAQTyWoAycV4ffnYeAQSyQIAycRYcBLKAAAJ5L0CZOO9PAQAQQCDDApkqExOYLecDzwO3nMHZHAIIIOARyNQD15MNJhFAAIG8FqBMnNeHn51HAIEsEKBMnAUHgSwggEDeC1AmzvtTAAAEEMiwQKbKxARmy/nA88AtZ3A2hwACCHgEMvXA9WSDSQQQQCCvBSgT5/XhZ+cRQCALBCgTZ8FBIAsIIJD3ApSJ8/4UAAABBDIskKkyMYHZcj7wPHDLGZzNIYAAAh6BTD1wPdlgEgEEEMhrAcrEeX342XkEEMgCAcrEWXAQyAICCOS9AGXivD8FAEAAgQwLZKpMnDwwW2xkChLoMD+xj4eOB64HhEkEEECgnAUy9cAt591kcwgggEBWC1AmzurDQ+YQQCAPBCgT58FBZhcRQCDrBSgTZ/0hIoMIIJDjApHLxCnGR5MHZnMcvrx3jwdueYuzPQQQQCBeIPIDN351TCGAAAIIpCBAmTgFNJIggAACaRSgTJxGTFaFAAIIpChAmThFOJIhgAACaRLIVJmYwGyaDmDY1fDADSvFcggggEDZCGTqgVs2e8NaEUAAgYopQJm4Yh43co0AArkjQJk4d44le4IAAhVXgDJxxT125BwBBHJDIFNlYgKz5Xz+8MAtZ3A2hwACCHgEMvXA9WSDSQQQQCCvBSgT5/XhZ+cRQCALBCgTZ8FBIAsIIJD3ApSJ8/4UAAABBDIskKkyMYHZcj7wPHDLGZzNIYAAAh6BTD1wPdlgEgEEEMhrAcrEeX342XkEEMgCAcrEWXAQyAICCOS9AGXivD8FAEAAgQwLZKpMTGC2nA88D9xyBmdzCCCAgEcgUw9cTzaYRAABBPJagDJxXh9+dh4BBLJAMmEvCgAAH0pJREFUgDJxFhwEsoAAAnkvQJk4708BABBAIMMCmSoTE5gt5wPPA7ecwdkcAggg4BHI1APXkw0mEUAAgbwWoEyc14efnUcAgSwQoEycBQeBLCCAQN4LUCbO+1MAAAQQyLBApsrEBGbL+cCvXVtkt1itWlXRfwwIIIAAAuUroA9c/adDnTq1ynfjbA0BBBBAwApQJuZEQAABBDIrQJk4s/5sHQEEEFABysScBwgggEBmBTJVJk4emC02MAUJcJiPD+dH8AXC9cH1wfXB9REkwP2B+wP3h6CrQ4Trg+uD64PrI0iA+wP3B+4PQVcHz0/uD9wfuD9wfwgS4P7A/YH7Q9DVQfmB+wP3hwzcH5IHZoMvWeYggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQQIDAbAolFEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSgCBGaj6JEWAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCCFAYDYEEosggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACUQQIzEbRIy0CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQQoDAbAgkFkEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSiCBCYjaJHWgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCEQPLAbLFZS0GCNTEfH86P4AuE64Prg+uD6yNIgPsD9wfuD0FXhwjXB9cH1wfXR5AA9wfuD9wfgq4Onp/cH7g/cH/g/hAkwP2B+wP3h6Crg/ID9wfuDxm4PyQPzAZfssxBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAghQGA2BBKLIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAlEECMxG0SMtAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEEKAwGwIJBZBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEoggQmI2iR1oEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAghACB2RBILIIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghEESAwG0WPtAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEAIgeSB2WKzloIEa2I+PpwfwRcI1wfXB9cH10eQAPcH7g/cH4KuDhGuD64Prg+ujyAB7g/cH7g/BF0dPD+5P3B/4P7A/SFIgPsD9wfuD0FXB+UH7g/cHzJwf0gemA2+ZJmDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBBCgMBsCCQWQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBKIIEJiNokdaBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIIQAgdkQSCyCAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIRBEgMBtFr5RpN23aLD/8MF9++mmxtG7VQtq2ayNVq1Yp5VpYHAEEEEBABdauXScbNmyQ2rVrSc2aNUOjLF26XObMmSuVK1eWXXfdWRo2bFAuaUNvhAURQACBLBfYunWrLFiwSObNWyh16tSWZs2aSCtTti0oKAiV8yhl4ihpQ2WOhRBAAIEKIrBy5SpzH14gK1esksJGhdKiRTNp3LgwdO4pE4emYkEEEEAglIDzjkIXrl9/J6lSJfE73yjl2ihpQ+0MCyGAAAJZLqBlYX03kWioUqWquR/XS7SIZKpMTGA24WFJ38xXX31LXn/tnbiTRR/Qp59xshx1VKf0bYg1IYAAAjksoA9cfQE1ZcpnMm3qDPntt9/k1NNOlK5dj0661yuWr5SH/j5aflmyNG7Z1m1ayWWX/UXq1q0T9717Ikpa93oYRwABBCqywJo1a+XZZ16Ub2bPkc2bNsXtStu2raX3uT1tYCBuhmciSpk4SlpPNphEAAEEKqzA9OmzZMKE/8gq8zIqbjA/jtl/v31s2bhRo4Zxs9wTUcq1UdK688A4AgggkGsCRUVFMuLWu+XXX9fYXRt4TV9p27ZV4G5GKddGSRuYIWYggAACFUxg+PA7ZekvyxLmur2pkNOv36W+y0Qp10ZJ62SGwKwjUYafkya+Jm+88a6YagSyyy5tpZ15cfX99/Nk/vyFdqtn9OguXbocUYY5YNUIIIBAxRf45JOp8vzzL5cIBoQJzOqvn+6//1/2BVYdE4DdZ+89ZasJ6n755TdStK5ImpraXldeebHUq1e3BFSUtCVWxhcIIIBABRVYtOgnefjhJ+19tKBSJVumbd26pfl16TL55uvv7I8Pq5iWYK6/vr+psdXIdy+jlImjpPXNDF8igAACFUxAa0c999xL8tmn02zOtUyrrb9orayFC3+U7+f8YL8vNEHZQYOulFq1SrYoE6VcGyVtBaMmuwgggECpBZ584lmZan487gyJArNRyrVR0jp54xMBBBDIBYHB1w6Tdeadrsbcgtru0sDsVVddUmJ3o5Rro6R1ZyR5YLbYLB60Z7om5if0mfPdXLnvvoet+WmnnyTHHnuUHdf/Xn75VXnrzcl2+oYhA2wzcHbC/R++CX05/8zJwvXpvmLix7l+cur8+OCDT+Sdt9+PHeNly5bb8cDArOv433/fv+S7776XwsKGMvCaK2zTm3r/WLFypdx554Oy1tQCO/Cg/eQvf+kdW79zf/FNa5bSX0fdeVdAWl2La/s7VuoaY35OnZ+uI7ttlOPL8c2h57O2VnDL8Ltk+fIVss++HeTc3j2lTt3asdNeAwIPPfio/aNot93by5V9Ly5x/OPKxKeZMnHXBGXipk3i0sel1fL0MSbtdl/f8jTXX8wndpDcI/jgk0P3J/epbcdz+PzWlmL+8ffHzY+8f5Aze50qnTodHLf7n332uTw15jn73dGdD5eePU+Jm68TgeXaFQnKxNvXEirtgaY8fb6rPO3NQQ4fH7ur7B/3V+6v3qt+x3QOXx8zZ34pjz4yRho0rG+bl9edLhGY3b7/Jcq1zntiM//liQneE5v52iVT4Dvmf5u0b0223r7vmHPY3+40+8f9l/uvvRR8/8vB66O4uNgEXK+XYlM+vuSSP5v3FHv57nrQ/SGuXDvQvCfe/n5jhV+Z2OMXl1bfMdc270bM+eebVjPgSe9kNHlg1lmSz5QEnnxynEz933TZc8/d5K9XXBi3Dj2Bbr/9fvnR1EA4xjyITzcvmhgQQAABBMIJ9L3iWrtgYGB2+2q0NtfwYXfaqf5XX25rebm34PwRVblKZRkxYojts9aZHyWtsw4+EUAAgVwQ+PnnJaIv/bt3P14qmRqz3mHy5Cny4viJ9utRtw+Nu5fql1HKxFHSevPJNAIIIFCRBdav3yCrVq2W5s2b+u6GvrDXl/5tTFcd1wzqG7dMlHJtlLRxmWACAQQQyDEBra2lTRivWbNGzj3vTBn79At2D0sEZrfvd5RybZS0OcbO7iCAQJ4LaJl40DVDrYJWwNGulcIOUcq1UdJ680dg1iuSxumiovVyww23ypbNW2yfW95ftOqm3nj9XZk06TVbe+vWETdI5cqV05gDVoUAAgjkrkDYwKxTm6p+g/pyyy3XlQDZsmWLDB48XDZu2ChnmH6/uxxzZGyZKGljK2EEAQQQyAOBxYuX2JdSuqtXmT5ctHlNZ4hSJo6S1tk+nwgggEC+CIwbN0GmfPip/XGM/kjGPUQp10ZJ684D4wgggECuCTzx+DMybdpM0eYyL7jgPLn+ulvsLvoFZqOUa6OkzTVz9gcBBBDQVgyHDh1lIYYNHywNGzYIjRKlXBslrTeDBGa9ImmcnjXrK3nkX0+aqswFctttN25rOtOzfq19MHLE3fbbAQP/Ku3atfEswSQCCCCAgJ9A2MDsqFH32ZYJjjzqMOllmn3zG0aPflpmTP9C9tprd7n8rxfEFomSNrYSRhBAAIE8ENBme4betO0Poyv6XiR77LFrbK+jlImjpI1lgBEEEEAgTwQeeOAR+Xb2HN8as1HKtVHS5gk9u4kAAnkoMGPGFzL60aelStUqcu21V0qNGjXkxiEjrYRfYDZKuTZK2jw8NOwyAgjkuIB2p3SHaYlWh7vvuVWqVq0aeo+jlGujpPVmkMCsVySN0x9++Ik8N+4lqVmzptxx582+a9Y+u/qZ9rB1uPCiPrLffnv7LseXCCCAAALxAmEDs9dfd6ttVqhHz+7SufMR8SvZPjVp4mvyxhvvSouWzeW66/rFlomSNrYSRhBAAIE8EPif6bpjjOnCQ3+QeMcdQ23519ntKGXiKGmd7fOJAAII5IPAN19/K3//x+O2r61eZ50uRx75+7jdjlKujZI2LhNMIIAAAjkisHbtOtMV0t2yds1a2zWddlG3fPkKuXno7XYP/QKzUcq1UdLmCDm7gQACCMQEZpsfIj5ofpCoP4i5865h9nttWr5GjepJW6SNUq6NkjaW+e0jBGa9Immc/u9/35RX//uWNGpUKENvHhS45oEDbpKNGzeK3x9PgYmYgQACCOS5QJjA7G+mE3j98Yv26d3nT2fLwQcf4Kv2ztvvy0sv/cd09l7HtnCgC0VJ67sRvkQAAQRyWODpp56XTz+dJk2bNZEhQwbE7WmUMnGUtHGZYAIBBBDIQYFNmzbJ0qXL5eOP/2f/bd60WTp3OUJOO+3EuP7Ao5Rro6TNQXJ2CQEEELACjz02VqZ/Pss2YXzVVZeY3yYWJA3MRinXRknLIUMAAQRyTWD69Fny2OixUrtObenYsYNMN60gbjD9zlaqVEkKGzWUAw7oKH/4QxepXr1a3K5HKddGSRuXie0TBGb9VNL0ndO/i3Y+rJ0QBw3a7Js2/9bthK5y0knHBS3G9wgggAACLoEwgdlff10jN1x/q0112eXnS4cOe7jWsGP000+mytNPv2D/mLr3vpH2QR4l7Y41M4YAAgjkvsCiRT/JnXc8YH/Qck7vHnLYYYfE7XSUMnGUtHGZYAIBBBDIMYHB1w4TrRngDIWFDeW8P/aK6+PbmRelXBslrbN9PhFAAIFcEtAAwGOmO6Tq1avLYNPiViMTBNAhWY3ZKOXaKGlzyZ59QQABBFRgypRPZdyzEyyG/jCmUeNCqWHuyYuXLJXN5oeLOuy0Uz3p1/9SW2nSfmH+i1KujZLW2b77k8CsWyPN404H8Lvvvqv0vfKiwLVr0xeLTV+z+svWHj26By7HDAQQQACBHQJhArNLzAP51lvuson0Ydy+/c47VuAac/6w0q+06Xltgj5KWteqGUUAAQRyWkB/NXrXXQ/JwgWLpGWrFrZ/Lf3DyD1EKRNHSevOA+MIIIBArgloqzDaNZIz1KxV07YO07Xr0dKgQX3na/sZpVwbJW1cJphAAAEEckDANmF8699EP88+5ww5/PBDY3uVLDAbpVwbJW0sg4wggAACOSKwxjQjP378RKlrWj7s1u1YqWNqzuqg7ycmv/uhTJr0umzZskV22aWdDc467yiilGujpPVjTx6YLTbJ4t+txK+H+YE+41+YKO+9N0Vat2klgwb1jXdzpozfkCEjZPXqX+Xk7sfL8ccf48zZ9olvoK8Fwgcf7k/x9wz3VI5fH0kDs2b/i9YXybWDhlmVSy79s+yzz147hFw+H035TJ599kXbD8E9946wNWeLTA2Ea01NBB1KpNUvt6f3S6uznfl23O8/1/b9ZpPeqHB9+54a9kvOH86PLLk+Xn31Lfnvf960982r+l2y7QcwnvOzRJnYM9850YfcsL1MfLIpE3fbViYukVYX9kkfS6vl6T+YtFni4+xb3KdP/pnvEsCH85fr13VBeEZd14c2Y6y/3Nd/X305Wz766H+yZs0aqV27llx4UZ+4mrNFRQnKxK5N+JVr49JeYsrT+7rK00nSumZvG3Xlv8Q8/YL5XP9c/76Xhv2S6yMrro/RpqbsDFNjdi/TGtflplWu2GCOz/IVCfqYNfM1iBD4nth1fOPKtdvfE/uWiWMbNyPb0/ultYu51u9OFhtnflacX7Hj4R3h+HB8eD56r4od0z7Xx5tvTJaJE1+1y1ziKr/GlWud98Q+6ePKxPeY98SVCsQ3rW7Bkz4urb5j1pcTPscveWB2xy4yVkqBN15/10TnX5OGhQ1k2LDBgan797vBRvB7n9tTOnU6OHA5ZiCAAAII7BBIGpjdvmi/fqY2wZattmm3Qw89aMcKXGPOA7t+/Z3klluvj82Jkja2EkYQQACBHBWYOnWGPPnkOPOHSLF0P6Wb7cPFb1ejlImjpPXLC98hgAACuSqwyfQve999D8uC+QtlJ1Om1f6+a9SoHtvdKOXaKGljGWAEAQQQqOACn5s+ZR83fcsWmD4MzzvvTNH3B+5BK92M0bKxGXqddZo0bdrE9m+oXdzpEKVcGyWt3Tj/IYAAAnkkoK3KDLj6Rtu6jLcyZJRybZS0Xn4Cs16RNE5//PH/5Jmx480fQzXkzruG+a5Zf+WqJ4kOl172F9l77z19l+NLBBBAAIF4gbCB2RtvvE1WrVwlZ5xxsnQ55sj4lWyf+vdL/5G3335f2pgWDq5xtXAQJa3vhvgSAQQQyBGBOd/NlQcfetT+8OWAAzvK+eefG7hnUcrEUdIGZogZCCCAQI4K/PLLMrll+J127841QYPf//53sT2NUq6NkjaWAUYQQACBCi7wwAOPyLez55RqL1q0bC7XmX5odYhSro2StlQZZmEEEEAgRwRuNc3OL1n8ixz0u/3lz38+J7ZXUcq1UdLGMrB9hMCsVySN07PNw/pB89DWYejNg+I6GnY28/33P8i99/zTTl53fX9p0aKZM4tPBBBAAIEEAmEDs/fc/Q+ZO3eeHHzIgdKnz1m+a3zgfvMH1rdzZL/995ELL/xjbJkoaWMrYQQBBBDIMYGff14ien9cv369tGrdQvr3v1yqVasauJdRysRR0gZmiBkIIIBADgtoVxzaJYf+IFF/mOgMUcq1UdI62+cTAQQQqOgCL7/8qvy46KfA3di0ebN8P+cHO7/dzm2kpqmoU1jYUM46+3T7XZRybZS0gRlmBgIIIJDDAkOHjpIVy1favsC1T3BniFKujZLW2b7zSWDWkSiDT60yfYPpK2ud6RD+9NNPkmOOParEVpxaWk2aNJYbbxpYYj5fIIAAAgj4C4QNzL77zgcyYcIrUrNWTRk16iapZJodcg8aWBh87XDbQbz+gkp/SeUMUdI66+ATAQQQyCWBJUuWyn33Pmz7MdTya7/+l0rdunUS7mKUMnGUtAkzxUwEEEAgBwX0njlwwE22q6QTTzxOTjixa2wvo5Rro6SNZYARBBBAIMcFli9P0Mes2fco5dooaXOcnd1DAAEESgisMz9SHGx+rKjDWWefIUcccWhsmSjl2ihpYxnYPkJg1iuS5mkNBugBa9mqhVxzzRVSuXLl2Ba0GeMRt94tK1aslFNOPUGOO65zbB4jCCCAAAKJBcIGZteaH8cMGTLCNrd5/gXnygEHdIxb8fvvfSQvvPCyDdyOGDFEqlatEpsfJW1sJYwggAACOSKgTWRq/4W/mv6z6jeoL1dffZk0MJ9hhihl4ihpw+SNZRBAAIGKILBs2QpbZu3du4fstFM93yxPnz5LHhs91s677PLzpUOHPWLLRSnXRkkbywAjCCCAQI4LJAvM6u5HKddGSZvj9OweAgjkkYCWid96c7Kc0eNk03JXNd89Hzdugkz58FORggLbnLy7ldoo5dooab0ZJTDrFUnz9OLFS2TkyHul+LffpNNhB8s55/Qw50OBrZn1+GPPyIwZX0gVEwQYNmyw1KtXN81bZ3UIIIBAbgj8Zu6hW7ZsjduZAVcPsdPaiXuXLjv6jq1SpXKJWrFPPP6MTJs20/b5rbW7Wpp+XnSYN2+BDTJs2bxFOnc5Qnr06G6/d/8XJa17PYwjgAACFVlg6dLl9n65etVqqWNqyF7Z9yKpa8qum02TbXqP9g5169aNa944Spk4SlpvvphGAAEEKqqA0/WG3l/1RdQee+wa12LB9OlfyLPPvGibmW/WvKn5YXjfuPuw7neUcm2UtBXVnHwjgAACpREIE5iNUq6NkrY0+8GyCCCAQDYL3HvvP22z8Vre1W47dtmlnVSvvi1Aq+8nXnnlDXnHVJSU4mI5uvPh0rPnKSV2J0q5Nkpad0YIzLo1ymh82tQZ8uSY52xwVl9ktWvXRn4w/R1qleoqVarIRRf3ifslaxllg9UigAACFVbgk0+mytinXwiV/zN7nSZHHdUpbtkNGzbIQw+Nlnk/LLA/jtl557ay1QQS5s9faB/Ue+21u7kX/ymutqyzgihpnXXwiQACCFRkAW067eaht8sqE5TVocA0Ca8/Okw06D21Y8cOcYtEKRNHSRuXCSYQQACBCiqg/Xs//M8nRF/8O0OTpo1tywXazPyqlavs1zVMn4YDTWtdTc087xClXBslrTcfTCOAAAK5KBAmMKv7HaVcGyVtLpqzTwggkH8Cc+fOl0cfecp2r6R7r+8nWpkKOFr58ccff5ZNGzdZFH332/fKi9P+rjddZeLkgdli3Tu7L/7/MT+Uz+efz5SJL78W/0eU6ZdLI/Z7ddjd31a/xTeUbyAgfvhx/wq8PCrS/SWlwKzn+tcH55gxz8vXX88WrSGrQzXzi6qOHfeW3r17lnxQu9L7pjXNZXTcLyCtrtyVXidLDMzn/sT9qcRlEfuC6yOrro+N5g+bgQNujB2eMCN+gVlNF1gmPtOUic2PZOwQcPxDpdUVBKTftnLm42POAe6/scuhxAjXT1afH5s3bZa333lfPv74f7Ji+cq4w1fJvJTq1OlgOfGk44Jb4zLHd8PGBGXic0yZuNqObj3iNmAmNqw3aZ9KUJ42Pzzn+vKquaa5vjg/eP64LgjPaA5cH8tXJOhj1rN/ocu1DpMrfanT6jpc6Z1Vxn0yn/sT96e4SyJugusj666P9aZM+uabk+Wzzz4XbdXLPdSsVVNOPukPcsSRv9/WomLA8fN91+v3ntgnfei0mjGf9Pp18sCsLsWQNoGV5lesGrlv3bplYL8wadsYK0IAAQQQKCGwZcsW04TxQvtwbtu2VVzf3yUW9nwRJa1nVUwigAACeS0QpUwcJW1eo7PzCCCQUwJaS1bvh/rjmUaNGkqTJo3MDw2rht7HKOXaKGlDZ5AFEUAAgTwQiFKujZI2D2jZRQQQyAOBYtNc8ZIlv5iWY1bLZvO+t7lp3riwsKFtLTHs7kcp10ZJS2A27BFiOQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBFAQKzKcKRDAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgrQGA2rBTLIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAikKEJhNEY5kCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQFgBArNhpVgOAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSFGAwGyKcCRDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEwgoQmA0rxXIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAigIEZlOEIxkCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQVoDAbFgplkMAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRSFCAwmyIcyRBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGwAgRmw0qxHAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJCiAIHZFOFIhgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQVIDAbVorlEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgRQFCMymCEcyBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIKwAgdmwUiyHAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIpChAYDZFOJIhgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACYQUIzIaVYjkEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgRQECsynCkQwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIK0BgNqwUyyGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIpChCYTRGOZAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEBYAQKzYaVYDgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEhRgMBsinAkQwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMIKEJgNK8VyCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQIoCBGZThCMZAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggEFbg/wH1UiN9o7X1JwAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "metadata": { - "image/png": { - "width": 800 - } - }, - "output_type": "display_data" - } - ], - "source": [ - "# Display the example loss curve from a larger training run\n", - "from IPython.display import Image, display\n", - "\n", - "\n", - "# Load and display the image\n", - "display(Image(\"../assets/1b_finetuning_train_curve_500_steps_256gbs.png\", width=800))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### How we fine-tuned the 1b checkpoint for bf16 accuracy\n", - "An example of the full slurm script to run the above training curve on our infrastructure is as follows:\n", - "\n", - "First make a `~/.netrc` file with your wandb login info. You can also accomplish this by setting wandb ENV variables,\n", - "assuming you want to log to wandb. If not you can pass the `--no-wandb` argument as part of the args to `train_evo2`:\n", - "\n", - "```ini\n", - "machine api.wandb.ai\n", - " login user\n", - " password PASSWORD_HERE\n", - "```\n", - "\n", - "Next, paste/edit the following sbatch script for your own configuration:\n", - "\n", - "```bash\n", - "# TODO: You may need to add more SBATCH configuration here specific to your cluster.\n", - "#SBATCH --nodes=4 # number of nodes\n", - "#SBATCH --gpus-per-node=8\n", - "#SBATCH --ntasks-per-node=8 # n tasks per machine (one task per gpu) \n", - "#SBATCH --time=04:00:00 # wall time (8 for batch, backfill, 2 for batch_short)\n", - "#SBATCH --mem=0 # all mem avail\n", - "#SBATCH --exclusive\n", - "set -x\n", - "# You may want to edit this file and/or add your own version to your mounts.\n", - "CONFIG_PATH_IN_CONTAINER=/workspace/bionemo2/sub-packages/bionemo-evo2/examples/configs/full_pretrain_shortphase_config.yaml\n", - "# You can build a `.sqsh` file with enroot which may be faster to load on each node rather than pulling down from NGC\n", - "IMAGE_PATH=nvcr.io/nvidia/clara/bionemo-framework:nightly\n", - "WANDB_PROJECT_NAME= # Set you wandb project here, or leave blank and add --no-wandb to the image\n", - "MODEL_SIZE=1b # change this to 7b_arc_longcontext etc. This version is different.\n", - "CP_SIZE=1\n", - "TP_SIZE=1\n", - "PP_SIZE=1\n", - "MICRO_BATCH_SIZE=8\n", - "GRAD_ACC_BATCHES=1\n", - "SEQ_LEN=8192\n", - "MAX_STEPS=580000 # 8T tokens given 1024 nodes and 8192 seq length\n", - "VAL_CHECK=500\n", - "CLIP_GRAD=250 # Arc trained without gradient clipping. Set to a large value so megatron still logs grad_norm.\n", - "# The following arguments will remove the EOD/PAD tokens from the loss, unlike how the original Evo2 model was trained.\n", - "# this does not impact downstream accuracy in our experience and is more standard.\n", - "EXTRA_ARGS=\"--enable-preemption --ckpt-async-save --overlap-grad-reduce --clip-grad $CLIP_GRAD --eod-pad-in-loss-mask\"\n", - "LR=0.000015\n", - "MIN_LR=0.0000015\n", - "WU_STEPS=100\n", - "SEED=1234 \n", - "WD=0.001\n", - "ADO=0.01\n", - "HDO=0.01\n", - "EXPERIMENT_NAME=fine_tune_evo2_1b_on_bf16\n", - "# NCCL performance parameters\n", - "# =========================\n", - "export TORCH_NCCL_AVOID_RECORD_STREAMS=1\n", - "\n", - "# Mounts\n", - "# =========================\n", - "DATA_PATH= # PATH to the directory that stores your data that you want to mount into the container\n", - "DATA_MOUNT=/workspace/bionemo2/data # or if you configure your data with a different base dir in the config, use that here\n", - "RESULTS_PATH_CLUSTER= # Where do you want the results to land on your shared cluster storage\n", - "RESULTS_PATH_IMAGE=/results/\n", - "CKPT_MOUNT_CLUSTER= # Path to shared location on your cluster where the checkpoint files can be found\n", - "CKPT_MOUNT_IMAGE=/checkpoints/ # pragma: allowlist secret (for some reason this line flags a high entropy string check in CI)\n", - "NETRC_PATH=$HOME/.netrc\n", - "NETRC_MOUNT=/root/.netrc\n", - "# TODO either move your config to one of the mounted paths or add your own mount to a location with your configs\n", - "\n", - "mkdir -p $RESULTS_PATH_CLUSTER\n", - "MOUNTS=${DATA_PATH}:${DATA_MOUNT},${RESULTS_PATH_CLUSTER}:${RESULTS_PATH_IMAGE},${NETRC_PATH}:${NETRC_MOUNT},${CKPT_MOUNT_CLUSTER}:${CKPT_MOUNT_IMAGE},$HOME/.cache:/root/.cache\n", - "# Generate (or retrieve) a unique, shared ID per run to handle restarts in W&B and Tensorboard\n", - "# =========================\n", - "mkdir -p ${RESULTS_PATH_CLUSTER}\n", - "if [ -f ${RESULTS_PATH_CLUSTER}/run.id ];\n", - "then\n", - " RUN_ID=$(<${RESULTS_PATH_CLUSTER}/run.id)\n", - "else\n", - " array=()\n", - " for i in {a..z} {A..Z} {0..9};\n", - " do\n", - " array[$RANDOM]=$i\n", - " done\n", - " RUN_ID=$(printf %s ${array[@]::8})\n", - " echo $RUN_ID > ${RESULTS_PATH_CLUSTER}/run.id\n", - "fi\n", - "# =========================\n", - "read -r -d '' COMMAND <\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
chromposrefaltscoreclass
01741199726TC0.159762FUNC/INT
11741209074TA-2.065569LOF
21741256913AC-0.847753FUNC/INT
31741219631TA-2.053739LOF
41741215965GA-1.671525LOF
\n", - "" - ], - "text/plain": [ - " chrom pos ref alt score class\n", - "0 17 41199726 T C 0.159762 FUNC/INT\n", - "1 17 41209074 T A -2.065569 LOF\n", - "2 17 41256913 A C -0.847753 FUNC/INT\n", - "3 17 41219631 T A -2.053739 LOF\n", - "4 17 41215965 G A -1.671525 LOF" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "OUTPUT_DIR = \"brca1_fasta_files\"\n", - "\n", - "brca1_df = sample_data(\n", - " brca1_df,\n", - " sample_frac=SAMPLE_CONFIG[\"sample_frac\"],\n", - " balanced=SAMPLE_CONFIG[\"balanced\"],\n", - " disable=SAMPLE_CONFIG[\"disable\"],\n", - " random_state=SAMPLE_CONFIG[\"random_state\"],\n", - ")\n", - "\n", - "brca1_df.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we'll write these to local `.fasta` files so we can use them for prediction below." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "def parse_sequences(pos, ref, alt, seq_chr17, window_size=8192):\n", - " \"\"\"Parse reference and variant sequences from the reference genome sequence.\n", - "\n", - " Parameters:\n", - " -----------\n", - " pos : int\n", - " Position (1-indexed)\n", - " ref : str\n", - " Reference base\n", - " alt : str\n", - " Alternate base\n", - " seq_chr17 : str\n", - " Full chromosome 17 sequence\n", - " window_size : int\n", - " Size of the sequence window to extract\n", - "\n", - " Returns:\n", - " --------\n", - " tuple\n", - " (reference_sequence, variant_sequence)\n", - " \"\"\"\n", - " p = pos - 1 # Convert to 0-indexed position\n", - " full_seq = seq_chr17\n", - "\n", - " ref_seq_start = max(0, p - window_size // 2)\n", - " ref_seq_end = min(len(full_seq), p + window_size // 2)\n", - " ref_seq = seq_chr17[ref_seq_start:ref_seq_end]\n", - " snv_pos_in_ref = min(window_size // 2, p)\n", - " var_seq = ref_seq[:snv_pos_in_ref] + alt + ref_seq[snv_pos_in_ref + 1 :]\n", - "\n", - " # Sanity checks\n", - " assert len(var_seq) == len(ref_seq)\n", - " assert ref_seq[snv_pos_in_ref] == ref\n", - " assert var_seq[snv_pos_in_ref] == alt\n", - "\n", - " return ref_seq, var_seq\n", - "\n", - "\n", - "def generate_fasta_files(df, seq_chr17, output_dir=\"brca1_fasta_files\", window_size=8192):\n", - " \"\"\"Generate FASTA files for reference and variant sequences.\n", - "\n", - " Parameters:\n", - " -----------\n", - " df : pandas.DataFrame\n", - " Dataframe with variant information\n", - " seq_chr17 : str\n", - " Chromosome 17 sequence\n", - " output_dir : str\n", - " Output directory for FASTA files\n", - " window_size : int\n", - " Size of sequence window\n", - "\n", - " Returns:\n", - " --------\n", - " pandas.DataFrame\n", - " Dataframe with added columns for FASTA names\n", - " \"\"\"\n", - " # Create output directory\n", - " output_dir = Path(output_dir)\n", - " output_dir.mkdir(parents=True, exist_ok=True)\n", - "\n", - " # Paths for output files\n", - " ref_fasta_path = output_dir / \"brca1_reference_sequences.fasta\"\n", - " var_fasta_path = output_dir / \"brca1_variant_sequences.fasta\"\n", - "\n", - " # Track unique sequences\n", - " ref_sequences = set()\n", - " var_sequences = set()\n", - " ref_seq_to_name = {}\n", - "\n", - " # Store unique sequences with metadata for writing\n", - " ref_entries = []\n", - " var_entries = []\n", - " ref_names = []\n", - " var_names = []\n", - "\n", - " # Collect unique reference and variant sequences\n", - " for idx, row in df.iterrows():\n", - " ref_seq, var_seq = parse_sequences(row[\"pos\"], row[\"ref\"], row[\"alt\"], seq_chr17, window_size)\n", - "\n", - " # Add to sets to ensure uniqueness\n", - " if ref_seq not in ref_sequences:\n", - " ref_sequences.add(ref_seq)\n", - " ref_name = f\"BRCA1_ref_pos_{row['pos']}_{row['ref']}_class_{row['class']}\"\n", - "\n", - " ref_entries.append(f\">{ref_name}\\n{ref_seq}\\n\")\n", - " ref_names.append(ref_name)\n", - " ref_seq_to_name[ref_seq] = ref_name\n", - " else:\n", - " ref_name = ref_seq_to_name[ref_seq]\n", - " ref_names.append(ref_name)\n", - "\n", - " if var_seq not in var_sequences:\n", - " var_sequences.add(var_seq)\n", - " var_name = f\"BRCA1_var_pos_{row['pos']}_{row['ref']}to{row['alt']}_class_{row['class']}\"\n", - "\n", - " var_entries.append(f\">{var_name}\\n{var_seq}\\n\")\n", - " var_names.append(var_name)\n", - " else:\n", - " assert False, \"Duplicate variant sequence\"\n", - "\n", - " # Write unique sequences to FASTA files\n", - " with open(ref_fasta_path, \"w\") as f:\n", - " f.writelines(ref_entries)\n", - "\n", - " with open(var_fasta_path, \"w\") as f:\n", - " f.writelines(var_entries)\n", - "\n", - " # Add FASTA names to dataframe\n", - " df_with_names = df.copy()\n", - " df_with_names[\"ref_fasta_name\"] = ref_names\n", - " df_with_names[\"var_fasta_name\"] = var_names\n", - "\n", - " print(f\"Total unique reference sequences: {len(ref_sequences)}\")\n", - " print(f\"Total unique variant sequences: {len(var_sequences)}\")\n", - "\n", - " return df_with_names" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total unique reference sequences: 79\n", - "Total unique variant sequences: 84\n" - ] - } - ], - "source": [ - "brca1_df = generate_fasta_files(brca1_df, seq_chr17, output_dir=OUTPUT_DIR)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load Evo 2 Checkpoints" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "Then, we load Evo 2 1B model, loading the Evo 2 weights from hugging face.\n", - "\n", - "*Note - for better performance, load the 7b model by setting `MODEL_SIZE=\"7b\"` which also works well GPUs that do not support FP8.*\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "MODEL_SIZE = \"1b\" # also try 7b if you have a GPU with more than 32GB of memory\n", - "\n", - "# Define checkpoint path\n", - "if MODEL_SIZE == \"1b\":\n", - " from bionemo.core.data.load import load\n", - "\n", - " # This line will download the checkpoint from NGC to your $HOME/.cache/bionemo directory and return the path.\n", - " # To do the same from the command line, use `CHECKPOINT_PATH=$(download_bionemo_data evo2/1b-8k-bf16:1.0)`\n", - " checkpoint_path = load(\"evo2/1b-8k-bf16:1.0\")\n", - "else:\n", - " checkpoint_path = Path(f\"nemo2_evo2_{MODEL_SIZE}_8k\")\n", - "\n", - " # Check if the directory does not exist or is empty\n", - " if not checkpoint_path.exists() or not any(checkpoint_path.iterdir()):\n", - " !evo2_convert_to_nemo2 --model-path hf://arcinstitute/savanna_evo2_{MODEL_SIZE}_base --model-size {MODEL_SIZE} --output-dir nemo2_evo2_{MODEL_SIZE}_8k\n", - " else:\n", - " print(\"Checkpoint directory is not empty. Skipping command.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Score Sequences" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we score the likelihoods of the reference and variant sequences of each SNV.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "def check_fp8_support():\n", - " \"\"\"Check if FP8 is supported on the current GPU.\n", - "\n", - " FP8 requires compute capability 8.9+ (Ada Lovelace/Hopper architecture or newer).\n", - " \"\"\"\n", - " if not torch.cuda.is_available():\n", - " return False, \"CUDA not available\"\n", - "\n", - " device_props = torch.cuda.get_device_properties(0)\n", - " compute_capability = f\"{device_props.major}.{device_props.minor}\"\n", - " device_name = device_props.name\n", - "\n", - " # FP8 is supported on compute capability 8.9+ (Ada Lovelace/Hopper architecture)\n", - " is_supported = (device_props.major > 8) or (device_props.major == 8 and device_props.minor >= 9)\n", - "\n", - " return is_supported, f\"Device: {device_name}, Compute Capability: {compute_capability}\"" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "FP8 Support: False\n", - "Device: NVIDIA RTX A6000, Compute Capability: 8.6\n" - ] - } - ], - "source": [ - "# Define output directories for prediction results\n", - "output_dir = Path(\"brca1_fasta_files\")\n", - "output_dir.mkdir(parents=True, exist_ok=True)\n", - "\n", - "# Save reference and variant sequences to FASTA\n", - "ref_fasta_path = output_dir / \"brca1_reference_sequences.fasta\"\n", - "var_fasta_path = output_dir / \"brca1_variant_sequences.fasta\"\n", - "\n", - "predict_ref_dir = output_dir / \"reference_predictions\"\n", - "predict_var_dir = output_dir / \"variant_predictions\"\n", - "predict_ref_dir.mkdir(parents=True, exist_ok=True)\n", - "predict_var_dir.mkdir(parents=True, exist_ok=True)\n", - "\n", - "fp8_supported, gpu_info = check_fp8_support()\n", - "print(f\"FP8 Support: {fp8_supported}\")\n", - "print(gpu_info)\n", - "\n", - "# Note: If FP8 is not supported, you may want to disable it in the model config\n", - "# The Evo2 config has 'use_fp8_input_projections: True' by default\n", - "\n", - "if FAST_CI_MODE:\n", - " model_subset_option = \"--num-layers 4 --hybrid-override-pattern SDH*\"\n", - "else:\n", - " model_subset_option = \"\"\n", - "\n", - "fp8_option = \"--fp8\" if fp8_supported else \"\"\n", - "\n", - "# Update predict commands to run on the full dataset\n", - "predict_ref_command = (\n", - " f\"predict_evo2 --fasta {ref_fasta_path} --ckpt-dir {checkpoint_path} \"\n", - " f\"--output-dir {predict_ref_dir} --model-size {MODEL_SIZE} --tensor-parallel-size 1 {model_subset_option} \"\n", - " f\"--pipeline-model-parallel-size 1 --context-parallel-size 1 --output-log-prob-seqs {fp8_option}\"\n", - ")\n", - "\n", - "predict_var_command = (\n", - " f\"predict_evo2 --fasta {var_fasta_path} --ckpt-dir {checkpoint_path} \"\n", - " f\"--output-dir {predict_var_dir} --model-size {MODEL_SIZE} --tensor-parallel-size 1 {model_subset_option} \"\n", - " f\"--pipeline-model-parallel-size 1 --context-parallel-size 1 --output-log-prob-seqs {fp8_option}\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Score reference sequences:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "print(f\"Running command: {predict_ref_command}\")\n", - "\n", - "result = run_subprocess_safely(predict_ref_command)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "assert result[\"returncode\"] == 0, result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Score variant sequences:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "print(f\"Running command: {predict_var_command}\")\n", - "\n", - "result = run_subprocess_safely(predict_var_command)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "assert result[\"returncode\"] == 0, result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We calculate the change in likelihoods for each variant relative to the likelihood of their respective wild-type sequence.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we load the prediction files and sequence id maps:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Find and load prediction files\n", - "ref_pred_files = glob.glob(os.path.join(predict_ref_dir, \"predictions__rank_*.pt\"))\n", - "var_pred_files = glob.glob(os.path.join(predict_var_dir, \"predictions__rank_*.pt\"))\n", - "\n", - "# Load sequence ID maps (maps sequence ID -> prediction index)\n", - "with open(os.path.join(predict_ref_dir, \"seq_idx_map.json\"), \"r\") as f:\n", - " ref_seq_idx_map = json.load(f)\n", - "with open(os.path.join(predict_var_dir, \"seq_idx_map.json\"), \"r\") as f:\n", - " var_seq_idx_map = json.load(f)\n", - "\n", - "# Load predictions\n", - "ref_preds = torch.load(ref_pred_files[0])\n", - "var_preds = torch.load(var_pred_files[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, calculate the delta score:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
chromposrefaltscoreclassref_fasta_namevar_fasta_nameref_log_probsvar_log_probsevo2_delta_score
01741199726TC0.159762FUNC/INTBRCA1_ref_pos_41199726_T_class_FUNC/INTBRCA1_var_pos_41199726_TtoC_class_FUNC/INT-0.952952-0.953219-0.000267
11741209074TA-2.065569LOFBRCA1_ref_pos_41209074_T_class_LOFBRCA1_var_pos_41209074_TtoA_class_LOF-0.750379-0.750438-0.000059
21741256913AC-0.847753FUNC/INTBRCA1_ref_pos_41256913_A_class_FUNC/INTBRCA1_var_pos_41256913_AtoC_class_FUNC/INT-0.798110-0.799099-0.000989
31741219631TA-2.053739LOFBRCA1_ref_pos_41219631_T_class_LOFBRCA1_var_pos_41219631_TtoA_class_LOF-1.032214-1.032696-0.000482
41741215965GA-1.671525LOFBRCA1_ref_pos_41215965_G_class_LOFBRCA1_var_pos_41215965_GtoA_class_LOF-0.860933-0.861262-0.000329
\n", - "
" - ], - "text/plain": [ - " chrom pos ref alt score class \\\n", - "0 17 41199726 T C 0.159762 FUNC/INT \n", - "1 17 41209074 T A -2.065569 LOF \n", - "2 17 41256913 A C -0.847753 FUNC/INT \n", - "3 17 41219631 T A -2.053739 LOF \n", - "4 17 41215965 G A -1.671525 LOF \n", - "\n", - " ref_fasta_name \\\n", - "0 BRCA1_ref_pos_41199726_T_class_FUNC/INT \n", - "1 BRCA1_ref_pos_41209074_T_class_LOF \n", - "2 BRCA1_ref_pos_41256913_A_class_FUNC/INT \n", - "3 BRCA1_ref_pos_41219631_T_class_LOF \n", - "4 BRCA1_ref_pos_41215965_G_class_LOF \n", - "\n", - " var_fasta_name ref_log_probs var_log_probs \\\n", - "0 BRCA1_var_pos_41199726_TtoC_class_FUNC/INT -0.952952 -0.953219 \n", - "1 BRCA1_var_pos_41209074_TtoA_class_LOF -0.750379 -0.750438 \n", - "2 BRCA1_var_pos_41256913_AtoC_class_FUNC/INT -0.798110 -0.799099 \n", - "3 BRCA1_var_pos_41219631_TtoA_class_LOF -1.032214 -1.032696 \n", - "4 BRCA1_var_pos_41215965_GtoA_class_LOF -0.860933 -0.861262 \n", - "\n", - " evo2_delta_score \n", - "0 -0.000267 \n", - "1 -0.000059 \n", - "2 -0.000989 \n", - "3 -0.000482 \n", - "4 -0.000329 " - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# next, calculate change in likelihoods\n", - "ref_log_probs = []\n", - "var_log_probs = []\n", - "for _, row in brca1_df.iterrows():\n", - " ref_name = row[\"ref_fasta_name\"]\n", - " var_name = row[\"var_fasta_name\"]\n", - " ref_log_probs.append(ref_preds[\"log_probs_seqs\"][ref_seq_idx_map[ref_name]].item())\n", - " var_log_probs.append(var_preds[\"log_probs_seqs\"][var_seq_idx_map[var_name]].item())\n", - "brca1_df[\"ref_log_probs\"] = ref_log_probs\n", - "brca1_df[\"var_log_probs\"] = var_log_probs\n", - "# ideally probability of a broken variant is lower than a good one. So a bad var - good ref is negative.\n", - "brca1_df[\"evo2_delta_score\"] = brca1_df[\"var_log_probs\"] - brca1_df[\"ref_log_probs\"]\n", - "brca1_df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This delta likelihood should be predictive of how disruptive the SNV is to the protein's function: the lower the delta, the more likely that the SNV is disruptive. We can show this by comparing the distributions of delta likelihoods for the two classes of SNVs (functional/intermediate vs loss-of-function)." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "def plot_strip_with_means(df, x_col=\"evo2_delta_score\", class_col=\"class\"):\n", - " \"\"\"Creates a strip plot with jittered points and median indicators for each class using Seaborn.\n", - "\n", - " Parameters:\n", - " - df (pd.DataFrame): The input DataFrame containing data.\n", - " - x_col (str): The column name representing the x-axis values (e.g., evo2_delta_score).\n", - " - class_col (str): The column name representing the class labels.\n", - "\n", - " Returns:\n", - " - matplotlib Figure: Strip plot with median indicators.\n", - " \"\"\"\n", - " # NVIDIA theme colors\n", - " NVIDIA_GREEN = \"#76B900\"\n", - " BACKGROUND_COLOR = \"#F8F8F8\"\n", - " GRID_COLOR = \"#DDDDDD\"\n", - " FONT_COLOR = \"#333333\"\n", - "\n", - " # Determine order of classes (if not already specified)\n", - " unique_classes = sorted(df[class_col].unique())\n", - "\n", - " # Set up the plot with NVIDIA theme\n", - " plt.figure(figsize=(9, 5), facecolor=BACKGROUND_COLOR)\n", - " plt.style.use(\"default\") # Reset to default to avoid any pre-existing style\n", - "\n", - " # Create strip plot\n", - " p = sns.stripplot(\n", - " data=df,\n", - " x=x_col,\n", - " y=class_col,\n", - " hue=class_col,\n", - " order=unique_classes,\n", - " palette=[NVIDIA_GREEN, \"red\"],\n", - " size=6,\n", - " jitter=0.3,\n", - " alpha=0.6,\n", - " )\n", - "\n", - " # Add median indicators using boxplot\n", - " sns.boxplot(\n", - " showmeans=True,\n", - " meanline=True,\n", - " meanprops={\"visible\": False},\n", - " medianprops={\"color\": \"black\", \"ls\": \"-\", \"lw\": 2},\n", - " whiskerprops={\"visible\": False},\n", - " zorder=10,\n", - " x=x_col,\n", - " y=class_col,\n", - " data=df,\n", - " order=unique_classes,\n", - " showfliers=False,\n", - " showbox=False,\n", - " showcaps=False,\n", - " ax=p,\n", - " )\n", - "\n", - " # Customize plot appearance\n", - " plt.title(\n", - " \"Distribution of Delta Likelihoods Scores\\nComparing Evo 2 likelihood scores for different BRCA1 SNV classes\",\n", - " color=FONT_COLOR,\n", - " fontsize=12,\n", - " loc=\"left\",\n", - " )\n", - " plt.xlabel(\"Delta Likelihood Score, Evo 2\", color=FONT_COLOR)\n", - " plt.ylabel(\"BRCA1 SNV Class\", color=FONT_COLOR)\n", - "\n", - " # Customize grid and tick colors\n", - " plt.grid(color=GRID_COLOR, axis=\"x\", linestyle=\"--\", linewidth=0.5)\n", - " plt.tick_params(colors=FONT_COLOR)\n", - "\n", - " # Set background color\n", - " plt.gca().set_facecolor(BACKGROUND_COLOR)\n", - " plt.gcf().set_facecolor(BACKGROUND_COLOR)\n", - "\n", - " plt.tight_layout()\n", - "\n", - " # return plt.gcf()" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3kAAAHpCAYAAAA/CfW/AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAxMZJREFUeJzs3Xd4FNX+BvB3dje9F9KB0ASk9xJQQkd6UyMiguiVIqio4L2W+1NQrxUQFBERwQIiKgiC9N5BOtIhJCQhpLdNsrvz+yNmZclsskm2JCfv53l8JDOzM9/su7PZs3PmHEmr1cogIiIiIiIiIagcXQARERERERFZDxt5REREREREAmEjj4iIiIiISCBs5BEREREREQmEjTwiIiIiIiKBsJFHREREREQkEDbyiIiIiIiIBMJGHhERERERkUDYyCMiIiIiIhIIG3kC++qrrxAVFWWXY02dOhVTp041/nz8+HFERUVhx44ddjn+7NmzMXLkSLscq6Jyc3Px7rvvYvDgwYiKisLcuXMdUse9WTlaVFQUvvrqK+PPxa/b9PR0m+x/w4YNiIqKQkJCgnHZyJEj8fLLL1vleNaQkJCAqKgobNiwwdGlEBERUTWkcXQBZJkNGzbgnXfeMf7s7OwMLy8vNGjQAF27dsVDDz0EDw+PSh8nOTkZ69atQ/fu3XHfffdVen/WVJVrs8Ty5cuxceNGPPnkkwgPD0fdunXNbjty5EgkJiYCACRJgoeHB4KCgtC8eXMMGjQIzZo1s1pdtnpejx8/jueeew6zZ89GdHS01fZL5Xfy5EksX74cly9fRmZmJvz8/NCwYUP07t0bffv2dXR5REREZGVs5FUzEydORFhYGHQ6HVJSUvDnn39i3rx5WLlyJf73v/+hYcOGxm3HjRuHxx9/vFz7v3PnDpYuXYqQkJByfeD/5JNPynWciiittlmzZsFgMNi8hso4duwYmjVrhgkTJli0faNGjRATEwOg6Crg9evXsWPHDqxbtw6PPPIIpk2bZpW6Kpq5tWzfvh1qtdpux+vfvz969+4NZ2dnux3TkbZv34433ngDjRo1wsMPPwwvLy/cunULJ0+exLp169jIIyIiEhAbedVM586d0bRpU+PPTzzxBI4dO4aXX34ZM2fOxPfffw8XFxcAgEajgUZj24i1Wi1cXV3h5ORk0+OUxda/pzWkp6cjMjLS4u1r1aqFfv36mSybPHky/vvf/2LVqlWoXbs2hg8fbuUq7a/49WovarXaro1KR/vqq68QGRmJxYsXlzhP09LS7FaHLMsoKCiwe95EREQ1Ee/JE0C7du3w5JNPIjExEX/88YdxudI9eYcPH8akSZPQr18/9O7dG48++igWLVoEoKh73cSJEwEA77zzDqKiokzuC5o6dSoef/xx/PXXX5g8eTJ69uxpfKy5+7z0ej0WLVqEwYMHo1evXnjllVeQlJRkss3IkSMxe/bsEo+9e59l1aZ0T15eXh4+/fRTDB8+HD169MCjjz6K77//HrIsm2wXFRWFjz76CLt378bjjz+OHj16YMyYMTh48GBpT7tRWloa3n33XQwaNAjR0dEYN24cfv/9d+P64vsTb926hf379xtrv/ueMEu5uLjg9ddfh7e3N7755huT38VgMGDVqlUYM2YMoqOjMWjQILz//vvIzMw0u7+yntcTJ07gtddew4gRI9CjRw8MHz4c8+bNQ35+frlrN+fee+aUJCYm4uGHH8bjjz+O1NRUAEBWVhbmzp1rzPfhhx/Gt99+W+YVXaV78oqdPHkSEydORHR0NEaPHo2NGzeW2CY+Ph6vvfYa+vfvj549e+Lpp5/G/v37S2xX1uuiWFZWFmbPno2+ffuiX79+ePvtt5GVlVViu5SUFMyZMwfDhg1Djx49MGTIEMycObPM19GtW7fQtGlTxS9i/Pz8TH42GAz48ccfMXbsWERHR2PgwIF48cUXcf78eeM2Op0OX3/9NUaPHo0ePXpg5MiRWLRoEQoKCkz2VXyf46FDhzBhwgT07NkTv/76q/F3tiS7rVu3YsKECejduzf69OmDsWPH4scffyz19yUiIiJeyRNG//798cUXX+Dw4cMYMmSI4jZXr17FK6+8ggYNGmDixIlwcnJCXFwcTp8+DQCIjIzExIkTsWTJEgwdOhStWrUCADRv3ty4j8zMTMyYMQO9e/dGv3794O/vX2pdy5cvhyRJGDNmDNLS0vDjjz9i+vTp+Oabb8r1jb4ltd1NlmXMnDkTx48fx6BBg9CoUSMcOnQICxcuRHJyMqZPn26y/alTp7Br1y6MGDEC7u7uWL16Nf7zn//g559/ho+Pj9m68vPzMXXqVMTFxWHkyJEIDQ3Fjh07MGfOHGRnZ+Phhx9GZGQk3njjDcyfPx+1atUydsH09fW1+Pe/m7u7Ox544AGsX78e165dQ/369QEA77//Pn7//XcMHDgQo0ePRkJCAtasWYOLFy9i0aJFilc7y3ped+zYAa1Wi2HDhsHHxwfnzp3DmjVrkJycrNgwt4W4uDhMmzYN3t7emDt3Lnx9faHVajF16lQkJydj6NChCA4OxunTp7Fo0SLcuXMHzz//fIWO89prr2HQoEEYMGAANmzYgDlz5qBx48bG5zg1NRXPPvsstFotRo8eDW9vb2zcuBEzZ87E7Nmz8eCDDwKw7HUBFL1OZ82ahVOnTmHYsGGoW7cudu/ejTlz5pSo7z//+Q+uXbtm3F9aWhqOHDmCpKQkhIaGmv29goODcezYMdy+fRtBQUGlPgfvvvsufv/9d3Tu3BmDBw+GXq/HyZMncfbsWWMPgvfeew8bN25EdHQ0YmJicPbsWaxYsQI3btzAu+++a7K/2NhYvPnmmxg6dCiGDBmCOnXqWJzd4cOH8eabb6J9+/aYPHkyAOD69es4ffq08fkjIiIiZWzkCSIoKAienp6Ij483u82RI0dQWFiIjz76SLGB4e/vj86dO2PJkiVo1qxZia6CQNHVhJdffhnDhg2zqK7MzEx89913xkFhGjdujNdffx3r1q3D6NGjLdqHpbXdbe/evTh27BieeeYZjBs3DkDRlYXXXnsNq1evxsiRIxEREWHc/saNG/j222+Ny9q2bYtx48Zhy5YtGDVqlNnjrF27FtevX8cbb7xhrGn48OGYMmUKFi9ejIEDB8Lf3x/9+vXD4sWLFbtgVkRxoyM+Ph7169fHyZMn8dtvv+HNN980uceqbdu2ePHFF7F9+3bFe6/Kel4nT55s0hgfOnQoIiIi8MUXXyAxMREhISGV/l1Kc+PGDUybNg21atXCxx9/DG9vbwDAypUrER8fj6+//hq1a9cGAAwbNgyBgYH44YcfEBMTg+Dg4HIdKzY2FgsXLkTr1q0BAD179sSIESPw+++/G68or1ixAqmpqfjss8+MDeIhQ4Zg3Lhx+PTTT9G9e3eoVCqLXhceHh7Yu3cvTpw4gcmTJ2PMmDHG7Z577jmT2rKysnD69GlMmTIFjz32mHH5E088Uebv9fjjj+Pdd9/Fww8/jBYtWqBly5bo2LEjWrRoAZXqn84cx44dw++//47Ro0ebNJJjYmKMV4wvXbqEjRs3YvDgwZg1axYAYMSIEfDz88MPP/yAY8eOoV27dsbHxsXF4eOPP0anTp2My5YtW2ZRdvv374eHhwc+/vjjGtW9loiIyBrYXVMgbm5uyM3NNbve09MTALBnz54KD1Li7OyMgQMHWrx9//79TUb9jI6ORkBAAA4cOFCh41vqwIEDUKvVJRpojz76KGRZLtEVs3379iaNvoYNG8LDwwO3bt0q8zgBAQHo06ePcZlGo8Ho0aORl5eHEydOVP6XUeDm5gYAxrx37NgBT09PdOjQAenp6cb/GjduDDc3Nxw/frxCx7m7gZeXl4f09HS0aNECsizj0qVLlf9FSnH16lVMmTIFoaGhmDdvnrGBBxT9vq1atYKXl5fJ79uhQwfo9foKPe+RkZHGBh5Q1JWxTp06Jl+cHDhwAPfff7+xgQcUXVkdMmQIEhIScP36deN2lrwuil+nd99bqfS6dXFxgZOTE/78889Su98qGTRoED7++GO0adMGp06dwrJlyzB58mQ88sgjxqv4ALBz505IkqQ4MJAkScZ6gaLz6G7FV6fvPa/DwsJMGniA5dl5eXlBq9XiyJEj5fp9iYiIiFfyhJKXl1fiHpu79e7dG+vXr8d7772HRYsWoV27dnjwwQcRHR1t8o1+aQIDA8s1yErxN/XFJElCREREhe5HK4/ExEQEBgaWmFaieOCT4ukJiild9fHy8lK8N+re40RERJR4/oqnR7j3ONaSl5cHoKiBAQA3b95EdnY2Bg0apLh9RQfYSExMxJIlS7B3794Sz0V2dnaF9mmpV155Bf7+/vj444+Nv2exmzdv4vLly2a/cKjI76t0VfLe10BSUpLi9BV3v67q169v8eui+HV67+9Xp04dk5+dnZ0xadIkLFiwAIMHD0azZs0QFRWF/v37IyAgoMzfrVOnTujUqRO0Wi3++usvbNu2DWvXrsXLL7+MH374AX5+frh16xYCAwNNGtP3SkxMhEqlMvlCBAACAgLg5eVV4vWu1I3U0uxGjBiB7du3Y8aMGahVqxY6duyInj17onPnzmX+vkRERDUdG3mCuH37NrKzs0t8+Lqbi4sLFi5ciOPHj2P//v04dOgQtm3bhnbt2uGTTz6xqEuULUbGK75KcC+9Xm+3blrmGrn3DtJSVVy9ehUAjHnLsgw/Pz+8+eabittX5P4/vV6P559/HllZWXj88cdRt25duLq6Ijk5GXPmzLH5lBU9evTAxo0bsXnz5hLdg2VZRocOHYxdHO9175cLlrD0iw5HeeSRRxAVFYU9e/bg0KFD+PLLL7FixQrMnz/f4qkvXF1d0bp1a7Ru3Rq+vr5YunQpDhw4gIceesgmNSu9X1ianZ+fH5YtW4ZDhw7h4MGDOHjwIDZs2ID+/fvj9ddft0m9REREomAjTxCbNm0CAHTs2LHU7VQqFdq3b4/27dsDAL755hssXrwYx48fR4cOHcw2uCrq5s2bJj/Lsoy4uDiT+fy8vLwUrwolJSUhLCzM+HN5agsJCcHRo0eRk5NjcjXvxo0bxvXWEBISgitXrsBgMJg0Eqx9nLvl5uZi9+7dCA4ONl5BCg8Px9GjR9GyZctyN8TNPa9XrlzBzZs38dprr2HAgAHG5YcPH65w7eUxZcoUqNVqfPTRR3B3dze5pzAsLAx5eXno0KGDXWopFhwcjNjY2BLL783b0tdF8es0NzfX5Gqe0jGAokZ9TEwMYmJicPPmTTz55JP44YcfzDbuS9OkSRMARffZAkXP6aFDh5CZmWn2al5ISAgMBgPi4uJMpgNJTU1FVlaWRa/38mTn5OSEbt26oVu3bjAYDPjwww+xdu1ajB8/vtQvtIiIiGq6qv3VNVnk2LFjWLZsGcLCwkqd2FjpXp5GjRoBAAoLCwH8c6+Xtbribdq0CTk5Ocafd+zYgZSUFJMuV+Hh4Th79qyxBgDYt29fiakWylNbly5doNfrsWbNGpPlq1atgiRJVuvy1aVLF6SkpGDbtm3GZTqdDmvWrIGbm5vJPV7WkJ+fj7fffhuZmZl44oknjA20nj17Qq/X4+uvvy7xGJ1OV2q3U3PPa/FV1LuvZsqyjNWrV1f697CEJEmYOXMmevTogdmzZ2PPnj3Gdb169cKZM2dw6NChEo/LysqCTqezSU1dunTBuXPncObMGeOyvLw8rF27FqGhocaGj6Wvi+LX6S+//GLcTq/X46effjI5rlarLTFtRXh4ONzd3U3OGyVHjx5VXF58/1xx19AePXpAlmUsXbq0xLbFr4EuXboAQIlpDFauXGmyvjSWZpeRkWGyTqVSGb8cune6BiIiIjLFK3nVzMGDBxEbGwudToe0tDQcO3YMR44cQUhICP73v/+VehVn6dKlOHnyJLp06YKQkBCkpaXhl19+QVBQEFq2bAmg6IOjl5cXfv31V7i7u8PNzQ3333+/yRW18vD29sbkyZPx0EMPITU1FT/++CMiIiJMpnkYPHgwduzYgRdffBE9e/ZEfHw8Nm/ejPDwcJN9lae2qKgotG3bFosXL0ZiYiIaNmyIw4cPY8+ePXj44YetdhVg6NChWLt2LebMmYMLFy4gJCQEO3fuxKlTpzB9+vQS9wSWR3JysnHew9zcXFy/ft3YSI6JiTHpwtimTRsMHToUK1aswKVLl9CxY0doNBrcvHkTO3bswPPPP4/o6GjF45h7XuvWrYvw8HDjtBMeHh7YuXNnmfcp3mvnzp3GK1h3GzBgQJkjYKpUKrz55puYNWsW3njjDXz44Ydo164dHnvsMezduxcvv/wyHnroITRu3BharRZXrlzBzp078dNPP1V4iorSjB07Flu3bsWMGTMwatQo4xQKCQkJmDNnjvGqnaWvi6ioKLRs2RKLFi1CYmIiIiMjsWvXLpMvRoCiK3vTp09Hz549ERkZCY1Gg127diE1NRW9evUqteZZs2YhNDQUUVFRCA8PNw5msm/fPjRt2tQ4l2a7du3Qv39/rF69Gjdv3kTnzp1hMBhw8uRJtG3bFqNGjUKjRo0wYMAArF27FllZWWjTpg3OnTuHjRs34oEHHjAZWdMcS7N77733kJmZiXbt2qFWrVpITEzEmjVr0KhRI5OriERERFQSG3nVzJIlSwAUdWPy9vZG/fr1MX36dDz00ENlNii6d++OxMREbNiwARkZGfDx8UGbNm3w1FNPGUfe1Gg0+M9//oNFixbhgw8+gF6vx7///e8KN/KeeOIJXL58GStWrEBubi7at2+PGTNmwNXV1bhNp06d8Nxzz2HlypWYP38+mjRpgvfffx8LFiww2Vd5alOpVPjf//6HJUuWYNu2bdiwYQNCQ0MxZcoU40iA1uDi4oIFCxbg888/x8aNG5GTk4M6derg3//+d7lGIVVy6dIlvPXWW5AkCe7u7ggKCkJUVBQGDx6M+++/v8T2r7zyCpo0aYJff/0VX3zxBdRqNUJDQ9GvXz+0aNHC7HHMPa8DBw7E+++/j08++QQrVqyAi4sLHnjgAYwcOdI4LYUltm7dqri8TZs2Fk1zoNFoMGfOHMyYMQMzZ87EvHnz0KxZMyxYsADLly/Hjh07sGnTJnh4eKB27domr2dr8/f3x6JFi/D5559jzZo1yM/PR8OGDfH++++ja9euxu0sfV0Uv07nzZuHP/74A5IkoVu3bpg6dSrGjx9v3C44OBi9e/fGsWPH8Mcff0CtVqNOnTp4++23zTbei82aNQt79uzB9u3bcefOHciyjLCwMIwbNw5jxowxmT/x3//+Nxo0aID169dj4cKF8PDwQJMmTUxeP7NmzUJYWBg2btyI3bt3IyAgAGPHjlUclVOJq6urRdn17dsX69atw88//4zs7Gz4+/ujZ8+eeOqpp6r8/ZNERESOJmm12qo5sgQRERERERGVG78OJSIiIiIiEggbeURERERERAJhI4+IiIiIiEggbOQREREREREJhI08IiIiIiIigbCRR0REREREJBA28qhaiYqKwldffeXoMqq92bNnY+TIkSbL7n1uv/rqK0RFRSE9Pd0qx7x3/xs2bEBUVBQSEhKMy0aOHImXX37ZKsezhoSEBERFRWHDhg2OLqVKOXjwIMaNG4fo6GhERUUhKyvL7jUUvz7vNnLkSMyePdtk2c2bN/H888+jb9++iIqKwu7duwEA58+fx7/+9S/06tULUVFRuHjxot1qJ7qX0nsyEVFlcDL0aiYuLg7ff/89jhw5gjt37kCj0aBBgwbo2bMnhg4dChcXF0eXKKSEhASMGjXK7Ppnn30WY8eOtVs9R48exebNm3Hq1Cncvn0bAQEBaNu2LZ5++mkEBgbarQ6qeTIyMvD666+jXr16ePHFF+Hs7Aw3NzdHl2XW7NmzkZCQgGeeeQZeXl5o0qQJdDodXnvtNTg7O2PatGlwdXVFSEiIo0tVpNVq8d1336FNmzZo27ZtmdsfP34czz33nMkyLy8v1K5dG6NGjUK/fv1M1o0cORKJiYnGn11dXVGvXj2MHDkSAwYMKLH/nJwcrFq1Crt27UJ8fDz0ej3Cw8PRtWtXjB49GrVq1SrxmNdffx3bt2/HmDFjMHnyZMW6v/nmG5w9exbnzp1DWloaJkyYgKeeeqrM37fYlStXsHTpUpw/fx5paWnw9vZGZGQkunXrhtGjR5f4fUeOHIkXX3zRZB/Fz93s2bMRHR2NV155BUePHsVvv/0GDw8PxeP+97//xY4dO7Bu3Tr4+PhYXC8Rka2xkVeN7N+/3/jBpH///qhfvz4KCwtx6tQpLFy4ENeuXcPMmTMdXaZNbd++HWq12mHH79OnD7p06VJieaNGjexax2effYasrCxER0cjIiICt27dwpo1a7B//34sW7YMAQEB5d6nvZ/b/v37o3fv3nB2drbbManyzp8/j9zcXDz99NPo0KGDo8sx8cMPP0Cl+qeDSn5+Ps6cOYNx48aZfElz48YNJCYmYubMmRgyZIgjSrWYVqvF0qVLMWHCBIsaecVGjx6Npk2bAihqmG/btg1vvfUWsrOzS1wxatSoEWJiYgAAd+7cwW+//YbZs2ejsLDQ5PmJj4/H888/j6SkJERHR2PIkCFwcnLC5cuXsX79euzevRsrV6402XdOTg727t2L0NBQbN26FZMmTYIkSSXqXbx4MQICAnDffffh0KFDFv+eAHD69Gk899xzCA4OxpAhQ+Dv74/bt2/j7NmzWL16tUkjr9hvv/2GsWPHKjZKi/Xt2xf79u3D7t27FRu8Wq0We/fuRadOndjAI6Iqh428auLWrVt44403EBISgvnz55tcrRk5ciTi4uKwf/9+B1ZoOwaDAYWFhXBxcXH4lcr77ruvxDfhjjBt2jS0bNnS5ANt586dMWXKFKxZswbPPPNMufdp7+dWrVY7tMEumry8PLtcUUtLSwMAeHp6Wm2f1qr93i8MzNVavNzLy6vSxyxmr+ffUq1atUJ0dLTx5+HDh2P06NHYsmVLiUZerVq1TN7XHnroIYwePRqrVq0yNvJ0Oh3+/e9/IzU1FZ9++ilatWplso9//etf+Pbbb0vUsXPnThgMBrz66quYNm0aTpw4gTZt2pTY7qeffkJoaCjS09MxcODAcv2u33zzDTw8PLBkyZISmRZnfbd69eohNjYW3377LV544QWz++3evTvc3d2xZcsWxUbenj17kJeXh759+5arXiIie2Ajr5r47rvvkJeXh1dffVWxO15ERAQefvhh4886nQ4rVqzA77//juTkZAQEBKBPnz6YMGGCyQehkSNHon79+oiJicGCBQtw7do1RERE4IUXXkDbtm2xc+dOfPXVV4iLi0O9evUwa9Ys3HfffcbHz549Gzt37sQ333yDDz/8EKdOnYKHhweGDRuG8ePHm3xj+/3332PXrl2IjY2FVqtFvXr1MHbsWJMPIkDRvVsjRoxA8+bNsXz5cty8eROzZ8/GAw88gKioKJNuPF999RWWLl2KVatWYdmyZdizZw9kWcaDDz6IGTNmwNXV1bjf/Px8fPbZZ9iyZQsKCgrQtm1bvPzyyxg2bFi5uwaZ8/LLL+P69etYvXp1iXXPPPMMdDodli5dWq6MlLRu3Vpxmbe3N65fv16h2u99bpUkJiZi2rRpcHZ2xvz58+Hv74+srCx89dVX2LVrF9LS0hAUFIQhQ4bgscceM2mE3mvDhg145513jB/u7nby5El8+umnuHLlCgIDAzFhwoQSH7Li4+Px+eef4+jRoygoKECDBg0wfvx4dO3a1WS7tLQ0LFq0CPv27UNOTg7q1KmDRx55BA899JDJdllZWZg3bx52794NSZLQrVs3PPLIIxY9dzqdDsuXL8cff/yB27dvw9XVFZGRkRg/fjw6duxo3O7GjRtYsmQJjh8/jtzcXAQHByM6Ohr/+te/jNtcvHgRX3zxBU6dOgVZlnH//ffjmWeeQfPmzUs8dwsWLMC2bduwY8cO6PV6bNq0CQBw4MABLF++HBcvXoQkSWjdujUmT56M+vXrG/eRkpKCRYsW4ciRI0hPT4e3tzeaNm2K559/vkQexaZOnYo///wTADBx4kQAwIABA/Daa68BKLoa/O233+LatWtwc3NDp06dMHnyZJOrJcXvGcuWLcMnn3yCkydPon379njvvffMPr8nT57E/PnzcfXqVQQGBmLMmDGK240cORJt2rTBa6+9ZnxvAICFCxdi4cKFCAkJQZs2bbBx40YAMNbdpk0bLFiwwJjR4sWLcezYMeTn56NevXoYP348unfvbtXnv/h5+OGHH/DRRx/h6NGjcHFxwYABAzBp0iSo1WqTbuJLly41/j4Veb9ycnKCl5eXRV+s+Pn5oW7durhy5Ypx2c6dO3H58mU888wzJRp4AODh4WHyOi62efNmdOjQAe3atUNkZCQ2b96s2Mgz95qzRHx8POrVq6fYaPfz81M8VrNmzbBu3To8/vjjZq/mubi44MEHH8TmzZuRlpZWYl9btmyBu7u7yWvDnAMHDuDbb7/FhQsXIEmS8X2otAaipX8zDx8+jK+//hpXr16FXq9HYGAgevTogWeffda4zerVq/Hrr78iISEBzs7OCAsLw6OPPmpy/OTkZHz55ZfYv38/srOzER4ejpiYGAwaNMjkeJbsi4gcj428amLfvn0ICwtDixYtLNr+vffew8aNGxEdHY2YmBicPXsWK1aswI0bN/Duu++abBsXF4f//ve/GDZsGPr164cffvgBr7zyCl5++WV88cUXGDFiBABgxYoVeP3110t0idLr9ZgxYwaaNWuGyZMn49ChQ/jqq6+g1+vx9NNPG7dbvXo1unXrhr59+6KwsBDbtm3Da6+9hg8++KDEB/Pjx49j+/btGDlyJHx9fcu8X+b1119HaGgonn32WVy4cAG//fYb/Pz8TO7/mD17NrZv347+/fujWbNm+PPPP/HSSy9Z9HwW02q1igOReHp6QqPRoFevXnj77bdx/vx5Y1cpoKhxdPbsWUyZMsW4rDwZWSI3Nxd5eXnw9fUt92MtERcXh2nTpsHb2xtz586Fr68vtFotpk6diuTkZAwdOhTBwcE4ffo0Fi1ahDt37uD555+v0HFee+01DBo0CAMGDMCGDRswZ84cNG7c2PghOTU1Fc8++yy0Wi1Gjx4Nb29vbNy4ETNnzsTs2bPx4IMPAihq2E+dOhVxcXEYOXIkQkNDsWPHDsyZMwfZ2dnGL0ZkWcasWbNw6tQpDBs2DHXr1sXu3bsxZ84ci2r+6quvsGLFCgwePBhNmzZFbm4u/vrrL1y8eNHYyLt8+TImT54MjUaDIUOGIDQ0FPHx8di3b5/xw/HVq1cxefJkuLu7Y8yYMdBoNFi7di2ee+45LFiwAM2aNTM57ocffgg/Pz+MHz8eWq0WALBp0ybMnj0bnTp1wqRJk5Cfn49ffvkFkydPxtdff238MP2f//wH165dMz4vaWlpOHLkCJKSksx+4H7iiSdQp04drF27FhMnTkRYWBjCw8MB/NPwadq0KZ599lmkpqZi9erVOH36NL7++muTD+B6vR4vvvgiWrZsialTp5p8GXOvK1eu4IUXXoCvry8mTJgAvV6Pr776Cv7+/qVm8uCDD8LT0xPz5883drN2c3ODv78/atWqheXLlxu7NBZ/eL969SomTZqEwMBAjB07Fq6urti+fTteffVVzJkzx/i6ssbzX/w8vPDCC2jWrBmmTJmCo0eP4ocffkB4eDiGDx8OX19fvPTSS/jwww/xwAMPoEePHgCABg0alPq7A0XvB8XvVZmZmdiyZQuuXr2KV199tczH6nQ6JCcnm2S2d+9eAEXdrC2VnJyM48ePGxvTvXv3xqpVq/Diiy/CycnJ4v2UJSQkBGfOnMHVq1dNGtKleeKJJ7Bx48Yyr+b17dsXGzduxLZt20y6/GZmZuLQoUPo3bt3mb0gNmzYgHfffdfYSPPy8sLFixdx6NChUhtGlvzNvHr1Kl555RU0aNAAEydOhJOTE+Li4nD69GnjftatW4e5c+ciOjoaDz/8MAoKCnD58mWcO3fOePzU1FQ888wzkCTJ+Hf34MGDePfdd5GTk2P8wsuSfRFR1cBGXjWQk5OD5ORki74tBIBLly5h48aNGDx4MGbNmgUAGDFiBPz8/PDDDz/g2LFjaNeunXH72NhYfPHFF8YrBZGRkXjxxRfxv//9D99//72xgeXl5YX3338fJ06cMLk3pKCgAJ06dTL+oRwxYgReeeUVfPfddxg9erSx0bFy5UqTP4ajRo3C+PHjsXLlyhKNvNjYWCxfvhz16tWz6He+7777TD68ZGRkYP369cZG3oULF7B9+3Y8/PDDmD59urHOOXPm4PLlyxYdAyj6MK80umfx89e9e3c4Oztj27ZtJo28bdu2QZIk9OzZE0D5M7LEjz/+iMLCQvTq1atcj7PEjRs3MG3aNNSqVQsff/wxvL29ARRlGh8fj6+//hq1a9cGAAwbNgyBgYH44YcfEBMTg+Dg4HIdKzY2FgsXLjRerezZsydGjBiB33//HVOnTgVQ9IVDamoqPvvsM+NVhSFDhmDcuHH49NNP0b17d6hUKqxduxbXr1/HG2+8YeyONnz4cEyZMgWLFy/GwIED4eHhgb179+LEiROYPHmy8SrR8OHDSwxgYc7+/fvRpUuXUu+J/eSTTyDLMpYuXWrypcWkSZOM//7yyy+h0+nw+eefGxtP/fv3R0xMDD777DMsXLjQZJ/e3t6YN2+e8epMbm4u5s6di8GDB5vUMmDAAMTExGD58uWYOXMmsrKycPr0aUyZMgWPPfaYcbsnnnii1N+zY8eOSE5Oxtq1a9G5c2fja7y45vr162PhwoXG87xVq1Z4+eWXsWrVKuOVP6DoPSM6OtrkdzdnyZIlkGUZn332mfF569GjR5m1NmzYEB4eHpg/f36JbtYFBQVYvnx5iS6N8+bNQ3BwMJYsWWK8mj5ixAhMmjQJn3/+eYlGXkWf/7vr6NWrF8aPHw+g6DU3fvx4/Pbbbxg+fDjc3NwQHR2NDz/8EA0bNixXV/F33nnH5GeVSoVnnnmmxFUZoCi/4gZhamoqvvvuO6SkpBi/4AOK3gM8PT3LdT5v3boVTk5O6NatG4CiRt6SJUtw4MABPPDAAxbvpywxMTF46aWX8OSTT6Jp06Zo1aoV2rVrh3bt2kGjUf6YEx4ejv79+2PdunUYO3as2QGr2rVrh4CAAGzZssWkkbd9+3bodLoyGzbZ2dmYO3cumjZtigULFpj8DZRludTHWvI388iRIygsLMRHH31k9gu+/fv3o169eiVGnr3bF198Ab1ejxUrVhjvLxw+fDjefPNNLF26FMOGDYOLi4tF+yKiqoFTKFQDOTk5AAB3d3eLtj9w4AAA4NFHHzVZXnxjffH6YpGRkSZdwYqvFrRr187kw+j9998PoOj+wHvdfY9H8TeBhYWFOHr0qHH53X+sMjMzkZ2djVatWikOXd66dWuLG3hAUcPibq1atUJGRobxuTt48CAAmHxoAVDqiJlKhg4dirlz55b4r7hWDw8PdO7cGdu3bzf5A75t2zY0a9bM+HyWN6OynDhxAkuXLkXPnj3L3Tgsy9WrVzFlyhSEhoZi3rx5xgYeAOzYsQOtWrWCl5cX0tPTjf916NABer0eJ06cKPfxIiMjTbqj+vn5oU6dOoiPjzcuO3DgAO6//36TbmPu7u4YMmQIEhISjF1WDxw4YOwGW0yj0WD06NHIy8sz1nfgwAGo1WoMHz7cuJ1arbb49eHl5YVr167h5s2biuvT0tJw4sQJDBw4sMRV6eIuzXq9HocPH0b37t2NDTwACAwMRJ8+fXDq1Cnj67nYkCFDTLrfHTlyBFlZWejdu7dJHiqVCvfffz+OHz8OoOhcdHJywp9//onMzEyLfsfS/PXXX0hLS8OIESNMzvOuXbuibt26ivcL3/1cm6PX63Ho0CE88MADJs9bZGSkSTdYa8jMzMSxY8fQs2dP41Ww9PR0ZGRkoGPHjrh58yaSk5NNHlPR5/9uSu9dSu+x5TV+/Hjj+9Nbb72F3r17Y/Hixfjxxx9LbHv48GEMHDgQAwcOxNixY7Fp0yYMHDjQ+KUKUPR3yNK/QcU2b96Mrl27GkemrF27Nho3bozNmzdX7pe7R8eOHfHFF18gKioKly9fxnfffYcXX3wRQ4cOxZ49e8w+bty4cdDr9Yr3EhZTq9Xo3bs3zpw5YzLdy5YtW+Dv74/27duXWtuRI0eQm5uLsWPHlrjipzQAzd0s+ZtZfL/pnj17YDAYFPfj6emJ5ORknD9/XnG9LMvYtWsXunXrBlmWTV67HTt2RHZ2Ni5cuGDRvoio6uCVvGqg+A9kbm6uRdsnJiZCpVIhIiLCZHlAQAC8vLxMhssGUOJDZ/EfjaCgIMXl986JpVKpEBYWZrKs+KrO3X8U9+3bh2XLluHy5csoKCgwLlf6Q3fv/spy77fLxd2MsrKy4OHhgaSkJKhUqhLd0O59jsoSERFR5oiCvXr1wu7du3HmzBm0aNECcXFxuHDhgvEKIlD+jEpz48YNvPrqq6hfv77xqqA1vfLKK/D398fHH39c4kPezZs3cfnyZbMDJSgNelAWpa65Xl5eJq+7pKSkEl0XgaIP/0DR81u/fn0kJiYiIiKixL2BdevWNW5X/P/AwMASv1+dOnUsqnnixImYNWsWHn30UdSvXx+dOnVC//790bBhQwD/fDFSWley9PR0aLVaxWNGRkbCYDAgKSnJZB/3vp7j4uIAFA3Mo6T4vcTZ2RmTJk3CggULMHjwYDRr1gxRUVHo379/hUZmLX4elWqvW7cuTp06ZbJMrVaXeH9Rkp6ejvz8fMXztE6dOuX+MqQ0cXFxkGUZX375Jb788kvFbdLS0kzu36ro81/M2dm5xH1e977WK6pBgwYm71W9evVCTk4OPv/8c/Tp08fkuMX3fRoMBly9ehXffPMNsrKyTK6CeXh4lKvxef36dVy8eBH9+/c3Pi8A0LZtW6xZswY5OTlmpyWoiKZNm+Ldd99FYWEhLl++jF27dmHVqlV47bXXsGzZMsUvDYuv5q1duxaPP/642X337dsXq1atwubNmzFu3Djcvn0bJ0+exKhRo8q8x7H4yylLu5HezZK/mb1798b69evx3nvvYdGiRWjXrh0efPBBREdHG9/3Hn/8cRw9ehQTJ05EREQEOnbsiD59+qBly5YAis6zrKwsrF27FmvXrlWspfi9vKx9EVHVwUZeNeDh4YHAwEBcvXrVJvs3NziGueVldTFRcuLECcycOROtW7fGjBkzEBAQAI1Ggw0bNmDLli0lti/vsPrWrLWyoqKi4Orqim3btqFFixbYvn07VCpViZvlrSEpKQkvvPACPD098eGHH1r1Q1OxHj16YOPGjdi8eXOJqw6yLKNDhw5mB8IobuyXR2mDtVRVrVu3xo8//og9e/bg8OHDWL9+PX788Ue89NJLNh2i/94rA8Xf5L/xxhuK96zd/YH0kUceQVRUFPbs2YNDhw7hyy+/xIoVK4zdG23J2dm5yuVc/NzFxMSgU6dOitvcfYUVqNzzr/SzrbVr1w779u3D+fPnTbrI+/r6GhuEnTp1Qt26dfHyyy9j9erVxt4GdevWxcWLF5GUlGRRl80//vgDADB//nzMnz+/xPqdO3eWexRNSzg5OaFp06Zo2rQpateujXfeeQc7duww2zPkiSeewKZNm/Dtt9+a7ULapEkT1K1bF1u3bsW4ceOwZcsWyLJs05GWLf2b6eLigoULF+L48ePYv38/Dh06hG3btqFdu3b45JNPoFarERkZiR9++AH79u3DoUOHsHPnTvz8888YP348Jk6caHzd9uvXT3EUUQDGL6zK2hcRVR1s5FUTUVFRWLt2Lc6cOWPStVJJSEgIDAYD4uLijFc2gKJ7LbKysqw+6a/BYMCtW7dMvsUv7rZW/E33zp074ezsjI8//tikAbdhwwar1mJOcHAwDAYDEhISTBoed3/DbC1ubm7o2rUrduzYgWnTpmHbtm1o1aqVyRUAa2SUkZGBF154AQUFBfj8889tNgn6lClToFar8dFHH8Hd3d3kHpSwsDDk5eXZfb604OBgxMbGllh+48YNAP9cDQwJCcGVK1dgMBhMGhVK2x09ehS5ubkmV/OUjmGOt7e3sctbbm4upkyZgqVLl2LIkCHGK9OlfVHj6+sLV1dXs7+XSqUq88N1cSPk7g/tpYmIiEBMTAxiYmJw8+ZNPPnkk/jhhx/w5ptvlvnYuxU/j7GxsSW6C8fGxpb7vsxivr6+cHFxUTxPy5ONJYqfO41GU+HXc3mff0uU1aWvPPR6PYCye4V07doVbdq0wfLlyzF06FC4ubkhKioKW7ZswR9//FHm/ZCyLGPLli1o27ZtiS7yALBs2TJs3rzZJo28uxXfM3rnzh2z20RERKBfv35Yu3atYu+AYn379sWXX36Jy5cvY8uWLahdu7bJfdfmFL8mrl69Wq6eI+X5m6lSqdC+fXtj19FvvvkGixcvxvHjx42vQzc3N/Tu3Ru9e/dGYWEh/v3vf2P58uUYO3YsfH194e7uDoPBYNHrtrR9OXqaIyL6R9X6KpXMeuyxx+Dm5oZ3330XqampJdbHxcUZ77Uonqz73nsviiepVZrMu7LWrFlj/Lcsy1izZg00Go3xA59arYYkSSb3DCQkJJR6v4Q1FX8z//PPP5ss/+mnn2xyvF69ehknFb58+XKJwVAqm1FeXh5eeuklJCcn48MPP6zQFTNLSZKEmTNnokePHpg9e7ZJZr169cKZM2cUJy/OysqCTqezSU1dunTBuXPncObMGeOyvLw8rF27FqGhocaGc5cuXZCSkoJt27YZt9PpdFizZg3c3NyM9/516dIFer0ev/zyi3E7vV5v8esjIyPD5Gd3d3dERESgsLAQQNF9ha1bt8aGDRtKdMUtvtqsVqvRsWNH7N2716Sbc2pqKrZs2YKWLVuWeaW2U6dO8PDwwIoVKxSf++IuV1qtFvn5+SbrwsPD4e7ubqy5PJo0aQI/Pz/8+uuvJt3KDhw4gOvXr5cYWMlSarUanTp1wu7du02et+vXr+Pw4cMV2qc5fn5+aNOmDdauXavYKLCk67Glz395FI88ao0unMX3RjZq1KjMbceMGYOMjAysW7cOABAdHY0GDRpg+fLlJuddsZycHHzxxRcAgFOnTiEhIQEDBw5EdHR0if969uyJ48ePl7jHsaKOHTum2GujuDtvWd2ui+/N++6778xuU/zl1pIlS3Dp0iWLR5Ls2LEj3N3dsWLFihLnXGk9TSz9m6l0T21xvsXn8r3vT05OToiMjIQsy9DpdFCr1ejRowd27typ+EXU3a/bsvZFRFUHr+RVExEREXjzzTfxxhtv4LHHHkP//v1Rv3596HQ6nD59Gjt27DDO+9WoUSMMGDAAa9euRVZWFtq0aYNz585h48aNeOCBB6w+MIezszMOHTqEt99+G82aNcPBgwexf/9+PPHEE8b7Prp27YqVK1fixRdfRJ8+fZCWloaff/4ZERER5RrdsqKaNGmCHj164Mcff0RmZqZxCoXiK46Wflt+8eJFYzeku4WHh5tcYe3SpQvc3d2xYMEC4x/Qu1U2o//7v//DuXPnMGjQINy4ccN4ZQoo+pbVmiPXAUXfFL/55puYNWsW3njjDXz44Ydo164dHnvsMezduxcvv/wyHnroITRu3BharRZXrlzBzp078dNPP9lkSoexY8di69atmDFjBkaNGmWcQiEhIQFz5swxXrUbOnQo1q5dizlz5uDChQsICQnBzp07cerUKUyfPt3YaIqKikLLli2xaNEiJCYmIjIyErt27Sox0Ik5Y8aMQZs2bdCkSRN4eXnhr7/+wo4dO0wGJHr++ecxadIkTJgwwXh1LyEhAfv378c333wDAHj66adx5MgRTJo0CcOHD4dGo8Gvv/6KwsJCk+lAzPHw8MBLL72Et99+G+PHj0fv3r3h6+uLpKQk7N+/Hy1atMCMGTMQGxuL6dOno2fPnoiMjIRGo8GuXbuQmppaodFZNRoNJk2ahHfeeQdTpkxBnz59jFMohIaGWjzfoJKnnnoKBw8exOTJkzFixAhj47tevXpWf++YMWMGJk2ahCeeeAKDBw9GeHg4UlNTcebMGSQnJxtzMsfS5788XFxcEBkZie3bt6NOnTrw9vZG/fr1y7zH6+TJk8YGd2ZmJvbu3Ys///wTvXv3Nt6TWpouXbqgfv36WLVqFUaOHAmNRoN33nkH06dPx+TJk9GzZ0+0bNkSGo0GV69exZYtW+Dl5YV//etf2Lx5M9RqtdnGfffu3bF48WJs27bN2B1006ZNSExMNE5FceLECSxbtgxA0QizpfVu+OSTT5Cfn48HHngAdevWRWFhIU6fPo3t27cjNDS0zCuGERERxqkSzCmewqi4kWVpI8/DwwPTpk3De++9h4kTJ6JPnz7w8vLC5cuXodVq8frrrys+ztK/mUuXLsXJkyfRpUsXhISEIC0tDb/88guCgoKM98m98MIL8Pf3R4sWLeDv748bN25gzZo16NKli/E9cNKkSTh+/DiefvppDB48GPXq1UNmZiYuXryII0eOGOeAtGRfRFQ1sJFXjXTv3h3Lly/H999/j7179+LXX3+Fk5MTGjZsiKlTp5rc+zNr1iyEhYVh48aN2L17NwICAjB27FhMmDDB6nUVd+X78MMPsXDhQri7u2PChAnGYcGBontBXn31VXz77beYP38+QkNDMWnSJCQmJtqlkQcUzaXn7++PrVu3YteuXejQoQPeeustxMTEWHwP4JYtWxTvIRwwYIBJI8/FxQXdunXD5s2b0b59e8UJeSuT0aVLlwAA69evx/r1603WhYSEWL2RBxR9kJ8zZw5mzJiBmTNnYt68eWjWrBkWLFiA5cuXY8eOHdi0aRM8PDxQu3ZtPPXUU8bBeqzN398fixYtwueff441a9YgPz8fDRs2xPvvv2/ywdLFxQULFizA559/jo0bNxonQ//3v/9t8sFPpVLhf//7H+bNm4c//vjDOBn61KlTTV7H5owePRp79+7FkSNHUFBQgJCQEDz99NMm9yo2atQIixcvxpdffolff/0V+fn5CAkJMU6rARQNzvDZZ59h0aJFWLFihXEy9DfffLPUrmR369u3LwIDA/Htt9/i+++/R0FBAWrVqoVWrVoZf+fg4GD07t0bx44dwx9//AG1Wo06derg7bffrvC9owMHDoSrqyu+/fZbfP7553B1dcUDDzyAyZMnK05SbamGDRvik08+wfz587FkyRLUqlULTz31FFJSUqz+3lGvXj3jJOobN25ERkYG/Pz8cN999+HJJ5+0aB+WPP/l9eqrr+Ljjz/G/PnzUVhYiAkTJpTZyFu9erXx305OTggLC8Mzzzxj9v5ZJTExMZgzZw7++OMPDBw4EBEREVi2bBlWrVqF3bt3G0d0jIiIwODBgzF69GjodDrs2LEDzZs3NxmJ927169dHWFgY/vjjD2Mjb/369fjzzz+N2xw/ftw4GmnLli1LbeRNnToVO3bswIEDB7B27VrodDoEBwdj+PDhePLJJy16/T355JPYvHmzsUurkr59++L06dO4//77y9X1cvDgwfDz88O3336LZcuWQaPRoG7duqV++WHp38zu3bsjMTERGzZsQEZGBnx8fNCmTRuT99+hQ4di8+bNWLVqFfLy8lCrVi2MGjXK5DXt7++PJUuWYOnSpdi9ezd++eUX+Pj4oF69eiZfMFmyLyKqGiStVmv/kSlIGLNnz8bOnTuxdetWR5dSIRcvXsT48eNN5lEjIiIiIqrOeE8e1Rj33g8BFN0Tp1KpTOZlIyIiIiKqzthdk2qM7777Dn/99Rfatm0LtVqNgwcP4uDBgxg6dGiFR/8jIiIiIqpq2MijGqN58+Y4cuQIli1bhry8PAQHB+Opp54qczhwIiIiIqLqhPfkERERERERCYT35BEREREREQmE3TWrOIPBgISEBHh6elo8lxsREREREYlHlmVkZ2cjNDTUOC+wEjbyqriEhAQ0aNDA0WUQEREREVEVceXKFYSHh5tdz0ZeFVc8memVK1cqNaFwWbKysmy6f7IfZikOZikOZikG5igOZimOmpZlVlYWGjRoYGwjmMNGXhVX3EXTy8sL3t7eNjuOu7s7NBq+HETALMXBLMXBLMXAHMXBLMVRU7Ms6zYuDrxCAICMjAxHl0BWwizFwSzFwSzFwBzFwSzFwSyVsZFHREREREQkEDbyCADg4eHh6BLISpilOJilOJilGJijOJilOJilMjbyCEDRVA0kBmYpDmYpDmYpBuYoDmYpDmapjI08AgDk5eU5ugSyEmYpDmYpDmYpBuYoDmYpDmapjI08IiIiIiIigbCRRwAAPz8/R5dAVsIsxcEsxcEsxcAcxcEsxcEslbGRRwCAzMxMR5dAVsIsxcEsxcEsxcAcxcEsxcEslbGRRwAAvV7v6BLISpilOJilOJilGJijOJilOJilMjbyCACg0WgcXQJZCbMUB7MUB7MUA3MUB7MUB7NUxkYeAQA8PT0dXQJZCbMUB7MUB7MUA3MUB7MUB7NUxkYeAQDS09MdXQJZCbMUB7MUB7MUA3MUB7MUB7NUxkYeERERERGRQNjIIwCAu7u7o0sgK2GW4mCW4mCWYmCO4mCW4mCWytjIIyIiIiIiEggbeQQAyM3NdXQJZCXMUhzMUhzMUgzMURzMUhzMUhnHHCUiIqIaZ968ecjMzIS3tzemT5/u6HKIiKxK0mq1sqOLIPMyMzMRFBSE27dvw9vb22bH0ev1UKvVNts/2Q+zFAezFAezrHoaNGiA+Ph4hIeH48qVKxY9hjmKg1mKo6ZlaWnbgN01CQCQnZ3t6BLISpilOJilOJilGJijOJilOJilMjbyCACg0+kcXQJZCbMUB7MUB7MUA3MUB7MUB7NUxkYeAUCNuswtOmYpDmYpDmYpBuYoDmYpDmapjI08AgCb3u9H9sUsxcEsxcEsxcAcxcEsxcEslbGRRwCAtLQ0R5dAVsIsxcEsxcEsxcAcxcEsxcEslbGRR0REREREJBA28ggA4Obm5ugSyEqYpTiYpTiYpRiYoziYpTiYpTI28ggAoFLxpSAKZikOZikOZikG5igOZikOZqmMzwoBAHJychxdAlkJsxQHsxQHsxQDcxQHsxQHs1TGRh4REREREZFA2MgjAICPj4+jSyArYZbiYJbiYJZiYI7iYJbiYJbK2MgjAEBubq6jSyArYZbiYJbiYJZiYI7iYJbiYJbK2MgjAEBhYaGjSyArYZbiYJbiYJZiYI7iYJbiYJbK2MgjAIBarXZ0CWQlzFIczFIczFIMzFEczFIczFKZxtEFUNXA/sziYJbiYJbiYJbVU74uC7ey/4TOoIWva10E+tzn6JLISnhOioNZKmMjjwAAqampCAgIcHQZZAXMUhzMUhzMsvq5krYd5++shUHWGZc56QMR3ehFuGr4obK64zkpDmapjN01iYiIiO5yO+ccziavMWngAUBWYRyO3FrioKqIiCzHRh4BANzc3BxdAlkJsxQHsxQHs6xerqbtUFyu0WiQpr2KNO11+xZEVsdzUhzMUhkbeQSAN62KhFmKg1mKg1lWLxn5cYrLJUkCAGTmx9uzHLIBnpPiYJbK2MgjAEB2drajSyArYZbiYJbiYJbVi7PaU3F58VDt5tZT9cFzUhzMUhkbeURERER3qe3dyew6Z7UXgjya2bEaIqLyYyOPAHD4WZEwS3EwS3Ewy+qlnu+DCHBrVGK5q4s7WgePgVri4OTVHc9JcTBLZWzkEQAgLy/P0SWQlTBLcTBLcTDL6kWtckLniKloGfQo/N0awtslHHV8uqJ94HMI8Wzh6PLICnhOioNZKuNXUQQAKCgocHQJZCXMUhzMUhzMsvpRSxpE+nZHpG9347KUlBQHVkTWxHNSHMxSGa/kEQBApeJLQRTMUhzMUhzMUgzMURzMUhzMUhmfFQIA+Pn5OboEshJmKQ5mKQ5mKQbmKA5mKQ5mqYyNPALALigiYZbiYJbiYJZiYI7iYJbiYJbKeE8eERERVQsF+hyka29AJTnB360BVBK/qyYiUsJGHgEAXF1dHV0CWUlNyVKry8SNjH1Iy7sKtcoZ4V7tEeLZSqgPfTUly5qAWVaOQTbg3J1fcD19Dwxy0YTkrhpfNAscjnDv9nargzmKg1mKg1kqYyOPAAAaDV8KoqgJWaZpr+Ng3EIUGnKNyxKyTyDIoxk6hv0LKkntwOqspyZkWVMwy8o5d+cXXE3bbrJMq0vHscRlcFJ7IMijqV3qYI7iYJbiYJbKxPnKmyolOzvb0SWQlYiepSzLOJ6wzKSBV+x2zllcS99p/6JsRPQsaxJmWXEF+hxcT99jZq2MS6l/2K0W5igOZikOZqmMjTwiqlZS8i4jpzDZ7PobGfvtWE31IcsyDLLB0WUQlVu69oaxi6aSlLzLkGXZjhUREVV9vL5JAABvb29Hl0BWInqWWl16pdZXJ9bIMrcwBRdSfkd81jEY5EL4utZFQ78+CPNqY4UKyVKin5e2pJKcylivgSRJdqmFOYqDWYqDWSrjlTwCAGi1WkeXQFYiepaezkFlrA+2UyW2V9ks8wrTsPfmx7iZedB4JSRdewNHE5bgatpOK1RIlhL9vLQlf7cGcFH7mF1vzy8smKM4mKU4mKUyNvIIAFBQUODoEshKRM/S17Uu/FzrmV0f6fOAHauxrcpmeTlti9krm3+l/AadIb9S+yfLiX5e2pJKUqF5rREASl6tc1Z7obH/Q3arhTmKg1mKg1kqYyOPAMBuXV3I9mpClu1Cx8PDqeQVvfq+0ajj09kBFdlGZbO8lXXC7DqdQYvk3L8qtX+yXE04L20p3Ls9OodPQYBbIwASVJITIrw7onvtGfBwrmW3OpijOJilOJilMt6TRwAAf39/R5dAVlITsnR3CkB05GtIyD6B1Lyr0KhcEe7VDt4uYY4uzaoqm2Vpg1UAgMFQ+nqynppwXtpakEdTBHk0hSzLDvtQxxzFwSzFwSyV8UoeAQBSUlIcXQJZSU3JUiWpEe7VDi2CRqNp4GDhGnhA5bMMdL/P7DoJKvi7N6zU/slyNeW8tAdrNPB0hnzoDEVdvPSGQotH52SO4mCW4mCWytjIIyISVEO/3pDMTAxf27sT3DS+9i2IqApIzD6FzVf/g0J9DgCgQJ+Nbdf/i5TcS0jMPoXbOeehl3UOrpKIqHLYXZMAAC4uLo4ugayEWYqjsln6udVDx7B/4fTtVcgtLPqmUyU5oY5PFzSrNdIaJZKFeF5WDdkFSTia8BUM9zTibmYewqXUPxDq2QYqSQVntRea1RqO2t6dTLYrK8fM/HhcSv0DSTnnIEFCsGdzNPLrCy+XUKv/LlQ5PCfFwSyVsZFHAABnZ2dHl0BWwizFYY0sgz2aISjy/5CmvQa9oQA+rrXhrPawQnVUHjwvq4Zr6btLNPAMsg5ZBQkAgLzCFHg410KBPgt/Jq6As9oTwR7NjNuWlmNq3lUciF8A/V2j1sZlHkZi9ilERTwPH9faVv5tqDJ4ToqDWSpjd00CAGRlZTm6BLISZikOa2UpSRL83eqjlkcTNvAchOdl1ZCRH1dimQyD8d8Ff3fhLF5zOXWLybal5Xg2+WeTBl4xnUGLs3d+KX+xZFM8J8XBLJWxkUdEREQ1govas9T1KpVpB6fUvCsW7TevMA1p2mtm19/JvYh8HT+IEpH9sJFHAAAvLy9Hl0BWwizFwSyrP72sQ1zmEVzJW4s/E1cgMfsUDLKh7AeSTUR4dTC7ToJUYv5NlcrJ5Gdz56TOoC3jyDL0Midsrkr4/ioOZqmM9+QRAKCgoIB9mgXBLMXBLKu3fF0W9sfNR1bBLRQWFsLJyQk3Mw/C360hOodPhkbFwQLsLcSzFcK92iE+61iJdX6u9aBRmZ5v4Z5tTX42d056OAfBRe2NfH2m4nHdNH5w1fhVonKyNr6/ioNZKuOVPAIA5OeXvI+AqidmKQ5mWb2dvr0KWQW3AAB6vd64PDXvMv6685ujyrKpAn0O7uReUrz3rSqQJAltQ8ajbciTUElFV+k0KleEeLQ0zrWpl3XIzI9HZv4tqCUX5OnSjY83d06qJDXq+/U0e9wGfr2hkviRqyrh+6s4mKUyXskjIiKysnxdFhKyT5pdH5t5EE1rDYNaEuPPsF7W4eztnxCbeRAGuRAA4O0SjpZBj8DfrYGDqzMlSRIivDvAReMJIAOuGh9ER76Gy6lbcDPzEO7kXoCbxg8+rrVxLWMnrmfuQevgx1Hbu2Op+23o1xs6gxZX0rYZnwO1ygUN/fqgvl8Pm/9eRER3E+OvC1VaQECAo0sgK2GW4mCW1VeeLtVk1EZXV1eT9TpDHgr1OVBrfOxdmk2cSFxeogtkZn48DsQvxAN1XoGXc4iDKrNMLffG8HYOQ2reFbh6e5usk2U9TiSugJ9rJAICgszsoajx2DRwMBr49UJK7kUAQKB7Yzip3WxaO1UM31/FwSyVse8AAQBSU1MdXQJZCbMUB7Osvtw0/pDu+hOrvac7kUblCidBprPILriN+Kzjiuv0hnxcTdtu54qKFOhzcCvrOG5l/YkCfW6Z29/MPGh2cBQZBtzI2GfROemsdkeoV2uEerVmA68K4/urOJilMl7JIwCALMuOLoGshFmKg1lWXy4aL4R4tkRC9omiBfdkWdu7szBdNe/kXgBg/rWanPuX/YpB0XnzV8p6k26TKskJDf16o0ngILOPyylMLnW/OQW3IbvynBQF31/FwSyV8UoeAQBHJRIIsxQHs6zeWgY9Ci/nUACASq02Lvd3a4CmgUMcVZbVSZK69PUofb21XUnbikupm4wNPAAwyIW4mLoRV9K2mX2cq8a31P26OvnynBQIsxQHs1QmxteIVGn33i9C1RezFAezrN5cNF54oO4sJGT9iYTM09BonBDq2QpBHs2FGmkxxKMFTksaGGSd4vowrzZ2q8Ug63ElbYfZ9VfStqOeb7Ti81/buzMupmw0uZfybnW9u8JVzXNSFHx/FQezVCbOXxmqlMxM5bl9qPphluJgltWfWtIgwrsD6rsNRZuQsQjxbClUAw8oasw29OuruM5N44/6vtF2qyW38A7y9Rlm12t16cgrTFFc5+7kj5bBMQCkEuuaBg6Dj2ttnpMCYZbiYJbKeCWPiIiIKqVJ4EC4OfniSto2ZBckQSU5IdyrHZoEDoaLxstudagtmGC+tG3q+nSFv2s93MjYh+zC23DT+KGuT1f4utY12U4v64S5p5KIxMR3KAIAeHp6OroEshJmKQ5mKY6akGVdnyjU9YmCzpAPleTkkCuWbhpf+Ls1RGreZcX1ge6N4arxVlxXzMslFM2DRpVYbpANSNIdxLGrB6DVpcFZ7YW6Pl3RyL8/NCreE1Td1IRzsqZglsrE6jNCFabTKd9LQdUPsxQHsxRHTcpSo3JxaJfU5rVGQKMqeY+ORuWKZoHDK7zf44lf4/ydddDq0gAABfosXEr9AwfjF8IgK9/HR1VXTTonRccslbGRRwAArVbr6BLISpilOJilOJil/fi61kX32i8hwrsjnFQecFJ5oLZ3J3Sv8zJ8XGtXaJ+peVdwK+s49HodCvQ5SMm7gsTsU7idcx5xmYdxy8w8gVR18ZwUB7NUxu6aREREVK3pZR1uZR1DRn4cnFWeiPDugLYh4yx7rKEAp26vggpqhHq2RoB7wxLb3Pp7vsNcXQqy8q9DvmtewFxdCg7fWoRwry8hSSUHbSEicgQ28ggA4O/v7+gSyEqYpTiYpTiYpWUy8uNwJW0bUnIvQ61yQphnO9T36wFntYfZx2Tmx+Ng/GfQ6tKNyy6krEfTwKFo6N/b7OPkv7tYFuhzcD19NwDgavoOhHi0RPuwiVDdNf+fQdZBL+uQpY81aeAVS827gsTskwj1al2+X5gchuekOJilMnbXJABAenq6o0sgK2GW4mCW4mCWZbudcw57Yj9AXOZh5OlSkV2QhIupv2NP7AfI12UpPsYgG3D41hcmDTwAkGHAuTu/4E7uRbPHKzTkKS5PzDmFiymbTJYFut2H3IJk6PUFio9x1fgiNvOg4jpZlnEjYx92x76PTVdmYnfsB4jNOABZLtlYJPvhOSkOZqmMjTwCABgMvGlcFMxSHMxSHMyydLIs49TtlYoTqucUJuNi6iaFRwG3c84g18y8dwBwLX2X4vI8XTr0hkKzj7uRsdekERbi2QrOai8otcs0kgs8nIOg1SnPz/dn0nKcTPoe6dobKNBnI117HSeSvsWp2z+YPT7ZHs9JcTBLZWzkEQDAycnJ0SWQlTBLcTBLcTDL0qVqr5baWIvLPKK4PKsgqdT9ZhUkKi4vOpb5K2n5+kzoDP8M5qCSVGgfNhFuTn64e7J0V7U3gj2aQy1p4OUSUmI/d3IvIi7zsOIxbmTsQ1retVLrJ9vhOSkOZqmM9+QRAMDd3d3RJZCVMEtxMEtxMMvSFepzS19vUF7vqvEp9XGuGt8KPc5J5V5i0vS6Pl0R6dsdubokFOq10Khc4KR2+3uthHo+D5TYT1yWcuP0n/VH4edWr9RtyDZ4ToqDWSrjlTwCAGRkKHczoeqHWYqDWYqDWZbOx7U2pFI+kvi61lVcHurZGk4q8x/w6np3UVzu4RQIlWT+2//aPp1LzPWnktRo4hkDT+cQuDn5Ght4kqRGy6BHFRtrd18NVKIzc18g2R7PSXEwS2W8kkdEREQO5abxRbhXO7NXvhr69VJcrlE5o03IEziasKTE/XwR3h0R5tXO7DGLGmklPxz6udZDk4BBio/xcgpDr3r/h4SsP5GZHw8XjRfCvTrAVeOtuL2fa2Spc+j5ufIqHhHZBht5BADw8DA/PDVVL8xSHMxSHMyybC2DY6CXC5Hw95x0AKCSnNAkYCDCvNqafVyIZwv0qPsfXM/Yg8z8eDirPVDbqxOCPJqVOm9d8RQJTmp3hHi0hCSpEObZBqFebUymT7ibh4cH1JIGEd4dAHQo83eq7d0Fl1I3o0CfXWKdi9oHEd4dy9wH2QbPSXEwS2Vs5BEAjkwkEmYpDmYpDmZZNo3KBR3CnkZWfgJS8i5DrXJGsEcLOKvLvt/G0zkIzWuNrPBxO4b/y6Jty5ujs9odXcKn4mjCV8gpTDYu93QOQfvQp6C5574/sh+ek+JglsrYyCMAQF5eHm9cFQSzFAezFAeztJyXSyi8XEIdXYaiiubYwK8XcgpT4O7kDy/nUAS6N7JBdVQePCfFwSyVsZFHREREZGX5+mwcS1iKO7kXjMucVO5oGfSIA6siopqCo2sSAMDPz8/RJZCVMEtxMEtxMEsxlCfHo7eWmDTwgKKpII4nfoPUvKvWLo3KieekOJilMjbyCACQmZnp6BLISpilOJilOJilGCzNMV17Ayl5lxTXyTDgatp2a5ZFFcBzUhzMUhm7axIAQK/XO7oEshJmKQ5mKY7qkqVWl4lr6TuRlHMGsiwj2KMZ6vn1gJuZScVrGktzTNNeL2P9DStUQ5VRXc5JKhuzVObQK3mzZ89GVFRUif/i4uIwdepUzJ07t8RjNmzYgH79+hl//uqrrxAVFYX333/fZLuLFy8iKioKCQkJJst37NiBqVOnom/fvujduzeeeOIJLF26tMS3AL///jsmTZoEACVqmTp1KqKiorB161aTx6xatQojR4402cbcf1OnTi3382VLTk7mJ4Wl6oVZioNZiqM6ZJlTkIzdsf/DpdQ/kJkfj6yCW7ictgW7bryHrIJER5dXJViaY2kTtAMwTqROjlMdzkmyDLNU5vAreZ07d8a///1vk2W+vr7l2oezszPWr1+PmJgY1K5d2+x2X3zxBb777js8/PDD+Ne//oXAwEDcvHkTv/76KzZt2oSHH37YuO2ePXvQrVu3Uo+5ePFi9OjRAxpNyafxnXfeQWFhIQDg9u3bmDhxIubNm4d69YomPq1qL0jOMSIOZikOZimO6pDl2eSfodWlG382yHrk67Kg1WXg9O3V6BrxnOOKqyIszTHEsyU0KlfoDFrF9RFeZc+xR7ZVHc5JsgyzVObwRp6TkxMCAgIqtY86derAz88Pixcvxttvv624zblz57B8+XJMnz7dpDEXGhqKjh07Iisry7gsPz8fR44cwbPPPmv2mH369MHevXuxbt06jBgxosR6b29v478LCgqMy8r6XQsKCoyNQwDIyckpdXtrSU9Pr3QOVDUwS3EwS3FU9SwL9LlIyjkDAJBlICM/Fpn58TCgqBtUcu4FBLk3RUP/3o4s0+HS09Ph5euK+Kyj0Ooy4OkcjDDPtlCrTL+41ahc0LzWKJxI+g6AbLLO17UuIn0fsGPVpKSqn5NkOWapzOGNPGuZNGkSJk6ciPPnz6Np06Yl1m/evBlubm6KDTIA8PLyMv772LFjCAwMRN26dc0ez93dHU888QS+/vprDBgwAG5u1ul6sWLFCixdutT4M/sZExGRrRUaciGjaELhjPybSM+PNVmvlwtwIuk7eDgFItSrtQMqrBqS8k7iQOo6GOSiL2N1hgI4qz3QNeJ5+LtFmmxbx6cL3J0CcTVtO9LzY+GkckOEd0fU832Qk6ATkc05vJG3f/9+9O79zzeDnTt3xuzZs8u9n8aNG6Nnz574/PPPMX/+/BLrb968ifDwcMWulfcqq6tmsREjRmD16tVYuXIlxo8fX+6alYwdOxaPPvqo8efMzExjF09b4qVucTBLcTBLcVT1LF01vnBWe0Gry0BmfnyJ9SpJA43KFRdTNzmskWeQ9UjJvQS9XAg/10i4aLzKfpAVZeUn4ELWT1CrJWh1mUjTXke+vuh+/ltZx9G3/ruI9I0yeUygeyNOfF5FVfVzkizHLJU5vJHXpk0bvPzyy8afXV1dK7yvp59+GmPGjMGhQ4cqPGeGLMvYt28f3nrrrTK3dXZ2xsSJE/HJJ59g+PDhFTqe0j6dnZ2NP9vrSp4sy2VvRNUCsxQHsxRHVc9SLWlQz/cBnEz6AQboSqz3cg6BSlIhI/8mCvV5dh845GbmYZxL/sXYqFJJGtT1iUKzWqOgkuwzhtz1jD2QZT20uqKurcVXPgFAq8/AvrhPAMiI9C37S2JyvKp+TpLlmKUyh8+T5+bmhoiICON/gYGBAIq6Qyrdj5adnW22xR4REYEhQ4Zg0aJFJdbVrl0b8fHx0OlK/vG627lz56DX69GiRQuL6u/Xrx9CQkKwbNkyi7avqnJzcx1dAlkJsxQHsxRHdciykX9/1PbqCAmScZkECZ5OQfB1qWtcopLs+/3w7Zzz+DNxubGBBwAGWYdr6btwNvknu9WRVZAInU6HjPybJg28YoX6HFxI+R0GueQ6qnqqwzlJlmGWyhzeyDOnTp06uHjxYonlFy9eLHUEzfHjx+PmzZslpjfo06cP8vLy8PPPPys+rnjglT179qBLly5Qq9UW1alSqfDss8/i119/LTFdAxERUXWhklToHDEVDf36wt+1Pvxd6yPMsy0C3e+DJBU1/EI8WpQYZMTWLqX+gXsHLyl2I2Mf8vXZdqnDRe0FGQbk3TUC6d3UKmfk6zOQXsYceURE9lBlG3nDhw9HbGwsPvnkE1y+fBk3btzAypUrsWXLFsTExJh9nL+/Px555BGsXr3aZHmzZs0wZswYLFiwAAsXLsSZM2eQmJiIo0eP4rXXXsPGjRsBAHv37rXofry7de3aFffffz/Wrl1b/l+0iqho91aqepilOJilOKpLlpIkoU3IWPi4RMDbJcykW6ZG5YomgYPsXlNq3hWz6wyyDul51+1SRx3vLnBxdoFSg1OCBA+nWn//xK5j1UF1OSepbMxSWZVt5IWHh+Ozzz7DjRs3MH36dDzzzDPYtm0bZs+ejc6dO5f62Mceewzu7iUnIp08eTLefPNNnDt3Di+88AIef/xxfPrpp2jQoAEGDBiAuLg4xMfHo1OnTuWud/LkycapEqqjeyeDp+qLWYqDWYqjOmUZ4tkCXSKmIdC9CYq7Z4Z5tUW32jPg7RJu93pUZVw5VKucS11vLbU8miDEpTNc1T4myyVI8HetD43KBc5qL/i4mh+Zm6qO6nROUumYpTJJq9XyK6e/rVy5EkeOHMFHH33k6FKMMjMzERQUhNu3b5vMvWdtKSkpnGNEEMxSHMxSHNU1y+IBDYq7azrCicRvEZt5QHGdq8YXveu9Xe7BV/SyDg0bNETCrUSEh4fjyhXzVwvvlpKSgjvyIey9+TF0shZOKnd4OYcYR/psXms06vv1KFct5BjV9Zykkmpalpa2DRw+umZVUqtWLYwdO9bRZTiEJVNLUPXALMXBLMVRXbN0ZOOuWIR3J5y/sw5afQacVG7wcA6GWtIAkNAscHi5GniyLONy2lacur0SeYWpAIDcwlRcT99r0aiYarUKjX0fgp9bPZy78ysy8+MAAG4aPzTy74dI3+4V+h3J/qrrOUklMUtlfFbu0qtXL0eX4DCenp6OLoGshFmKg1mKg1lWzLX0XTh9ezVcNF7Q6tKRro1FRn4c7vMfgNYhj6OWe+Ny7e+vlN9wMH4hsgoSjSNkGuRCrL80DT0j38T9tYaWeExuYSoupGzArazjKDTkwz+zHhr59UGPuq8ip/AODLIOHk5BdpvKgayD56Q4mKUyviMRACA9Pd3RJZCVMEtxMEtxMMvyS9fewOnbqwHI0KhcEeDeCHV8uqC2dycUGvLgW8573wr0OThz+ydkFSSWWGeAHofiP4NWZ3pvT15hGvbe/Ag3Mw9CLxegID8f6drrOJLwJa6l74aHU6BxDkGqXnhOioNZKuO7EhEREVU51zP2wtxIlTpDHuIzj5Rrfym5l5CRH1/0g1xyzzm6O4jPMt3npbTN0JqZMuGvO79BZ6i+A64RkdjYyCMAUByNlKonZikOZikOZll+OQXJpa8vLH29Er1cgAJdNnIKb0OW9QAAWTYYB5jJ12WZbJ+QdcLk57vv/Sk05OJO7l/lroGqBp6T4mCWynhPHhEREVU5bk5+QF4p6zXlmxsr0L0xCnRZKDCYTp4uQ0ZeYSpquTeBl0uoyTq9XFhiPwZZh8z8W8guuA2tLhPhXu3RwK8nQjxblKseIiJb4pU8AgDk5uY6ugSyEmYpDmYpDmZZfnV9zI92qZKcEOHdsVz7yy5MgodzEICSI4YaoIOrkx/CPNuaLA90a2Tyc0FhPhKzzyA9PxZ6OR8alStS8i7i8K1FuJK2vVz1kGPxnBQHs1TGRh4RERFVOQFuDXCf/0MllqskDdqGjIOz2qNc+0vMPoVA90ao5d4U0l0dmSRI8HQKRphna6jvmXi9oX8fSJLa+HOO7rbxSqCnczA0d03Efv7OOhToc8pVExGRrbC7JgEAfH19HV0CWQmzFAezFAezrJgmgQMR7NkMsRkHkK/LhKdzMOr6doOHU2C59yXLRVMmBHvcjwDXhlBLPwLIh1rljLo+XeGiKTmpsL9bfXQIfRpnklcjtzAFBUiHBBU8nYPh51rPZFuDXIiE7JOo69O1Qr8r2RfPSXEwS2Vs5BEAIDs7Gz4+Po4ug6yAWYqDWYqDWVacn2sk/FwjK72fWh5NcTltCwBAo3aG6u8rdBJUkCQVgtybKj4uxLMFgj2aI017DduvvAe1k/T3ZOwl6QzaStdJ9sFzUhzMUhkbeQQA0Ol0ji6BrIRZioNZioNZWodBNiAp+xQSc05Blg2o5dEUYV7tzDa67lbLvTH83RoiNe9yiXWuGj/UKeUKnCRJ8Herj2DX1kgznDW7nb9bfct+EXI4npPiYJbK2MgjAIBarS57I6oWmKU4mKU4mGXl6Qz5OBj/mUkjLS7rCC6lbkbXiOlwVehuea9O4ZNw5vaPiM86ZlymkpwQVft5OKvLHoY90rsHMjIuwCCX/FAZ4HafVa44kn3wnBQHs1TGgVcIAODtXfYfR6oemKU4mKU4mGXl/XXnN8WrcNkFiTh9e5VF+3BSuaJNyBPoW/8duGi8AAAuGk+L7/ELD2yC9qET4aI27RoW5NEMHcImWrQPqhp4ToqDWSrjlTwCAKSlpSEgIMDRZZAVMEtxMEtxMMvK0cs6xGYeNLs+MfsUtLpMi67mAYCz2gMqC7p43istLQ0hAS0Q5NEMd3L/QoE+Bz4uESXm16uMO7kXkZB9AgZZj1ruTRDi2Qoqid/JWxvPSXEwS2Vs5BEREVGVVqjPhc5gfmZ0GQbk6VItbuRVlkpSIcjjfqvuUy/rcPTWEiTlnDYuu5GxF94uEegSPtV45ZGIyBL8aogAAG5ubo4ugayEWYqDWYqDWVaOs9oDTirz98xJUMFN42/zOmyZ46WUTSYNvGKZ+XE4dXulzY5bU/GcFAezVMZGHgEAVCq+FETBLMXBLMXBLCtHJalR26ez2fUhni3tchXPVjnKsozrGXvNrk/MPoU8XbpNjl1T8ZwUB7NUxmeFAAA5OTmOLoGshFmKg1mKg1lWXpOAwQhwa1RiuZdzGFoGPWqXGmyVY6EhDwX6LLPrZRiQW5hik2PXVDwnxcEslfGePCIiIqryNCpndI2Yjts5Z5GQcxKybECQe1OEerUxTmxeXWlUrnBSuaPQkGtmCwluGl97lkRE1RwbeQQA8PHxKXsjqhaYpTiYpTiYpXVIkoRgz+YI9mzukOPbKkeVpEJtn864mrZdcX0t9yZwd+LogdbEc1IczFIZu2sSACA319y3h1TdMEtxMEtxMEsx2DLHJgGD4Odav8Ryd6cAtAqOsdlxayqek+Jglsp4JY8AAIWFhY4ugayEWYqDWYqDWYrBljlqVC6Iqv08ErJPICH7BGTZgED3xqjt3QkalYvNjltT8ZwUB7NUxkYeAeDIRCJhluYZZH21uneHWYqDWYrB1jmqJDXCvdoh3KudTY9DPCdFwiyVsZFHAABfX19Hl0BWwixNybKMq+k7cC19J3ILU+Cs9kQd7y64L2BAlf92nFmKg1mKgTmKg1mKg1kqY9OXAACpqamOLoGshFmaOpn0Hc4mrzEOP16gz8bltC04EPcp9LLOwdWVjlmKg1mKgTmKg1mKg1kqYyOPiISVkR+H2MwDiuvStNcQn3nEzhURkaMUGrTI15mfi46ISCTsrkkAAFdXV0eXQFbCLP9xK+vPUtcnZJ9AHZ8udqqm/JilOJil42Rob+L8nXW4nXsegAxP5xDc59+/QvtijuJgluJglsrYyCMAgEbDl4IomOU/DGV0x9TLVXtELmYpDmbpGJn5t7Avbi50Bq1xWXZBIo4nLoPOkF/u/TFHcTBLcTBLZeyuSQCA7OxsR5dAVsIs/xHofl/p691KX+9ozFIczNIxLqb8btLAu5vOkFfu/TFHcTBLcTBLZWz6EhFkWcat7OO4mXEQ+foseLuEoZ7vg/B1revo0iolyP1++LjUQUZ+bIl1zmov1PWJckBVRGQviTmnza6TZdmOlRAR2Ve5G3kHDx6Em5sbWrVqBQBYs2YN1q1bh3r16uHFF1+Et7e31Ysk22Nu4ihvlrIs41jiUtzKOm5clpF/EzczD6N18OOo49PZ2iXajSRJ6BwxBScTv/v7w17Rhzpf17poHfw4XDReji2wDDX5vDTIBqRpr0FvyIeva104qz0cXVKl1OQsHUWWZciywar7ZI7iYJbisHWWKXlXkJZ3FRqVG0K9WsNF7WnT41lLuRt5CxcuxKRJkwAAV65cwYIFC/DII4/g+PHj+PTTT/Gf//zH6kWS7Wm1Wjg5OTm6DLKC8mZ5K/u4SQPvHzJO3V6JEM+WcFa7W69AO3NRe6Jj+L+QW5iC7ILbcNX4wNslzNFlWaSmnpcJWSdwJvkn5OnSAAAqyQmRPt1wf60RUEnV8y6DmpqlI0mShFoeTXE756y5Dcq9T+YoDmYpDltlma/LwuFbXyBNe8247EzyajQNHIoGfj2tfjxrK/dfy4SEBNSrVw8AsHPnTnTt2hXPPvssZsyYgYMHD1q9QLKPgoICR5dAVlLeLG9mmD9vDXIhbmUdq2xJVYK7UwCCPJpWmwYeUDPPyzu5l3A04StjAw8oeh1eTd+Bs8k/ObCyyqmJWVYF9/n3gySpFddpJJdy7485ioNZisNWWR5L/NqkgQcUDeh2NnkNksx9eVSFlLuRp9FooNUW3cR89OhRdOzYEUDRpdKcnBzrVkd2I1XgG02qmsqbZb6+9HmjylpPtlMTz8vLqZshQ7mL3Y2MfdV2nrPqkGVuYSru5F5CTuEdR5diNf5uDdApbBK8nEONy5zVnmgaOBQaVfmHXa8OOZJlmKU4bJFlRn4c7uReMLv+Stp2qx/T2srdXbNly5b49NNP0aJFC5w7dw5vvfUWACA2NhZBQUFWL5Dsw9/f39ElkJWUN0tvlzBk5N8sZX14ZUuiCqqJ5+WdvItm1xlkHVK1VxHq2cqOFVlHVc5Sq8vEyaTvkZRzBsX3rQa6N0br4DFwdwpwbHFWEOTRFEEeryErPwF6uQBeLuFQS5oKfTCsyjlS+TBLcdgiywyt+c9FAJCZH2f1Y1pbua/kvfjii1Cr1di5cydeeukl1KpVC0DRgCydOnWyeoFkHykpKY4ugaykvFnW830QgPKHHXenAAR7tLBCVVQRNfG8VEml31ehLmN9VVVVszTIehyIm4+kuwYmAoA7uRewP25eheaSq6q8XELh61oXaqniA4tX1Ryp/JilOGyRpXMZg6s4qar+WAXlfqcLCQnBBx98UGL59OnTrVIQEdlX8UiTp26vhOGuycHdnQLQKWxStR3ogqqnMK82iM3Yr7jOWe2FgDLmPqTyScj6E1kFCYrrcgtTEJd5GJG+3e1cFRGRYwV53A8XtTfy9ZmK62t7V/2Rx8vdyLtw4QI0Gg0aNGgAANizZw82bNiAyMhIPPXUUxypqJpydS3/vQlUNVUkyzo+nRHi2RK3so79PU9eOII9WrCB52A18by8z78/krLPKPxhlXB/4NBKXYVxpKqaZWndYwEgOfcvNvLuYi5HWZaRlHMGadpr0KhcEe7VToiuriKrqucklZ8tslRJarQKfgxHE5bAIOtM1vm51kN9vx5WP6a1lfuv5fvvv4/HH38cDRo0QHx8PN544w08+OCD2LFjB7RaLZ5//nkblEm2xsa5OCqapbPanR/mqpiaeF66OwWgW50ZuJjyO+KzjsMgF8LPtT4a+fdFiGf17TpcVbM0N/JkMVU1bVTbilKOeYVpOBi/0OSK6Pk763Cff380CRxkz/KoHKrqOUnlZ6ssQzxboHudl3EtbSdStdfgpHJDuFd71PWJglpV9V8/5X73vnnzJho1agQA2LFjB1q3bo3//ve/OHXqFN5880028qqprKwsBATwW0cRMEtx1NQsPZwC0SbkCbQOHgsZshBXlKtqlqGerXE9fbfZ9WGebexYTdWnlOPRhK8UurzKuJi6Ed4uYQjzamu/AsliVfWcpPKzZZY+LhFoHfK4TfZta+X+yynLMmS56ObsI0eOoEuXLgCAoKAgpKenW7U4IiKquSRJEqKBV5XVcm9sdnClALdGCPZsaeeKqpe0vGtIybtk/Fx0r6vpu+xcERFRkXJfyWvSpAmWLVuGDh064MSJE3j55ZcBFE2SzuFoqy8vLy9Hl0BWwizFwSzFUZWzbB82EZdTN+N6+h7k6zPhrPZEHZ+uuM9/QKUb2bmFKbiVdRx6uRABbg0RWM0HzinOMV+fjb/u/Ia/Un5DUs5ZaCRXeLuEwss5zGRqhqx85UFtyPGq8jlJ5cMslZW7kTd9+nT83//9H/bs2YNx48YhIiICQFHXzRYtqu/9EjVdQUEBnJ2dHV0GWQGzFAezFEdVzlItadA44CHc5z8AerkAasnZKpMLn0v+FZfTtuLuqRn8XOuhU/gkOKs9Kr1/RygoKICkMWDfzU+QXZAIyEXPk07WIlV7DYWGPAS4NTRu76Lhh8+qqiqfk1Q+zFJZuRt5DRs2xIoVK0osnzJlCtTq0m/gpqorPz8fnp6lzwlC1QOzFAezFEd1yFKSJGgkF6vsKzbjAC6nbSmxPE17DX8mrkCn8Getchx7y8/PR1LhoaIGHgBXjQ80kgt0ctF8gtkFSfB2DoeT2g0AUKcaDLNeU1WHc5IswyyVWW3YLBcX6/xhIMewxre2VDUwS3EwS3HUtCyvpu80uy4p5wxyCpLh4VzLfgVZiSRJSMg+ZfJzoPt9uJ1zDgboIUNGri4FPuoIBLo3QT2/aJvUka/PxuXUzYjLPAqdrIWfayQa+PVCsEczi/ehMxRAglQtRgm0hZp2ToqMWSordyNPr9dj1apV2L59O5KSklBYWGiyftOmTVYrjuyH91OKg1mKg1mKo6ZlmZV/q5S1MrIKEqplI8/f3x+GbNM5s1w1Pgj1aoOs/ETk6zPh41IbbULGItyrPVRlTFFREQX6HOyN/Qg5hbeNy+7kXsCd3ItoGfQoIn27lfr4O7kXcSHld6TkXQIgIdC9MZoEPAR/twZWr7Uqq2nnpMiYpbJy31G9dOlSrFy5Er169UJ2djYeffRRPPjgg1CpVHjqqadsUSPZQWpqqqNLICthluJgluKoaVk6q0u/F81ZXT27VqWmpqKWe5MSy51UrvB3i0SoZ0t0q/0iant3skkDDwCupG0zaeD9Q8a5O79AZ8g3+9jbOedwIO7Tvxt4RY+5k/sX9sfNx53cS2YfJ6Kadk6KjFkqK3cjb/PmzZg1axZiYmKgVqvRu3dvvPrqqxg/fjzOnj1rixrJDswN/0zVD7MUB7MUR03Lso6P+XvRPJ2D4e9W347VWI8sy4j07W62kRro3sTmv1t81jGz63QGLW7nmP8sdu7Or5BhKLHcIOtw/s46q9RXXdS0c1JkzFJZuRt5qampqF+/6A3M3d0dOTk5AICuXbti//791q2O7Ib3VIqDWYqDWYqjpmXZ0L8vfF3rlliuUbmidXD1nFgYKMrRTeOLrhHT4Odaz7hcJWkQ4d0RHcKetnkNekNBqevNXcnLKbyDzPx4s49L016FVpdZqdqqk5p2ToqMWSor9z15tWrVQkpKCkJCQhAeHo7Dhw+jcePGOH/+PJycaubNuyLg0LPiYJbiYJbiqGlZOqlcERXxAm5mHkR81jHo5QIEuDVCPd8H4O4U4OjyKqw4R2+XcHSv8xKyC5KQr8uCp3Ow3aZL8Herj4TsE2bWSmavJBpkneLyu8myvuKFVTM17ZwUGbNUVu5G3oMPPoijR4+iWbNmGDlyJN566y2sX78eSUlJeOSRR2xRI9lBVlYWAgKq7x9e+gezFAezFEdNzFKtckKkb3dE+nZ3dClWc2+Ons7B8HQOtmsNDfx6ITH7lGK3y1DPVmbr8XQKhpvGD3m6NOX1zsFwc/Kzaq1VWU08J0XFLJWVu5E3adIk47979+6NkJAQnDlzBhEREejWrfQRnYiIiIio4vzd6qN96FM4dXsV8vXF3SslhHm1KbUrrCRJaOTfH6du/6C4vpF/fxtUS0SOUul58po3b47mzZtboxZyIC8v+3QzIdtjluJgluJglmKoKjmGerVGsGcL3Mm9CJ0hD76udS3qBhvp2w0y9LiYssnYQHTV+KJxwEDU9u5o67KrlKqSJVUes1RmUSNvz549Fu+we3dxumXUJIWFhezTLAhmKQ5mKQ5mKYaqlKNKUiPIo2m5H1fP90HU9emGdO0NSJDg41oXKqnc4/BVe1UpS6ocZqnMokbeq6++atHOJEkqV4OQqg6tVgsPDw9Hl0FWwCzFwSzFwSzFIEqOKkldbaexsBZRsiRmaY5Fjby9e/faug4iIiIiIiKygpp3fZ4UcVQicTBLcTBLcTBLMTBHcTBLcTBLZRY38o4dO4YxY8YYJz+/W3Z2NsaMGYMTJ05Yszayo9TUVEeXQFbCLMXBLMXBLMXAHMXBLMXBLJVZ3MhbtWoVBg8erNjn1dPTE8OGDcOqVausWhzZjyzLji6BrIRZioNZioNZioE5ioNZioNZKrO4kXf58mV07tzZ7PqOHTviwoULVimK7I+jEomDWYqDWYqDWYqBOYqDWYqDWSqzuJGXlpYGjcb8OC1qtRppaWlWKYrsz9XV1dElkJUwS3EwS3EwSzEwR3EwS3EwS2UWN/ICAwNx9epVs+svX76MwMBAqxRF9peZmenoEshKmKU4mKU4mKUYmKM4mKU4mKUyixt5Xbp0wZdffon8/PwS6/Lz8/HVV1+ha9euVi2OiIiIiIiIyseiefIA4Mknn8SuXbsQExODkSNHok6dOgCAGzdu4Oeff4Zer8e4ceNsVijZlqenp6NLICthluJgluJglmJgjuJgluJglsosbuT5+/vjiy++wAcffIBFixYZR7KRJAkdO3bEjBkz4O/vb7NCybZ0Oh1cXFwcXQZZAbMUB7MUB7MUA3MUB7MUB7NUZnEjDwBCQkLw0UcfITMzE/Hx8ZBlGREREfD29rZVfWQnWq1WcXoMqn6YpTiYpTiYpRiYoziYpTiYpbJyNfKKeXt7s2FHRERERERUBVk88AqJjV1txcEsxcEsxcEsxcAcxcEsxcEslbGRRwCA9PR0R5dAVsIsxcEsxcEsxcAcxcEsxcEslVWouyaJx2AwOLoEshJmKQ5mKQ5mWfVMmzYNmZmZ5br9hDmKg1mKg1kqs7iRt2/fPnTp0gUqFS/+icjJycnRJZCVMEtxMEtxMMuqZ/r06eV+DHMUB7MUB7NUZnEj79VXX4Wfnx8eeughDBw4EBEREbasi+zM3d3d0SWQlTBLcTBLcTBLMTBHcTBLcTBLZRZfllu9ejWGDh2KrVu3IiYmBlOmTMGmTZuQn59vy/rITjIyMhxdAlkJsxQHsxQHsxQDcxQHsxQHs1RmcSMvODgYEyZMwOrVqzF37lyEhITgww8/xJAhQ/D+++/j/PnztqyTiIiIiIiILCBptVq5og/OycnBtm3b8Pvvv+Ps2bOoX78+vvnmG2vWV+NlZmYiKCgIt2/ftunchFqtFq6urjbbP9kPsxQHsxQHsxQDcxQHsxRHTcvS0rZBpUbX9PDwQLt27ZCYmIgbN27g2rVrldkdORBHJhIHsxQHsxQHsxQDcxQHsxQHs1RWoUZefn4+tm/fjg0bNuDkyZMIDQ3Fo48+ioceesja9ZGd5OXl8cZVQTBLcTBLcTBLMTBHcTBLcTBLZeVq5J05cwYbNmzA9u3bUVhYiAcffBBz585Fu3btbFUfERERERERlYPFjbwxY8YgNjYWjRo1wrPPPos+ffrA09PTlrWRHfn5+Tm6BLISZikOZikOZikG5igOZikOZqnM4kZe+/bt8d///heNGjWyZT3kIJmZmfD19XV0GWQFzFIczFIczFIMzFEczFIczFKZxY28F154wZZ1kIPp9XpHl0BWwizFwSzFwSzFwBzFwSzFwSyVWdzIe+6558rcRpIkzJ8/v1IFkWNoNJUaaJWqEGYpDmYpDmYpBuYoDmYpDmapzOJnpWHDhmbX5ebmYsuWLSgsLLRKUWR/vL9SHMxSHMxSHMxSDMxRHMxSHMxSmcWNvOnTp5dYptPp8PPPP+Obb75BrVq18PTTT1u1OLKf9PR0BAQEOLoMsgJmKQ5mKQ5mKQbmKA5mKQ5mqazC1zf/+OMPLFmyBPn5+XjqqacwZMgQXi4lIiIiIiJysHK3yg4ePIjPP/8cCQkJiImJwaOPPgo3Nzdb1EZ2xEkkxcEsxcEsxcEsxcAcxcEsxcEslVncyDt37hw+++wznD17FsOGDcO4ceM4XCkREREREVEVY3Ej75lnnoGLiwuGDRuGsLAwbNmyRXG70aNHW604sp/c3FxekRUEsxQHsxQHsxQDcxQHsxQHs1RmcSMvODgYkiRhz549ZreRJImNPCIiIiIiIgeStFqt7OgiyLzMzEwEBQXh9u3b8Pb2ttlx9Ho91Gq1zfZP9sMsxcEsxcEsxcAcxcEsxVHTsrS0baCyY01UhWVnZzu6BLISZikOZikOZikG5igOZikOZqnM4kbemTNnsG/fPpNlGzduxKhRozBw4ED873//Q0FBgdULJPvQ6XSOLoGshFmKg1mKg1mKgTmKg1mKg1kqs7iR9/XXX+PatWvGn69cuYJ3330X7du3x9ixY7Fv3z6sWLHCJkWS7dWky9yiY5biYJbiYJZiYI7iYJbiYJbKLG7kXbp0Ce3atTP+vHXrVjRr1gyzZs3Co48+iueffx7bt2+3SZFke7a834/si1mKg1mKg1mKgTmKg1mKg1kqs7iRl5WVBX9/f+PPJ06cQOfOnY0/N23aFElJSdatjuwmLS3N0SWQlTBLcTBLcTBLMTBHcTBLcTBLZRY38vz8/JCQkAAAKCwsxIULF9CsWTPj+tzcXGg0Fs/IQERERERERDZgcSOvS5cu+Pzzz3HixAksWrQIrq6uaNWqlXH95cuXER4ebpMiyfbc3d0dXQJZCbMUB7MUB7MUA3MUB7MUB7NUZnEj7+mnn4ZarcbUqVOxbt06zJw5E05OTsb1GzZsQMeOHW1SJNmeJEmOLoGshFmKg1mKg1mKgTmKg1mKg1kqs7h/pa+vLz777DNkZ2fDzc2txEg2s2fPhpubm9ULJPvIycmBq6uro8sgK2CW4mCW4mCWYmCO4mCW4mCWysp9E52np6fico5sQ0RERERE5HgWd9cksfn6+jq6BLISZikOZikOZikG5igOZikOZqmMjTwCUHSpm8TALMXBLMXBLMXAHMXBLMXBLJWxkUcAiqbFIDEwS3EwS3EwSzEwR3EwS3EwS2Vs5BEAlBhIh6ovZikOZikOZikG5igOZikOZqnMao28zMxMbNy40Vq7IzvjwDniYJbiYJbiYJZiYI7iYJbiYJbKrNbIS0pKwjvvvGOt3ZGdpaWlOboEshJmKQ5mKQ5mKQbmKA5mKQ5mqcziKRTKuqkxNze30sUQERERERFR5VjcyOvXr1+pM8rLsswZ56sxTmQvDmYpDmYpDjc3N8ybNw+ZmZnw9vbG9OnTHV0SVQDPSXEwS3EwS2UWN/Lc3d0xbtw43H///Yrrb968iQ8++MBqhZF9qVQcg0cUzFIczFIcKpUK8+fPR3x8PMLDw9nIq6Z4ToqDWYqDWSqzuJF33333AQDatGmjuN7T0xOyLFunKrK7nJwcuLq6OroMsgJmKQ5mKQ7O4yQGnpPiYJbiYJbKLG769u3bF87OzmbXBwQEYMKECVYpioiIiIiIiCrG4it5Q4YMKXW9v78/G3nVmI+Pj6NLICthluJgluJglmJgjuJgluJglsqs1ok1KysLP/30k7V2R3bG0VHFwSzFwSzFwSzFwBzFwSzFwSyVWXwlz5yjR49i/fr12LVrF1xdXTFq1Chr1EV2VlhY6OgSyEqYpTiYpTiYpRiYoziYpTiYpbIKNfKSkpKwYcMG/P7770hKSkKvXr3w7rvvon379tauj+yEIxOJg1mKg1mKg1mKgTmKg1mKg1kqs/hZ0el02L59O1544QXExMTg0qVLmDJlCiRJwrhx49C5c2doNJW+MEgO4uvr6+gSyEqYpTiYpTiYpRiYoziYpTiYpTKLG3lDhw7FTz/9hB49euDXX3/Fu+++i+joaFvWRnaUmprq6BLISpilOJilOJilGJijOJilOJilMosbeXq9HpIkQZIkqNVqW9ZEREREREREFWRx/8q1a9di586dWL9+PebOnYvOnTujX79+kCTJlvWRnXASSXEwS3EwS3EwSzEwR3EwS3EwS2UWX8lzcXFBv3798Omnn2L58uWIjIzE3Llzodfr8c033+Dw4cPQ6/W2rJVsiPdTioNZioNZioNZioE5ioNZioNZKqvQcDQRERF45plnsGbNGnzwwQcoLCzEK6+8gkGDBlm7PrKT7OxsR5dAVsIsxcEsxcEsxcAcxcEsxcEslVWq6atSqdClSxd06dIF6enp2LRpk7XqIiIiIiIiogqw+EpeZmYmVq9ejZycnBLrsrOzsWXLFgwePNiqxZH9eHt7O7oEshJmKQ5mKQ5mKQbmKA5mKQ5mqcziRt6aNWtw8uRJeHh4lFjn6emJU6dO4aeffrJqcWQ/Wq3W0SWQlTBLcTBLcTBLMTBHcTBLcTBLZRY38nbu3Ilhw4aZXT906FDs2LHDGjWRAxQUFDi6BLISZikOZikOZikG5igOZikOZqnM4kZefHw8IiIizK6PiIhAfHy8VYoi++NUGOJgluJgluJglmIoV44GA5CTU/R/qnJ4ToqDWSqzeOAVtVqNO3fuICQkRHH9nTt3oFJVaLBOqgL8/f0dXQJZCbMUB7MEkJ8P6cQJIDMTCA2F3KwZUA3/oDNLMViUY2EhVBs2QNq/H1JODmQPD8hdusAwaBDg5GT7IskiPCfFwSyVWdzIa9SoEfbs2YPmzZsrrt+9ezcaNWpktcLIvlJSUhAQEODoMsgKmKU4anqW0okTUH37LaS8POMyuVYt6J99FjDzhWNVlZKS4ugSyArKPCdlGerPP4d04YJxkZSTA2nrVkixsdBPm1Ytv6QQUU1/fxUJs1Rm8aW3kSNH4ocffsBPP/1kMum5Xq/H6tWrsWrVKowcOdImRRIRUQ2TkAD10qUmDTwAkJKTof7sM3aBoypJOnPGpIFnsu7iRUhnzti5IiKqqSy+khcdHY2LFy9i7ty5WLx4McLCwgAAt27dQl5eHh577DFER0fbrFCyLRcXF0eXQFbCLMWhmGVODqBSAW5u9i/IjlS7dwN3faF4NyklBdLJk5DbtLFzVRXH81IMZeUonTxZ5nq5RQtrlkQVxHNSHMxSWbkmQ//Xv/6F7t27Y/PmzYiLi4Msy2jdujX69u2L+++/31Y1kh04Ozs7ugSyEmYpjruzlE6dKrrP5+JFIC8P8n33QT9+PFCvngMrtB3p5s3S18fFVatGHs9LMZSZo5kvJox0OusVQ5XCc1IczFJZuRp5AHD//fcrNuju3LmDb775BjNmzLBKYWRfWVlZ7M8sCGYpjuIspaNHoV68uKgbWHo6AEA6fx6qrVtR+P77QLduji3UBmRPT5R255Ls7m63WqwhKyvL0SWQFZT1/io3aQIcPmx+fdOmtiiLKoB/K8XBLJWVazjMq1ev4qeffsK6deuMf7DS09Mxb948jB49GsePH7dJkURENZYsQ7V2LaSzZ40NPKOcHGj+7/+AtDSHlGZLcqdO5leq1ZA7dLBfMUQWktu1g2xmUCA5OBhyu3Z2roiIaiqLG3l79uzB+PHjMXfuXLz//vt46qmncOzYMYwZMwY3btzAu+++i++++86WtZINeXl5OboEshJmKQ4vLy8gNhbSlStAdrbiNlJyMlQ7dti5MtuTW7eGoW3bkiskCfoRIwBvb/sXVQk8L8VQZo4aDfTTpsHQosU/o2hKEgwtWkA/fTqgKXcHKrIRnpPiYJbKLH63+eabbzBixAg8/fTT+O233/Dpp59i7ty5+PDDD9GU3Q+qvYKCAvZpFgSzFEdBQQGcDYaiOeLMkWVIly7Zryh7kSQYJkyA3KIFVAcOAJmZkENDIffoAblhQ0dXV24FBQWOLoGswKL3Vx8fGJ59Fob0dCAlBQgIAHx97VEelQP/VoqDWSqzuJEXGxuL//73v3B3d8eoUaOwcOFCTJs2jQ08QeTn58PT09P8BrJcdDUhLw9y7dr8g1WFlZklVRv5+fnwrF0bKC1PL6/S11dnkgS5Y0foO3Z0dCWVlp+f7+gSyArK9f7q68u/lVUY/1aKg1kqs7iRl5ubCw8PDwCAWq2Gs7OzcRoFEpt07hxUK1dCKp7MV6WCoX17GGJiAH5zQmRbGg30Dz8MzVtvAbJsuk6SINepo9ytURR6PaQDB6A6dAjIzoYcEQFDjx5AgwaOrkwssgxp3z6o9u0D7twBAgNhiIqCHBXFybuJiKqhcnUOP3TokLGhJ8syjh07hqtXr5ps0717d+tVR3ZjdlSimzeh/uIL02GfDQaoDh8GCgpgePpp+xRIFuMIU+IozlIeMQKGM2eg+u03oLjbn5sb5Lp1iwZ6EHUQEr0e6kWLIJ07Z1wk3b4N1Z9/Qv/YY5C7dnVgceVT1c9L1fLlRe/rxWJjoY6NheHKFRjGjXNcYVVMVc+RLMcsxcEslZWrkTd79myTn99//32TnyVJwp49eypfFdldamoq/P39SyxXbdtmdl4f1cmTMCQlAcHBti6PysFcllT93J2l/o03YOjZE6r16yHduVM0Ul/XrjD06SPsYA7SwYMmDTwjWYZ69WroWrcGqslUCqmpqY4uwSzp8mXTBt5dVIcPw9CtG6+c/o3vr+JgluJglsos/mSwd+9eW9ZBDibf2w3sb9Lly6U9CNKlS5DZyKtSzGVJVUxhIaRDh6A6eRLQ6yE3bgxDVJTJ/XX3Zil36wa9gHPimWOu4QEAKCiAdOJEtbmaV6nz0mAomkIjORkICIDcvDmgVlutNunIkVLXq44cgYGNPAB8fxUJsxQHs1Qm5te/VG5mRyUq6woB78mrcjjCVDWQmwv1vHmQ4uKMi6QLFyDt2lU0zPrfX5zU+Cxzciq3vgqpcJY3bkD95ZeQ7poLUfb1hX7CBOtdXdNqK7fe2q5eLTo3PDwgt2wJODnZ9/ilqPHnpECYpTiYpbJyN/IyMjLg4+MDAEhKSsK6deuQn5+Pbt26oXXr1tauj+zE1dVVcbncrh2kTZuUH+TsXPSNMlUp5rKkqkO1fr1JA6+YlJEB9fffQ//CCwCYpVy7NqSEBPMb1K5dNBhNYmLRzyEhVXaQkAplqdVC/dlnkO6ZI1FKT4f688+hf/PNotFVK0muVw84erT09ZUgHT8O1d69wO3bgL9/0YAuHTuWzCojo6hBe+3aP8d2d4chJgZyFRlcqKafkyJhluJglsosngz9ypUrGDlyJAYNGoSYmBhcvHgREydOxKpVq7Bu3TpMmzYNu3fvtmWtZEOZZubhMvTsCblWLcV1+kGDqs39MDWJuSypijAYIB06ZHa1dPly0YdhMEtDjx6ASvnPlBwWVtQoePNNaGbPhmb2bKj/7/8gHTtm3yItVJEspYMHSzTwjOvy8iAdOFDZsgAAcufOkM1MLi97e0Pu1KnC+1b9/DPUX31VdKU6LQ3SlStQL18O1bfflthW/cUXJg08AJByc6H++msgNrbCNVhTTT8nRcIsxcEslVncyPvss8/QoEEDLFy4EG3atMErr7yCrl27YvPmzdi0aROGDh2KFStW2LJWcgQPD+hnzIDhwQchu7sXDdkeGQn9xImQe/VydHVE1U9BAaQyur9J/INVpG5d6J94AvI939LK4eEwdO4M9fLl/0ztAkBKTob6668h/fmnvSu1CenmzdLX37hhnQO5ukL/3HOQQ0JMFsshIdA/9xxQ0W/J4+KKBu9SoDp4ENLFi8afpcuXzf8+BgNUO3ZUrAYiohrK4u6a58+fx/z589GwYUM0bNgQ69atw/Dhw6H6+1vWUaNG4ZlnnrFZoWRbpU4i6eUFw8MPAw8/bL+CqMI4IWgV5+IC2c/P5B4rEyoV5KAgAMwSAOQOHaBv0QLSyZNAdjYQEQH5vvug/r//M/MAGar166Fv08a+hZahQlmW1VPi7ymNrCIsDPrXXy+6kpySAjkwsNL3/KnKGNBFOnwY8n33Ff1wzxW8Ettaq0FbSTwnxcEsxcEslVl8JS8zM9M4D4W7uztcXV3hdde9AF5eXsjNzbV+hWQXOjPTJFD1wyyrOEmCXMp8ooY2bYC/u84xy7+5ukLu1Alyr16QGzcGkpMhJSeb3VxKTATuusJXFVQkS0PHjqWvr0Q3SnPkhg2LumdaY1CXsgbGycv7599lNViryK0BPCfFwSzFwSyVWdzIUyJV0Rvcqfy09h49jWyGWVZ9hj59YFCYwFyuXx+GRx81/swszbDkb4+Ze/kcpUJZ1q4NQ+/eiquqw9x1ct26pW9Qp84/27ZuXeoomkrniyPwnBQHsxQHs1RWrtE158yZA6e/34QLCgrwwQcfGEe0KSwstH51REQiUqlgePJJGHr1KponT6eD3LRp0VUqKlutWpBDQ82OvCnXrg34+dm5KNswDB8OuV69otEp/54nzxAVBbldO0eXVia5Y0fIv/+ueI+p7O4Ow91zHLq7Qz9qFNQrVxaNmHr3tg0bQo6KsnW5RERCsbiRN2DAAJOf+/XrV2Kb/v37V74icojirrhU/THLaqR2bRhq1za7mlmaZxg6FOovvijRIIBKBcOQIY4pqhSVyVJu3Rr66jhFkYsL9FOnFk2LcFf3WtnPr2iev3umf5C7dYM+OBjS9u1Fg854eMDQsSPkBx4oe85WO6n0OanVFnVT9fGpclebaxq+v4qDWSqTtFotp4mvwjIzMxEUFITbt2/D28wQ19aQlpYGP0G++a7pmKU4mGXppLNni+Yc/Ht4fbluXRgGDYJ8//0OrqyktLQ0tG/fHvHx8QgPD8eVK1ccXZL9yDKkv/4yzpMnN2tWbRs4FT4n79yB6pdfoDp1CjAYIPv6wtCjB+Tevavs3I6i4/urOGpalpa2DarGV2PkcAaDwSHHlQ4f/qcbUvEkuV268I9eJTgqS7I+Zlk6uVkz6Js1A4q7A9rwi7DKqtFZShLkpk2Bpk0dXUmlVSjHzEyoP/kEUnq6cZGUng71r7/CkJZWNHo12V2NPicFwyyVsZFHAABnZ2e7H1P1449Q7dr1z4LMTKivX4fh8mUYnnjC7vWIwhFZkm0wSwtV4cZdMWYphorkqNqxw6SBZ7Juz56iwXX8/StZGZUXz0lxMEtl1bO/BFmdm5ubfQ9444ZpA+8uqkOHIF26ZN96BGL3LMlmmKU4mKUYKpKjdOqU+ZUGA6QzZypREVUUz0lxMEtlbOQRACAjI8OuxytzktxDh+xUiXjsnSXZDrMUB7MUQ4VyvHdwoHuxq5lD8JwUB7NUxkYeOcbdk+BWZD0REZUtPx/SiROQjhwB0tIcXU2NVOpAQJJUJQcKIqLqj/fkEQDA09PTrseTIyOBgwdLX08VYu8syXaYpTgckaW0axdU69ZBKp4oWKWCoWNHGGJibD8lQXIypPPnAZUKcvPmgK+vbY9nJxXJ0RAdDenwYUg5OSXXdegABAVZozQqJ76/ioNZKmMjjwAAer3erscrdZJcDw/Id0+SS+Vi7yzJdpilOOydpfTnn1D/+KPpQoMBqoMHAY2mqKFnC3o9VN99B9Xhw/90U1SpYOjRA4YRI6r9yMkVyjEgAPrp06Fevdp4v7ns6gq5W7cqOadjTcH3V3EwS2Vs5BEAIC8vD+7u7vY7oIsL9M89VzRJ7u3bxsVyQAD0Tz0FeHjYrxbB2D1LshlmKY48O3dBV23dan7dwYMwDB4M2ODbb9W6dVDde0+1wQDV9u2QfX0h9+pl9WPaU4XPyfBw6J9/vqjLbE4OUKsW4OJi9frIcnx/FQezVMZGHjlOWBj0b7wB6eLFonnyAgIgN2lS7b/pJSJyKIMB0vXr5tfrdJBiY61/L1h+PqS9e82uVu3YAX10dLWdCN0q/PyK/iMisjE28ggA4O+oOXokCXLjxkDjxo45voAcliVZHbMUh12zVKkgu7hAys83u4ns6mr9496+/c/9fwqktDQgKwvw8bH+se2E56Q4mKU4mKWyGvx1Gt2Nw8+Kg1mKg1mKw95Zym3bml8XEADUq2f9g5bVXUqlsn4XxdxcSFu2QP3hh1D/739QrV0LmJl43Bp4ToqDWYqDWSrjlTwCUA1uWi0sBFJTi+4h4f16paryWYooMbHoPpuQEKu+PpmlOOydpWHgQEh//VV09exuajUMDz9sm27xAQGQGzSAdOWKck2tWgHWvIKYmQn1xx9DSk42LpJiYyHt3w/9tGlAeLj1jvU3npPiYJbiYJbK2MgjAICTk5OjS1Cm0xUNAb5/P6S8vKJR2lq0gGHkSCAgwNHVVUlVNksR3bgB9cqVkGJji37WaGDo1AmG0aMBK+TALMVh9yz9/KB/6SWotm6F9OefQGEh5EaNYOjd2zZX8f6mHz0a6vnzIeXmmiyXfX1hGDbMqsdSrVtn0sArJmVnQ71yJfQzZlj1eADPSZEwS3EwS2Vs5BEAVNlRiVRffw3ViRP/LDAYoDp5ElJsLPSvvsqregqqapY2cesWpIwMyEFB9m/037kD9YIFph9mdTqo9u0DcnJgePrpSh+iRmUpOIdk6esLw6hRwKhR9jtm7drQz5wJ1fbtkM6dK7o/sGVLGKKjrXsvnk4H1dGjZldLV68Ct29bfQ46npPiYJbiYJbK2MgjAEX9mQOq2pWx2FjTBt5dpLQ0SPv2Qe7b1741VQNVMktrS0iA+ttv/xlBUJJgaNYMhscfB7y87FKCaufOElcrjOtOnIAhPr7S3cVqRJY1RI26ZyQwsKhLqC3l5xd14y9NdrbVG3k8J8XBLMXBLJVx4BWqslRnz5a+/vRpO1VCVUpOTlF3sLuHiJdlqM6cgXrBgn8mYLYx6a+/KrWeiCrB3R1yaVMRODkBwcH2q4eIqIphI48AAB7s9igM0bOU9u+HlJmpvC4uDtKZM/YpRK2u3HoLiJ5lTcIsrUySYHjwQbOrDR062KQ7P3MUB7MUB7NUxkYeAQAMBoOjSyjB0Lx56etbtLBTJRUky0BuLqDT2fWwVTFLa1JdvFjqeunCBbvUIbdpY36lSgW5VatKH0P0LGsSZml9cu/eMHTrVmKkUEPz5kWDH9kAcxQHsxQHs1TGe/IIAJCXl1f1blytXRuG1q0V78uT/f0hR0XZvyZLyDKkHTug2rEDUmoq4OQEQ9u2MAwZAvj62vzwVTJLK5I1GpQ6+Luzs13qMHTvDunAAUh37pRcFx0NlNaVzEKiZymctLSikSx1OsgNGwL16xtX5eXlObAwO0pKAgoKgNBQQGPjjxiSBENMDAy9ekE6eRKSwQBDkyZA3bo2OyTPSXEwS3EwS2Vs5FGVZhg/Hvjtt6Iuerm5RVMotGxZNIVCFb08r1qzBqodO/5ZUFgI1aFDkC5fhn7mzCpbd3Uht2kDnDpldr2hdWv7FOLhAf2LL0L1229Fo/wVFkIODIQhOhpyjx72qYGqDNW6dVBt2QLc9Y2y3Lgx9E8/Dbi5ObAy+5DOn4fq558h3boFAJC9vWHo1Qty7962P3hQEOQ+fWCfu3GJiKoHSavV8n2xCsvMzERQUBBu374Nb29vmx3HYDBAparCvXcLC4G0tKIGUlVuJKWkQPPmm2YH/zAMHgxD//42LaHKZ1lZOh3U8+YVDZF+D0PHjjCMG2f/mvT6oteoi4tVJ5kWPktBSAcOQP3tt4rrDK1bw/D00zAYDGjUqBHi4+MRHh6OK2YmDK+WrlyBZt68ovPgHoYhQ2Do188BRdkGz0lxMEtx1LQsLW0b1JxnhEqVaWYgiyrDyaloKOyq3MADIJ0+XerojpKZKSGsqcpnWVkaDfRTp8LQty/kv6dLkAMCoB8xAoYnnnBMTWo14Opq1QYeUAOyFITJlft71508CaSkCJ2letMmxQYeAEhbthRNdyAIkXOsaZilOJilMnbXJACA3swfaCqnsp5HOwzCUiOydHGBYehQYOjQoufcCiNZVkU1IsvqTpaNXRTNrk9IgD401H412ZMsQzp/3uxqKS8P0rVrkJs0sWNRtsNzUhzMUhzMUhmv5BEAQGPrG+RriLI+yMhNm9q8hhqXpaANPKAGZlkdSRLkMnoYyB4eYmdZ1hXs/2/vvsOjKvP+j7/PTEJJSCihBCmhI1JEkKIsriBFUFHUn90HxfK4bpQiqGvZFUFF5VFcEKQKdgVdG7grWQvFBRURKdIl9JZCEpKQZOb8/phlYMwkkDCZk9z5vK6L6zLnPjPznXxyYr5zzrlvgy6jMjrHSkZZmkNZBmfOb145KzVq1HC6BDM0alTkxB92VBTeMEzIoSzNoSwrBrtHj6LH4uOheXNzs7QsvJ06FTlsx8RgnzLLaEVnbI6VkLI0h7IMTk2eAJCenu50Ccbw3nGHb+2myEj/NrtZMzwPPghxcWX++srSHMqyYvBefjl2o0aFttvVquG55RbA7Cy9gwZhV60afGzw4LJfSiFcbJvsf/0L9/PP4x47Fvdzz2EtW1bsfdhSfpl8TFY2yjI4Q37zipQjkZG+tZuGDPGtGRUdDQ0aOF2ViJSVqCg8Dz2EtWIFrp9+grw87DZtfGfu69Z1urqy17gxnhEjcH/6KdbmzWDb2PHxeAcOxO7e3enqQsb11ltU/+YbrGrVfBuys3G/9x7ebdt8y/2IiJQjavIEQItIloXo6IDFkMNFWZpDWVYgVati9+2Lp2/foMPGZ5mQgOeBByA72zfBVBku+eOI7dtxrVwZ9N4f148/Yvfqhd2mjQOFSWkZf0xWIsoyODV5IiIiEhqG/rHl+vHHYsetH38Mf5P3229Ye/ZAjRrYHToE3CIgIqImTwDIzs6mevXqTpchIaAszaEszZGdne10CXI2cnMBKCgoCD6TX05O+Go5ehT37NlYO3b4N9nR0XhvvRX7/PPDV0cFp9+v5lCWwanJK8aECRPIyspi4sSJhcaOHz/Om2++SVJSEgcOHCAqKoouXbowfPhwWpxyid6cOXOYO3duocdPnjyZbt26lWn9IiIicvbsFi3g+++LHw8T92uvYe3aFbDNOnYM95w5FDz8MDRuHLZaRKT8UpNXCnl5eYwYMYKDBw+SmJhI+/btSU1N5c033+Tee+9l8uTJdOjQwb9/8+bNeeWVVwKeI7ac3a9Qq1Ytp0uQEFGW5lCW5lCWFZvdvTv2P/9JlbS0wmOxscUuoxFK1tathRo8P48H19df47399rDUUtHpmDSHsgxOSyiUwgcffMD69et54YUXuOyyy4iPj+e8887jmWeeISEhgeeeew77lCmV3W43cXFxAf8iy9m181lZWU6XICGiLM2hLM2hLCu4qlXxJCZy/HfL4Njx8Xj+/Ofw3Yu4c2exw1ZycnjqMICOSXMoy+B0Jq8UlixZQrdu3WjdunXAdpfLxY033si4cePYunUrbUpxE3ZeXh75+fn+r48dO3bW9Z6JgoKCsLyOlD1laQ5laQ5laYCGDckYOZLI9HSsI0cgLg67Vavw1nBi+Yai6L6kM6Zj0hzKMjg1eaWwa9cuLrjggqBjzZo1A2D37t3+Jm/Hjh3069cvYJ/Zs2cHffybb74ZcA+fx+MJUdXFc7vdYXkdKXvK0hzK0hzK0nnWL7/gSkrC+u037GrVsLt2xXv55VCCS73cbje0bIndsmXZFVoMu0sX+PBDOOXD4FN5DVqXsKzpmDSHsgxOTV4YNG3alOeff97/dXGXat5+++3cdNNN/q8zMjJo3rx5mdYH5e8eQSk9ZWkOZWkOZeksa9ky3O+9d/Lr7GysZcuwNmzAM3bsGa/r53iO0dF4hg7F/cEHhYbs1q2xL7rIgaIqJsezlJBRlsGpySuFpk2bsrOI6+JPbG/SpIl/W0REBI3PcLarKlWqUKVKFf/X4TqTl5aWRtzv7jWQiklZmkNZmiMtyIQdEibHj+P65JOgQ1ZqKq6kJLzXXntGT1Uejkn7j3/EEx+P9fXXvnXyoqPxdu+OfcklEGx5BwmqPGQpoaEsg9Nvg1K47LLLmDlzJlu3bg24L8/r9fL+++/TrFmzQvfriYiISPhZv/6KVcw6dtZPP8EZNnnlhd22LXbbtk6XISLlmJq808jKymLLli0B2wYOHMiyZct45JFHSExM5LzzziMtLY033niD5ORkJk+ejGVZDlVcOlpE0hzK0hzK0hzK0kF5eWc3fgrlaA5laQ5lGZyavNNYs2YNd955Z8C2K6+8kilTpvDGG28wY8aMgMXQZ86cGbAYekXhcmk1DVMoS3OUhyytNWtwffcdpKVhN2iAfcklOoNQCuUhy8rKbtkSLAtOWdooYLwEV94oR3MoS3Moy+Cs3Nzc4L/1pFzIyMigfv36HDp0qExvLE1JSdH1zIZQluZwOkvX22/7Grzf8VxzDXb//g5UVHGlpKTQvXt39u7dS6NGjdi+fbvTJVUqrnnzcP3wQ5ABFwWjRsEZfjjr9DEpoaMszVHZsjzT3kCtr4iIFGL9+mvQBg/A/ckncORImCsS4+TnY23dirVtG5TxOlfeW2/Fe9FFcMpU63bt2njuueeMGzwRkYpEl2sKADVr1nS6BAkRZWkOJ7O0Vq0qetC2cX3/Pd7Bg8NXUAWn4zKQtWQJriVLsI4dA8COicE7eLBvhsiyEBmJ97bb8F51FdauXVCtmu8yzhJe5qUczaEszaEsg9OZPAEgOzvb6RIkRJSlOZzM8sQf30XKygpPIYbQcXmS9c03uD/+OOBnzMrMxP3++8V/uBAKNWtid+zouw+vFPfxKEdzKEtzKMvg1OQJAPn5+U6XICGiLM3hZJZ2o0bFj5/h2p/io+Pyv7xeXEuWFDns+uc/w1hMySlHcyhLcyjL4NTkCQDuU+5TkIpNWZZSRobvXzniZJbeP/wBIiODjtmxsdgXXhjmiio2HZf/deAAVnp6kcPWoUNQjheOV47mUJbmUJbB6Z48AXQ9s0mUZclYGzbg+vxz3306gN2kCd6rrsJu397hyhzOsm5dPPfcg2vePKxTLoWxa9fG87//C1WqOFdbBWTUcZmaimvFCti7F2rUwO7ZE7tVqzN7bBEfHJR4H4cYlWMlpyzNoSyDU5MnAKSmplaq6WdNpizPnLV+Pe4ZM8DrPblt927c06fj+d//xe7Y0cHqnM/Sbt8ezzPPYP38M6SnQ/362J06lepepsouNTXV6RJCwtqwAfesWXDq5VH/+Q/eP/4R7w03nP4J6tXDbtwYa8+eoMN269ZQo0aIqg09p49JCR1laQ5lGZz+Ty0ilZbr008DGjw/28b12WfhL6g8qlIFu3t37AEDsDt3VoNXmR0/jmvevMAG779c336LtW7dGT2Nd+jQgKUM/CIj8QwZcpZFiogI6Eye/Ff16tWdLkFC5IyyzMjwrYG2bx/ExuLt0QOaNCn74sqT1FSsvXuLHLb27oWUFHDw00Edl+YwIUtrzZqAS3cLja9YcUZnv+1zz6VgxAjcX3yBtWkTAN4OHfAOGgQJCSGrtyyYkKP4KEtzKMvg1OQJoJtWTXK6LK1Nm3DNnIl1/Lh/m+vrr/EOHoz3iivKurzyw7ZPv0+ws3xhpOPSHCZkWdyEKQBWSSZMadkST2LiyWOsgpwhNiFH8VGW5lCWwVWM36pS5rK05pUxis3y+HFcc+YENHgnuBYvxtq8uQwrK2fi4rDj44scths0gHr1wlhQYTouzWFClnbdusWPl+Z4cbkqTIMHZuQoPsrSHMoyuIrzm1VEzpq1enXxl1stXx7GapxX3JlL7+DBYaxEpPyzO3fGjo0tevySS8JYjYiIFEdNngCaftYkxWVppaQU+9jTjZvG7tIFz513BpyBsOvWxXPHHeViHTgdl+YwIsuICDz33osdHR243bLwDhmC3aaNM3WFkRE5CqAsTaIsg9M9eQJATk4OMTExTpchIVBclqe93Oo04yayL7wQT9eucPCg7z69+HiwLKfLAnRcmiQnJwc8Ht8XR4/ifvRR7Hbt8PbvD+ec42xxJdG8OZ6nn8Zatco3OVFMjG/ipvr1na4sLHRMmkNZmkNZBqcmTwDIy8tzugQJkeKytLt0wf7ooyIv2bR79y6rssLL48Fav963tluDBtht2xbfuFmWr7krZ3RcmqNg927IzPR94fViZWZiff891tq1vglIWrRwtsCSqFYN+49/5AymLjKOjklzKEtzKMvg1OQJAK4KdOO7FK/YLKtWxXv33bhmzCg0+Yr3yit9CxFXcNbWrbjmzsXKyPBvs+vXx3PvvdCwoYOVlZyOS3NE/fOfQWd0tY4fx/3RR3jGjHGgqhBKT/cty7J3L9So4Tu7V5Ea1zOkY9IcytIcyjI4NXkCQO3atZ0uQULkdFnabdviGTcOa+VK3+VWsbF4e/asWJeMFSUtDdf06YUaWOvQIdyvvornb3+DyEiHiis5HZeGyMkheseOIoet336DI0eggl4uHXRZluXL8fbti/e66xysLPR0TJpDWZpDWQan1lcASKlkE26Y7IyyjInB7t8f7x134L32WjMaPMC1bFnQ5SHAt4aX9dNPYa7o7Oi4NMTx4+Tm5BS/T25ueGoJtfx835nzYMuyfPUV1rp1DhRVdnRMmkNZmkNZBqczeSJiDGvXrtOO2z16hKkakf+qWRNvMZ8029HR0KBBGAsKHWvNGqxjx4oeX7ECu2PH8BRTUID17be4Vq2CrCxo1AjvpZdit28fntcXESlHdCZPAKhWrZrTJUiIVOYs7erVi98hKio8hYRIZc7SKJaF3b9/kcP2H/9YoS4jPpWVllb8eGpqeAopKMA9bRrujz7C2rsX6+hRrI0bcU+bhvXvf4fsZXRMmkNZmkNZBqcmTwCIiNBJXVNU5izt7t2LHrQsvN26ha+YEKjMWRrn0kvh93+IRETg7dMH7+DBjpQUCqddluWUNSjLkrVqFdbmzUHH3J9+CqdMxHQ2dEyaQ1maQ1kGpyZPAMjKynK6BAmRypyl3aED3q5dg455L7+8wq3lVZmzNE1WVhacONMcHY3njjsoGD8e7/XXl5t1GUvDPv987NjYosfDtCyL64cfih4sKAjZ/bg6Js2hLM2hLINT6ysi5rAsvHfeid2unW869/+uk+e95BLsTp2crk7Ep0oV7Ap2VrlIERF47rkH9/Tphdbf9A4ejH3uueGp4zQT21i5uZVyXT8RqbzU5AkAscV8EisVS5FZer1Ya9dibdgALhd2p06+CQkq8FmEoCwL+6KL8Fx0kdOVnDUdl+YwOssWLU4uy7Jv38l18sK4LqXdrBnWnj1FjyckhOR1jM6xklGW5lCWwanJEwByc3OJrKA3/kugoFkeO4Z76tTA2SdXrMBu3RrPn/4EVauGt0g5IzouDZGdTa7plxNFRWH37evY2TLvpZfiWrkSCgoKjdmNG4fsjKKOSXMoS3Moy+DU5AkAeXl5TpcgIRIsS9eCBUGXF7C2bsX16ad4/9//C0dpUkI6Lis2a8MGXIsWYSUnUz03FzIznS7JXA0b4rnnHlxvvYV1yvfZbt4cz913h+yKBR2T5lCW5lCWwanJEwAs0y7Zq8QKZXnsGK41a4ref+VKuOaaCjuFu8l0XFZc1tq1uGfNAvu/57Ys6+RZpiBnm+Ts2R064JkwAWvjRsjIwG7cGJo1C+lr6Jg0h7I0h7IMTk2eAFCnTh2nS5AQKZRlamqxf1Raubm+hYOLWaxZnKHjsoKybVwff3yywQOqnXpJ9GkmCZGzEBFRppMs6Zg0h7I0h7IMTksoCAApKSlOlyAhUijLWrXAVfShbletCjVqlG1RUio6LiuoAwewDh0K2JSbm3vyi4ICOHYszEVJKOiYNIeyNIeyDE5NnojpYmLwdu5c5LDdrZsu1RQJJY8nNPuIiIiUkpo8AaCqZlc0RrAsvTfcgB1kOnM7IQHv0KHhKEtKQcdlBXXOOdi/u/zZ7Xaf+gVoyu8KScekOZSlOZRlcLonTwCoUqWK0yVIiATNMiYGz6OPYq1efXKdvI4dsTt39v3BKeWSjssKyuXCe/nluN999+SmU4+zatUcKEpCQcekOZSlOZRlcGryBIDMzEzi4uKcLkNCoMgsIyKwe/TA7tEj/EVJqei4rLjsP/wBj23j+uc/sdLTyc/LO3lvrP4gqbB0TJpDWZpDWQanJk9ERKQM2L174+nVC3bvJvPoUfj6a99MtiIiImVMTZ4AEBMT43QJEiLK0hzK0gAuFyQkEJ2XF7IFuSsS65dfsJYv9802GheHt1cv7C5dnC6r1HRMmkNZmkNZBqcmTwDIy8vTNc2GUJbmUJbmyMvLc7qEsHN9/DGuJUtObjh8GPemTXi3bsV7443OFVYE65dfcH31FezeDdHR2D164L3ssoB7KHVMmkNZmkNZBqfZNQWA48ePO12ChIiyNIeyNEely3LPnsAG7xSupUth+/YwF1Q8KykJ94wZWFu3YuXmYqWk4Fq8GPcrr8Ap2VW6HA2mLM2hLINTkyciIiIh5fr++7MaD6usLNyffRZ0yNq1C2v58jAXJCJy9tTkCYBmJTKIsjSHsjRHpcvy2LHix8vRBDTWzz9DQUGR467Vq/3/XelyNJiyNIeyDE5NngCQmprqdAkSIsrSHMrSHJUtS7tJk+J3aNo0PIWcidNd6pWb6//PypajyZSlOZRlcJp4RQCwbdvpEiRElKU5zjpLjwfr55+xtm+HyEi8XbpAQkJoipMSqWzHpd2jB/bixVhBzujZ1arhvegiB6oKzm7evPjxFi1O/ncly9FkytIcyjI4NXkCoFmJDKIszXFWWaal4Z46FevAAf8mV1IS3p498d52W6Wczt9Jle64rF4dz/334549Gystzb/Zjo3Fe9ddEBvrYHG/06IFdqtWWNu2FR6LiMDbp4//y0qXo8GUpTmUZXBq8gSAaqdMES0VW7nJct8+rKNHsevXB10vXypnk6X7jTcCGrwTXCtXYjdpgn3ppWdRmZRUuTkuw6lZMzxPP421fj0cPgx16mB36gRut9OVFeLp35+I77/H2rTJt7ZhXBzeDh18DWmjRv79KmWOhlKW5lCWwanJEwAyMjJ046ohHM9y/37cb76JlZzs+9qyfH8s3XoraMHSEil1lvv3Y23ZUuSwa+lSPGrywiojI8PpEpzhcvkau3LMWrcO98yZvia0c2ffpDGRkRAZ6fuQ6hSO/36VkFGW5lCWwWniFREJncxM3H//+8kGD8C2ca1bh/vVV0HXzYeFdfhw8eOHDoWpEpFyzrZxLVgAXq/v66goqFcPatXCysrCtWiRs/WJiJSSmjwBoEaNGk6XICHiZJau777DKuKMhbV7N9aGDWGuqGIrbZZ2zZrFj9euXarnldLT79hyaudOrJSUIodda9YEfDilHM2hLM2hLINTkycAFBSzRpBULE5mWdwlggDW5s1hqsQMpc4yIQG7ceMih+1evUpZkZSWfseWT1ZeXvE7FBSAx3PKl8rRFMrSHMoyODV5AkDuKesAScXmZJZ2ZGTxO2gGrBI5myw9w4ZhB7kH0j73XLz9+p1NWVIK+h1bPtlNmhT7e8lOSICIk9MXKEdzKEtzKMvgNPGKiISMfcEFsG5dkePeCy4IYzWV3Dnn4HnySazvvvNNDR8Zid21K/b55/tmDxQRiIrC+4c/4Prqq6DD3gEDwlyQiEhoqMkTAOrUqeN0CRIiTmZpd+2KvXw51o4dhca8PXtCMZcQSmFnnWV0NHb//tj9+4emICk1/Y4tv7xDh4LHg2vFCt/lmYAdHY13yBDfbJunUI7mUJbmUJbBqckTANLT06mtyRiM4GiWERF4EhNxffEF1sqVWJmZ2HXr4r3kEuy+fZ2pqQLTcWmO9PR0p0uQorhceG+4Ae+gQVjbt/vOerdp41tG4Xd0TJpDWZpDWQanJk8A8J6YPloqPMezrFoV7zXXwDXX+CYsKIcLH1cUjmcpIaMsK4CYmEJn7n5POZpDWZpDWQanGzMEgMjTTZghFUa5ylIN3lkpV1nKWVGWZlCO5lCW5lCWwanJEwCioqKcLkFCRFmaQ1maQ1maQTmaQ1maQ1kGpyZPADh69KjTJUiIKEtzKEtzKEszKEdzKEtzKMvg1OSJiIiIiIgYRE2eABAdHe10CRIiytIcytIcytIMytEcytIcyjI4NXkCaGYikyhLcyhLcyhLMyhHcyhLcyjL4NTkCQA5OTlOlyAhoizNoSzNoSzNoBzNoSzNoSyDU5MnIiIiIiJiEDV5AkDt2rWdLkFCRFmaQ1maQ1maQTmaQ1maQ1kGpyZPAMjIyHC6BAkRZWkOZWkOZWkG5WgOZWkOZRmcmjwBwOPxOF2ChIiyNIeyNIeyNINyNIeyNIeyDE5NngAQGRnpdAkSIsrSHMrSHMrSDMrRHMrSHMoyODV5AmiNEZMoS3MoS3MoSzMoR3MoS3Moy+DU5AkA6enpTpcgIaIszaEszaEszaAczaEszaEsg1OTJyIiIiIiYhA1eQLoVLdJlKU5lKU5lKUZlKM5lKU5lGVwavIEANu2nS5BQkRZmkNZmkNZmkE5mkNZmkNZBqcmTwDIzs52ugQJEWVpDmVpDmVpBuVoDmVpDmUZXITTBYiIiFQGDz74IBkZGcTGxjpdioiIGM7Kzc3VOc5yLCMjg/r163Po0KEy/cPA6/XicunErgmUpTmUpTmUpRmUozmUpTkqW5Zn2htUnu+IFCsjI8PpEiRElKU5lKU5lKUZlKM5lKU5lGVwavIEAI/H43QJEiLK0hzK0hzK0gzK0RzK0hzKMjg1eQJARIRuzzSFsjSHsjSHsjSDcjSHsjSHsgxOTZ4AUKNGDadLkBBRluZQluZQlmZQjuZQluZQlsGpyRMA0tPTnS5BQkRZmkNZmkNZmkE5mkNZmkNZBqcmT0RERERExCBq8gSAqKgop0uQEFGW5lCW5lCWZlCO5lCW5lCWwanJExERERERMYiaPAEgOzvb6RIkRJSlOZSlOZSlGZSjOZSlOZRlcGryREREREREDKImTwCoVauW0yVIiChLcyhLcyhLMyhHcyhLcyjL4NTkCQBZWVlOlyAhoizNoSzNoSzNoBzNoSzNoSyDU5MnABQUFDhdgoSIsjSHsjSHsjSDcjSHsjSHsgxOTZ4A4Ha7nS5BQkRZmkNZmkNZmkE5mkNZmkNZBqcmTwCIjY11ugQJEWVpDmVpDmVpBuVoDmVpDmUZnJo8ASAtLc3pEiRElKU5lKU5lKUZlKM5lKU5lGVwEU4XIMWzbRuAzMzMMn2dzMxMIiMjy/Q1JDyUpTmUpTmUpRmUozmUpTkqW5YneoITPUJR1OSVcydmDGrZsqXDlYiIiIiISHmQlZVFzZo1ixy3cnNzi28DxVFer5f9+/dTo0YNLMsqk9c4duwYQ4cO5R//+AfR0dFl8hoSHsrSHMrSHMrSDMrRHMrSHJUxS9u2ycrKomHDhrhcRd95pzN55ZzL5aJRo0Zl+hputxu3201sbGylOUBMpSzNoSzNoSzNoBzNoSzNUVmzLO4M3gmaeEVERERERMQgavJEREREREQMoiZPiIyMZPjw4ZVqZiJTKUtzKEtzKEszKEdzKEtzKMuiaeIVERERERERg+hMnoiIiIiIiEHU5ImIiIiIiBhETZ6IiIiIiIhB1OSJiIiIiIgYRIuhGygjI4OXXnqJFStW4HK5uPTSSxkxYgRRUVFFPub48eNMnTqVpKQk8vPz6d69O2PGjKFOnTr+fQ4cOMCkSZP46aefqF69OoMGDeK+++4jIqLwj9Evv/xCYmIizZs3Z/78+WXyPisDp7Jcu3Yt06dPJzk5mdzcXOLj47n66qu56aabyvw9m8qpLL/55hv+8Y9/sG3bNvLy8mjevDl33XUXPXr0KPP3bCqnsjxy5AhTp05l06ZN7Nmzh+uvv56RI0eW9ds1yocffsg777xDamoqrVq1YtSoUZx33nlF7v/VV18xa9YsDhw4QOPGjfnTn/7ExRdf7B+3bZvZs2fz2WefkZmZSadOnRgzZgxNmjTx71OanxcpnhM5zp8/n++++46tW7cSGRnJv/71rzJ9j5VFuLPcv38/8+bNY/Xq1aSkpFC3bl0GDhzIsGHDjJuhU2fyDDRu3Dh+++03Jk+ezAsvvMDPP//MCy+8UOxj/v73v7NixQomTJjA1KlTOXLkCI899ph/3OPxMHbsWPLz83nttdd44okn+OKLL5g9e3ah58rMzGT8+PF07do15O+tsnEqy+rVq3Pdddfx6quv8s4773DHHXcwa9YsPvnkkzJ7r6ZzKsuff/6Z7t27M2nSJObOnUuXLl14+OGH2bJlS5m9V9M5lWV+fj61atVi2LBhtGrVqszen6mSkpKYMmUKw4cPZ+7cubRq1YrRo0eTlpYWdP9169bx1FNPceWVV/L666/Tu3dv/vKXv7Bjxw7/Pm+//TYLFy5k7NixzJo1i2rVqjF69GiOHz/u36c0Py9SNKdyzM/Pp0+fPgwdOrTM32Nl4USWycnJeL1exo4dy1tvvcWDDz7Ixx9/zIwZM8LynsNJTZ5hdu7cycqVK3n00Udp3749559/PqNGjSIpKYnDhw8HfUxWVhaff/45DzzwAF27duXcc8/l8ccfZ926daxfvx6A77//np07d/K3v/2NNm3acNFFF3H33Xfz0UcfkZ+fH/B8L774Iv3796dDhw5l/n5N5mSWbdq0oX///rRo0YKGDRsycOBAunfvztq1a8P2/k3iZJYjR47k1ltvpV27djRp0oT77ruPxo0bs3z58rC9f5M4mWXDhg0ZOXIkgwYNokaNGmF7z6Z4//33ueqqq7jiiito3rw5Y8eOpWrVqnz++edB9//ggw/o0aMHt956K82aNePee++lTZs2LFy4EPCdMfjggw8YNmwYvXv3plWrVjz55JMcOXKEZcuWAaX7eZHiOZEjwN13381NN91EixYtwvI+KwMnsuzZsyePP/44PXr0oFGjRvTu3Zubb76Zb7/9NmzvO1zU5Blm/fr1xMTE0K5dO/+2Cy+8EJfLxcaNG4M+ZvPmzRQUFHDhhRf6tyUkJNCgQQP/HyDr16+nRYsWAZcW9ejRg2PHjvHbb7/5ty1atIh9+/YxfPjwUL+1SsfpLE+1ZcsW1q9fT+fOnUPwziqf8pSl1+slJyeH2NjYULy1Sqc8ZSlnLj8/n82bN9OtWzf/NpfLxYUXXujP4Pc2bNgQkBn4MtmwYQMA+/btIyUlJWCfGjVqcN555wXkWtKfFymaUzlK6JWnLI8dO0ZMTMzZvJ1ySffkGSYlJYVatWoFbIuIiCAmJobU1NQiHxMZGVnoB7xOnTr+x6Smpgb88XFi/MTjAXbv3s306dOZNm1a0Pv0pGSczPKEa665hvT0dDweD8OHD2fIkCFn85YqrfKQ5Qnvvvsu2dnZXHbZZaV5K5VeecpSztyJ32PBvse7du0K+piUlJSg+5/I40R2xe1Tmp8XKZpTOUrolZcs9+zZw8KFC0lMTCzV+yjP9Jd4BTF9+nTeeuutYvd55513wlRNYR6Ph6eeeoq77rqLpk2bOlZHRVDeszzVtGnTyMnJYcOGDUyfPp3GjRvTv39/p8sqNypSlgBffvklc+fOZeLEidSuXdvpcsqVipaliIicncOHDzN69Gj69Olj5IfYavIqiJtuuonBgwcXu88555xDXFwc6enpAdsLCgrIzMws9MnGCXFxceTn55OZmRnwSfOpnyzXqVOn0KUlJz4xiYuLIzs7m02bNrF161ZefvllwHdZmG3bXHLJJbz88suaiOW/ynuWv68DoGXLlqSmpjJnzhw1eaeoSFkmJSUxceJEJkyYEHB5jPhUpCyl5GrVqoXb7S509izYGdQT4uLigu5/Io8Tj0tNTaVu3boB+7Ru3dr/HCX9eZGiOZWjhJ7TWR4+fJgHHniAjh078sgjj5z1+ymPdE9eBVG7dm0SEhKK/RcZGUmHDh3IzMxk06ZN/seuXr0ar9db5JS0bdu2JSIigh9//NG/LTk5mYMHD/onT+nQoQM7duwImPHohx9+IDo6mmbNmhEdHc2bb77JvHnz/P+uueYamjZtyrx584qdDreyKe9ZFsXr9RaaZKeyqyhZLlmyhGeeeYZx48YFTDUtJ1WULKV0IiMjadu2bUAGXq+X1atXFzlJWPv27Vm9enXAth9++IH27dsDJ5v+U/c5duwYGzduDMi1pD8vUjSncpTQczLLEw1e27Zteeyxx3C5zGyHzHxXlVizZs3o2bMnzz//PBs3buSXX37h5Zdfpl+/ftSrVw/w/XDffPPN/k+Na9SowZVXXsmUKVNYvXo1mzZt4tlnn6VDhw7+g6J79+40a9aMp59+mq1bt7Jq1SpmzpzJtddeS5UqVXC5XLRo0SLgX+3atalSpQotWrSgevXqjn1PKiqnsgTfujXLly9n9+7d7N69m88++4x3332XgQMHOvPNqOCczPLLL79k/PjxPPDAA5x33nmkpKSQkpJCVlaWM9+MCs7JLME3CdKWLVvIzs4mPT2dLVu2aGKWM3TjjTfy2WefsXjxYnbu3MmkSZPIzc3liiuuAGD8+PFMnz7dv/8NN9zAypUreffdd0lOTmbOnDls2rSJ66+/HgDLsrjhhhuYP38+y5YtY/v27YwfP566devSu3dv4Mx+XqRknMgRfOtYbtmyhYMHD+LxeAKORSkdJ7I8fPgwiYmJNGjQgMTERNLT0/3/XzSNlZubaztdhITWiYVXly9f7l94deTIkf6FV/fv38/111/PlClT6NKlC3Byod4lS5YELNR76mVCBw4c4MUXX2TNmjWnXQwdYM6cOSxdulSLoZ8Fp7JcsGABn3zyCfv378ftdtOoUSOGDBnC1VdfbewnXmXNqSwTExNZs2ZNoXoGDRrEE088EYZ3bh4nf8f26tWrUD3x8fF8+OGHZfyuzbBw4UL/wsutW7dm5MiR/rMAiYmJxMfHBxwXX331FTNnzvQvvHz//fcHXXj5008/JSsri06dOvHQQw8F3Jt+up8XKTkncpwwYQJffPFFoVpOPc6l5MKd5aJFi3j22WeD1rJixYoyfKfhpyZPRERERETEIPpIXkRERERExCBq8kRERERERAyiJk9ERERERMQgavJEREREREQMoiZPRERERETEIGryREREREREDKImT0RERERExCBq8kRERERERAyiJk9ERMrcnDlzGDZsWNhfd9GiRQwcODBkdZzu+SZMmMCjjz5a6uc/G9dddx3vv/++I68tIiLlS4TTBYiISPk0YcIEvvjiCwDcbjexsbG0bNmS/v37M3jwYFyu0n9OOGHCBLKyspg4ceJZ19mrVy+ee+45LrnkkkJj/fr14+KLLz7r1yjKzTffzPXXX19mzx9Kubm5vP7663z11VccOXKEqKgomjVrxk033UTv3r2dLu+0rrvuOg4cOFBo+3333cftt99eJq+5f/9+5s2bx+rVq0lJSaFu3boMHDiQYcOGERkZWSavKSISCmryRESkSD179uSxxx7D6/WSmprKypUrmTx5Ml9//TXPP/88ERHl+38jVatWpWrVqmX2/FFRUWX23KH24osvsmHDBkaNGkXz5s05evQo69at4+jRo2X2mvn5+SFthu6++26GDBkSsK0sM0hOTsbr9TJ27FgaN27Mjh07eP7558nNzSUxMbHMXldE5GyV7/87i4iIoyIjI4mLiwOgXr16tG3blg4dOvDggw+yePFi/x/cmZmZTJ06leXLl5OXl8e5557Lgw8+SOvWrQs955w5c/xnCHv16gXAlClT6NKlC9OmTWPp0qUcOnSIuLg4+vfvz/Dhw0vdTC5atIi///3v/Otf/wo6vmfPHkaNGkXPnj0ZPXo0+fn5zJw5kyVLlpCVlUWLFi3405/+RJcuXYI+fs6cOSxdupT58+cHbH/nnXd47733yM/Pp1+/fowYMcL/HjIyMnjllVdYsWIFeXl5XHDBBYwcOZImTZr4H//1118ze/Zs9u7dS1xcHNdffz0333yzfzwtLY3nnnuOH374gbi4OO65557Tfi+WL1/OiBEj/Gc2GzZsyLnnnhuwT15eHrNnz2bJkiWkpaVRv359br/9dq666ioA1qxZw6uvvsq2bduIjY1l0KBB3HPPPf73lpiYSPPmzXG73Xz55Ze0aNGCqVOnsmPHDl599VXWrl1LtWrV6N69Ow8++CC1atU6bd2nioqK8v88nsrr9XLttdcybNgwhg4d6t++ZcsWhg8fzsKFC4mPj+fAgQO8/PLLrF69Gsuy6NmzJ6NGjaJOnTpBX69nz5707NnT/3WjRo3YtWsXH3/8sZo8ESnXdE+eiIiUSNeuXWnVqhXffvutf9sTTzxBWloakyZNYu7cubRt25YRI0aQkZFR6PE333wzffv2pWfPnnz66ad8+umndOzYEfD9Ef/444/z9ttvM2LECD777LMyu89s27Zt3H///fTv35+HHnoIy7J46aWXWL9+PePGjWP+/Pn06dOHhx56iN27d5/x8/7000/s3buXKVOm8MQTT7B48WIWL17sH3/mmWfYtGkTEydOZMaMGdi2zZgxYygoKABg06ZN/PWvf6Vfv3688cYbDB8+nFmzZrFo0SL/c0yYMIGDBw8yZcoUJkyYwD/+8Q/S0tKKratOnTr85z//4dixY0XuM378eJKSkhg5ciRvv/02Dz/8MNWrVwfg8OHDjBkzhnbt2jF//nzGjBnD559/XqjB/eKLL4iMjGT69OmMHTuWzMxMHnjgAVq3bs2cOXN46aWXSE1N5cknnzzj7+npuFwu+vXrx5dffhmw/csvv6Rjx47Ex8fj9Xp59NFHycjIYOrUqUyePJm9e/fy17/+tUSvdezYMWJiYkJWu4hIWdCZPBERKbGEhAS2b98OwNq1a/n111/5/PPPqVKlCuA7o7N06VK+/vprrr766oDHRkVFUbVqVfLz8wudlbnjjjv8/92wYUN27dpFUlISt956a0jrX7duHWPHjmXYsGH+M2QHDhxg8eLFfPjhh9SrVw+AW265hVWrVrFo0SLuu+++M3rumJgYRo8ejdvtJiEhgYsvvpgff/yRIUOGsHv3bpYvX85rr73mb2z/9re/MXToUJYuXUrfvn1577336Nq1K3feeScATZs2ZefOnbzzzjtcccUV7Nq1i5UrVzJ79mzatWsHwF/+8hduueWWYut65JFHGDduHIMHD6ZVq1Z06tSJPn360KlTJwB27drFV199xeTJk+nWrRvgO3N1wkcffUT9+vUZPXo0lmWRkJDAkSNHmDZtGnfeeaf/Hs0mTZrw5z//2f+4efPm0aZNm4Dv32OPPcbQoUPZtWsXTZs2PaPvK8D06dOZNWtWwLZJkybRuXNnBgwYwHvvvceBAwf8TV1SUpJ/Ypwff/yRHTt2sGDBAho0aADAk08+yW233cavv/7q/14WZ8+ePSxcuFBn8USk3FOTJyIiJWbbtv+/t23bRk5ODoMHDw7Y5/jx4+zdu7dEz5uUlMTChQvZu3cvOTk5eDyekN9zdfDgQUaOHMm9997LjTfe6N++Y8cOPB5PwGWR4LuEMTY29oyf/8TliifExcX5G+KdO3fidrs577zz/OM1a9b0N3Lguw/s9xOhdOrUiQ8++ACPx0NycjJut5u2bdv6xxMSEk57dqlz584sWLCA9evXs379en788UcWLFjAXXfdxZ133snWrVtxu91ccMEFQR+/c+dOOnTogGVZ/m0dO3YkJyeHQ4cOER8fDxBQF/h+Pn766Sf69etX6Dn37t1boibvlltuKfRzdqIhb9OmDQkJCSxZsoTbb7+dNWvWkJaWRt++fQHf97V+/fr+Bg98WcXExLBz587TNnmHDx9m9OjR9OnTp9B9gSIi5Y2aPBERKbHk5GTOOeccAHJycoiLi2Pq1KmF9qtRo8YZP+f69et5+umnueuuu+jevTs1atQgKSmJ9957L2R1A9SqVYu6deuSlJTElVdeSXR0NADZ2dm43W7mzJkT0KQB/ksWz0Sw+wdPbYqdFBERQefOnencuTO33XYb8+bN4/XXX+e2224L2QQ11apVC/g6JyeHXr16cf/99xfaN9j9dcWpWbMmjRs3LnJ8wIAB/iZvyZIl9OjRg5o1a5boNYI5fPgwDzzwAB07duSRRx456+cTESlruidPRERKZPXq1Wzfvp1LL70U8J1BSU1Nxe1207hx44B/RU2sERkZidfrDdi2bt06GjRowLBhw2jXrh1NmjQJOmX+2apatSovvvgiVapUYdSoUf571Nq0aYPH4yEtLa3Q+yhpM1KUZs2a4fF42Lhxo3/b0aNH2bVrF82bNwd8Z+V++eWXgMf98ssvNGnSBLfbTdOmTfF4PGzevNk/npycTGZmZqnrycvLo2XLlni9XtasWVPkvuvXrw9oWNetW0dUVBT169cv8jXatGnDb7/9Rnx8fKHva0ma5zMxYMAAduzYwaZNm/jmm28YMGCAfywhIYFDhw5x8OBB/7bffvuNzMxMmjVrVuRznmjw2rZty2OPPXZWS4eIiISLflOJiEiR8vPzSUlJ4fDhw2zevJn58+fzyCOP0KtXLy6//HIAunXrRvv27Xn00UdZtWoV+/fvZ926dcyYMYNff/016PPGx8ezbds2kpOTSU9Pp6CggMaNG3Pw4EGSkpLYs2cPCxYsYOnSpWdU5759+9iyZUvAv5ycnCL3r169Oi+++CJut5sxY8aQnZ1N06ZNGTBgABMmTOCbb75h3759bNy4kTfeeIPvvvuu5N+8IJo0aULv3r2ZOHEia9euZevWrTz99NPUq1fPf4nmzTffzOrVq3n99dfZtWuX/z7BE5eRJiQk0LNnT1544QU2bNjgn8TldGfiEhMT+fjjj9m0aRP79+/nu+++Y8aMGXTp0oXo6GgaNmzIoEGDeO6551i6dCn79u3jp59+4t///jcA1157LYcOHeKll14iOTmZZcuWMWfOHG666aZiG59rr72WjIwMnnrqKX799Vf27NnDqlWreOaZZ/B4PCX6/mVnZ5OSkhLw79SJZBo2bEjHjh157rnn8Hg8AZe9duvWjRYtWjBu3Dg2b97Mxo0bGT9+PBdccEGRl2oePnyYxMREGjRoQGJiIunp6f7XFREpz3S5poiIFGnlypUMGTIEt9tNTEwMrVq1YtSoUQwaNMj/h71lWUyaNImZM2fy7LPPkp6eTlxcHOeff36RU9MPGTKENWvWcNddd5GTk8OUKVPo3bs3N954Iy+99BJ5eXlcfPHF3HHHHcydO/e0dU6ZMqXQtmnTphX7mKioKP7v//6P0aNHM3bsWCZNmsTjjz/OvHnzmDp1KocPH6ZmzZq0b9/ev9RDKDz22GO88sorPPzww+Tn59O5c2cmTZrkv8yzbdu2PP3008yePZt58+YRFxfH3XffzRVXXBHwHBMnTiQxMZHatWtz7733FpqQ5Pe6d+/OF198wYwZM8jNzaVu3br06tXLP8ELwJgxY5gxYwaTJk0iIyODBg0a8D//8z+A7963SZMm8eqrrzJs2DBiY2O58sor/RObFKVevXq89tprTJs2jVGjRpGXl0d8fDw9evTw/wwtWrSIZ599lhUrVhT7XLNnz2b27NkB266++moefvhh/9cDBgxg0qRJXH755QGNr2VZTJw4kZdffpk///nPAUsoFOX7779nz5497Nmzh2uuuSZg7HS1iog4ycrNzS0fNwqIiIhIpTR79mx+/vnnoPd1iohIyelyTREREXHUypUrg07MIiIipaMzeSIiIiIiIgbRmTwRERERERGDqMkTERERERExiJo8ERERERERg6jJExERERERMYiaPBEREREREYOoyRMRERERETGImjwRERERERGDqMkTERERERExiJo8ERERERERg/x/KkR0ANmKMC0AAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plot_strip_with_means(brca1_df, x_col=\"evo2_delta_score\", class_col=\"class\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also calculate the area under the receiver operating characteristic curve (AUROC) of this zero-shot prediction method. Note that the results are nearly random unless you are on one of the following configurations:\n", - "* `--fp8` on an fp8 enabled GPU with either the 1b or 7b models. The 40b likely works as well.\n", - "* the 7b model uniquely seems to work well without `--fp8` so if you are on an older device, the 7b model should produce\n", - " robust results. Change the `MODEL_SIZE` earlier in this tutorial and rerun for good results in that case.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Zero-shot prediction AUROC: 0.77\n" - ] - } - ], - "source": [ - "# Calculate AUROC of zero-shot predictions\n", - "# class 1 is LOF which is the bad thing. That means we expect this to be more negative.\n", - "y_true = brca1_df[\"class\"] == \"LOF\"\n", - "auroc = roc_auc_score(y_true, -brca1_df[\"evo2_delta_score\"])\n", - "print(f\"Zero-shot prediction AUROC: {auroc:.2}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "def plot_roc_curve(df):\n", - " \"\"\"Plots an ROC curve using Seaborn with a light NVIDIA-themed design.\n", - "\n", - " The function assumes:\n", - " - `class` column as the true labels (binary, 'LOF' = 1, else 0).\n", - " - `evo2_delta_score` as the prediction score.\n", - "\n", - " Parameters:\n", - " - df (pd.DataFrame): DataFrame containing `class` and `evo2_delta_score`.\n", - "\n", - " Returns:\n", - " - matplotlib Figure: ROC Curve Visualization.\n", - " \"\"\"\n", - " # NVIDIA theme colors\n", - " NVIDIA_GREEN = \"#76B900\"\n", - " BACKGROUND_COLOR = \"#F8F8F8\"\n", - " GRID_COLOR = \"#DDDDDD\"\n", - " FONT_COLOR = \"#333333\"\n", - "\n", - " # Validate required columns\n", - " if \"class\" not in df.columns or \"evo2_delta_score\" not in df.columns:\n", - " raise ValueError(\"DataFrame must contain 'class' and 'evo2_delta_score' columns.\")\n", - "\n", - " # Convert 'class' to binary labels: Assume 'LOF' = 1, anything else = 0\n", - " y_true = (df[\"class\"] == \"LOF\").astype(int)\n", - "\n", - " # Compute ROC curve\n", - " fpr, tpr, _ = roc_curve(y_true, -df[\"evo2_delta_score\"]) # Negative to align with previous logic\n", - " roc_auc = auc(fpr, tpr)\n", - "\n", - " # Set up the plot with NVIDIA theme\n", - " plt.figure(figsize=(9, 5), facecolor=BACKGROUND_COLOR)\n", - " plt.style.use(\"default\") # Reset to default to avoid any pre-existing style\n", - "\n", - " # Plot ROC curve\n", - " plt.plot(fpr, tpr, color=NVIDIA_GREEN, lw=3, label=f\"ROC curve (AUROC = {roc_auc:.2f})\")\n", - "\n", - " # Plot diagonal reference line for random guessing\n", - " plt.plot([0, 1], [0, 1], color=\"gray\", lw=2, linestyle=\"--\")\n", - "\n", - " # Customize plot appearance\n", - " plt.xlim([0.0, 1.0])\n", - " plt.ylim([0.0, 1.05])\n", - " plt.xlabel(\"False Positive Rate\", color=FONT_COLOR, fontsize=12)\n", - " plt.ylabel(\"True Positive Rate\", color=FONT_COLOR, fontsize=12)\n", - " plt.title(\n", - " \"Zeroshot ROC Curve\\nEvaluating the discriminative performance of Evo 2 predictions\",\n", - " color=FONT_COLOR,\n", - " fontsize=16,\n", - " loc=\"left\",\n", - " )\n", - "\n", - " # Customize grid and tick colors\n", - " plt.grid(color=GRID_COLOR, linestyle=\"--\", linewidth=0.5)\n", - " plt.tick_params(colors=FONT_COLOR)\n", - "\n", - " # Set background color\n", - " plt.gca().set_facecolor(BACKGROUND_COLOR)\n", - "\n", - " # Add legend\n", - " plt.legend(loc=\"lower right\", frameon=True, facecolor=BACKGROUND_COLOR, edgecolor=GRID_COLOR)" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAw8AAAHzCAYAAABmEyGdAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAx2RJREFUeJzs3Xd4FOX6N/DvZtN7JfQaIIRAKAECaYiANMECxwYq6ovisXvQA3YFjuV4FOFYQFAsICKiSFNqCikkhDQCCb0EEkLapm22vn/kt3NYsoGdZJPZTb6f6/K6zDOzu3eGeydzzzxFplQq9SAiIiIiIroFO6kDICIiIiIi28DigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzMLigYiIiIiIzGIvdQDW4plnnsHRo0dFvebQoUOtFE3bunLlCmbPno3OnTtjy5YtUodzSzt27MDy5cuN2mQyGVxcXNCtWzeMGTMG999/P3x8fG76PhqNBjt37kRcXBxOnjyJyspKuLq6onv37hg7dizuueceeHt73zKe4uJi/P7770hLS8Ply5dRVVUFFxcXdO3aFWFhYbjjjjswaNCgZv2uhw8fxp49e5CTk4PS0lKo1Wp4enqiT58+GDt2LO64445b/p5ERERElsLi4f+MGTMGnTt3vuV+Bw8eRF1dHTp16tQGUbVPGRkZePbZZzF8+HCsWrWq2e/j4uKC8ePHAwB0Oh2KioqQm5uLkydPYufOnfj888/Ro0cPk689d+4c/vnPf+LixYuQy+UYPHgwRowYgaqqKmRnZyMvLw8//fQTlixZInyGKT/++CPWrFkDtVoNFxcXDB48GD4+PqitrcXp06exefNmbN68GQ8++CD+/ve/m/27VVRU4K233kJ6ejoAoEuXLhgxYgRcXFxQWlqK3NxcpKen4+uvv8aKFSswePBgs9+biIiIqLlYPPyfefPm3XKfzZs3Y9euXXB0dGx055vanpeXF15//XWjtjNnzuCZZ55BWVkZVqxYgX//+9+NXnf58mUsXLgQCoUC4eHhWLx4sVHhWF9fjzVr1mDjxo144403sGzZMsTExDR6ny+++AI//PAD7O3t8cwzz+Dee++Fo6Oj0T65ublYvXo1Ll68aPbvVV1djYULF+LChQvo1asXXnnlFQwbNsxoH5VKhV27dmHt2rUoLS01+72JiIiIWoLFg5kyMzOxcuVKAMDLL7/c7G4o1Lr69u2L++67D6tXr0ZaWhpUKlWjC/r33nsPCoUCgwcPxkcffdRou5OTE5555hkAwMaNG7Fs2TKEhYXBy8tL2Cc9PR0//PADAODdd99FbGysyXhCQ0OxYsUKZGVlmf07fPLJJ7hw4QK6dOmCL7/8Ep6eno32cXR0xKxZsxAdHY2qqiqz35uIiIioJVg8mOHq1at4/fXXodVqcffdd2PGjBlN7nvgwAH88ccfyM/PR3V1Nby9vTFixAg8/PDD6NOnj9G+1481+Pnnn7F582bs3r0bly5dQl1dndGYitTUVPzyyy/Iy8tDdXU1vLy8EBYWhgcffNBkIVNdXY0NGzYgISEBly9fhlarhaenJ7p27YqRI0di/vz5sLdv/M+v1+uxbds2/P777zh//jzkcjlCQkLwxBNPIDQ0tMnj88MPPyA1NRVXr16Fg4MD+vbtiylTpuDOO++EXC4X9r1+bMnRo0cRGRkpbLPUmIugoCAADWMaFAoF/P39hW1Hjx5FdnY2AOCll15qVDhc7//9v/+H3bt3o7y8HFu2bMFjjz0mbPv2228BAFFRUU0WDgYymazRk4OmFBYWYs+ePQCAZ5991mThcD1fX1/4+voKPy9duhS7du3CkiVLMH369Eb7G8aLTJ061eipzfXtzz33HNatW4dDhw6hpKQEoaGhmDdvHl566SX06tULGzZsMBmLRqPB3XffjbKyMnz77bfo37+/sK2+vh6//vor9u/fjwsXLkClUiEwMBDR0dGYO3euUWFGRERE1ovFwy2oVCq89tprKC8vx9ChQ/HCCy+Y3E+j0eCdd97B/v374ejoiIEDByIgIAAXLlzAX3/9hbi4OCxfvhwRERGNXqvX67FkyRKkpqYiLCwMvXv3xtmzZ4Xtq1evxvr16yGTyTBkyBAEBgbi3Llz2L9/P+Li4vDKK68YFTRKpRILFy7EmTNn4O3tjZEjRwp95S9cuICcnBzcf//98PDwaBTLsmXLsGfPHoSFhWHcuHE4efIk0tLSkJWVhVWrVjXqW3/8+HG89NJLUCgUwsVgTU0NMjIykJOTg/j4eHzwwQdwcHAA0DC2xNHREampqfD19cWYMWOE9zJncLI5ampqAAByubzRe8bHxwMA+vTpg+Dg4Ju+j5OTEyZMmIAtW7YgMTFRKB6qqqqEJwlTp061SMwGSUlJ0Gq18PDwQFRUlEXf2xyVlZV4/PHHUVVVhbCwMAQHB8Pe3h6jRo1Cp06dcP78eeTm5posJFNSUlBWVoaBAwcaFQ4lJSV4+eWXcfr0aXh6eiI4OBiurq4oKCjAhg0bcODAAaxatcqsMUdEREQkLRYPt/Dxxx8jLy8Pfn5+eO+990zerQeAtWvXYv/+/QgJCcE777yDrl27CtsOHDiAt956C2+//TY2b97c6KK9uLgYer0e3333HXr27Gm0LSUlBevXr4ejoyM++OADjB49Wtj2xx9/4P3338dHH32EkJAQ9O3bV/i8M2fOICIiAh988IFRzDqdDllZWXB2dm70OxQVFeHo0aP4/vvvhTi0Wi0+/PBDbN++HV9//TU++eQTYX+VSoXXX38dCoUCd911F1588UXhswoLC/H8888jNTUV69atw5NPPgmgYWzJ4MGDkZqail69ejUas2AJSUlJABoKlRv/vfLz8wEAISEhZr2X4anOyZMnodFoYG9vj4KCAuh0OqPtlnLixAkAwIABA4ye2LSVpKQkhIeHY/ny5XBzczPaNnXqVKxfvx47d+40WTzs2LEDADBt2jShTa/X480338Tp06cxY8YMPPfcc8L7ajQafPnll0LXMEO3QCIiIrJeXOfhJrZu3Yrt27fDwcEBy5cvN+r+cj2FQoFNmzYJA6mvLxwA4LbbbsOsWbNQVVWFP//80+R7PPnkk40KB6Chzz0A3HPPPUaFAwDceeediIyMhEajwebNm4X2srIyAMCoUaMaXTzb2dlh+PDhwpOAG7344otGccjlcixYsABAw7gPjUYjbDtw4ACKiorg7++P559/3uizunXrJswu9Msvv6C+vt7k51mKVqtFYWEhvvjiC+zZswedO3c2+ZSooqICAMye3tTQJUin0wljC8rLy4Xtlp4mVWx8lmZvb49XXnmlUeEA/K8o2LdvX6N/z/LyciQlJcHR0RGTJ08W2lNTU5GdnY3+/ftj0aJFRu9rb2+Pp59+Gn379kVGRgbOnDnTSr8VERERWQqLhybk5OTg008/BQA8//zzTfb3B4AjR46gvr4eQ4cORUBAgMl9hg8fDqBh9h1TTE0HqtFohP7519/NvZ6hu1JGRobQZrgbvmHDBuzatQsKhaLJ2K8nl8tNdqvy8/ODh4cHVCoVKisrhXbDZ06cONHk2IHx48fDw8MDtbW1wh1/SyoqKkJkZCQiIyMRExODv/3tb/jhhx8QEhKC9evXo1u3bi3+DL1eb4FIbceAAQOaPG7du3fHsGHDUF1dLXT/Mvjrr7+g0WgQFRVlNE7D8BRo/PjxJp/a2dnZCeNBcnJyLPRbEBERUWthtyUTrl27htdeew0ajQYzZszA3XfffdP9L1++DKBhBp7rBwCbcv1dawMfHx+T3YgUCgVUKhWAhnn+TTFc6JWUlAhtI0aMwNy5c7FhwwYsXboUMpkM3bt3x9ChQxEdHY3IyEjY2TWuG/39/ZvsluXm5oaqqiohHqDhON0sNplMhq5duyI/P98oPku5fp0HtVqNc+fO4dSpU8jLy8OHH36Id999t9FrDANzTf07mGLYz87OTuhudv1TgfLycgQGBrbk1zBiGKNhbnyWdqtxB9OnT0dmZiZ27NiBSZMmCe07d+4Utl/P8N1Ys2YN1qxZc9P3Njx1ISIiIuvF4uEGGo0Gr7/+OkpLSxESEoKXX375lq8x3J3u3r07hgwZctN9e/Xq1ajNycmpecHexMKFC3HXXXchMTER2dnZyMnJwY4dO7Bjxw4MGjQIK1euhIuLi9FrZDKZxeNoTabWeTh48CDefPNN7Nu3D8OGDcM999xjtH3gwIHIzs7GsWPHzPqMvLw8AA0zOBkKq/79+8POzg46nQ7Hjx+3aPEwcOBA7N69GwUFBdBqtRYf93CrJym3ysUJEybgk08+wZEjR3D16lV06tQJ+fn5OHXqFAICAhp1rTN83tChQ2/5JOjG2ciIiIjI+rB4uMEnn3yCnJwc+Pj4YNmyZTedytPAsNp0z549LToA2NPTE46OjlCpVLh8+bIwBen1DHd2TXWX6tKlC+bMmYM5c+YAaJgZ6Z133sHx48fx448/4oknnmhRfIYxIIYYTLlZfK1h/PjxmDt3LtavX4+vv/4akydPhru7u7A9Ojoamzdvxrlz53DixImbzrhUX1+P/fv3A4DRzEeenp4ICwvD0aNHsWvXrpuuQC3WuHHjsHLlSlRVVSExMfGW08DeyDCWpba21uT2oqKiFsXn7OyMCRMmYPv27di1axceeeQR4anD1KlTGz3RMnw3oqOj8eCDD7bos4mIiEh6HPNwne3bt+O3336DXC7H0qVLhQufWwkPD4eDgwOOHj1q0e4m9vb2GDp0KID/dQsxFTPQ0FXpVgYNGiTciT958mSL4zN8pqkBtAAQFxeHqqoquLq6YuDAgUK74QJXq9W2OAZTHn74Yfj5+aGyshI//fST0baRI0cK081+/PHHRt2wbrRmzRpUVFTAzc2t0ROMhx9+GACQmJiIuLi4m8aj1+vNXiSue/fumDhxIgBg1apVtxyvUl5ejvPnzws/G4q069uujyMlJcWsOG7GMM5m165dUKlUwroUpsblGMbQHDhwoMONHyEiImqPWDz8n+PHj+Pjjz8G0LA4l7mLegENM/LMnj0bdXV1eOWVV3D69OlG+6hUKiQkJJi8qLuZ+++/H0DDzE/p6elG23bs2IHExETY29sLTxeAhov2zMxMYTpRA41Gg9TUVAC37ttujttuuw2BgYG4du0aVq5caTQT0+XLl7Fq1SoAwOzZs426wxgucC9evGj0GktxdnbG/PnzAQA///xzowvwN998Ex4eHsjLy8OiRYtQXFxstL2+vh7//e9/sXHjRshkMixevLjR7EejR4/GAw88AAB466238NNPP5ksRE6cOIEXX3xRmDXLHC+++CK6d++Oy5cvY+HChSYLD7Vaje3bt+PRRx81yqnw8HAAwO7du43WCtFoNPj8889x/Phxs+NoypAhQ9CzZ09cvHgRn3/+OSorKzF06FD06NGj0b7R0dEYNGgQ8vLysGzZMpPFtUKhwNatW1slF4iIiMiy2G3p/yxZsgQqlQouLi7Iz8/H0qVLb/maefPmCWMYnnrqKVy7dg179uzBo48+iqCgIHTt2hVyuRwlJSU4efIk6urq8PHHH5sc99CUsWPH4pFHHsH69evxwgsvCIvEXbhwAfn5+ZDL5Vi0aJGwxgPQsIry5s2b4e3tjf79+8PHxwe1tbU4duwYysvLERAQgIceekj8QbqBo6Mjli1bhpdeeglbt25FcnIyBg8ejNraWhw5cgQqlQpjxowxWpkZaChcgoODceLECcybNw/BwcFwdHSEt7c3Fi5c2OK4gIZpbDdu3IjCwkJs3LhRWGcCaLi7//nnn+Of//wn0tPTMWfOHAwePBiBgYGoqqpCTk4Oampq4OLigiVLluC2224z+RnPPPMMPDw8sG7dOqxcuRJr165FSEgIfHx8UFdXh9OnT+PKlSsAgLlz55odu6enJ7788ku88cYbOHr0KJ5++ml07doV/fr1g7OzM8rKypCXl4e6ujq4ubkZTSFsGBSfkJCAxx9/HEOHDoWTkxMKCgpQU1ODOXPmGE3r21zTp0/HF198IbxXU6uu29nZ4V//+hcWLVqEXbt24eDBgwgKCkJgYCDUajUuX76MM2fOQKvVYtq0aU0O2CciIiLrwL/U/+fq1asAgLq6Ouzatcus10ybNk0oBOzt7fH222/jjjvuwB9//IG8vDycOXMGLi4u8PPzQ2RkJKKiokQ90TBYsGABhg4dil9++QV5eXk4duwYvL29MWHCBDzwwAONFjybNm0anJyckJ2djXPnziEzMxNubm4IDAzE3/72N8yaNUuYdailBg0ahG+//RY//PADUlJSEB8fDwcHBwwYMABTpkzBnXfeafKCcNmyZfjyyy+RkZGBffv2QavVonPnzhYrHuzt7bFgwQK89dZb+OWXX3D//fcb/c59+/bFjz/+iJ07d+LgwYPCLE0uLi7o0aMHxo4di3vvvfeW6y088sgjuOOOO/Dbb78hPT0dJ0+eRHV1NVxcXNC1a1dERUVh2rRpGDBggKj4fXx8sGrVKqSkpGDPnj3Izc1Feno61Go1vLy8EBoainHjxmHKlClGU6MCwLvvvotvv/0We/bswdGjR+Hh4YHw8HA88cQTZnefupUpU6Zg9erV0Gq1cHFxabLAAhqeNK1evRo7d+7Evn37cPr0aeTl5cHT0xP+/v6YNWsWoqKiWmXiACIiIrIsmVKpZEdkIiIiIiK6JY55ICIiIiIis7B4ICIiIiIis7B4ICIiIiIis7B4ICIiIiIis7B4ICIiIiIis7B4ICIiIiIis3Cdh+vce++9KCoquuV+S5YswfTp09sgImNXrlzB7Nmz0blzZ2zZsqXNP1+MHTt2YPny5Zg6dSpef/11qcNpNls65jeLNTIyEgBw6NAhKUK7qaVLl2LXrl1t9r165plncPToUaxcuRIjRoxo9c9rCWv+d7NmiYmJ+PHHH3Hq1CnU1tYCgE38e1PLlJSU4IsvvkB6ejoqKiqg1WrN+hu0du1arFu37pbvP3z4cKxatcpS4VpMQUEBUlJSkJ6ejjNnzkChUMDV1RV9+vTBxIkTMWvWrA6xAKfhGu6XX35Bly5dhPa2POcbcumxxx7D448/3qqfJaX2n03NMHToUHTr1q3J7d27d2/DaKyPLV1Q34otXUhS+8Lcax0FBQV47bXXoNfrMWLECPj5+UEmk8HPz0/q0KgV6fV6LFmyBHl5eejduzdGjBgBe3t7DB061Oz38PX1xZgxY5rcblgU1ppoNBrMnz8fAODi4oJBgwbB19cXV69exbFjx5CdnY3du3fjP//5Dzw8PCSO1rZlZGTg2Weftdoisi2xeDBhxowZkjxZaE9iY2MRGhoKNzc3qUMhABs2bJA6hCY99dRTmDdvXptd3L3++uuor69HYGBgm3xeS1jzv5u1SkhIgEajwcMPP4wnn3xS6nCojRQVFSEvLw+BgYFYv359s+609+rVyyaflA8cOBBz585FVFQUHB0dhfbTp0/jxRdfRF5eHlauXIklS5ZIGKV02vKcf++992LixInw8vJq9c+SEosHahXu7u5wd3eXOgz6P9Z4x8zA398f/v7+bfZ5nTt3brPPailr/nezVsXFxQD4hLijMfy7d+3atUN00TGwt7dvsstVv3798Pe//x3vvvsu9u7di1deeaVDHRuDtjzne3t7w9vbu80+TyodL4ss6Pz583jwwQfh4eGB33//HU5OTib3e+yxx5Cfn4/3338f0dHRAICzZ89i3759SE9Px5UrV1BRUQFXV1cMGDAAM2fOxO233252HOZ0I2qqL6DYOAz904GGOz2GPtkGhr7ZTY15uP6x36effoqffvoJu3btwuXLl+Hs7Ixhw4bhySefRO/evU3+HllZWVi/fj2OHTsGrVaLXr16Yfbs2Zg6daqo/uGGOAyu/3/A9LgWvV6Pbdu24ffff8f58+chl8sREhKCJ554AqGhoSY/p76+Hr/++iv279+PCxcuQKVSITAwENHR0Zg7d26z7k4cOnQIGzZsQH5+Puzs7NCvXz88+OCDCAoKavI1TR2ba9eu4YcffkBKSgqKi4shk8ng5eWFHj16ICIiAg8++GCj9yopKcGmTZuQmpqKK1euQK/XIyAgAKGhoZg1axaGDBli8nN37NiB33//HefOnUNNTY2Qi02Nebi+7+jMmTPx9ddfIzk5GVVVVejWrRvuu+8+3HnnnQAavovffPMNjhw5gurqavTs2RPz5s3DxIkTG8XfVHeh6+MYPnw4vv76a6SlpaGqqgoBAQGYNGkSHn30UaM7ewBQU1ODffv2ITk5GWfOnMG1a9cANFzEREVFCecIAzG5d+O/W1VVFWbOnAmNRoNff/0VAQEBjX4/AHjttddw8OBBPP/88/jb3/5mtO3AgQP4448/kJ+fj+rqanh7e2PEiBF4+OGH0adPH5Pv15Tr49u2bRt+++03nD9/Hvb29ggNDcX8+fOb/G5oNBrs3LkTf/31F06dOgWlUgl/f3+MGTMGDz/8cKO7hNefO/7973/j+++/x4EDB1BUVAQfHx9MmzbN6CJq+fLlWL58OYDGfdXPnz+PH374AUeOHEFZWRmcnZ0xYMAAzJo1y+S59/pcnD59Or755hscPnwYpaWlmDx5Ml5//XWj/AkNDcXXX3+NjIwMKJVK9O7dG48++qhw/j927JhwHqutrUX//v2xYMEChIeHN/rsvLw8HDhwAEePHkVxcTEUCgU8PDwQEhKCOXPmYNSoUY1ec/359+WXX8a3336LAwcO4OrVq/Dw8MDo0aPx1FNPNZk/Yr7jQOuc5/Ly8rBx40ZkZWWhsrIS7u7uGDx4MGbPno3Ro0cL+xn+/hkcPXrU6O/SjX/zLGXBggU4duwY3nnnHZPnGcNnf/LJJ4iJicG//vUvo2179+7Ftm3bUFBQAKVSCV9fX4SHh2Pu3Lno2bOnxeLs378/gIZ/o4qKCrNv1NyY82vWrBHOh4GBgZg0aRLmzp3b6Lrn+tx77rnnsG7dOhw6dAglJSUIDQ01+h6mp6djy5YtOHbsmPBvPHToUDz00ENNnjfOnj2LtWvXIiMjA3V1dejWrRumTZuG++67r8nf5VZdRI8cOYKtW7ciNzcXFRUVcHNzQ+fOnTF27FjMmTMHXl5ewnsAjXPs+muvW415SE1NxS+//IK8vDxUV1fDy8sLYWFhePDBBzFo0KCbxu7u7o5vvvkGWVlZqK2tRbdu3TBjxgzcf//9kMlkRq9TqVTYvHkz9u3bhwsXLkCtVsPT0xOdOnXCiBEjMG/ePHh6ejZ5zG6FxUML9OrVC6GhocjNzUVCQoLJE8jp06eRn58PX19fjB07Vmj/6aefsH37dvTq1Qv9+vWDu7s7iouLkZGRgfT0dBw7dgzPPfdcq/8OYuMYOnQo6urqcPDgQbi4uGD8+PHN+lyNRoOXX34Zubm5GDZsGHr37o3jx48jPj4eR48exTfffNPohL93716888470Ol06NevH/r27YuSkhIsX74c586dE/X5fn5+mDp1KlJTU1FWVoYxY8bA19dX2G7qruWyZcuwZ88ehIWFYdy4cTh58iTS0tKQlZWFVatWYfDgwUb7l5SU4OWXX8bp06fh6emJ4OBguLq6oqCgABs2bMCBAwewatUqUXdFfvrpJ6xcuRIAEBISgm7duuHixYv45z//ifvvv1/UMSgtLcXjjz+Oa9euITAwEGPGjIGjoyOuXbuGkydPIj8/v1HxkJ6ejtdffx1VVVXw8fFBeHg4HBwccOXKFezZswcAGl1YAMB//vMfbN26FaGhoRg3bhwKCwsbneyaUlxcjMcffxz29vYICwtDRUUFsrKy8P7776O6uhpDhgzBiy++CH9/f4wYMQJFRUXIzc3FW2+9BQBN/mFvysmTJ7FixQp4eHhg2LBhUCgUyMnJwfr163H27NlGFwGnTp3CBx98AG9vb/Ts2RMDBw5EVVUV8vPz8d1332H//v1YvXq1cAHVnNwz8PDwQGxsLPbs2YPdu3dj3rx5jfaprKzEoUOH4ODggDvuuENo12g0eOedd7B//344Ojpi4MCBCAgIwIULF/DXX38hLi4Oy5cvR0REhKjjBQCfffYZfv75ZwwZMgTR0dE4ffo0UlJSkJaWhvfeew+xsbFG+9fU1ODVV1/F0aNH4eLiguDgYHh7e+P06dP47bffcODAAXz66acYMGBAo8+qr6/HM888g3PnziEsLAxBQUGorKxE//79MXXqVGRnZ6OwsNBo7Nr1T3CSkpLw2muvQaVSoWfPnoiNjUV5eTkyMzNx5MgRHD58GIsXLzb5e168eBHz58+Hg4MDhgwZAr1e3+guY0FBAT755BMEBAQgPDxcyMfFixfjvffeg1wuxxtvvIG+ffti5MiROH/+PI4dO4aXXnoJK1euRFhYmNH7ffXVV8jIyECfPn0wcOBAuLi4oLCwEIcOHcKhQ4dMFojXH+ennnoKxcXFGDp0KPr27Yvc3Fzs3r0bmZmZWL9+faMnxGK/461xntu2bRs++ugj6HQ6DBgwQPheG37nxx9/HI899hiAhn7+U6dORVlZGVJTUxuNW3BxcTH7c8WYPn06jh07hp07dzZ5jtm5c6ewr4Fer8fSpUuxe/duyOVyDBs2DD4+PsjPz8eOHTuwb98+LFu2rFnfQ1MuXboEAHBwcGjWxeLly5fx2GOPCbHW19cjIyMD69atQ3p6Oj799FOTN04rKyvx+OOPo6qqCmFhYQgODjZ66rFq1Sps3LgRdnZ2CA4ORlhYGIqKipCQkIBDhw7h1VdfbXQDLysrCy+//DLq6urQtWtXjBo1ChUVFfjqq69w7Ngx0b8bAHzyySf45ZdfADQUWmFhYaiursaFCxfwzTffYMSIERgxYoTw99FUjpn7pGH16tVYv349ZDIZhgwZgsDAQJw7dw779+9HXFwcXnnlFcyYMcPka1NTU7Fp0yZ069YNo0aNQmlpKbKzs7Fq1SoUFxfjhRdeEPbV6XRYtGgR0tPT4ebmhrCwMHh4eKC8vByXLl3Chg0bMGnSJBYPUpo+fTpyc3ObPIHs2LEDAHDHHXcYfXGmTJmChx9+uNHA7PPnz+OFF17Apk2bMHHiRISEhLRq/GLjmDlzJkaNGoWDBw/Cy8ur2f1Dc3JyMGDAAPz8889CX/f6+nosXrwYqamp+P777/HKK68I+5eUlOD999+HTqfDCy+8gDlz5gjbMjMz8Y9//EPU5xv6tj7zzDMoKyvD3LlzbzpotaioCEePHsX3338v3BXSarX48MMPsX37dnz99df45JNPhP31ej3efPNNnD59GjNmzMBzzz0njP/QaDT48ssvsXHjRixbtkwoBm7l1KlT+Pzzz2FnZ4d3330Xt912m7Dtzz//xHvvvSfqGPz++++4du0aZs2ahUWLFhldzGs0GmRmZhrtX1xcjNdeew3V1dWYO3cunnjiCTg4OAjby8vLceHCBZOftXv3bnzxxRdN3k26mR07duCuu+7Ciy++KHyHEhMT8eqrr2LdunXw9PTE3Llz8fDDDwu/w88//4wVK1ZgzZo1oouHzZs345FHHsHjjz8OuVwOADhz5gwWLFiA+Ph45ObmGv0eXbp0wYoVKzBixAjY2f1v9mulUomPPvoIu3fvxtdff42XX34ZgPjcu9H06dOxZ88e7Nq1y2Tx8Ndff0GtVmP8+PFGd3zXrl2L/fv3IyQkBO+88w66du0qbDtw4ADeeustvP3229i8ebPogZW//fYbVqxYgZEjRwptP/74Iz7//HMsX74cQ4cOhY+Pj7Dto48+Eu7eLV682Gjbpk2b8Nlnn+HNN9/Ejz/+KPwbGOTl5SEoKAibNm1qNE4mJiYGS5cuRWFhocmxa2VlZXjnnXegUqmwYMECo5w5fvw4XnzxRWzfvh2DBw/GzJkzG/2ee/bswR133IF//vOfjZ5AGfzyyy+N3nvz5s349NNP8dlnn6Gurg6LFy/GlClThNesWLECP//8M9atW4cVK1YYvd/999+PN954o9Ed49zcXLz00kv473//i9tuu83kU4T4+HiMGTMGn3/+uXD+USgUeO6553Dy5En8+uuvePjhh4X9xX7HW+M8d/r0afz73/+GXq/HG2+8YXSckpOTsXjxYqxduxahoaEYPXo0vL298frrryMjIwOpqaltNm5h4sSJ+Oyzz5CWloaSkpJGx//UqVPCjcPrC4HffvsNu3fvhre3Nz755BOhQNbr9Vi3bh3WrVuHt99+Gxs3bjT6XjSHXq/Hjz/+CAAYN25ckzl7M7t370Z0dDTeeecdoUi4evUqnnvuOWRnZ2PdunVYuHBho9clJSUhPDwcy5cvbzT2cdu2bdi4cSO6d++OZcuWGT01z8zMxKJFi/DRRx9h6NCh6NGjB4CG64N33nkHdXV1+Nvf/oZnnnlGODecOnUKzz//PCoqKkT9bps3b8Yvv/wCLy8vvPfee0bnL6DhXGM4x8ybNw+DBw9udo6lpKRg/fr1cHR0xAcffGD09OyPP/7A+++/j48++gghISHo27dvo9f/8MMPWLRoEe666y6h7ciRI3j++efx66+/4sEHH0SnTp0AANnZ2UhPT8eAAQOwatWqRsf/+PHjLR7/wXUeTFi+fDkiIyOb/K+qqkrY9/bbb4ezs7NwArmeRqPBn3/+CQCYNm2a0bbhw4ebnNGpV69eePTRRwE0/EFvbVLFIZPJsGTJEqM//k5OTsJjvrS0NKP9t2/fjrq6OoSGhhoVDgAwbNgwoy9Ua3nxxReNHifL5XIsWLAAQMMJT6PRCNtSU1ORnZ2N/v37Y9GiRUZfXnt7ezz99NPo27cvMjIycObMGbM+/5dffoFWq8Vtt91mVDgADcVpVFSUqN+nvLwcADBmzJhGTwHs7e0bdaH46aefUF1djcjISCxcuNDoogIAfHx8Gt01NXjggQeaVTgAQGBgIJ577jmj4jsqKgpBQUGora2Fr6+v0YUaANxzzz3w9PTEpUuXzJp++XoDBw7E//t//8/oorVv377CXfwbc7NTp04IDw83KhwAwNnZGYsWLYJcLsf+/ftFxXAz4eHh6Ny5M86fP4/c3NxG2w03LK6/cFYoFNi0aRMcHR2xfPlyo8IBAG677TbMmjULVVVVwjlLjFmzZjX6w/vQQw8hODgY1dXV2LZtm9B+7tw57N27F/7+/njrrbcaXSDdd999GDt2LC5evIiUlBSTn/fiiy82a4D9tm3bUF1djYEDB+KRRx4xyplBgwbhkUceAdD0QHVPT0+89NJLN70ICwkJaZSPd999Nzw9PXH16lWMGjXK6IIYgHCuzcrKMjqPAMDYsWNNdjUJDQ3FPffcA41Gg4SEBJOxuLi4YMmSJUbnH0OxDTQ8Zbie2O94a5znfv75Z2i1WsTExDQ6TmPHjhWKutaYTMBQ0Db136ZNm4R93dzcMH78eOh0OqEr7/WaunG4ceNGAA3/5tc/WZPJZHjssccQFBSEqqoqo+9Mc61btw65ublwcXExeYFvDicnJyxatMjo6UKnTp2E7pZbt25FfX19o9fZ29vjlVdeaXThqtPpsHbtWgDAO++806i77bBhw/Doo49CrVbj999/F9oPHjyI4uJiBAYG4u9//7vR+TkoKMioCDaHRqPBt99+CwB45ZVXGp2/gIbvsqUGWRv+3e+55x6jwgEA7rzzTkRGRkKj0WDz5s0mXx8bG9voOmfkyJEYM2YMtFotMjIyhPaysjIAQFhYmMlJawYNGtTiAd188mDCraZqvf6EajiB7N69G7t27TJK4KSkJFRUVDRZSdbW1iIlJQUFBQWorKyEWq0G0NCdBECTd3EtTYo4AgMDhb6Y1zOMdTD0Gzcw3AWfPHmyyfebPHmy8OVsDXK53ORjZD8/P3h4eKCqqgqVlZXCBU1SUhIAYPz48SYHqNnZ2WHYsGE4c+YMcnJyTObHjQz9La/vinK9qVOnNnkRYYqhf+UXX3wBvV6P0aNHw9XVtcn9U1NTATRcKIp1Y7EjxogRI0w+Fu/evTtOnTqFiIgIk8VPly5doFAocO3aNVFdJiIjI012qTLk5o03CQxycnKQlZWF4uJiKJVK6PV6AA3ni4qKCigUihY9JjaQyWSYOnUqvvnmG+zcudOoKCsoKMDJkyfh5+dn9Fj9yJEjqK+vR3h4eJP93IcPH45ff/0Vubm5Rn3IzTF16lST7VOmTMGJEydw9OhR4cI8OTkZer0eERERTc7GNnz4cCQnJyMnJ6fRuCofHx8MGzZMVHwGhu9QU/HOmDEDq1atwsWLF03eTQ4PD7/lRBC3ykdT5xEvLy94enpCoVAYnUcMKisrkZSUhDNnzqCqqkooMAxdUpo6RwcHB5ssPJrKZbHf8dY8z914w81gxowZ2LJlC7KysqDVahs9mWqJW03VeuNYvGnTppn826/RaPDXX38J8RpcvXoVhYWFwmtvJJPJMG3aNHz22WfIyMgQvjPNsWvXLnzzzTews7PDkiVLhDv4Yo0ePdpkoR4ZGQkvLy9UVlaioKCgUXfVAQMGmLyOKigowLVr19CtWzcEBweb/EzDk9icnByhzZAXEyZMMJlrU6dOxWeffWb275Wfn4+Kigp4e3s36lZpaRqNBtnZ2QBunteHDh0yKgKud+N50KBXr15ISUkx+i4PGDAAcrkc27dvR48ePRAbG2vxSUlYPJggdqrWGTNmYPfu3di5c6fRCcRw58FUsiQmJmL58uWorKxs8n1rampERN08UsXRVDVvuJhQqVRG7VevXgWAJge+tcaAuOv5+/s3OUuFm5sbqqqqjGK+fPkyAGDNmjVYs2bNTd/b3Eetlj4GU6ZMQVpaGv766y+89tprkMvl6N27N4YOHYrbbrut0Z0Ywx385swA1JJ/n6ZyxdCXuanthkLoxlxq7uc19X7l5eVYsmSJ8MehKbW1tRYpHoCGpwrffvst9u3bh+eff14orgx9rKdOnWp0UWXIx/T09Cb/CBkYnkiJceOTjBvbr//DZohl+/bt2L59+03f19R3oyW5ZIijqXg9PDyEi3hTxYM5n32r/GmqkHV1dYVCoWiUX9u2bRO6OzWlqXO02POs2O94a5znbvVvZLggValUUCgULe7acz2x3VFGjBiBrl274sKFC8jJyREuoA8dOiTcOLy+4DD8bl5eXk0Wzobf78YbaGLs379fGJv16quvYsKECc1+r5vlfOfOnVFZWSn8bbpxmymGnCksLLzluej6nDEcu6bi8fT0hLu7O6qrq2/6ngaGXO/Zs6fZ4++a6/rvdVPxG/7dm7o51dTxNPVd7t69O5599ll8/vnn+M9//oP//Oc/6Ny5szDmcMKECY2eKorF4sEChg0bJgxcNZxAysvLkZycDEdHx0Z9rktKSvDmm2+ivr4eDz30ECZPnowuXbrAxcUFdnZ2SE1NxUsvvWTRGA13QaWOw+DGLh4t1dpffrHvbzjet3qKBUD0DDeWYmdnh7feegsPP/wwkpKSkJOTg5ycHGzduhVbt25FZGQk/vWvf1nkzl5TM5GZG+fNWPrfXuz7/etf/0J2djZCQ0Px+OOPo3///vDw8BCKzZkzZ6K0tNTkd7C5unTpghEjRuDIkSOIi4vD5MmTodFohAGtN96wMHx29+7dTQ5ov15rTA97/e+u0+kANAxOvNkMYQBMjvlqSS61lDmffav8EZNfJ06cwIcffgg7Ozs8/fTTiIyMRGBgIJydnSGTyfD777/jww8/bDK3Wvu8aAvnudZkeFLw9ddfY+fOncJ3y1TXwbZy8OBBvP3229DpdDcdgGtJpvKvqe+K4fvv5+fXqPvOjTrClKfmEvtdnjNnDm6//XYkJiYiKysL2dnZ2Lt3L/bu3Yu1a9fi888/b9HTCBYPFmA4gaxZswY7duzAkCFD8Oeff0Kr1WLChAmNBh8mJiaivr4eMTExePrppxu9n+FRtLkMFym1tbUmt2s0GpN3MSwdR2syzArTVP/1K1eutHFEN2cYuBQdHW1yutPmCAgIQGFhIYqKikw+/hfbt9+gT58+wh92vV6PI0eO4O2338ahQ4ewe/du4Q9gYGAgLly4gPPnz3MO/f9TV1eHlJQU2NnZ4d///nej73pdXZ3Q/9TSpk+fjiNHjmDHjh2YPHkyEhMTUVFRgSFDhjQqAAz52LNnz1YZTHr58mWTMyMZvpfX38E3xDJkyBBhEHlbCQgIwPnz54W7nzeqrq6GQqEQ9pXagQMHoNfrMXv2bDz00EONtl+8eNGinyf2O96a57nCwkKT5znDv52jo6NVrJg8depUrFu3Dvv27cMLL7yAmpoapKSkwMnJqdGNQ0NOVVZWoqamxuTTB8Pv15wLu/j4eLz11lvQ6XT4xz/+YXLQv1g3+9tq+JtjyANzGJ6GeXp6ijoXGY5HU/FUVVWZ/dTh+jguXLgAvV7fqoW2p6cnHB0doVKpcPnyZZM3TQz/7pY87/j6+mLmzJlCHpw/fx7Lly9Hbm4uvvjiC7zxxhvNfm8OmLaQadOmwc7ODvv374dSqbzpnQfDHydTj6H0er1w99BcPj4+cHBwgEKhMNnlIDU1FVqt1mJxGIoVU+/ZWgx9nJuKSewxM2it38XQr9nwx98Shg8fDgBCX9obmRq0J5ZMJkN4eDgmTZoEoGHaUgNDX2BLDORrL6qrq6HVauHm5mbyQubPP/9s8t+/pbk3fvx4uLu7IyMjA8XFxUKXJVPdJA3TbR49erRZ3ZJupalB1ob262eTMkxZfejQIZMDLVuT4TvU1HfF0I2qR48eVlE83OwcXV9fj7i4OIt+ntjveGue5wz5fCPD39awsDCrWPCsc+fOGDlyJGpqanDw4EHhxmFsbGyj8TGdOnUSntCY+v30er3QLmYGNqDhZuAbb7wBrVaLf/zjHxabROTw4cMmzxlJSUmorKyEq6srBg4caPb7DRo0CN7e3jh37pzZg+iB/+XF/v37G00qADTMCiWGYXroiooKxMfHm/UaQ1cfsedse3t7DB06FEDTeW0494j9dxejV69ewmQJp06datF7sXiwkE6dOmHUqFGoqanBl19+iTNnziAwMNDkCH5DH8iDBw8aPRHQarVYs2aN0SAhcxjmvwca5hE2PBYEGi7+/vOf/5h8XXPjMBQrZWVlwh+31jZjxgw4OzsjOzu70UJ42dnZ+PXXX5v1voY7JmfPnm1xjNeLjo7GoEGDkJeXh2XLlpk8+SoUCmzdutXkidCUe++9V5i558aLhr1794oaLA00XECdOHGiUXtNTY0wOO36i5b7778frq6uSExMxOrVqxvFXV5ejqysLFEx2DpfX19hwPyNf7xyc3Px5ZdfNvnaluaek5MTbr/9duh0OmGhP2dnZ5OLnPn6+mL27Nmoq6vDK6+8gtOnTzfaR6VSISEhAefPnxcdy9atWxsN9Pvpp5+Ql5cHV1dXo64TAwYMwPjx41FcXIwlS5aYvJNYV1eHP//80+JPbWbOnAk3Nzfk5+dj/fr1Rhe8BQUFWL9+PQBY7C56SxmeIO3atctoXEN9fT0+/vjjJp+gNJfY73hrnOf+9re/QS6XIyEhoVFRmpqaKszA88ADD4j99VqNIb937tx5yy5Lhri//fZbo5szer1eaPPw8BD11CApKQmvv/46tFpto+k8W6q+vh4fffSRUaFfUlIiLPZ21113iepKaG9vj/nz50Ov12PJkiUm/2ZotVocOXLEaDY5w3TExcXF+PLLL42uc86cOSPMnCQmDsMY1Q8//LDR1ORAw5Sm14/nMNxQuHjxotn5bGBYh2nr1q2NZjnbsWMHEhMTYW9v32g2yeY4cuQIkpKSGsWo1+uFBUdbOouU9GW7Fdq+fbtw8WTK6NGjTc76M23aNKSmpgpTbRmeRtwoMjISAwcORH5+Pu6//34MHz4czs7OyMvLw7Vr1zB37lz88MMPomJesGABsrKysG3bNhw9ehRBQUEoKSnBiRMnMGnSJBw9erRRt5bmxmFvb4+oqCgcOHAAjz76KIYOHSqcPJpaXKmlOnXqhFdeeQVLly7Ff/7zH2zbtg19+vTBtWvXkJWVhfvuuw8bN24UfSdq/Pjx2LFjBz7//HOkp6cLg+9mzJhxy77hN2NnZ4d//etfWLRoEXbt2oWDBw8iKCgIgYGBUKvVuHz5Ms6cOQOtVotp06aZFfeAAQPw5JNP4vPPP8eSJUuEReIuXbqE48eP47777jOaSvBW4uLisHTpUvj7+wv99KuqqpCTk4Pq6mr07dtXWMEZaCgkli5ditdffx3r16/HH3/8gdDQUNjb26OoqAgFBQWYNGlSk9O1tkdyuRzz58/HZ599hvfeew9btmxBt27dUFxcjJycHNxxxx3IzMw02aXMErk3ffp0/P7770LxPGnSpCYHYj711FO4du0a9uzZg0cffRRBQUHo2rUr5HI5SkpKcPLkSdTV1eHjjz8WPe5h1qxZeO655xAWFoaAgACcOXMGp0+fhlwubzQlM9CwinZ1dTVSUlLwwAMPICgoSBhIeOXKFZw6dQpqtRobNmwwWkCvpXx9ffHWW2/h9ddfx+rVq/Hnn39iwIABKC8vx9GjR6HVajF9+nSLdPewhOnTp2Pz5s0oKCjAnDlzEBYWBjs7O2RlZaG+vh5z5sxpcmrH5hD7HW+N81y/fv3w8ssv49///jfeffddbNq0CT179hS+U3q9Ho8//vhNZ0VqrvPnz2Pp0qVNbnd2dja5plB0dDQ8PDyEi8IuXbqYvHEINFxsGxbqe/zxxzF8+HBhkbgLFy7AycnJ5BTGTSkvL8drr70GtVqNTp06CWPXTHnmmWdEjyOYMmUKkpKShPwzLBJnmDr9iSeeEPV+ADB79mwUFxdjw4YNePrpp9GnTx90794dTk5OKC0txalTp1BVVYV//OMfwmxyhuPyj3/8Axs3bkR8fDwGDRqEyspKYYrd/Px8Ud13//a3v+HChQv47bff8Pe//x0DBgxAz549UVNTI3RvXLlypXCjp3PnzggODsaJEycwb948BAcHw9HREd7e3recCnfs2LF45JFHsH79erzwwgvCInEXLlxAfn4+5HI5Fi1aZNaMZLdy6tQpfPbZZ3Bzc8OAAQPg7++P+vp6FBQUoKioCO7u7s36d7seiwcTsrOzbzpziru7u8niISYmRpipwzAOwhR7e3usWrUK33//PQ4ePCisAjhkyBAsW7YMtbW1oouHwYMHY9WqVVi7di2OHTuGpKQk9OjRAy+88ALuuusuk1MvtiSOV155BZ6enkhJScGBAweECre1igegYYrSTp06Yf369cjLy8OlS5fQq1cvvPrqqxg1ahQ2btwoeu7icePG4dVXX8Vvv/2GI0eOQKlUAmgYANiS4gFouEuxevVq7Ny5E/v27cPp06eRl5cHT09P+Pv7Y9asWYiKihJ11+ahhx5Cz549sWHDBpw8eRJnz55FUFAQli5diuDgYFHFw/33348uXbogNzcXBQUFwlSivXv3xqRJkzB9+vRGq7OOGTMG33//PX766SekpqYiJSUFcrkc/v7+mDJlilGx0VHcd9996Nq1K3788UecO3cOZ8+eRa9evfDyyy83+d0DLJN7gwcPRp8+fYSnFzcboGlvb4+3334bd9xxB/744w/k5eXhzJkzcHFxgZ+fHyIjIxEVFdWsaVCff/559OzZE7///juOHz8Oe3t7RERE4NFHHzX5u7i5ueGTTz7Bvn378OeffyI/Px8nT56Em5sb/Pz8MHnyZERFRd1yEG5zREZG4ptvvsEPP/yAI0eO4MCBA3BxcUFYWBhmzZolelHB1uTh4YG1a9di7dq1wvfN09MTo0ePxmOPPdYqT/rEfsdb4zw3a9YsBAUFYePGjcjOzsapU6fg7u6OsWPHYs6cObccaNtcZWVlN+3+6e7ubrJ4cHJywqRJk4QifurUqU32oZfJZHjjjTcQERGB33//Hfn5+airq4Ofnx+mTZuGuXPniirelUqlMNPO1atXbxr/448/Lrp46Nq1K9auXYvVq1fjyJEjqKqqQmBgICZNmoS5c+c2ewKDv//974iJicGvv/6K7OxspKamwt7eHn5+fhg+fDjGjRvXaArV4cOHY82aNfj6669x9OhRxMfHo2vXrnjiiSfwwAMP4L777hMVg0wmw6JFixAdHY3ffvsNx44dw5kzZ+Du7o6uXbti6tSpjcYnLFu2DF9++SUyMjKwb98+aLVadO7c2ax1NBYsWIChQ4fil19+QV5eHo4dOwZvb29MmDABDzzwgMUWBY6MjERNTQ2ysrJw8eJFHDt2DE5OTggMDMTcuXNx7733ihqnYopMqVRabgoQIons2rULS5cuRWRkJD788EOpwyHqEAxTLRoehRNR+7B27VqsW7cOjz32mLB4K5EBxzyQzSgqKhIWrrtedna20P9SiqnxiIiIiDoKdlsim3HkyBG8//77Qp9aOzs7FBYWCrMGTJ8+vdVXiiQiIiLqyFg8kM0IDQ3FtGnTkJWVhaNHj6Kurg4eHh4IDw/HjBkzhOlFiYiIiKh1cMwDERERERGZhWMeiIiIiIjILCweiIiIiIjILB1+zINOp8OVK1fg7u7e5LzMRERERES2Rq/Xo7q6Gl26dDG5cHFzdPji4cqVK+jXr5/UYRARERERtYrTp09bbOHNDl88uLu7A2g4qB4eHhJHQ7agrKwMvr6+UodBNoC5QmIwX8hczBUyV1VVFfr16ydc71pChy8eDF2VPDw84OnpKXE0ZCuYK2Qu5gqJwXwhczFXSAxLds3ngGkikVxdXaUOgWwEc4XEYL6QuZgrJCUWD0QiVVZWSh0C2QjmConBfCFzMVdISiweiIiIiIjILCweiERyc3OTOgSyEcwVEoP5QuZirpCUWDwQiaTT6aQOgWwEc4XEYL6QuZgrJCUWD0Qi1dXVSR0C2QjmConBfCFzMVdISiweiIiIiIjILCweiETy8fGROgSyEcwVEoP5QuZirpCUWDwQiaRQKKQOgWwEc4XEYL6QuZgrJCUWD0QiabVaqUMgG8FcITGYL2Qu5gpJicUDkUj29vZSh0A2grlCYjBfyFzMFZKSVRUPmZmZeOWVVzBz5kxERkYiPj7+lq/JyMjA/PnzMX78ePztb3/Djh072iBS6sjc3d2lDoFsBHOFxGC+kLmYKyQlqypd6+rqEBQUhOnTp2PJkiW33P/y5ctYtGgR7rrrLrz11ltIT0/HBx98AH9/f4wZM6YNIqaOqKKiAn5+flKHQTaAuUJiMF/IXMyV1qfVa6BUl0sdRovVqKos/p5WVTyMHTsWY8eONXv/3377DV26dMGzzz4LAOjduzeys7OxadMmFg9EREREJFpW8QbsOPkilNpKqUNpGb0M9dUyi7+tVXVbEis3Nxfh4eFGbWPGjEFubm6Tr1GpVKipqTH6j0gMV1dXqUMgG8FcITGYL2Qu5krr0eo1tl846AF5aTCcsxbCvijC4m9vVU8exCorK4Ovr69Rm4+PD2pqalBfXw8nJ6dGr/n++++xbt064WfDjAVlZWVQq9Xw8fGBQqGAVquFvb093N3dUVFRAeB/X9ba2loAgLe3N6qrq6HRaCCXy+Hp6Yny8nJhX5lMJhQn3t7eqKmpgVqtbrSvi4sL7OzshH29vLxQW1sLtVoNOzs7eHt7o6ysDADg7OwMe3t7VFdXAwA8PT2hVCqhUqkgk8ng6+uL0tJSAICTkxMcHR1RVdXwyMrDwwMqlQr19fUAAD8/P5SVlUGv18PR0RHOzs7C9G/u7u7QaDRQKpXCvuXl5dDpdHB0dISLiwsqKyuFfbVarbDipa+vLyorK6HVauHg4ABXV1dhXzc3N+h0OmFfSx3vmx1DuVwOLy8v4Ri6uLhALpcLx9DLywt1dXVQqVSws7ODj4+PcAxNHe+6ujrU1ta26vH29fVFRUUFdDqdqGPo4OAANzc34Ri6ublBr9cLx7AtjndLctbZ2RkODg4mj6FhX8MxNHW81Wq1Uc629vG+1TF0dnZGZWUlzxEd7BzR3OPt6urKc8QtjmF7O0c09zrC8PvxHGH5nC2pvGC7hYMesKvsC4cLt0Ne0w0A4KAYAyDJoh8jUyqVeou+o4VERkbiX//6F2JiYprc5/7778e0adPw8MMPC21JSUlYtGgR9u/fb7J4UKlUUKvVws8KhQJ9+vTB1atX4enpadlfgtql0tJS9jUlszBXSAzmC5mLudJ6alQl+CC5h9RhiGZX1b2haFD0MWqvdTiFD1/7waLXuTb95MFwh+F65eXlcHNzM1k4AICjoyMcHR2FnzlXMhERERE15dnwTLg6WG+xlpGWjUO5aUZtfv6+GBs1En5+c/Dhaz9Y9PNsungIDQ1FcnKyUVtaWhpCQ0Mliog6Am9vb6lDIBvBXCExmC9kLuZK23J18IObY4DUYTRp0MAwJCWmQ6/Xw8fHB9HR0Rg0aBBkMlmrrEZuVQOma2trUVBQgIKCAgANU7EWFBSgqKgIAPDFF1/gvffeE/a/6667cPnyZfz3v//F+fPn8euvv2L//v247777JImfOgZDP1GiW2GukBjMFzIXc6XjqqqqQmFhoVGbv78/IiIiMGXKFDzxxBMICQmBTGb5WZYMrOrJw4kTJ4RpVwFg5cqVAICpU6fi9ddfR2lpKYqLi4XtXbt2xUcffYTPPvsMmzdvRkBAAF599VVO00qtSqPRSB0C2QjmConBfCFzMVc6nrq6OqSkpODIkSNwc3PDggULIJfLhe2xsbFtFotVFQ8jRozAoUOHmtz++uuvm3zNt99+24pRERm7/stKdDPMFRKD+ULmYq7cXEsWeKtVl1o4mpZRqVRIS0vD4cOHhVmuKisrkZWVhREjRkgSk1UVD0S2gLNykbmYKyQG84XMxVxpWntZ4E2j0eDo0aNITk4WppoFGgrHESNGIDg4WLLYWDwQiVReXs4p8sgszBUSg/lC5mKumNYeFnjT6XTIycnBoUOHjAY7y2QyDB06FJGRkZIXjyweiIiIiMjmKdXlFi8cnOVecHbwseh73kx8fDxSUlKM2gYNGoTo6OhGCyNLhcUDkUguLi5Sh0A2grlCYjBfyFzMlbbhLPfC9P6fQC5ru8vlYcOG4fDhw9DpdOjXrx9iYmIQGBjYZp9vDhYPRCLZ2VnVDMdkxZgrJAbzhczFXDFfSxZ4c3bwadXC4dKlS6irq0P//v2FNm9vb0ycOBEBAQHo0cM6V7pm8UAkUk1NDZydnaUOg2wAc4XEYL6QuZgr5rPGBd6Ki4sRHx+P06dPw93dHb1794aDg4OwXapZlMzF4oGIiIiIqJWVlZUhISEBx48fF9qqq6uRk5Nj9QXD9Vg8EInk5eUldQhkI5grJAbzhczFXLEtCoUChw4dQnZ2NvR6vdDu6emJyMhIDBkyRMLoxGPxQCRSbW2t5NOkkW1grpAYzBcylzXnSksWaGspa1vgrba2FsnJycjIyIBWqxXaXVxcMG7cOAwfPhz29rZ3KW57ERNJTK1WSx0C2QjmConBfCFzWWuutJcF2iwlOTkZaWlpws9OTk4YPXo0wsPD4eTkJGFkLcPigUgkuVwudQhkI5grJAbzhcxljbnSHhZos7QxY8YgMzMTer0eI0eORERERLuYZpfFA5FI7GtK5mKukBjMFzKXNeZKayzQ1lJttcCbTqdDdnY2ZDIZwsLChHZ3d3fceeed6NKlCzw8PFo9jrbC4oFIpLKyMvj5NW/OaOpYmCskBvOFzMVcubW2WOBNr9fj+PHjSEhIQHl5OZydnTFw4ECjaXQHDBjQap8vFRYPRERERGRxLVmgraVac4E3vV6P06dPIz4+HlevXhXalUolTp06hdDQ0Fb5XGvB4oFIpPbQX5HaBnOFxGC+kLlsJVescYG2lrpw4QLi4+Nx6dIlo/aePXsiNjYW3bp1kyiytsPigUgkaxyoRtaJuUJiMF/IXMyVtldUVIT4+HicOXPGqL1z586IjY1F7969IZPJJIqubbF4IBKpurrapqdYo7bDXCExbDFfpJzTvyMrqyiHr0/rDwQWw9rWWLC0EydOGBUOfn5+iImJwYABAzpM0WDA4oGIiIhE45z+1JFERETg6NGjcHJyQnR0NAYPHgw7Ozupw5IEiwcikaxxijyyTswVEsOW8oVz+lN7VVNTg+TkZLi6umLcuHFCu7OzMx544AH4+/vb5KrQltSxf3uiZqirq2tX8zVT62GukBi2lC/WOKc/WZe2WmPBUpRKJQ4fPoy0tDSo1Wo4ODggLCwMbm5uwj6dO3eWMELrweKBSCSVSiV1CGQjmCskBvOF2ou2WGPBUtRqNTIyMpCcnAylUim06/V6XLlyBUFBQRJGZ52s/1+VyMp01D6OJB5zhcSw9XyRck7/jqaisgLeXt5Sh9Gk1lxjwVK0Wi2ys7Nx6NAhVFdXC+12dnYYNmwYxo0bB3d3dwkjtF7W/S9LZIV8rGyGC7JezBUSw9bzpT3O6W+t3AJ4nFvixIkTOHjwICoqKozaBw8ejOjoaHh7e0sSl61g8UAkUmlpKfz8eHeNbo25QmIwX8hczJWWKSsrMyoc+vfvj5iYGASwKDMLiwciIiIiard0Op1Rt8Dw8HCkp6cjICAAsbGx6Nq1q4TR2R4WD0QiOTs7Sx0C2QjmCjXF1OJqOnktalQ6iSISp70vCGbteG4xz5UrVxAXF4eAgADcfvvtQrujoyMee+wxjmloJhYPRCJ19PmdyXzMFTKFi6tRS/HccnPXrl1DfHw8CgoKAAAXL17EqFGj4OnpKezDwqH5mH1EIlVXV8PJyUnqMMgGMFfoRlxcjSyB5xbTKioqkJiYiGPHjkGv1wvt7u7uUCgURsUDNR+LByIiojbSXhdXs7UFwah9qa6uRlJSEjIzM6HT/a/rn5ubGyIjIxEWFga5XC5hhO0LiwcikXjngszFXKGOwJYWBGsveG75n9TUVCQmJkKtVgttzs7OiIiIwIgRI+Do6ChhdO0Tv+lEIimVSjg4OEgdBtkA5gqZw7C4WnV1tU32w7aFBcHaG55b/kculwuFg4ODA0aNGoXRo0dzUHkr4redSCSVSiV1CGQjmCtkDsPiakq9HdwcOXc/3VpHPbdotVqo1WqjwmDYsGE4cuQI+vbti3HjxsHNzU3CCDsGFg9EIslkMqlDIBvBXCExmC9kro6WKzqdDseOHUNiYiJ69+6NqVOnCtvs7e3xxBNPcExDG2LxQCSSr6+v1CGQjWCukBjMFzJXR8kVvV6PgoICJCQk4Nq1awCA7OxsjBkzxugYsHBoWyweiEQqLS2Fnx+7FtCtMVdIDOYLmasj5Mq5c+cQFxeHK1euGLX37t3baBpWanssHoiIiIjIKhQWFiI+Ph7nz583au/WrRtiYmLQq1cviSIjAxYPRCJxYR4yF3OFxGC+kLnaa6789ddfyMjIMGoLCAhATEwMgoKCOtxYD2vF4oFIJM4ZTeZirpAYzBcyV3vNlcDAQOH/vb29ER0djZCQEBYNVobFA5FIVVVV7b6vKVkGc4XEYL6QudpDrlRXV0Ov18PDw0NoGzJkCPLy8hAcHIyhQ4dyILSVYvFARERERG2irq4OqampSE9Px4ABAzBz5kxhm52dHR544AEJoyNzsHggEun6uyREN8NcITGYL2QuW8wVlUqF9PR0pKamor6+HgCQl5eHMWPGGHVXIuvH4oFIJJVK1W77m5JlMVdIDOYLmcuWckWj0SAzMxNJSUmora0V2uVyOUaMGGGThVBHx+KBSKT6+nq4u7tLHQbZAGvLFa1eA6W6XOowOrRadWmT26wtX8h62UKu6HQ65ObmIjExEQqFQmiXyWQYOnQoIiMj4enpKWGE1FwsHoiIOoCs4g3YcfJFKLWVUodCRB3Ar7/+ilOnThm1BQcHIzo62uYHe3d0LB6IROJJj8xlLbmi1WtYONgAa8kXsn62kCuDBw8Wioe+ffsiJiYGnTt3ljgqsgQWD0QilZWVwdfXV+owyAZYS64o1eUsHKyUs9wLzg4+AKwnX8j6WVuuXLp0CS4uLkZFTXBwMM6dO4fQ0FD06NFDwujI0lg8EImk1+ulDoFsBHOFbsZZ7oXp/T+BXNbwp5j5Quaylly5evUq4uLicPr0aQQFBWH27NnCNplMhqlTp0oYHbUWFg9EItnKDBckPWvOlWfDM+HqYP1dH9ozZwcfoXAArDtfyLpInSvl5eVISEhAXl6e0Hbq1ClcvnwZXbt2lTAyagssHohEcnZ2ljoEshHWnCuuDn5wcwyQOgy6jjXnC1kXqXKlqqoKhw4dQlZWltHTDw8PD0RFRXFMQwfB4oFIJIVCYROD1Uh6zBUSg/lC5mrrXKmrq0NycjIyMjKg0WiEdhcXF4wdOxYjRoyAvT0vKTsK/ksTERERUZN+/fVXXLx4UfjZ0dERo0ePxqhRo+Dk5CRhZCQFFg9EIln7wjxkPa7PFSkXaLvZwmRkPXhuIXO1da5ERETg4sWLsLe3x4gRIxAREQFXV9c2jYGsB4sHIpE0Gg3vtJBZDLnCBdrIHDy3kLlaK1d0Oh1ycnLQqVMndOnSRWjv27cvxo8fj8GDB8PDw8Pin0u2hcUDkUhKpRJubm5Sh0E2QKlUwtnViYUDmYXnFjKXpXNFr9fjxIkTiI+PR3l5OXr27IkHHngAMpkMQMO0qxERERb7PLJtLB6IiFqRNS7Qdv3CZETUcen1epw5cwZxcXG4evWq0H7hwgUUFxdz9iQyicUDkUjWtKonWTdfX1/Uqq9JHYaRGxcmI+vBcwuZyxK5cvHiRcTFxeHSpUtG7T169EBsbCwLB2oS/3oQiVRRUQEfH961pVurqKiAo4meBVIu0HbjwmRkPXhuIXO1JFeKiooQHx+PM2fOGLV37twZsbGx6N27t9BdicgU/gUhEkmn00kdAtmIpnKFC7SRKTy3kLlakit79+41etrg5+eH6OhoDBw4kEUDmYXFA5FIDg4OUodANqIhV+qlDoNsBM8tZK6W5EpMTAw2bNgAT09PREVFITQ0FHZ2dhaMjto7Fg9EInFuazKXq6sr6nUsHsg8PLeQuczJldraWiQnJ6Nfv37o3bu30N6zZ0/cfffd6NevH1eFpmZh1hCJVFlZCT8/afqrkzSau8BbWXk5nN3ZFYXMw3MLmetmuVJfX4/Dhw8jLS0NKpUKly5dQq9evYy6JA0cOLCtQqV2iMUDEdFNcIE3IrIFarUaGRkZSE5OhlKpFNpLSkpQWloKf39/CaOj9oTFA5FIXMSp49DqNSwcqM3w3ELmuj5XtFotsrOzcejQIVRXVwvtdnZ2GDZsGMaNGwd3d3cpwqR2yuqKhy1btmDDhg0oKytDUFAQXnzxRYSEhDS5/6ZNm7B161YUFxfD29sb48ePx1NPPdUqy7YTAZwRpSNpjQXeuEAbNYXnFjKXTqeDXq9HXl4eEhISUFFRYbR98ODBiI6Ohre3tyTxUftmVcXD3r17sXLlSixatAghISH4+eef8dJLL2Hjxo0m5zP+66+/8OWXX2Lx4sUYMmQILly4gGXLlkEmk+G5556T4DegjqCuro4DG6lZuEAb3QzPLWQuQ67k5uYaFQ79+/dHTEwMAgI4FTS1Hqv6C7Zp0ybceeedmD59OgBg0aJFSEpKwvbt2zFv3rxG++fk5GDIkCGYPHkyAKBLly6YNGkS8vLymvwMlUoFtVot/FxTU2Ph34KI2jMxC7yVlZfD97obH1ygjYhaQq/XGw18jo2NxdmzZ9GrVy/Exsaia9euEkZHHYXV/BVTq9XIz883KhLs7OwQHh6O3Nxck68ZMmQI/vrrL+Tl5SEkJASFhYVITk7GHXfc0eTnfP/991i3bp3ws1arBQCUlZVBrVbDx8cHCoUCWq0W9vb2cHd3F6p6wx2h2tpaAIC3tzeqq6uh0Wggl8vh6emJ8vJyYV+ZTCYUJ97e3qipqYFarW60r4uLC+zs7IR9vby8UFtbC7VaDTs7O3h7e6OsrAwA4OzsDHt7e6Ffo6enJ5RKJVQqFWQyGXx9fVFaWgoAcHJygqOjI6qqqgAAHh4eUKlUqK9vmDrSz88PZWVl0Ov1cHR0hLOzMxQKBQDA3d0dGo1GGHTl5+eH8vJy6HQ6ODo6wsXFBZWVlcK+Wq0WdXV1AABfX19UVlZCq9XCwcEBrq6uwr5ubm7Q6XTCvpY63jc7hnK5HF5eXsIxdHFxgVwuF46hl5cX6urqoFKpYGdnBx8fH+EYmjreDg4OKC0tbdXj7evri4qKCuh0OlHH0MHBAW5ubsIxdHNzg16vF45hWxzvluSss7MzHBwcTB5Dw76GY2jqeKvVaqOcbenxrtU0nmHJWe4Dda2DWcewi19f1NTUCMfQxdMOpWWlwr48R7TPc0Rzj7ePjw/PEbc4htZ2jmjNnL3+HFFSUoLMzEwEBwejR48esLOzE2K555570L17d9jZ2QnHiecIy+astZwjmpOzhuNmSTKlUqm3+Ls2Q0lJCe666y589dVXCA0NFdr/+9//IjMzE2vWrDH5us2bN2PVqlXQ6/XQarW46667sGjRoiY/58YnDwqFAn369MHVq1fh6elpuV+I2q2Kigr2I+0galQl+CC5h1Hbq2Mvmr06NHOFxGC+0I2uXbuGhIQE5OfnAwD8/f3x2GOPQaFQMFfILAqFAp06dbLoda7VPHlojoyMDHz33Xd4+eWXMXjwYFy6dAkrVqzAN998g/nz55t8jaOjIxwdHYWfDU8eiMzFnCFzMVdIDOYLGVRWViIxMRG5ubnQ6/93j1etVgt32YmkYjXFg7e3N+RyufBIyKCsrAy+vr4mX7NmzRrccccdmDlzJgCgX79+UCqV+OCDD/DII49wuXVqFQ4ODlKHYJbmLmxG/1OrLm3R620lV8g6MF+opqYGSUlJyMzMNCoQ3NzcEBkZibCwMMjlcqGrCpEUrKZ4cHBwwMCBA5Geno6YmBgADVORHTlyBPfee6/J19TX1zcqEAw/X1+pE1mSLczFzoXNrIMt5ApZD+ZLx6XX65GQkIC0tDSjrtVOTk6IiIjAyJEjjXpNMFdISlZTPADAfffdh2XLliE4OFiYqlWpVAqzL7333nvw9/fHwoULAQCRkZH46aefMGDAAISEhODSpUtYs2YNIiMjIZfLpfxVqB2rqKiAn595s+1IgQubWQ9rzxWyLsyXjksmk+HatWtC4eDg4IDw8HCMGTMGzs7OjfZnrpCUrKp4mDhxIioqKvD111+jrKwM/fv3x8cffyx0WyouLjaaouyRRx6BTCbD6tWrUVJSAh8fH0RGRmLBggVS/QpEkmuNhc2oARd4IyJL0Gq1kMlkRr0nYmJicObMGYSFhWHs2LFcFZqsltXMtiSV1hiFTu2bUqk0eSfIWpiaIYhazrDAW1jgg2a/xtpzhawL86X90+l0wqrQ0dHRRrNLAg2Lv7m4uNzyfZgrZC7OtkRkBWxxPI2Yhc3ItOYs8GaLuULSYb60X3q9HidPnkR8fDyuXbsGAEhMTMSgQYOMulmbUzgY3o9IKiweiESqra01+wRvLVwd/Mxem4AsxxZzhaTDfGmfzp07h7i4OFy5csWo3cfHB3V1dc3qnsRcISmxeCAiIiKysMuXLyMuLg7nz583au/WrRtiYmLQq1cviSIjahkWD0Qi+fhwwCyZh7lCYjBf2gedTofff/9dWBXaICAgADExMQgKCjKa/KU5mCskJRYPRCIpFAp4e3tLHQbZAOYKicF8aR/s7OyMFvzz9vZGdHQ0QkJCWlw0GDBXSEosHohEun7VT6KbYa6QGMwX21RdXQ0XFxejgc9RUVG4ePEiIiIiMHToUIuvPcVcISmxeCASyd6eXxsyD3OFxGC+2Ja6ujqkpqYiPT0dt99+O4YPHy5s8/b2xpNPPmm0joMlMVdISsw+IpG4cA+Zi7lCYjBfbINKpUJ6ejpSU1NRX18PADh06BBCQ0ONuiu1VuEAMFdIWiweiESqqKiAnx/XTKBbY66QGMwX66bRaJCZmYmkpCTU1tYK7XK5HMHBwdDpdG0WC3OFpMTigYiIiKgJOp0Oubm5SExMhEKhENplMhmGDh2KcePGwcvLS8IIidoWiwcikVxdXaUOgWwEc4XEYL5YH61Wi/Xr1+Pq1atG7cHBwYiOjpbs7j9zhaTE4oGIiIjIBLlcju7duwvFQ9++fRETE4POnTtLHBmRdFg8EIlUW1sLFxcXqcMgG8BcITGYL9IrLCxEp06djAY+jxs3DmVlZYiMjESPHj0kjO5/mCskJRYPRERE1KFdvXoVcXFxOH36NG677TaMGTNG2Obu7o77779fwuiIrAuLByKRuKonmYu5QmIwX9peeXk5EhISkJeXJ7SlpKRg2LBhcHJykjCym2OukJRYPBCJVF1dzZk1yCzMFRKD+dJ2qqqqcOjQIWRnZxtNserh4YGoqCirX4SNuUJSsu5vB5EV0mg0UodANoK5QmIwX1pfXV0dkpOTkZGRYXS8XVxcMHbsWIwYMcLqCweAuULSsv5vCJGVkcvlUodANoK5QmIwX1qXRqPB119/jZqaGqHN0dERo0ePxqhRo6y6m9KNmCskJRYPRCJ5enpKHQLZCOYKicF8aV329vYYMmQIUlJSIJfLMXLkSERERNjkmgnMFZISiwcikcrLyyVbGIhsC3OFxGC+WI5hVeiBAwcaPVEYM2YMVCoVIiIibPoCnLlCUmLxQERERO2CXq/HiRMnkJCQgLKyMigUCkRFRQnbXVxcMHnyZAkjJLJ9LSoeVCoVCgoKUF5ejiFDhnDqMOoQuDAPmYu5QmIwX5pPr9fjzJkziI+PR3FxsdB++PBhmxvPYA7mCkmp2cXD5s2bsXbtWmHg0aeffoqRI0eioqICDz74IJ5++mnMmDHDYoESWQs7OzupQyAbwVwhMZgvzXPx4kXExcXh0qVLRu09evRAbGxsuyscAOYKSatZxcOOHTuwYsUKTJw4EaNGjcK//vUvYZu3tzdGjhyJvXv3snigdqmmpgbOzs5Sh0E2gLlCYjBfxCkqKkJ8fDzOnDlj1B4YGIjY2Fj06dMHMplMouhaF3OFpNSs4mHjxo2Ijo7G22+/jcrKykbbBw4ciM2bN7c4OCIiIqIbqdVqbNy4EfX19UKbr68vYmJiMHDgwHZbNBBZg2YVD4WFhZgzZ06T2z09PaFQKJodFJE146qeZC7mConBfDGfg4MDRo8ejYSEBHh6eiIyMhJDhgzpMN15mCskpWYVD+7u7qioqGhy+9mzZ+Hr69vcmIisWm1trU1P8Udth7lCYjBfTKutrUVqairGjBljtCZDeHg4nJ2dERYWZhOrQlsSc4Wk1Kxv29ixY7Ft2zbcc889jbadOXMGf/zxB6ZPn97i4IiskVqtljoEshHMFRKD+WKsvr4ehw8fRlpaGlQqFfR6PSZMmCBsd3JywsiRIyWMUDrMFZJSs4qH//f//h/S0tIwb948REZGQiaTYefOndi+fTsOHjwIf39/zJ8/39KxElmFjvJYnFqOuUJiMF8aqNVqZGRkICUlBXV1dUJ7dnY2oqKi4OjoKGF01oG5QlJqVvEQEBCAdevW4csvv8S+ffug1+vx559/wtXVFZMmTcLChQu55gO1W8xtMhdzhcTo6Pmi1WqRk5ODQ4cOoaqqSmi3s7NDWFgYxo0bx8Lh/3T0XCFpNbuToI+PDxYvXozFixejvLwcer0e3t7erIap3SsrK4Ofn1+rfoZWr4FSXd6s19aqSy0cDTVXW+QKtR8dNV/0ej2OHz+OhIQElJcbn/cGDx6M6OhoXizfoKPmClmHZhUPy5cvx6xZszB48GAADYXE9fLy8vDbb79hyZIlLY+QqIPJKt6AHSdfhFLbeBpkIqL2Rq1WY+/evaitrRXa+vfvj+joaHTq1EnCyIjIlGY9Jti5cycKCwub3H758mXs2rWr2UERWbPWXJhHq9ewcGhHuIgTidFR88XR0RFjx44FAPTs2RPz5s3Dvffey8LhJjpqrpB1aJW5za5du9Yul4MnAtCqUwIq1eUWLxyc5V5wdvC59Y5kcR1t+khqmY6QL0VFRUhMTMQdd9wBDw8PoX348OEICAhAr169uMCbGTpCrpD1Mjv7EhISkJCQIPy8bds2pKenN9qvqqoK6enpCA4OtkyERFamurraZopjZ7kXpvf/BHIZ/9BIwZZyhaTXnvOltLQU8fHxyM/PB9CwXtSUKVOE7fb29ujdu7dE0dme9pwrZP3MvqI4e/Ys9u/fDwCQyWQ4duwYTpw4YbSPTCaDi4sLhg0bhmeffdaykRJ1UM+GZ8LVoXkD45wdfFg4EJFkKisrkZiYiNzcXOj1eqH9/Pnz0Gg0vINOZIPM/tY+/PDDePjhhwEAUVFRWLx4MSZPntxqgRFZq7Ze1dPVwQ9ujgFt+plkGVwBlsRoT/lSU1ODpKQkZGZmQqvVCu1ubm4YN25ch1wV2pLaU66Q7WnWNzcxMdHScRDZDKVSCQcHB6nDIBvAXCEx2kO+qFQqpKSkIC0tzWgVZCcnJ0RERGDkyJFcq8EC2kOukO1i2U8kkkqluuU+zV2ngWs0tC/m5AqRQXvIFzs7O+Tk5AiFg4ODA8LDwzFmzBjOEGRB7SFXyHY1u3hITk7GTz/9hIKCAtTU1Bj1ZTS4foA1UXtxq5lAuE4DGXDWGBLDFvNFr9cbxW1vb4+oqCj8+eefGD58OMaOHQt3d3cJI2yfbDFXqP1oVvFw4MABvPnmm+jTpw8mTpyIrVu3YtKkSdDr9UhMTET37t0RExNj6ViJrIKvr2+T27hOA13vZrlCdCNbyhedToe8vDwkJSVhzpw5RovFDhkyBL169eKq0K3IlnKF2p9mFQ/ff/89Bg0ahC+++AJVVVXYunUrZsyYgZEjR+LKlStYsGABunTpYulYiaxCaWkp/PxMz35k6XUauEaDbbtZrhDdyBbyRa/X4+TJk4iPj8e1a9cANIyDvPPOO4V97OzsWDi0MlvIFWq/mrXC9Llz5zBx4kTI5XLI5XIAgEajAQB06dIFd999N3744QfLRUnUAXGNBiKyJufOncP333+PX3/9VSgcAKC2ttZoRiUiat+adVXi7OwsjPL38PCAg4OD0YnE19cXV65csUyERFZG7KC/5q7TwDUabB8HiJIY1povly9fRnx8PM6dO2fU3rVrV8TGxqJXr17SBNaBWWuuUMfQrCuTnj174uzZs8LP/fv3x59//ok77rgDWq0We/bsQWBgoMWCJLImYqfH4zoNHRenUiQxrC1famtrsXv3bhQUFBi1BwQEICYmBkFBQRy4KxFryxXqWJrVbSkmJgaJiYnCVGGPPPIIjh49iilTpmD69OnIysrC3LlzLRookbWoqqqSOgSyEcwVEsPa8sXJyQklJSXCz97e3rjzzjsxf/589O/fn4WDhKwtV6hjadaThwcffBAPPvig8HNkZCRWrlyJuLg4yOVyjB07FiNHjrRYkERERNS66uvr4eTkJPwsl8sRHR2N/fv3IzIyEkOHDhXGORJRx2WxDtXDhg3DsGHDhJ9ramrg5uZmqbcnshoeHh5Sh0A2grlCYkiVL0qlEikpKcjIyMDcuXPRqVMnYdugQYPQv39/dpOxMjy3kJSa1W3pZsrLy/Hll1/i3nvvtfRbE1kFruxJ5mKukBhtnS8qlQpJSUn44osvkJKSApVK1WhxV5lMxsLBCvHcQlIS9eShvLwcu3btQmFhITw8PDB+/HgEBwcDAEpKSrB+/Xrs3LkTKpUKw4cPb5WAiaRWX1/PFVPJLMwVEqOt8kWj0SAzMxNJSUmora0V2uVyOby8vKDT6WBnZ/F7i2RBPLeQlMwuHs6fP4+nn34aCoUCer0eALBhwwa8+eabAID3338fKpUK48ePx4MPPigUFUTtDQcJkrmYKyRGa+eLTqdDbm4uEhMToVAojD53yJAhiIyMhJeXV6vGQJbBcwtJyeziYc2aNairq8PLL7+MsLAwXLlyBZ999hlWrFiB6upqREVF4amnnkK3bt1aM14iyfn6+kodAtkI5gqJ0Zr5olAosGnTJpSWlhq1BwcHIzo6mqsV2xieW0hKZhcPmZmZuPvuu3HXXXcBAPr06QO5XI6XX34ZU6dOxWuvvdZaMRJZlbKyMp64ySzMFRKjNfPFw8PDqCtS3759ERMTg86dO7fK51Hr4rmFpGR28aBQKNCvXz+jtqCgIAAN6z4QdRSGbntEt8JcITEsmS+lpaVGTxNkMhliY2ORnJyM2NhY9OzZ02KfRW2P5xaSktnFg06ng7298e6Gn11dXS0bFZEVu34edKKbYa6QGJbIl6tXryI+Ph6nTp3Cww8/jK5duwrb+vXrh379+rG/fDvAcwtJSdRsSydOnICjo6Pwc21tLWQyGbKyskyudjh+/PgWB0hkba7/DhDdDHOFxGhJvpSXlyMxMRHHjh0T2uLi4vDAAw8IP7NoaD94biEpiSoefv75Z/z888+N2tetW9eoTSaTNZovmqg9qKqq4uBCMgtzhcRoTr5UVVUhKSkJWVlZ0Ol0QruHhwcGDRoEvV7PoqEd4rmFpGR28bBy5crWjIOIiIjMVFdXh5SUFBw5cgQajUZod3FxwdixYzF8+HAu7kZErcLs4oGLvhE18PDwkDoEshHMFRLD3HwpLy/Ht99+i/r6eqHN0dERo0aNwujRo9kfvgPguYWkJKrbEhEBarWa/U3JLMwVEsPcfPH29oa/vz8KCwshl8sxcuRIREREcPKSDoTnFpISiwcikZRKJdzc3KQOg2wAc4XEMJUvOp0OZ86cEaZGB/437eqxY8cQGRkJT0/Ptg6VJMZzC0mJxQMREZGV0ev1OHHiBBISElBWVob7778fvXv3Frb37NmTazUQkSSsrnjYsmULNmzYgLKyMgQFBeHFF19ESEhIk/tXVVVh9erViIuLg0KhQOfOnfHcc89h3LhxbRg1dSSc4YLMxVwhMfz8/KDX63H27FnExcWhuLhY2BYfH49evXpx5iQCwHMLScuqioe9e/di5cqVWLRoEUJCQvDzzz/jpZdewsaNG+Hj49Nof7VajRdeeAE+Pj5YunQpAgICUFRUBHd3dwmip46irKwMvr6+UodBNoC5QmLk5eUhIyMDly5dMmrv3r07YmNjWTiQgOcWkpJVFQ+bNm3CnXfeienTpwMAFi1ahKSkJGzfvh3z5s1rtP/27duhUCjw1VdfCatdd+nSpU1jJuuj1WugVJe32vvXqMvhpNKa3FarLm21zyXbo9frpQ6BbEBxcTHi4+Nx+vRpo/bAwEDExsaiT58+LBzICM8tJKVmFw9FRUX47rvvkJGRgYqKCrz//vsYNmwYKioq8M0332D69OkYMGCA2e+nVquRn59vVCTY2dkhPDwcubm5Jl+TmJiI0NBQfPzxx0hISIC3tzcmTZqEuXPnQi6Xm3yNSqWCWq0Wfq6pqTE7RrJ+WcUbsOPki1BqK6UOhYizodAtlZSU4JtvvjFq8/X1RUxMDAYOHMiigUziuYWk1Kzi4ezZs3j66aeh1+sREhKCwsJCaLUNd2K9vb2RnZ0NpVKJxYsXm/2eFRUV0Gq1jR7D+fr64sKFCyZfc/nyZWRkZGDy5Mn497//jUuXLuHjjz+GVqvFY489ZvI133//vdGK2Ia4y8rKoFar4ePjA4VCAa1WC3t7e7i7u6OiogIAhGnwamtrhd+1uroaGo0Gcrkcnp6eKC8vF/aVyWRCceLt7Y2amhqo1epG+7q4uMDOzk7Y18vLC7W1tVCr1bCzs4O3tzfKysoAAM7OzrC3t0d1dTUAwNPTE0qlEiqVCjKZDL6+vigtbbj77eTkBEdHR1RVVQFomBdapVIJc4P7+fmhrKwMer0ejo6OcHZ2hkKhAAC4u7tDo9FAqVQK+5aXl0On08HR0REuLi6orKwU9tVqtairqxP+zSorK6HVauHg4ABXV1dhXzc3N+h0OmFfSx1vFxcXQKbDHwUvQKVTmPy3l0pZeTmU9naijrevry8qKiqg0+lEHUMHBwe4ubkJx9DNzQ16vV44hpY83q2Rs87OznBwcDCZs4Z9DcfQVH6r1WqjnG3t432rY+ju7o7KykqeI6zkHNHUMZTL5fDy8hKOoYuLC+RyuXAMvby8UFdXB5VKBTs7O/j4+BjlbEuOt4uLC7p164bCwkJ4eHhg+PDh6N+/P5ydnaHRaHiOaOfniOZeR8hkMuh0Op4j2ihnpTxHNPd4G3LWcNwsSaZUKkU/+1q0aBHOnTuH1atXQyaTYcaMGVixYgVGjhwJAFi9ejX279+Pn376yez3LCkpwV133YWvvvoKoaGhQvt///tfZGZmYs2aNY1ec//990OlUmHz5s3Ck4affvoJGzZswLZt20x+zo1PHhQKBfr06YOrV69yujsbV6MqwQfJPaQOw4iz3AuvRhZCLrOqHoLURkpLSzmwkQS1tbXIzc3FqFGjjJ4oFBUV4eLFi+jZsycCAwMljJBsBc8tZC6FQoFOnTpZ9Dq3WVc0mZmZmD9/Pnx8fExWNJ07d0ZJSYmo9/T29oZcLheqOoObDQry8/ODvb29URelXr16obS0FGq1Gg4ODo1e4+joaPS4z/DkgcjSnOVemN7/ExYORB1cfX09Dh8+jLS0NKhUKvj4+KB///7C9s6dO6Nz587C3UciImvWrKsavV4PZ2fnJreXl5ebvHC/GQcHBwwcOBDp6emIiYkB0LA4zpEjR3DvvfeafM2QIUOwZ88e6HQ62NnZAQAuXrwIPz8/0Z9P7dOz4ZlwdbDs3Zl6lQpOZvQ3dXbwYeHQwXHmt45NrVYjIyMDKSkpQhcLAEhKSjIqHgyYL2Qu5gpJqVlXNgMGDEBSUhLuueeeRts0Gg327duHwYMHi37f++67D8uWLUNwcLAwVatSqRRmX3rvvffg7++PhQsXAgDuvvtubNmyBZ9++ilmz56NS5cu4bvvvsOcOXOa82tRO+Tq4Ac3xwDLvqm6Bm6OXNmTbk2j0cDJyUnqMKiNabVa5OTk4NChQ0LfZaBhEpCwsLAm1yFivpC5mCskpWYVD/PmzcMrr7yCjz76CBMnTgTQ0L0oLS0N3333Hc6dO4eXXnpJ9PtOnDgRFRUV+Prrr1FWVob+/fvj448/FrotFRcXG/UTDQwMxCeffIIVK1bgkUcegb+/P+bMmYO5c+c259ciMotSqYSbG4sHujXmSsei1+tx/PhxJCQkCIMwDQYPHoyoqCiTaxYZMF/IXMwVklKzBkwDwO7du/Hpp5+ipqYGer0eMpkMer0ebm5u+Mc//oFJkyZZOtZW0RoDSUgapgZMvzr2osWfPHCgGpmLudKxFBcXN5p2NSgoCDExMejUqdMtX898IXMxV8hcVjNgGgCmTJmC2NhYpKWl4eLFi9Dr9ejWrRtGjx7NapiarSULvLXVAm1c1ZPMxVzpWAIDAzFgwAAUFBSgZ8+eiI2NRbdu3cx+PfOFzMVcISk1e8C0TCaDi4uLMLiZqKVsZYG3ioqKm3Y9IDJgrrRfRUVFyM7OxqRJk4y608bGxmL48OHo3bu36AXemC9kLuYKSalZxcOsWbMwYcIETJgwAUOHDrV0TNQBafUamygcgIZZwIjMwVxpf0pLSxEfH4/8/HwAQLdu3YwmCPHz82t2dxLmC5mLuUJSalbxMHz4cGzfvh1btmxBQECAUEiEhIRYOj7qIJTqcosXDs5yLzg7WP7ODKcBJnMxV9qPyspKHDp0CDk5OdDr/zdUMDs7u1mzC5rCfCFzMVdISs0qHt555x3U19fj0KFD2LdvH7Zu3YpNmzahc+fOuP322zFhwgQMGDDA0rESma01F2gzLHdPdCvMFdtXU1OD5ORkHD161GhRUTc3N4wbNw5hYWEW+yzmC5mLuUJSavZsS9erq6tDYmIi9u3bh9TUVGg0GnTv3h0bN260RIytirMtWQdTMyW1ZIG31lygjbNckLmYK7ZLqVQKq0Kr1Wqh3cnJCRERERg5ciQczVgsUgzmC5mLuULmsqrZlq7n4uKCSZMmITIyEjt37sTq1atx6dIlS7w1dWCtssAbEZEZqqqqkJSUJPzs4OCA8PBwjBkzBs7OzhJGRkQkrRYXD0ql0uipg1qtRrdu3TBhwgRLxEdkdTgVMZmLuWK7AgICMHjwYBw/fhzDhg3DuHHj4O7u3qqfyXwhczFXSErNKh7q6+uRlJSE/fv3Izk5GUqlEl26dMGcOXNw++23c7wDtWuc5YLMxVyxfjqdDnl5eTh27Bhmz54NuVwubBs/fjyio6Ph7e3dZrEQmYO5QlJqVvEwffp01NfXw9/fHzNnzsTtt99usdkmiKxdXV0dB6uRWZgr1kuv1+PkyZNISEhASUkJACAnJwfDhg0T9vHw8GjTmJgvZC7mCkmpWcXDtGnTcPvtt1t0lgkiIqK2cP78ecTFxeHy5ctG7ZcuXTIqHoiIqLFmFQ8vvfSSpeMgshlc1ZPMxVyxLleuXEFcXBzOnTtn1N6lSxfExsaid+/eksRlwHwhczFXSEpmFQ+ZmZkAINyRMfx8K7yDQ+2RQqFosz7QZNuYK9ahoqIC+/fvR0FBgVG7v78/YmJi0L9/f8hkMomi+x/mC5mLuUJSMqt4eOaZZyCTybB//344ODgIPzdFr9dDJpMhISHBYoESWYvrF4oiuhnminXQ6/U4deqU8LOXlxeio6MREhICOzs7CSMzxnwhczFXSEpmFQ8rV64E8L/l0A0/E3VE9vats/gctT/MFWnodDqjosDHxwdhYWE4efKksCr09bMqWQvmC5mLuUJSMiv7hg8fftOfiTqS1p7rndoP5krbUiqVSE1NRX5+PubPny/c8AKA2NhYTJgwwajN2jBfyFzMFZJSs57XPvvss0hPT29y+5EjR/Dss882Oygia1ZRUSF1CGQjmCttQ6VSITk5GV988QWSk5NRVlaGo0ePGu3j7Oxs1YUDwHwh8zFXSErNeu519OhR3HnnnU1uLy8vN3tQNRERUXNotVpkZmYiKSkJNTU1QrtcLkd9fb2EkRERtV/N7jR3swHThYWFXLyE2i3mNpmLudI6dDodjh07hsTERFRWVgrtMpkMoaGhiIqKgpeXl4QRNg/zhczFXCEpmV087Ny5E7t27RJ+/vbbb7Ft27ZG+1VXV+P06dOIiIiwTIRERET/p6ioCH/88QdKS0uN2gcOHIjo6Gj4+/tLFBkRUcdgdvFQX19v1Meurq6uUZ87mUwGZ2dnzJo1C4899pilYiSyKrW1tXBxcZE6DLIBzBXLc3NzM3ra0KdPH8TGxqJz584SRmUZzBcyF3OFpGR28XD33Xfj7rvvBgDMnj0bzz//PKKjo1stMCIiotraWqMuGh4eHhg5ciQuXbqE2NhY9OzZU8LoiIg6nmaNefjll18sHQeRzeCqnmQu5krzlZSUID4+HoWFhXjyySfh5OQkbIuJiYGdnZ1VrAptScwXMhdzhaRkVvFQVFQEAMJjYcPPt9IeHiMT3ai6utomB2NS22OuiFdRUYGEhAQcO3ZMaEtLS0NUVJTwszUu8GYJzBcyF3OFpGRW8TB79mzIZDLs378fDg4Ows+3kpCQ0OIAiayNRqOROgSyEcwV81VVVSEpKQlZWVnQ6XRCu7u7e4e5SGK+kLmYKyQls4qHxYsXQyaTCcuhG34m6oja611Psjzmyq3V1dUhJSUFR44cMbogcnFxQUREBEaMGGH1i7tZCvOFzMVcISmZVTxMnz79pj8TAYBWr4FSXd6s19aqS2+9k5Xw9PSUOgSyEcyVmzt37hy2bt1qtKCbo6MjRo0ahdGjRxuNc+gImC9kLuYKSanZi8SZolarodFoOH1YB5RVvAE7Tr4Ipbby1jvbuPLycvj5+UkdBtkA5srNBQYGQq/XA2i4kzpixAiMHTu2wy6AxXwhczFXSErNKh727t2LY8eO4fnnnxfa1q1bh/Xr10Ov1yMyMhJvvPFGh/0D0NFo9ZoOUzgQUfPodDqUlJQgMDBQaDN0TaqsrERkZCTvphIR2QC75rxo48aNUCqVws85OTlYt24dxowZg/vuuw8pKSlYv369xYIk66ZUl1u8cHCWe8HZwcei72kpLIrJXMwVQK/X48SJE1i7di02bNiAuro6o+3jxo3D1KlTWTiA+ULmY66QlJr15KGwsBBTp04Vfv7rr7/g6+uL5cuXw97eHnq9HnFxcVi4cKHFAqWOw1nuhen9P4FcZtFedRbDyQLIXB05V/R6Pc6ePYu4uDgUFxcL7cnJyZgwYYKEkVmvjpwvJA5zhaTUrKsztVoNR0dH4ee0tDSMHTtWmI2pd+/e+PXXXy0TIdmkZ8Mz4erQvP6Yzg4+Vls4AEBNTQ2cnZ2lDoNsQEfNlUuXLiEuLg4XL140au/evTv69+8vUVTWr6PmC4nHXCEpNesKrUuXLkhPT8fMmTNx/PhxXLp0CQsWLBC2l5WVcdB0B+fq4Ac3xwCpwyCiNlRcXIz4+HicPn3aqL1Tp06IjY1F3759eceUiMjGNat4mDVrFlasWIFz587h6tWr6NSpEyIjI4XtOTk56NOnj8WCJLIm3t7eUodANqIj5cqJEyfw22+/GbX5+voiOjoawcHBLBrM0JHyhVqGuUJSalbxMGfOHDg5OSE5ORkDBw7E3Llzhfm4FQoFSktLcdddd1kyTiKrUVNTw8GdZJaOlCt9+vSBi4sL6urq4OHhgaioKAwZMgR2ds2al6ND6kj5Qi3DXCEpNbtj+cyZMzFz5sxG7Z6enli3bl2LgiKyZmq1WuoQyEa011ypra3F5cuXERQUJLQ5OTlh/PjxUKlUGD58uDAGjszXXvOFLI+5QlJq8dn97NmzKCoqAgB07tyZ3ZWo3ZPL5VKHQDaiveVKfX090tLScPjwYeh0Ojz55JPw8PAQtoeFhUkYne1rb/lCrYe5QlJqdvGQkJCAzz77TCgcDLp06YJnn30W0dHRLQ6OyBrxUTGZq73kikajQUZGBpKTk43WaUhOTsbkyZMljKx9aS/5Qq2PuUJSalbxkJSUhNdeew2BgYF48skn0atXLwDA+fPn8fvvv+O1117Dhx9+iIiICIsGS2QNysvL4efXvGloqWOx9VzR6XTIzs7GoUOHUFVVJbTLZDKEhYVh7NixEkbX/th6vlDbYa6QlJpVPHz77bfo168fPv/8c6MpWaOjo3Hvvfdi4cKFWLduHYsHIiIbZFgVOj4+HuXl5UbbQkJCEB0dDR8f61wBnoiIWleziofTp0/jySefNLmWg4uLC6ZNm4avvvqqxcERWSOuYULmstVcyc7Oxq5du4zagoKCEBMTg06dOkkUVftnq/lCbY+5QlJqVvHg6OgIhULR5HaFQmG0AjVRe8KpJ8lctporISEhSExMRFVVFXr06IHY2Fh0795d6rDaPVvNF2p7zBWSUrOKh5EjR2Lz5s2IiIhAaGio0bZjx47hl19+wahRoywSIJG1qampgbOzs9RhkA2whVwpKirClStXMHz4cKHNwcEBkyZNgoODA3r37s0F3tqILeQLWQfmCkmpWcXD008/jSeffBILFy7EoEGD0LNnTwDAhQsXcPz4cfj4+ODpp5+2aKBERGQ5paWlSEhIwIkTJ2BnZ4c+ffoYrVo7YMAA6YIjIiKr1azioWvXrvjuu+/w3XffISUlBfv27QPQsM7DnDlzMG/ePA6mo3bLy8tL6hDIRlhjrlRWVuLQoUPIycmBXq8H0DCrUlpaGiZNmiRxdB2bNeYLWSfmCklJdPGg1WpRUVEBd3d3PP/883j++edbIy4iq1VbW8s5tsks1pQrNTU1SE5OxtGjR6HVaoV2V1dXjBs3DsOGDZMuOAJgXflC1o25QlIyu3jQ6/X46quvsGXLFiiVStjZ2WHcuHFYvHgxE5g6FLVaLXUIZCOsIVeUSiUOHz6M9PR0qFQqod3JyQljxoxBeHg4J7iwEtaQL2QbmCskJbOLh507d+KHH35Ap06dEBERgcLCQiQkJECn0+GDDz5ozRiJrApnuSBzWUOu5OfnIykpSfjZ3t4e4eHhGDNmDKd7tDLWkC9kG5grJCWzi4etW7diwIAB+OKLL+Dk5AQA+PTTT/Hrr7+ioqLCaKAdUXvGXCdzWUOuhIaGIjk5GQqFAsOGDcO4cePg7u4udVhkgjXkC9kG5gpJyezStbCwEFOmTBEKBwC4++67odPpcOnSpVYJjsgalZWVSR0C2Yi2zBW9Xo9jx44hLi7OqF0ul2PGjBlYsGABJk+ezMLBivHcQuZirpCUzH7yUFVV1ajSNfxcX19vyZiIiMhMer0ep06dQnx8PEpKSgA0LPIWEBAg7MMF3oiIyFJEzbbEhYKIwIV5yGytnSvnz59HfHw8CgsLjdpPnDhhVDyQbeC5hczFXCEpiSoevvjiC3z//ffCz4bp/t5///1GA+9kMhnWr19vgRCJrIu9fbOWR6EOqLVy5cqVK4iLi8O5c+eM2rt06YLY2Fj07t27VT6XWhfPLWQu5gpJyezsa2oOcC4GRx1NdXW10dgfoqZYOldKS0sRFxeHgoICo3Z/f3/ExMSgf//+fEJsw3huIXMxV0hKZhcPq1atas04iIjoFoqKiowKBy8vL0RHRyMkJIRTNxIRUZvgcy8ikbgoIpnL0rkSEhKClJQU1NbWIjIyEmFhYZDL5Rb9DJIOzy1kLuYKSYnFA5FISqUSDg4OUodBNqC5uaJUKpGamgqFQoE777xTaJfJZLj77rvh7u7OVaHbIZ5byFzMFZISiwcikVQqldQhkI0QmysqlQpHjhxBSkqKMAX2yJEj0bVrV2EfX19fi8ZI1oPnFjIXc4WkxOKBSCQOSCVzmZsrWq0WmZmZSEpKQk1NjdBuZ2eH4uJio+KB2i+eW8hczBWSEosHIpF455fMdatc0el0OHbsGBITE1FZWSm0y2QyhIaGIjIystHinNR+8dxC5mKukJRYPBCJVFpaCj8/P6nDIBtws1w5d+4c9u7di2vXrhm1Dxw4ENHR0fD392+LEMmK8NxC5mKukJRaVDyUlJQgMzMT5eXlGD9+PDp16gStVouamhq4ublxFhAioiaoVCqjwqFPnz6IiYlBly5dJIyKiIjo5ppVPOj1eqxcuRJbtmyBVquFTCZDv3790KlTJ9TV1eHee+/FE088gfvuu69ZQW3ZsgUbNmxAWVkZgoKC8OKLLyIkJOSWr9u7dy/eeustREdH4/3332/WZxPdChfmIXNdnysajcZoVdj+/fuja9eukMlkiImJQa9evaQIkawIzy1kLuYKSalZqwpt2LABmzdvxgMPPIBPP/0Uer1e2Obu7o7Y2FgcPHiwWQHt3bsXK1euxGOPPYZ169YhKCgIL730EsrLy2/6uitXrmDVqlUICwtr1ucSmYtTZJK5HB0dUVJSgi1btmDLli1G22QyGebMmYO5c+eycCAAPLeQ+ZgrJKVmFQ/btm3DlClT8NRTT6F///6NtgcFBeHixYvNCmjTpk248847MX36dPTp0weLFi2Ck5MTtm/f3uRrtFot3nnnHTz++OOclYRaXVVVldQhkA2oqKjAtm3bsHbtWpw8eRJnz57F+fPnjfZxcXHhrCkk4LmFzMVcISk1q9vS1atXMWTIkCa3Ozs7G003aC61Wo38/HzMmzdPaLOzs0N4eDhyc3ObfN0333wDHx8f3HnnncjKyrrpZ6hUKqjVauHn5sRJRNSU6upqHDp0CFlZWdDpdEK7u7u7sHYDERGRrWpW8eDj44Pi4uImt+fn5yMwMFD0+1ZUVECr1TaagszX1xcXLlww+ZqsrCxs374d3377rVmf8f3332PdunXCz1qtFgBQVlYGtVoNHx8fKBQKaLVa2Nvbw93dHRUVFQAAV1dXAEBtbS0AwNvbG9XV1dBoNJDL5fD09BS6V7m6ukImkwnFibe3N2pqaqBWqxvt6+LiAjs7O2FfLy8v1NbWQq1Ww87ODt7e3igrKwPQUJjZ29ujuroaQMMS9UqlEiqVCjKZDL6+vigtLQXQ0CfS0dFRuEPh4eEBlUolXMD4+fmhrKwMer0ejo6OcHZ2hkKhANBwoaPRaKBUKoV9y8vLodPp4OjoCBcXF2FqSZlT48VqKhWVUMrs4ODgAFdXV2FfNzc36HQ61NXVAYDFjvfNjqFcLoeXl5dwDF1cXCCXy4Vj6OXlhbq6OqhUKtjZ2cHHx0c4hqaOt1wuR2lpaaseb19fX1RUVECn04k6hg4ODnBzcxOOoZubG/R6vXAM2+J4tyRnnZ2d4eDgYPIYGvY1HENTx1utVhvlbGsf7+uPoVKpRF5eHrKysqDRaITvgpOTE8LCwjBkyBD4+fkJv2tHOke4u7tDq9UKx9DX1xeVlZVCzra3c0Rzj7eHhwfPEbc4hrZ8jhB7DG92jgAapnrmOaJjnSOak7PXTwNuKTKlUqm/9W7GPv30U+zZswerV6+Gu7s7pk+fjhUrVmDkyJFITU3FK6+8goceeggLFiwQ9b4lJSW466678NVXXyE0NFRo/+9//4vMzEysWbPGaP+amho88sgjePnllzF27FgAwNKlS1FdXd3kgOkbnzwoFAr06dMHV69ehaenp6h4qUGNqgQfJPcwant17EW4OQZIFFHrqq6uhru7u9RhkBXJzc3Fnj17jJ4sODg4YNiwYYiMjISzs7OE0ZGt4LmFzMVcIXMpFAp06tTJote5zXry8MQTT+Do0aN49NFHERYWBplMhh9//BFff/01cnNz0b9/fzz88MOi39fb2xtyuVyo7AzKyspMLohSWFiIK1eu4NVXXxXaDN0EYmJisGHDBnTv3t3oNY6OjkYDjQxPHojMVV9fz5M2Gbm+S5JcLsfw4cMxduxYKJVKFg5kNp5byFzMFZJSs4oHd3d3rF69Ghs3bsSBAwfg6OiIo0ePolu3bpg/fz4eeuihZk0j5uDggIEDByI9PR0xMTEAGoqBI0eO4N577220f69evfD9998bta1evRq1tbV44YUXmtV1iojoZnQ6HWpra43+cPfu3Rt9+vSBh4cHIiMj4eXlBQDC42MiIqL2otmLxDk5OeHRRx/Fo48+asFwgPvuuw/Lli1DcHAwQkJC8PPPP0OpVGL69OkAgPfeew/+/v5YuHAhnJyc0LdvX6PXG/6g39hOt6bVa6BU33xKXFNq1aWtEI314qqeHZNer0d+fj4SEhLg7OyMuXPnGs2UNGfOHNjZGU9gx1whMZgvZC7mCkmpRStMt4aJEyeioqICX3/9NcrKytC/f398/PHHQrel4uJiTm3YCrKKN2DHyReh1Fp+YE1701Q3Omqf9Ho9zp07h7i4OBQVFQntp06dMpqq+sbCAWCukDjMFzIXc4Wk1KziYfny5bfcRyaTYfHixc15e8yePRuzZ882uW3VqlU3fe3rr7/erM/syLR6DQsHEa5fFJHat8LCQhw8eLDRujXdu3eHm5vbLV/PXCExmC9kLuYKSalZxcORI0ca3f3X6XS4du0adDodvL294eLiYpEAqfUp1eUWLRyc5V5wdvCx2PtZG67s2f5dvXoV8fHxOHXqlFF7p06dEBsbi759+5r1BJS5QmIwX8hczBWSUrOKhy1btphs12g0+O233/Dzzz/j008/bUlcZKOc5V6Y3v8TyGVW1yPOYjh7TvuWkpKCgwcPGrX5+PggJiYGwcHBorpNMldIDOYLmYu5QlKy6BWevb09Zs+ejXPnzuE///kP/v3vf1vy7akNPRueCVcH8QOynB182nXhADTMmczBau1Xz549hf83zJ40ZMgQyOVy0e/FXCExmC9kLuYKSalVrvKCgoKwe/fu1nhraiOuDn7tdpE3IoO6ujpUV1cjIOB/ud61a1cMHToU/v7+GDFiBOzt23cxTEREJEar/FVMS0vjIzVqt7gwj+2rr69Heno6Dh8+DC8vL8yfP9+oO9K0adMs8jnMFRKD+ULmYq6QlJpVPKxbt85ke3V1NTIzM1FQUIC5c+e2KDAia6XRaJq1CCJJT6PRICMjA8nJyairqwPQMDj6+PHjCAkJaZXPY66QuZgvZC7mCknJosWDh4cHunXrhkWLFmHmzJktCozEae4Cb0DHW+StpZRKpVnTdJL10Ol0yMnJQWJiIqqqqoR2mUyGsLAwdO/evVU+l7lCYjBfyFzMFZJSs4qHxMRES8dBLcAF3ohM0+v1OHHiBBISElBWVma0LSQkBNHR0fDxab/TChMREVma6OKhvr4eX331FUaMGIGoqKjWiIlE4AJvbY8zXNiO/fv3Iy0tzaitX79+iImJQWBgYKt/PnOFxGC+kLmYKyQlO7EvcHJywu+//97oLh5Jw9ILvAHtf5G3liovb173MGp7Q4cOFf6/R48emDt3LubMmdMmhQPAXCFxmC9kLuYKSalZ3ZYGDhyIM2fOWDoWsgIdYZG3ltLpdFKHQCYUFRWhvr4evXr1EtoCAgIQHR2NLl26oE+fPqIWeLME5gqJwXwhczFXSErNukJ8/vnn8Y9//AN9+/bFtGnTOA+6lWnuAm9Ax1jkraUcHR2lDoGuU1ZWhoSEBBw/fhw+Pj544oknjBZ0i4yMlCw25gqJwXwhczFXSEpmXyVmZmaiV69e8PHxwdKlSyGTyfDRRx/h008/RUBAQKMpw2QyGdavX2/xgOnWuMBb63JxcZE6BELDCquHDh1CdnY29Ho9gIZH+Xl5eRgyZIjE0TVgrpAYzBcyF3OFpGR28fDss8/izTffxKRJk+Dl5QUvLy/07NmzNWMjskqVlZUcrCah2tpaJCcnIyMjA1qtVmh3dXXF2LFjMWjQIAmjM8ZcITGYL2Qu5gpJyeziQa/XC3f3Vq1a1WoBERGZUl9fj8OHDyMtLQ0qlUpod3JywujRozFq1Cg+yiciImpl7NxOJJK7u7vUIXRI+/fvR1ZWlvCzvb09Ro4ciYiICKt9hM9cITGYL2Qu5gpJSVTx0NYzlRBZo+u7ylDbiYiIQE5ODgAgLCwM48aNg4eHh8RR3RxzhcRgvpC5mCskJVHFw7vvvot3333XrH1lMhni4+ObFRSRNaurq4Orq6vUYbRber0ex48fh52dHYKDg4V2Hx8fTJ06FT169IC3t7d0AYrAXCExmC9kLuYKSUlU8RAeHo4ePXq0VixE1IHp9XqcPn0acXFxKCkpgYeHB4KCgoymgraWWZSIiIg6KlHFw9SpUzF58uTWioXIJvj6+kodQrtz4cIFxMXFobCwUGirqqpCQUEBQkJCJIysZZgrJAbzhczFXCEpccA0kUiVlZU2023G2hUVFSEuLg5nz541au/SpQtiY2PRu3dvaQKzEOYKicF8IXMxV0hKLB6IROJAtZa7du0aEhISkJ+fb9Tu7++P6OhoDBgwoF1M0MBcITGYL2Qu5gpJicUDkUgODg5Sh2Dz0tPTjQoHLy8vREVFYfDgwbCzs5MwMstirpAYzBcyF3OFpGR28ZCYmNiacRDZDM5w0XKRkZHIzc2Fk5MTxo0bh7CwMKOB0e0Fc4XEYL6QuZgrJKX299eaqJVVVlbCz89P6jBsglKpxOHDh+Hh4YHhw4cL7R4eHpg9eza6du3arleFZq6QGMwXMhdzhaTE4oGILE6tVuPIkSNISUmBUqmEi4sLQkJC4OTkJOxj64OhiYiIOiIWD0Qiubm5SR2C1dJqtcjKykJSUhKqq6uF9vr6ely6dAn9+vWTMLq2x1whMZgvZC7mCkmJxQORSDqdTuoQrI5Op0NeXh4SExNRUVEhtMtkMgwePBhRUVEdclpB5gqJwXwhczFXSEosHohEqqur42C165w6dQoHDx7EtWvXjNoHDBiAmJgY+Pv7SxSZ9JgrJAbzhczFXCEpsXggoha5cOGCUeHQu3dvxMbGokuXLhJGRURERK2BxQORSD4+PlKHICm9Xm+0gFtERAQyMzPh7++P2NhY9OrVS8LorEtHzxUSh/lC5mKukJRYPBCJpFAoOmT//ZKSEsTHx6Nz586IjIwU2l1dXfHoo4/Cx8enXawKbUkdNVeoeZgvZC7mCkmJxQORSFqtVuoQ2lRFRQUSEhJw7NgxAA3dlEaMGAEXFxdhH19fX6nCs2odLVeoZZgvZC7mCkmJxQORSO1xJWRTqqurkZSUhMzMTKOZPRwcHFBWVoZu3bpJGJ1t6Ci5QpbBfCFzMVdISsw+IpHc3d2lDqFVKZVKpKSkID09HRqNRmh3dnZGREQERo4cCQcHBwkjtB3tPVfIspgvZC7mCkmJxQORSBUVFfDz85M6jFaRlpaGxMRE1NfXC20ODg4YNWoURo8eDWdnZwmjsz3tOVfI8pgvZC7mCkmJxQMRCZRKpVA4yOVyDB8+HGPHjuVqpkRERASAxQORaO1lYR6dTgetVmvUBWn06NHIzMxEv379EBkZCS8vLwkjtH3tJVeobTBfyFzMFZISiweiDkav1yM/Px8JCQkICgrCbbfdJmxzcnLCk08+CUdHRwkjJCIiImvF4oFIpNraWqNpSm2FXq/HuXPnEBcXh6KiIgBAZWUlwsPD4eHhIezHwsFybDVXSBrMFzIXc4WkxOKBqAMoLCxEXFwcLly4YNTeuXNn1NfXGxUPRERERE1h8UAkki2t6nn16lXEx8fj1KlTRu2dOnVCbGws+vbty1WhW5Et5QpJj/lC5mKukJRYPBCJVF1dbRMDiffs2YMjR44Ytfn4+CA6OhqDBg1i0dAGbCVXyDowX8hczBWSEosHIpGuXzjNmnl6egr/7+HhgcjISAwZMgRyuVzCqDoWW8kVsg7MFzIXc4WkxOKBSCRrvPiuq6sDAKMBdCNGjMCxY8cQGhqK4cOHc1VoCVhjrpD1Yr6QuZgrJCUWD0QiXX9HX2r19fVIT0/H4cOHMXjwYEyePFnY5uDggPnz57N7koSsKVfI+jFfyFzMFZKSndQBENma8vJyqUOARqNBWloavvrqKyQkJKC+vh6ZmZmoqKgw2o+Fg7SsIVfIdjBfyFzMFZISnzwQ2RCdToecnBwcOnQICoVCaJfJZBgyZAjs7fmVJiIiotbDKw0ikaRYmMewKnR8fDzKysqMtoWEhCAqKgq+vr5tHhfdHBdxIjGYL2Qu5gpJicUDkUh2dm3f2++XX37B6dOnjdr69euHmJgYBAYGtnk8ZB4pcoVsF/OFzMVcISmxeCASqaam5v+3d99RUZ1r28CvmaEXaTYUYwNs2AUFBCygsResUZcRSzSJJ5bYcvJG8xpTPLZEk9gbWLC36DGWKIIFlNhrNMaCKDJ0GJj2/eHL/hwBncFh9oDXby3Xcp7d7tncDPue/ezngY2NjUmP6enpKRQPtWrVQkhICDw8PEwaAxlOjFyh8ov5QvpirpCYWDwQmZmnT5/CxsZGZwKgZs2a4f79+2jevDnq1q3LB6GJiIhIFCweiAxUVrN6yuVynDp1Cjdu3ICPjw969uwpLJPJZOjXr1+ZHJfKDmeAJUMwX0hfzBUSEzvNERkoNzfXqPvLzMzEoUOHsGrVKty4cQMAcPXqVTx//tyoxyHTM3auUMXGfCF9MVdITLzzQGQgpVJplP3k5ubizJkzSExMhFqtFtrt7Ozg7+8PZ2dnoxyHxGOsXKF3A/OF9MVcITGxeCAykEwme6vt8/PzER8fj4SEBBQUFAjt1tbW8PPzQ5s2bWBtbf22YZIZeNtcoXcL84X0xVwhMbF4IDLQ2/Y1jY6ORlJSkvDawsICrVu3Rrt27Th2dwXDfslkCOYL6Yu5QmJi8UBkILlcDjc3t1Jv36pVKyQlJUEqlaJ58+YICAiAo6OjESMkc/G2uULvFuYL6Yu5QmJi8WAG1FoVFMq0Um2bq0w1cjRkLFqtFjdu3EC1atV0PuQbN26M58+fo3nz5nBxcRExQiIiIiLDsHgQ2aWnm/HbnclQqDPEDoX09KauRVqtFnfv3sXJkyeRkpICb29v9O/fX1gulUrRoUOHMo6SzAG7oZEhmC+kL+YKiYnFg4jUWhULh3LodQ+qPXjwACdPnsTjx4+Fttu3b+P58+eoXLmyKcIjM8KHGskQzBfSF3OFxMTiQUQKZZrRCwcbmRNsLNkVpixlZ2cXGQ0pOTkZJ0+exN9//63TXr16dYSEhLBv6juquFwhKgnzhfTFXCExsXioQGxkTujhtRgyCX+sppKamoqYmBjcunVLp93NzQ3BwcHw9vaGRCIRKToiIiIi4zLLq8ydO3di8+bNkMvl8PT0xOTJk9G4ceNi1923bx8OHTokfOPboEEDfPTRRyWub+4mtrkIO8vSfUttY+nCwsEECofI02q12Lt3L549eyYsq1SpEoKCgtCkSRNIpZzA/V3H4RTJEMwX0hdzhcRkdleaR48exdKlSzFt2jQ0btwY27Ztw5QpU7Bly5ZiR6ZJTExEWFgYfHx8YG1tjaioKEyePBlRUVGoUqWKCO/g7dhZusHeqvzF/S7Jy8uDo6MjJBIJ2rdvj127dsHOzg4BAQFo0aIFLCzM7teKRFKYK0T6YL6QvpgrJCazu8qJjo5Gr1690KNHDwDAtGnTcPr0aRw4cAAjRowosv6cOXN0Xs+cORMnTpzA+fPn0a1btyLrFxQU6EzrnpOTY9w3QBWSQqFAfHw8vL29YWlpKbR7eXmhW7duaNSoEaysrESMkMzRyzOIE70J84X0xVwhMZlV8aBUKnHr1i2dIkEqlaJNmza4evWqXvtQKBRQqVSoVKlSscsjIyOxdu1a4bVarQbwYsIVpVIJFxcXZGZmQq1Ww8LCAg4ODkhPTwcA2NnZAQByc3MBAM7OzsjOzoZKpYJMJkOlSpWQlpYmrCuRSITixNnZGTk5OVAqlcK68rSiczvI09Jg7eaC3NxcKJVKSKVSODs7Qy6XAwBsbGxgYWGB7OxsAC+6ySgUChQUFEAikcDV1RWpqS/mfrC2toaVlRWysrIAAI6OjigoKEB+fj6AF/3y5XI5tFotrKysYGNjg8zMTACAg4MDVCoVFAqFsG5aWho0Gg2srKxga2uLjIwMYV21Wo28vDwAgKurKzIyMqBWq2FpaQk7OzthXXt7e2g0GmFdY51vW1tbSKVS4Xw7OTkJ51Amk8HJyUk4h7a2tpDJZMI5dHJyQl5eHgoKCiCVSuHi4iKcQ5lMhqtXryIhIQH5+fl4/PgxOnfujNTUVOF8e3h4ICsry6jn29XVFenp6dBoNAadQ0tLS9jb2wvn0N7eHlqtVjiHpjjfb5OzNjY2sLS0LPYcFq5beA6LO99KpVInZ8v6fL/pHEokEmRkZJT6M6Kszzc/I97+M8KY51sqlfIz4g3nsKJ9RpT2OkKlUkGj0fAzwkQ5ay6fEaXJ2cLzZkwShUKhNfpeSyklJQV9+/bFihUr4OPjI7T//PPPuHjxIlatWvXGfSxYsADnzp1DVFRUsSMRvHrnITMzE3Xr1sWzZ89KLDjepLSTvOUqU7H0fAudthn+D9ltyQyo1WpcunQJp0+fFn7hgRd/3MeOHcvJ3YiIiMjsZWZmomrVqm91nfsqs7rz8LYiIyNx9OhRLFu2rMQhzKysrHS6lxTeeSgtTvJWsWg0Gly/fh2xsbHCNxeFfHx80L59+7fOGXp3pKamcphe0hvzhfTFXCExmVXx4OzsDJlMJtwWKiSXy+Hq6vrabTdv3oyoqCgsWbIEnp6eZRmmgJO8VRxarRZ37tzBqVOnkJKSorPM29sbQUFBwgP4hbcXiYiIiN41ZlU8WFpaokGDBjh//jyCg4MBvPgm+MKFCwgPDy9xu02bNmHDhg1YtGgRGjVqZKpwjT7JGyd4E9eZM2d0Coc6deogODgYNWrU0FnPxsbG1KFROcVcIUMwX0hfzBUSk1kVDwAwePBgzJs3Dw0bNhSGalUoFMLoS3PnzkXlypUxYcIEAEBUVBRWr16N2bNnw93dXfhW2NbWVnhQpjzgBG/ikkgkCAkJwdatW+Hu7o6QkBDUqVOn2HU5FCvpi7lChmC+kL6YKyQms8u+0NBQpKenY/Xq1ZDL5fDy8sLChQuFbktPnz7VmbF39+7dUCqV+PLLL3X2ExERgdGjR5s0dqD0k7xxgjfTef78OWJiYtCmTRu89957QnudOnXwwQcfoFatWq+dFTo7O7vEZ2qIXsZcIUMwX0hfzBUSk1lerQ4YMAADBgwodtmyZct0Xu/cudMUIemNk7yZr/T0dMTGxuLatWvQarXIycnB8OHDdQqFl4sJIiIiItJllsUDkTFlZ2fj9OnTuHjxIjQajdCenp6OrKwsg4cuM9ZQZ1TxMVfIEMwX0hdzhcTE4oEqLIVCgXPnzuH8+fM6c3vY2NigXbt2aN26tc5s0YbstzTb0buHuUKGYL6QvpgrJCYWD1ThaDQanDt3DmfPnhVmZQRejObl6+sLPz+/txqpoqCgwBhh0juAuUKGYL6QvpgrJCYWD1ThSCQS/P3330LhIJPJ0LJlS/j7+8Pe3t4o+yfSB3OFDMF8IX0xV0hMLB6o3NNoNJBIJMKHaeGwq1FRUWjatCkCAwPh5ORktOO9acJCokLMFTIE84X0xVwhMUnFDoCotLRaLW7duoU1a9bg7t27Ostq1qyJCRMmoHv37kYtHADOME36Y66QIZgvpC/mComJdx6o3NFqtbh//z5OnjyJ5ORkAEBMTAzq16+vcyuXo1EQERERGReLBypXHj9+jJMnT+LBgwc67ZaWlsjNzTXKMw1vwol5SF/MFTIE84X0xVwhMbF4oHIhJSUFMTExuHPnjk57lSpVEBISUuSuQ1mysrIyyXGo/GOukCGYL6Qv5gqJicUDmTW1Wo2DBw/i2rVrOu3Ozs4IDg5Go0aNTD7qRFZWFtzc3Ex6TCqfmCtkCOYL6Yu5QmJi8fB/cgqeQ1aQ/+YVX5Kr5ANLZU0mk+nM1eDg4ID27dujadOmkMlkIkZGRERE9O5h8fB/lsQ3hnXZd5enN1AoFLC2tta5mxAcHIykpCS0bdsWrVq1En1WTUdHR1GPT+UHc4UMwXwhfTFXSEwsHsgsFBQUICEhAfHx8Xj//ffRqFEjYVnVqlXx8ccfw8LCPNK1oKCA/U1JL8wVMgTzhfTFXCExmcfVWAVhI3OCjaWL2GGUKyqVCn/++SfOnDmD3NxcAC+GXfX29tbplmQuhQMA5Ofnw8HBQewwqBxgrpAhmC+kL+YKicl8rsjKORuZE3p4LYZMwlOqD41GgytXriAuLg6ZmZlCu0QiwXvvvQelUslnGoiIiIjMDK90XzKxzUXYWZZu9AIbSxcWDnoonBU6JiYGcrlcZ1mjRo0QFBQEV1dXkaLTD0e4IH0xV8gQzBfSF3OFxMSr3ZfYWbrB3qqK2GFUWCqVClFRUcKs0IXq16+PoKAgVK9eXaTIDCOXy82+wCHzwFwhQzBfSF/MFRITiwcyGQsLC7i6ugrFg4eHB0JCQlCrVi2RIzOMVqsVOwQqJ5grZAjmC+mLuUJiYvFAZSYlJQVubm6QSqVCW1BQENLS0tC+fXvUq1fP5BO8GQNHuCB9MVfIEMwX0hdzhcTE4oGMTi6XIzY2FtevX0e3bt3QvHlzYZmLiwtGjhwpYnRvz8bGRuwQqJxgrpAhmC+kL+YKiUn65lWI9JOZmYn//ve/WLVqFa5fvw4AiI2NhUqlEjky43p5dCii12GukCGYL6Qv5gqJiXce6K3l5ubi7NmzuHDhAtRqtdBua2sLPz8/ESMjIiIiImNi8UCllp+fL8wKXVBQILRbW1vDz88Pbdq0gbW1tYgRlg1OzEP6Yq6QIZgvpC/mComJxQOVSkFBAVauXImcnByhzcLCAq1atYK/vz9sbW1FjK5sqVSqClkUkfExV8gQzBfSF3OFxMTigUrFysoKXl5euHjxIqRSKZo3b46AgAA4OjqKHVqZUygUsLe3FzsMKgeYK2QI5gvpi7lCYmLxQG9UOCu0p6cnLCz+f8oEBARApVIhMDAQLi4uIkZIRERERKbA4oFKpNVqcffuXcTExODZs2fo3LkzfH19heWVKlVCz549RYxQHJzVk/TFXCFDMF9IX8wVEhOHaqViPXz4EJs2bcKOHTvw7NkzAMCZM2d0Hox+V6Wnp4sdApUTzBUyBPOF9MVcITHxzgPpSE5ORkxMDO7du6fTXr16dYSEhMDS0lKkyMyHRqMROwQqJ5grZAjmC+mLuUJiYvFAAIDU1FScOnUKN2/e1Gl3c3NDUFAQGjRoAIlEIlJ05oUFFOmLuUKGYL6QvpgrJCYWD4T8/HysX78eSqVSaKtUqRLat28PHx8fSKXs3fYyOzs7sUOgcoK5QoZgvpC+mCskJl4VEqytrdGiRQsALz6QQkNDMW7cODRr1oyFQzEyMjLEDoHKCeYKGYL5QvpirpCYeOfhHaNQKJCYmIg2bdrAyspKaPf394ednR1at26t005EREREVIjFwztCqVQiMTERZ86cgUKhAPBinoZCdnZ28Pf3Fyu8coUT85C+mCtkCHPIF61WC41GwwdyzZyNjY1OV2N6d0mlUkilUpM+l8rioYJTq9W4fPky4uLikJ2dLbQnJCTAz89PZ9I30g//qJK+mCtkCLHzRa1WIzMzkxel5YBWq+UgJiSwtLREpUqVIJPJTHI8XjlWUBqNBjdu3MCpU6eKjAfdpEkTBAUFsXAopby8PD6sRnphrpAhxMwXrVYLuVwOqVRq0osQKh21Ws2fEQF4kQs5OTmQy+WoXLmySYpKXj1WMFqtFn/99RdiYmKQkpKis8zLywvBwcGoUqWKSNEREZE5UqvV0Gq1cHR05HNv5YBEIuEXgATgxV0HqVSK9PR0qNVqk+QFM6+CUSgU2L9/v85M0LVr10ZISAhq1KghYmQVh4uLi9ghUDnBXCFDiJkvWq0WANgVppzgXQd6WeHvbeHvcVnjOJwVjK2tLXx9fQEA7u7uGDJkCIYOHcrCwYgyMzPFDoHKCeYKGYL5QvpSq9Vih0DvMN55KMeeP3+OM2fOIDQ0FLa2tkK7r68vqlWrBi8vL36LVAb4oU36Yq6QIZgvRFQesHgoh9LT0xEbG4tr164JfVQ7dOggLLexsYG3t7d4AVZwlpaWYodA5QRzhQzBfCF98YtBEhO7LZUjOTk5OHLkCFauXImrV68Kfdtu3rzJb6xMyBzGYqfygblChmC+kL6kUuNcvt2+fRu1a9dGVlaWUfZHxrdq1Sr0799f7DB0sHgoBxQKBU6ePInly5fjwoULwljgNjY26NChAyIiIvjwlAm9OvQtUUmYK2QIc8oXjVaDnIIUUf9ptPrNezFmzBjY2NjAxsYGDg4OaNCgAb744gthQtSXHTx4EKGhoahcuTJcXFwQGBiIjRs3Frvf3bt3IywsDFWrVoWbmxvatGmDefPmQS6Xv9W5NQZjfWH4P//zP5gwYQIcHR2LLGvWrBkqVaqE5OTkIsu8vb2xdOnSIu1z586Fn5+f8Loi/WzkcjlGjhyJKlWqoFq1avjoo4905s961f3794X3/uq/nTt3AgA2btxY4jrPnj0DAIwcORIXL15EbGxsmb03Q7HbkhlTKpU4f/48zp49i/z8fKHd0tISvr6+8PPzg42NjYgREhFRRZSnTMUPZ2qJGsMM/4ewt9JvaPEuXbpg5cqVUCqV+PPPPzFmzBhIJBLMmzdPWOeXX37B559/jqlTp2Lp0qWwsrLC/v37MXHiRFy/fh3ff/+9sO5XX32FhQsX4l//+hf+93//F+7u7vjrr7+wevVqbN68GZ9++qnR329xCgoKymzo3AcPHuDgwYNYtGhRkWVxcXHIy8tDv379EBUVhc8//7zUx6koP5sPP/wQycnJ+O2336BUKjFu3Dh8/PHHJRY4tWrVwv3793Xa1qxZg8WLF6Nr164AgIEDB6JLly4664wdOxYKhQJVq1YFAFhZWWHw4MH45Zdf0L59e+O/sVJg8WDG1Go1zp07JxQOMpkMLVq0QEBAAG9vi4jnnvTFXCFDMF9Kz9raGtWrVwfw4qKtU6dOOHbsmHCB+vDhQ8yYMQMTJ07E3Llzhe0mTZoEKysrTJkyBf3794efnx8SEhIwf/58LFiwQOdCtE6dOggNDX3tHaJHjx5h1qxZOHr0KPLz89GwYUMsWbIEfn5+GDNmDDIyMrB9+3Zh/c8//xyXLl3CkSNHAABhYWFo0qQJLCwssGXLFjRp0gTVq1eHRqNBVFSUsJ1arUbt2rXxww8/YPjw4dBoNFiwYAHWrFmDp0+fwsvLC7NmzXptd5edO3eiWbNmqFmzZpFl69evx+DBgxEcHIypU6e+VfFgLj+bt3Hz5k38/vvviIuLQ+vWrQEAixcvRp8+ffD9998XO6KlTCYT3nehffv2ITw8HA4ODgBejJD58oA3KSkpOHHiBJYvX66zXffu3dGjRw/k5eXprC8WdlsyYzY2NvD394dEIkHTpk0xbtw4hIWF8Q+MyEw1jjKVf8wVMgTzxTiuXbuGs2fP6nxjv3v3biiVSkyaNKnI+mPGjIGDgwO2bdsGANi6dSscHBzw0UcfFbt/Z2fnYtuzs7MRFhaGpKQk7NixAwkJCZgyZYrQ1VhfUVFRsLKywh9//IFly5Zh6NCh+O2333S6yBw5cgS5ubno06cPAGD+/PnYtGkTli1bhsTEREycOBGjRo1CTExMiceJi4tDq1atirRnZWVh165d+OCDD9C5c2dkZmYarcuMWD8bAGjZsiXc3NxK/Ne7d+8Stz179iycnZ2FwgEAOnXqBKlUioSEhDe86xcSExNx6dIlfPjhhyWus2nTJtjZ2RUp+lq3bg2VSoX4+Hi9jlXWeOfBDGi1Wty+fRtnz57VqUgBoFWrVqhfvz4qV64sYoT0stzcXLOo/Mn8MVfIEMyX0jt48CDc3NygUqmQn58PqVSKxYsXC8vv3LkDJycnuLu7F9nWysoKdevWxZ07dwAAf/31F+rWrWvw6Fdbt27F8+fPERcXB1dXVwBA/fr1DX4vnp6e+Pbbb4XX9erVg729Pfbu3Ythw4YBAKKjo9GzZ084OjoiPz8f8+fPx8GDB9GuXTthm9OnT2P16tUIDg4u9jgPHjwotnjYtm0bPD090bhxYwAvutasX7++1F1mzOFnAwB79uyBSqUqcfnruoE/ffoUVarodqGzsLCAq6srnj59qtfx169fj4YNG8Lf3/+16wwePLjI54CdnR2cnJzw4MEDvY5V1lg8iOz+/fs4efIknjx5AgA4ffq0Tv83S0tLFg5ERGRStpZumOH/UPQY9BUSEoKlS5ciJycHP/30EywsLNCvX79SHbe0d4AuX76M5s2bC4VDabVs2VLntYWFBcLDw7F161YMGzYMOTk5+O233xAZGQkAuHv3LnJzc9GjRw+d7QoKCtCiRYsSj5OXl1fsBfPGjRsxdOhQ4fXQoUMRFhaGxYsXF/tg9ZuYw88GAGrXrl3qbd9WXl4eoqOjMWvWrBLXOXv2LG7evIm1a9cWu9zW1hZ5eXllFaJBWDyIJCkpCSdPnsQ///yj056SkgKNRmO0YdjI+FxcXMQOgcoJ5goZwpzyRSqR6v2wsjmwt7cXvuVfuXIlfH19sW7dOowaNQoA4OXlhYyMDCQlJRXpn15QUIB79+4hJCREWPf06dNQKpUGfcP9prtGUqm0yMWvUqks9r28asiQIQgLC8OzZ89w7Ngx2NraCl80FnZn2r17d5HnF173sLWbmxvS0tJ02m7cuIFz584hISEB//73v4V2tVqNbdu2YfTo0QCASpUqISMjo8g+MzIyUKlSpSLvR+yfDfCiKHvdN/eBgYHYt29fscuqVauGlJQUnTaVSgW5XI5q1aq98di7du1Cbm6ucOeoOOvWrUPz5s2LvRsEvBjtyVy+TOYVqomlpKRg586d2Lhxo07hUKVKFYSHh+ODDz5g4WDmMjMzxQ6BygnmChmC+WIcUqkU06dPx9dffy18U9u3b19YWlrixx9/LLL+qlWrkJOTg0GDBgEABg8ejOzsbKxYsaLY/Zf0UK6Pjw8uX75c4nChVapUKTLs6aVLl/R6T/7+/vDw8MCOHTuwdetW4f0AQKNGjWBtbY2HDx+ifv36Ov9q1Sp5xKwWLVrg5s2bOm2F3ZMSEhIQHx8v/Pvss8+wYcMGYT1vb2/8+eefRfb5559/wsvLq8RjivWzAV50W3r5Pb3679dffy1x23bt2iE9PR2JiYlC2x9//AGNRgNfX98Styu0fv169OzZs0jXp0LZ2dnYuXNnic9D3L17FwqFAs2bN3/jsUyBdx5MJDc3F8eOHcO1a9d02p2dnREUFITGjRtzxshyghPykb6YK2QI5ovxhIeH44svvsDy5csxefJkvPfee/j2228xY8YMWFtbY9iwYbCwsMCBAwfw1VdfYdKkScL8BH5+fpg6dSpmzJiBpKQk9O7dG+7u7rh79y5Wr16NgICAYocDHTx4MObPn4+BAwdi7ty5qF69Oi5dugR3d3e0a9cOHTp0wKJFixAVFYV27dph8+bNuH79ut4XhIMHD8aqVatw584dHDx4UGh3dHTEpEmTMH36dGg0GgQEBCAzMxNnzpyBo6MjRowYUez+wsLCMGHCBKjVashkMiiVSmzevBlfffUVmjRporPuqFGj8OOPP+L69eto3LgxJk6ciM6dO+P7779H3759oVarER0djXPnzuGnn34yu58N8Hbdlho2bIguXbrg448/xtKlS6FUKjF58mQMHDhQuFvy+PFjdOvWDWvWrNEpKO7evYvY2Fjs3bu3xP1v374dKpVKp7vYy+Li4lC3bt1SPUNTFvgVt4lYWlrq3GlwcHBA165dMXbsWDRp0oSFQzliYcGam/TDXCFDMF+Mx8LCAuPHj8eiRYuQk5MDAJg4cSK2bduGuLg4BAQEoHXr1oiOjsZPP/2kM48AAMybNw8bNmxAfHw8evXqhVatWmHGjBnw8fHB8OHDiz2mlZUVDhw4gKpVq6Jv375o06YNFixYIEziGhYWhlmzZuHf//43AgMDkZ2d/dpuLK8aOnQobty4gRo1ahR56HbOnDmYNWsW/vOf/6BFixbo3bs3Dh06hDp16pS4v65du8LCwgLHjx8HABw4cACpqanFjjrUsGFDNGzYEOvXrwfw4k7I3r17cfjwYXTs2BFdunTB2bNncejQoSKFx6vE+NkYw/r16+Ht7Y1u3bqhb9++CAgIwC+//CIsVyqVuH37NnJzc4tsV7NmTYSGhpa47w0bNqBPnz4ljha1bds2REREGOV9GINEoVC802PDZWZmomrVqvhsF/BVqP4T0ryJSqUq8ocgMTERMTEx8Pf3R6tWrUo1WgCJr/BbGqI3Ya6QIcTMF6VSibS0NLi4uPBvUzmg1WqN8qXj8uXLceDAARw4cMAIUVFZuH79Ot5//31cuXIFTk5Oxa7zut/fwuvcZ8+eFXkepbT4NYeRFRQU4Pz58zh//jxGjhyp84Nu3rw5GjduzFmhy7n09HS4uek/Cgi9u5grZAjmC+lLrVYb5U7VmDFjkJ6ejqysrFKNpERl78mTJ1izZk2JhYMYWDwYiUqlwsWLF3H69GnhllVsbKzO0GkymYzfQhIREZFZsLCwwMyZM8UOg16jc+fOYodQBIuHt6TRaHD16lXExsbqjJQhkUggk8mMdmuRzIednZ3YIVA5wVwhQzBfSF8clZHExOKhlLRaLW7duoVTp04hNTVVZ1nDhg0RFBTE289EREREVKGweCiFjIwM7N69u8h4zfXq1UNwcDCqV68uUmRkCrm5uW+cDIgIYK6QYcTMl8IutaWZfItMj5PJ0ssKJxs0Vdd4Fg+l4ODgoDNFuIeHB0JCQl47GQsREZG5kkqlsLW1FWYrZgFh3tRqdZHZqundpFQqkZ2dDVtbW5MVlCwe9PDqKAQymQxBQUGIj49HSEgI6tWrx+ca3iEljcNM9CrmChlC7HxxcHAAAKGAIKLywdbWVvj9NQUWD6+RlpaGU6dO4ebNm4iIiEDlypWFZU2aNOHkbu+o7OxssxoyjcwXc4UMIXa+SCQSODo6wt7enrNdm7ns7GyTXiyS+ZLJZCbvwsbioRhZWVmIi4vD5cuXodFoAACnTp1Cv379hHVYNLy7VCqV2CFQOcFcIUOYS75IpVL2pzdzGo2GXctINGZZPOzcuRObN2+GXC6Hp6cnJk+ejMaNG5e4/vHjx7Fq1SokJyfDw8MDEyZMQEBAgMHHzctT4FzscSQmJup8iNva2sLDw4PDrhIA0z2QROUfc4UMwXwhfTFXSExm99XC0aNHsXTpUkRERGDt2rXw9PTElClTkJaWVuz6V65cwZw5c9CzZ0+sW7cOQUFBmDVrFu7du2fQcS0eB2LDmm2Ij48XCgcrKyu0b98e48ePh6+vLwsHAgCjTe9OFR9zhQzBfCF9MVdITGZXPERHR6NXr17o0aMH6tati2nTpsHa2hoHDhwodv1t27ahbdu2GDZsGOrUqYNx48bB29sbO3bsMOi4lo+DoCx4MdSVhYUF/Pz8MH78eLRv3x7W1tZv/b6o4iipkCV6FXOFDMF8IX0xV0hMZtVtSalU4tatWxgxYoTQJpVK0aZNG1y9erXYba5du4bBgwfrtLVt2xanTp0qdv2CggJhPFzg/48qkZ+fD4lEgqZNm6Jdu3ZwcHCASqXSmTWaCHjxTAz7mpI+mCtkCOYL6Yu5QvrKysoCAKMO7WtWxUN6ejrUajVcXV112l1dXfHgwYNit0lNTS12/VdnfS4UGRmJtWvXCq8LCgoAAIsXL36b0ImIiIiIzJJcLjfaaG5mVTyYwogRIzBkyBDhdVZWFnr16oV9+/bB3t5exMioPMjJyUG/fv2we/du5gu9FnOFDMF8IX0xV8gQOTk56N27t850A2/LrIoHZ2dnyGQyyOVynXa5XF7k7kIhNze3Ytd3c3Mrdn0rKytYWVkVaatUqRJ/CemNZDIZZDIZ84XeiLlChmC+kL6YK2QImUwGKysrow6/bFYPTFtaWqJBgwY4f/680KbRaHDhwgX4+PgUu02TJk1w4cIFnbaEhAQ0adKkTGMlIiIiInrXmFXxAACDBw/G/v37cfDgQdy/fx8LFiyAQqFAjx49AABz587Fr7/+Kqw/aNAgnD17Flu2bME///yDNWvW4ObNmxgwYIBYb4GIiIiIqEIyq25LABAaGor09HSsXr0acrkcXl5eWLhwodBt6enTpzrzLTRt2hRz5szBypUrsWLFCnh4eOC7775DvXr19DqepaUlIiIiOGoB6YX5QvpirpAhmC+kL+YKGaIs8kWiUCiMN3YTERERERFVWGbXbYmIiIiIiMwTiwciIiIiItILiwciIiIiItILiwciIiIiItLLO1E87Ny5E+Hh4ejYsSPGjh2L69evv3b948ePY+jQoejYsSNGjBiB06dPmyhSMgeG5Mu+ffswYcIEvP/++3j//ffx2WefvTG/qOIw9LOl0NGjRxEYGIiZM2eWcYRkTgzNl6ysLCxcuBC9e/dGhw4dMGTIEP49ekcYmivR0dEYMmQIOnbsiH79+uHHH39Efn6+iaIlsVy8eBHTp09H7969ERgYiJiYmDduk5iYiFGjRqFDhw4YNGgQfvvtN4OPW+GLh6NHj2Lp0qWIiIjA2rVr4enpiSlTpiAtLa3Y9a9cuYI5c+agZ8+eWLduHYKCgjBr1izcu3fPxJGTGAzNl8TERISFheGnn37CihUrULVqVUyePBkpKSkmjpxMzdBcKfTkyRMsW7YMzZs3N1GkZA4MzRelUolJkybhyZMn+Oabb7BlyxbMmDEDVapUMXHkZGqG5srvv/+O5cuXIyIiAps3b8bMmTNx7NgxrFixwsSRk6nl5eXB09MTU6dO1Wv9pKQkTJs2Da1atcL69esxaNAg/PDDDzh37pxBx63wxUN0dDR69eqFHj16oG7dupg2bRqsra1x4MCBYtfftm0b2rZti2HDhqFOnToYN24cvL29sWPHDhNHTmIwNF/mzJmD/v37w9vbG7Vr18bMmTOh0Wh0ZkmnisnQXAEAtVqNr7/+GqNHj0aNGjVMGC2JzdB8OXDgADIzM/H999+jWbNmcHd3R8uWLeHl5WXiyMnUDM2VK1euoGnTpujSpQvc3d3Rtm1bhIWF4caNGyaOnEzN398f48aNQ0hIiF7r79mzB+7u7pg4cSLq1KmDAQMGoEOHDoiOjjbouBW6eFAqlbh16xZ8fX2FNqlUijZt2uDq1avFbnPt2jW0adNGp61t27a4du1amcZK4itNvrxKoVBApVKhUqVKZRUmmYHS5sq6devg4uKCXr16mSJMMhOlyZfY2Fj4+Phg4cKF6NmzJ4YPH44NGzZArVabKmwSQWlypWnTprh165bQtenx48c4c+YM2rVrZ5KYqfy4evVqsde4+l7jFDK7GaaNKT09HWq1WpidupCrqysePHhQ7DapqanFrp+amlpmcZJ5KE2+vOrXX39F5cqVi/xyUsVSmly5dOkSDhw4gPXr15sgQjInpcmXpKQkJCYmokuXLliwYAEePXqEhQsXQq1WIyIiwhRhkwhKkytdunRBRkYGJkyYAK1WC7Vajb59+2LkyJGmCJnKEblcXiS3XFxckJOTg/z8fFhbW+u1nwpdPBCZUmRkJI4ePYply5bp/QtI74acnBzMnTsXM2bMgLOzs9jhUDmg1Wrh4uKC6dOnQyaToWHDhnj+/Dk2b97M4oF0JCYmYuPGjZg6dSqaNGmCR48e4ccff8S6deswatQoscOjCqhCFw/Ozs6QyWSQy+U67cVVXoXc3NyKXd/Nza3M4iTzUJp8KbR582ZERUVhyZIl8PT0LMswyQwYmiuPHz/GkydPMGPGDKFNo9EAAIKDg7F582Z4eHiUbdAkmtL+LbKwsIBMJhPaateujdTUVCiVSlhaWpZpzCSO0uTKqlWr0LVrV/Tu3RsAUL9+fSgUCvzwww8YOXIkpNIK3UOdDODq6lokt9LS0mBvb2/Ql54VOqMsLS3RoEEDnYdXNRoNLly4AB8fn2K3adKkCS5cuKDTlpCQgCZNmpRprCS+0uQLAGzatAnr16/HwoUL0ahRI1OESiIzNFdq166NyMhIrF+/XvjXvn17YcSLatWqmTJ8MrHSfLY0bdoUjx49EopMAHj48CHc3NxYOFRgpcmV/Pz8IgVC4WutVlt2wVK54+PjU+w17uuucYpToYsHABg8eDD279+PgwcP4v79+1iwYAEUCgV69OgBAJg7dy5+/fVXYf1Bgwbh7Nmz2LJlC/755x+sWbMGN2/exIABA8R6C2RChuZLVFQUVq1ahVmzZsHd3R2pqalITU1Fbm6uWG+BTMSQXLG2tka9evV0/jk4OMDOzg716tXjxeA7wNDPln79+iEzMxNLlizBgwcPcPr0aWzcuBHh4eFivQUyEUNzJTAwELt378bRo0eRlJSE+Ph4rFq1CoGBgTp3rqjiyc3Nxe3bt3H79m0AL56Vun37NpKTkwG8eA5z7ty5wvp9+/ZFUlISfv75Z/zzzz/YtWsXjh8/jsGDBxt03ArdbQkAQkNDkZ6ejtWrV0Mul8PLywsLFy4Ubv89ffoUEolEWL9p06aYM2cOVq5ciRUrVsDDwwPfffcd6tWrJ9ZbIBMyNF92794NpVKJL7/8Umc/ERERGD16tEljJ9MyNFfo3WZovlSrVg2LFy/Gjz/+iJEjR6Jy5coYOHAghg8fLtZbIBMxNFdGjhwJiUSClStXIiUlBS4uLggMDMS4cePEegtkIjdv3sTEiROF10uXLgUAdOvWDV9++SVSU1Px9OlTYXmNGjXwn//8Bz/99BO2b9+OKlWqYMaMGWjbtq1Bx5UoFAre0yIiIiIiojeq8N2WiIiIiIjIOFg8EBERERGRXlg8EBERERGRXlg8EBERERGRXlg8EBERERGRXlg8EBERERGRXlg8EBERERGRXlg8EBERERGRXlg8EBGZicTERAQGBiIxMVHsUMpUYGAg1qxZo9e64eHh+Oabb8o4IiIi0peF2AEQEZV3v/32G7799ttilw0fPhwTJkwwcUT6ezV2KysrVKtWDb6+vhg1ahRcXV3LPIYrV64gPj4egwYNgqOjY5kfTx/h4eFITk4WXtvY2KBu3boIDw9Ht27dSrXP06dP48aNGxg9erSxwiQiMjkWD0RERjJmzBjUqFFDp61u3boiRWOYwtjz8/Nx+fJl7NmzB2fPnkVkZCRsbGyMeqzjx49DJpMJr69cuYK1a9eie/fuRYqHLVu2QCoV5ya5l5cXhg4dCgB4/vw59u/fj2+++QZKpRK9e/c2eH9nzpzBrl27WDwQUbnG4oGIyEjatWuHRo0aiR1Gqbwce+/eveHk5IStW7fi1KlTCAsLM+qxrK2t9V7XysrKqMc2RJUqVdC1a1fhdffu3TFw4EBER0eXqnggIqoIWDwQEZWx5ORkREVF4fz583j69ClsbGzQunVrfPLJJ3B3d3/ttg8fPsSvv/6KK1euIDs7G05OTmjWrBmmT58OBwcHYb3Dhw8jOjoaf//9N6ytreHn54dPPvkE1apVK1XMrVu3xtatW5GUlAQAUKlUiIyMxMGDB5GSkgI3NzeEhYUhIiJC5wL/xo0bWLlyJW7duoW8vDy4ubmhVatW+OKLL4R1AgMDERERgdGjR2PNmjVYu3YtAGDAgAHCOjt27IC7uzvCw8PRsmVLfPnll7hx4wbGjBmDf//73+jevbtOvOfOncOUKVMwf/58BAYGAgBSUlKwatUqnD59GtnZ2ahZsyaGDh2Knj17luqcuLi4oHbt2rh7965O+8WLF7Fjxw5cv34dcrkcLi4u6NChA8aPHy8USt988w0OHTokvP9CcXFxAACNRoPt27dj3759SEpKgr29PYKDgzF+/HhUqlSpVPESEZUFFg9EREaSk5OD9PR0nTZnZ2fcuHEDV65cQWhoKKpWrYonT55g9+7d+PTTT7Fp06YSuwUplUpMmTIFBQUFGDBgAFxdXZGSkiJcDBcWDxs2bMCqVavQqVMn9OrVC2lpadi5cyc++eQTrFu3rlTPETx+/BgA4OTkBAD4/vvvcejQIXTs2BFDhw7FtWvXEBkZiX/++QffffcdACAtLQ2TJ0+Gs7Mzhg8fDgcHByQnJ+PkyZMlHickJAQPHz7EkSNH8K9//QvOzs7CeXtVo0aNUKNGDRw/frxI8XDs2DE4Ojqibdu2AAC5XI5x48ZBIpEgPDwczs7OOHv2LL777jvk5ORg8ODBBp8TlUqFlJSUIufzjz/+gEKhQN++feHk5ITr169j586dSElJER727tu3L54/f46EhAR89dVXRfY9f/58HDx4ED169MDAgQPx5MkT7Ny5E7dv38by5cthYcE/10RkHvhpRERkJJ999lmRtri4OAQEBKBjx4467YGBgfjoo49w4sQJvP/++8Xu7++//0ZSUhK++eYbne0jIiKE/ycnJ2PNmjUYO3YsRo4cKbR36NABH374IXbt2qXTXpLCwqegoACXL1/GunXrYG1tjcDAQNy5cweHDh1Cr169MHPmTABA//794eLigi1btuDChQto3bo1rly5gqysLCxevFin+9a4ceNKPK6npye8vb1x5MgRBAcHv/FOTOfOnbFlyxZkZmYK38grlUrExMQgJCREuMhesWIF1Go1IiMjhQKoX79+mD17NtauXYu+ffu+sfuUSqUSikG5XI5NmzYhNTUV/fv311nv448/1tlXnz594OHhgRUrViA5ORnVq1eHj48PatWqhYSEBJ2uUABw6dIl7N+/H7Nnz0aXLl2E9latWmHKlCk4fvy4TjsRkZhYPBARGcnUqVNRq1atIu0vX1iqVCrk5OTAw8MDjo6OuHXrVonFQ+GdhXPnzsHf37/YOxQnTpyARqNB586dde56uLq6olatWkhMTNSreHi18KlevTpmz56NKlWqCN1thgwZorPO0KFDsWXLFpw5cwatW7cW4j19+jS8vLzK5Nvyzp07IzIyEidPnkSvXr0AAPHx8cjKykKnTp0AAFqtFidPnkSnTp2g1Wp1zoufnx+OHj2KW7duoVmzZq89Vnx8PHr06KHT1qNHD3z66ac6bS//fPPy8pCfn4+mTZtCq9Xizp07qF69+muP88cff8DBwQG+vr46sTZo0AC2trZITExk8UBEZoPFAxGRkTRq1KjYB6bz8/OxceNG4XkBrVYrLMvJySlxfzVq1MCQIUOwdetW/P7772jevDnat2+Prl27Chfqjx49glarLbEbjr4X8IWFj0wmg6urK9577z1hlKPk5GRIpVJ4eHjobOPm5gZHR0dhSNOWLVuiQ4cOWLt2LaKjo9GyZUsEBwcjLCzMaA8+e3l5oXbt2jh27JhQPBw7dgzOzs5o3bo1ACA9PR1ZWVnYu3cv9u7dW+x+0tLS3nisxo0bY9y4cdBoNLh37x42bNiArKysIuc0OTkZq1evRmxsLLKysnSWZWdnv/E4Dx8+RHZ2donPYugTKxGRqbB4ICIqY4sWLcLBgwcxaNAg+Pj4wN7eHhKJBLNnz4ZGo3ntthMnTkT37t1x6tQpxMfHY8mSJYiMjMTKlStRtWpVaDQaSCQSLFy4sNghTW1tbfWKsaTCxxASiQTz5s3D1atXERcXh3PnzuHbb7/Fli1bsHLlStjZ2b3V/gt17twZGzZsQHp6Ouzs7BAbG4vQ0FDhor7wnHbt2rXEORk8PT3feBxnZ2f4+voCANq2bYvatWtj2rRp2L59u3AXRq1WY9KkScjKysLw4cNRu3Zt2NjYICUlBfPmzXvjzxd4cafExcUFs2fPLjEOIiJzweKBiKiMnThxAt26dcPEiROFtvz8fL2+lQaA+vXro379+vjwww9x5coVjB8/Hnv27MG4ceNQs2ZNaLVauLu747333iuT+KtXrw6NRoNHjx6hTp06QrtcLkdWVlaRbjk+Pj7w8fHBRx99hN9//x1ff/01jh49WuLwphKJxKB4OnfujLVr1+LEiRNwdXVFTk4OQkNDheXOzs6ws7ODRqMRLv6NISAgAC1btsTGjRvRp08f2Nra4u7du3j48CG+/PJLnUIlPj6+yPYlvc+aNWvi/PnzaNasmUHD2BIRiUGcmXeIiN4hUqlUp6sS8GIoUrVa/drtcnJyoFKpdNrq1asHqVSKgoICAC9GK5LJZFi7dm2RY2i1WmRkZLx1/P7+/gCAbdu26bRv3bpVZ3lmZmaRGLy8vAC8eKi5JIXPcrza5ackderUQf369XHs2DEcO3YMbm5uaNGihbBcJpOhQ4cOOHHiBO7du1dk+7fpBjRs2DBkZGRg3759wrEA6LxvrVaL7du3F9m28C7Qq++zU6dOUKvVWLduXZFtVCqV3ueFiMgUeOeBiKiMBQQE4PDhw7C3t0fdunVx9epVnD9/XhgFqCQXLlzAokWL0LFjR7z33ntQqVQ4fPgwpFIpOnToAADw8PDA2LFjsXz5ciQnJyMoKAh2dnZ48uQJYmJi0Lt3b3zwwQdvFb+Xlxe6deuGvXv3IisrCy1btsT169dx6NAhBAcHC88aHDp0CLt370ZwcDBq1qyJ3Nxc7Nu3D/b29kKBUZyGDRsCAFauXCl0PwoMDHxtl6vOnTtj9erVsLKyQs+ePYt02ZowYQISExMxduxY9OrVC3Xr1kVmZiZu376NhIQE/Pe//y3VufD390e9evUQHR2N8PBw1K5dGzVr1sTPP/+MlJQU2Nvb48SJE8Ve8Ddo0AAAsGTJEvj5+UEmkyE0NBQtW7ZEnz59EBkZiTt37sDPzw8WFhZ4+PAh/vjjD0yaNKnIaF1ERGJh8UBEVMYmTZoEmUyGI0eOID8/H82aNcOSJUswZcqU127n6emJtm3bIi4uDnv27IGNjQ08PT2xcOFC+Pj4COuNGDECtWrVQnR0tPDtddWqVeHr64v27dsb5T3MnDkTNWrUwKFDhxATEwM3NzeMGDFCZ9jYli1b4saNGzh69CjS0tJgb2+Pxo0bY/bs2ahRo0aJ+27UqBHGjh2LPXv24Ny5c9BoNNixY8cbi4eVK1dCoVCgc+fORZa7urpi9erVWLt2LWJiYrB79244OTmhbt26+Pjjj9/qXAwdOhTz5s3D4cOH0aNHD8yfPx+LFy9GZGQkrK2tERwcjPDw8CKjXIWEhGDAgAE4evQoDh8+DK1WK3S3mj59Oho2bIg9e/ZgxYoVkMlkcHd3R9euXdG0adO3ipeIyJgkCoVC++bViIiIiIjoXcdnHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC8sHoiIiIiISC//D2TvgbA5yFSzAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plot_roc_curve(brca1_df)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [], - "source": [ - "# Check if the AUC is a reasonable value for our CI suite when we run the full model\n", - "assert FAST_CI_MODE or auroc >= 0.73" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Full Sample Performance\n", - "\n", - "The above analysis may have been performed on a subset of the available data.\n", - "\n", - "For comparison, the table below presents the AUROC scores for different model sizes trained on the *full dataset* (100% sample fraction).\n", - "\n", - "| Model Size | Dataset Sample Fraction | AUROC |\n", - "|------------|------------------------|-------|\n", - "| Evo 2 1B | 100% | 0.74 |\n", - "| Evo 2 7B | 100% | 0.87 |\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/sub-packages/bionemo-evo2/pyproject.toml b/sub-packages/bionemo-evo2/pyproject.toml deleted file mode 100644 index 554183a503..0000000000 --- a/sub-packages/bionemo-evo2/pyproject.toml +++ /dev/null @@ -1,47 +0,0 @@ -[build-system] -requires = ["setuptools>=64", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -name = "bionemo-evo2" -readme = "README.md" -description = "[DEPRECATED] Library containing data preprocessing, training, and inference tooling for Evo2. Use bionemo-recipes/recipes/evo2_megatron/ instead." -authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] -requires-python = ">=3.10" -license = { file = "LICENSE" } -dynamic = ["version"] -dependencies = [ - # internal - "bionemo-noodles", - "bionemo-core", - "bionemo-llm", - # external -] - -[project.optional-dependencies] -test = [ - 'bionemo-testing' -] - -[project.scripts] -infer_evo2 = "bionemo.evo2.run.infer:main" -train_evo2 = "bionemo.evo2.run.train:main" -predict_evo2 = "bionemo.evo2.run.predict:main" -preprocess_evo2 = "bionemo.evo2.data.preprocess:main" -splice_evo2 = "bionemo.evo2.data.transcript_extraction:main" -evo2_convert_to_nemo2 = "bionemo.evo2.utils.checkpoint.convert_to_nemo:main" -evo2_nemo2_to_hf = "bionemo.evo2.utils.checkpoint.nemo2_to_hf:main" -evo2_remove_optimizer = "bionemo.evo2.utils.checkpoint.evo2_remove_optimizer:main" - - -[tool.setuptools.packages.find] -where = ["src"] -include = ["bionemo.*"] -namespaces = true -exclude = ["test*."] - -[tool.setuptools.dynamic] -version = { file = "VERSION" } - -[tool.uv] -cache-keys = [{ git = true }] diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py deleted file mode 100644 index 7313887c9d..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings - -warnings.warn( - "bionemo.evo2 (sub-packages/bionemo-evo2) is deprecated and will be removed in a future release. " - "Please use the replacement implementation at bionemo-recipes/recipes/evo2_megatron/ instead.", - DeprecationWarning, - stacklevel=2, -) diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/README.md b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/README.md deleted file mode 100644 index 7567400347..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/README.md +++ /dev/null @@ -1,234 +0,0 @@ -# Evo2 Data Preparation - -## Data Preprocessing - -To streamline the process of preparing and building datasets for training Evo2 on DNA sequences, we provide a configurable preprocessing script (`preprocess.py`) that can preprocess and tokenize a collection of `.fasta` files and convert them into Megatron-compatible `IndexedDataset`. - -```bash -preprocess_evo2 -c -``` - -or if you are running the script outside of the BioNeMo container or you haven't pip-installed `bionemo-evo2`, then you can run the script directly: - -```bash -python sub-packages/bionemo-evo2/src/bionemo/evo2/data/preprocess.py -c -``` - -Configuration YAML parameters for the script can be found in `utils/config.py`: - -```python -class Evo2PreprocessingConfig(BaseModel): - """Pydantic model class specifying the configuration schema for a preprocessed IndexedDataset (.bin, .idx).""" - - # Collection of FASTA files to preprocess and wrap into a single IndexedDataset. - datapaths: list[Path] = [] - # Output directory for the preprocessed dataset .bin/.idx. - output_dir: None | Path = None - # Output file prefix for identifying your datasets. - output_prefix: None | str = None - # Random Sequence-Level Datasplit - train_split: float = 0.7 - valid_split: float = 0.2 - test_split: float = 0.1 - # Overwrite existing binaries. Otherwise, skip already preprocessed datasets. - overwrite: bool = False - # Raw Preprocessing Transforms - # For every sequence, include a reverse-complemented copy of that sequence in the dataset. Doubles the size of the dataset. - embed_reverse_complement: bool = False - # For every sequence, randomly reverse complement the sequence with the specified probability instead of using the original sequence. - random_reverse_complement: float = 0.0 - # For sequences associated with taxonomic lineages specified in `taxonomy_data`, randomly drop out nodes of the lineage with the specified probability. For instance: |d__KINGDOM;p__None;c__CLASS;o__None;f__None;g__None;s__None| - random_lineage_dropout: float = 0.0 - # Transcribe (DNA -> RNA) or Back-Transcribe (RNA -> DNA) the sequence before tokenization. - transcribe: None | Literal["transcribe", "back_transcribe"] = None - # Force upper-case alphabetical characters in the `.fasta` sequences. - force_uppercase: bool = False - # Data type of the IndexedDataset. When using the byte-level tokenizer, uint8 is more than sufficient with a vocabulary size of 255 for ASCII. - indexed_dataset_dtype: str = "uint8" - # Tokenization Transforms - # Append end-of-document token to the end of each sequence. - append_eod: bool = False - # Enforce the length of the sequence, by padding shorter sequences and raising exceptions when the length is exceeded. - enforce_sample_length: None | int = None - # Run ftfy on the sequence characters prior to tokenization to fix encoding issues. - ftfy: bool = False - # Tokenizer - tokenizer_type: Literal[ - "Byte-Level", - "HuggingFace", - "SentencePiece", - "Regex", - "Megatron", - "Tiktoken", - ] = "Byte-Level" # Recommended for DNA / RNA sequences. All other tokenizers have not been tested, and only supported here for experimentation! - # For more information on the behavior of the following parameters, refer to NeMo: - # https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/tokenizer_utils.py - vocab_file: None | Path = None - vocab_size: None | int = 512 - merges_file: None | Path = None - tokenizer_model_name: None | str = None - pretrained_tokenizer_model: None | str = None - special_tokens: None | dict[str, str] = {} - fast_hf_tokenizer: bool = False - # Compute Configuration - # NOTE: If preprocessing a large amount of short individual sequences (< 1000 bp), do NOT use - # multiprocessing (workers > 1) because sequence-level parallel IPC will dominate the preprocessing time! - workers: int = 1 - # Number of sequences to load into memory at any given time during preprocessing. - # Prevents OOM while doing sequence-parallel. - preproc_concurrency: int = 100000 - chunksize: int = 1 - # Data Filters - drop_empty_sequences: bool = False - # If `NNN` is detected in the sequence, drop it from the preprocessed dataset. - nnn_filter: bool = False - # RNG - seed: None | int = None - # Evo2 Taxonomic Lineage Tags - # SeqID Sub-String Indexing: "ABC" will have taxonomy data from "A". - taxonomy_data: dict[str, Evo2TaxonomyLineage] = {} - # Periodicity of injecting phylogenetic lineage tags in the sequence prior to tokenization. - prompt_spacer_length: int = 131072 -``` - -Furthermore, the `taxonomy_data` field contains a map from sequence ID substrings to phylogenetic lineage data of the form: - -```python -class Evo2TaxonomyLineage(BaseModel): - """Pydantic model class that defines the source lineage of a DNA sequence.""" - - kingdom: None | str = None - phylum: None | str = None - clazz: None | str = None - order: None | str = None - family: None | str = None - genus: None | str = None - species: None | str = None -``` - -which gets converted into a lineage string prior to tokenization as a prefix to the sequence: - -``` -# (Example) Escherichia coli -|d__Bacteria;p__Pseudomonadota;c__Gammaproteobacteria;o__Enterobacterales;f__Enterobacteriaceae;g__Escherichia;s__Escherichia coli|ATCGTACGTACATCTCTA... -``` - -In the Evo2 model, this special "token" is masked out in the loss function, so the model will learn to not generate tokens of this form. - -### Testing - -To test equivalence with the reference implementation we first downloaded source-of-truth preprocessed Megatron `IndexedDataset` containing promoters data: - -```bash -$ ls -lah --rwxr-xr-x 1 bionemo bionemo 1.2M Dec 4 00:56 data_promoters_test_text_CharLevelTokenizer_document.bin --rwxr-xr-x 1 bionemo bionemo 20K Dec 4 00:56 data_promoters_test_text_CharLevelTokenizer_document.idx --rwxr-xr-x 1 bionemo bionemo 392M Dec 4 00:56 data_promoters_train_text_CharLevelTokenizer_document.bin --rwxr-xr-x 1 bionemo bionemo 6.6M Dec 4 00:56 data_promoters_train_text_CharLevelTokenizer_document.idx --rwxr-xr-x 1 bionemo bionemo 1.2M Dec 4 00:56 data_promoters_valid_text_CharLevelTokenizer_document.bin --rwxr-xr-x 1 bionemo bionemo 20K Dec 4 00:56 data_promoters_valid_text_CharLevelTokenizer_document.idx -``` - -Next we acquired the `.fasta` file that was used to generate this, and configured our scripts to preprocess the sequence data into equivalent Megatron `IndexedDataset`. - -```yaml -# mmseqs_promotors_config.yaml -- datapaths: ["/workspace/bionemo2/data/mmseqs_results_rep_seq_distinct.fasta"] - output_dir: "/workspace/bionemo2/data" - output_prefix: promoters_uint8_distinct - train_split: 1.0 # We're just going to dump everything into a single file and compare against the union of the 3 splits in the SoT. - valid_split: 0.0 - test_split: 0.0 - overwrite: True - embed_reverse_complement: true - random_reverse_complement: 0.0 - random_lineage_dropout: 0.0 - include_sequence_id: false - transcribe: "back_transcribe" - force_uppercase: true - indexed_dataset_dtype: "uint8" - tokenizer_type: "Byte-Level" - vocab_file: null - vocab_size: null - merges_file: null - pretrained_tokenizer_model: null - special_tokens: null - fast_hf_tokenizer: true - append_eod: true - enforce_sample_length: null - ftfy: false - workers: 1 - preproc_concurrency: 100000 - chunksize: 25 - drop_empty_sequences: true - nnn_filter: true - seed: null # Not relevant because we are not using random reverse complement or lineage dropout. -``` - -To run the preprocessing script, we ran the following command: - -```bash -$ python preprocess.py -c mmseqs_promotors_config.yaml -``` - -To check equivalence of the two preprocessed datasets, we verify that we get the same elements out of our processed dataset as the original, but do not enforce ordering of the data. (`bionemo-noodles` does not sequentially read the `.fasta` file.) - -```python ->>> from megatron.core.datasets.indexed_dataset import IndexedDataset ->>> ds_train_ref = IndexedDataset("./data_promoters_train_text_CharLevelTokenizer_document") ->>> ds_val_ref = IndexedDataset("./data_promoters_valid_text_CharLevelTokenizer_document") ->>> ds_test_ref = IndexedDataset("./data_promoters_test_text_CharLevelTokenizer_document") ->>> ds_train_ours = IndexedDataset("./promoters_uint8_distinct_byte-level_train") ->>> len(ds_train_ours) == len(ds_train_ref) + len(ds_test_ref) + len(ds_val_ref) -True ->>> # Example of what one of these set elements looks like, it's just a string representation of the token list for an ->>> # element of the training dataset. We can then compare all of these to make sure that the two datasets have the ->>> # same set of samples. ->>> ','.join([str(t) for t in ds_train_ref[0]]) -'67,84,71,71,65,71,67,67,84,71,65,67,67,65,84,65,65,71,84,65,71,84,71,71,67,84,65,84,65,65,67,71,65,71,71,65,65,71,65,65,71,65,84,71,65,65,71,65,71,65,84,84,65,71,65,71,65,65,65,65,84,71,65,65,84,71,84,84,67,84,84,71,65,65,71,84,65,71,67,67,65,84,84,71,84,84,71,84,65,71,84,84,71,84,84,71,84,71,84,71,84,71,84,65,84,71,84,84,71,65,71,65,84,71,84,84,84,84,71,71,71,71,84,84,84,71,84,84,65,84,65,84,65,71,65,71,65,71,65,71,65,84,71,84,65,71,84,84,84,71,71,84,71,65,65,71,65,71,84,65,71,71,65,84,84,67,84,67,84,84,65,67,84,65,71,84,71,84,71,65,65,71,65,84,84,65,84,84,65,67,84,65,71,71,84,65,65,67,84,65,65,65,84,71,65,71,65,84,84,67,84,65,84,67,65,65,67,84,65,65,71,84,67,65,84,84,65,71,65,71,65,84,84,71,71,65,65,65,84,71,84,84,84,67,84,84,84,84,65,71,71,84,84,84,65,65,84,65,65,65,71,84,84,84,71,84,84,84,71,65,65,84,84,71,65,71,65,65,65,71,65,71,65,71,65,71,71,65,71,65,71,65,67,65,84,84,71,67,84,84,84,71,65,65,71,71,71,65,71,65,71,84,84,84,71,71,71,84,71,71,71,84,71,65,71,71,65,84,84,71,65,65,65,65,84,71,65,65,65,65,65,84,71,65,65,67,84,71,65,65,65,65,65,71,71,84,71,84,84,65,84,65,71,84,71,65,67,67,84,71,84,67,65,65,65,65,65,65,71,67,84,71,84,71,65,65,71,65,65,71,84,71,84,84,65,84,67,67,65,65,71,65,65,65,84,65,84,71,71,65,84,84,71,67,84,65,65,84,67,65,84,65,67,84,65,67,84,71,84,84,67,65,84,84,65,84,71,65,84,84,84,84,65,84,71,84,71,84,67,65,84,71,84,71,84,71,84,71,67,67,84,65,84,67,65,84,67,65,84,84,67,67,84,84,65,84,65,84,84,84,84,65,71,84,84,71,71,67,65,65,65,65,65,65,65,65,65,65,65,71,65,67,84,84,71,71,65,65,71,84,65,84,84,71,65,65,65,65,67,67,65,65,65,84,67,84,71,65,84,67,84,67,65,65,67,67,84,65,71,65,67,65,65,71,84,67,71,65,84,84,65,65,65,71,67,84,65,65,65,67,67,71,65,65,65,65,67,67,71,65,65,84,67,67,67,71,65,67,67,71,71,84,84,65,65,84,84,71,65,65,65,65,67,67,71,65,84,67,67,65,0' ->>> # Create a set of all of these elements: ->>> all_ref_data = {','.join([str(t) for t in rec]) for ds in [ds_train_ref, ds_val_ref, ds_test_ref] for rec in ds} ->>> # Verify that there is no redundancy so we can do set equality safely ->>> len(all_ref_data) == len(ds_train_ours) -True ->>> len(all_ref_data) -343504 ->>> all_our_data = {','.join([str(t) for t in rec]) for ds in [ds_train_ours] for rec in ds} ->>> len(all_our_data) -343504 ->>> # Verify set equality to show that we have processed an identical dataset ->>> # (ignoring shuffling order and train/test/val splits) ->>> all_our_data == all_ref_data -True -``` - -## Sequence Splicing & Stitching - -Evo2 has also been trained on spliced DNA and mRNA sequences, where introns are removed leaving only the concatenated exons of the genome. Moreover, "stitched" variants of spliced transcripts have been introduced into Evo2's training dataset, which include 1024 bp of sequence from the promoter and 32 bp around each exon. - -To perform splicing or "stitched" splicing on sequences in a FASTA file given an associated gene transfer format (GTF) file, execute the following command: - -```bash -$ splice_evo2 --help -usage: splice_evo2 [-h] --fasta-path FASTA_PATH --gtf-path GTF_PATH [--output-path OUTPUT_PATH] [--transcript-type {default,stitched}] [--stitched-promoter STITCHED_PROMOTER] [--stitched-intron STITCHED_INTRON] [--stitched-overlap] [--only-longest-transcript] [-v] - -Extract spliced transcripts from a FASTA and GTF. - -options: - -h, --help show this help message and exit - --fasta-path FASTA_PATH - Path to FASTA file to extract transcripts from. - --gtf-path GTF_PATH Path to gene transfer format (GTF) file associated with the FASTA. - --output-path OUTPUT_PATH - Path to output FASTA file. - --transcript-type {default,stitched} - Type of transcript to extract from the GTF and FASTA files for splicing. 'Stitched' transcripts include 1024 bp of sequence from the promoter and 32 bp around each exon. - --stitched-promoter STITCHED_PROMOTER - Number of bp to include in the promoter region when --transcript-type=stitched is used. Defaults to 1024. - --stitched-intron STITCHED_INTRON - Number of bp to include from neighboring introns when --transcript-type=stitched is used. Defaults to 32. - --stitched-overlap Allow overlap of neighboring intron windows when --transcript-type=stitched is used. Defaults to False, i.e. prevents overlap by shortening the intron windows for a contiguous splice. - --only-longest-transcript - Only extract the longest transcript per gene. - -v, --verbose Turn on verbose log messages. -``` diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/__init__.py deleted file mode 100644 index 4c0c148742..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/fasta_dataset.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/fasta_dataset.py deleted file mode 100644 index e19593ff9f..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/fasta_dataset.py +++ /dev/null @@ -1,79 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json -from pathlib import Path -from typing import Callable - -import torch - -from bionemo.noodles.nvfaidx import NvFaidx - - -class SimpleFastaDataset(torch.utils.data.Dataset): - """A simple dataset for Evo2 prediction. - - Currently, this will not work for pre-training or fine-tuning, as that would require: - 1) including "labels" in the input and 2) offsetting/rolling either the labels or - input_ids to handle the off-by-one token prediction alignment. - """ - - def __init__( - self, fasta_path: Path, tokenizer, prepend_bos: bool = True, custom_loss_masker: Callable | None = None - ): - """Initialize the dataset.""" - super().__init__() - self.fasta = NvFaidx(fasta_path) - self.seqids = sorted(self.fasta.keys()) - self.tokenizer = tokenizer - self.prepend_bos = prepend_bos # needed for getting predictions for the requested set of tokens. - self.custom_loss_masker = custom_loss_masker - - def write_idx_map(self, output_dir: Path): - """Write the index map to the output directory.""" - with open(output_dir / "seq_idx_map.json", "w") as f: - json.dump({seqid: idx for idx, seqid in enumerate(self.seqids)}, f) - - def __len__(self): - """Get the length of the dataset.""" - return len(self.seqids) - - def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: - """Get an item from the dataset.""" - sequence = self.fasta[self.seqids[idx]].sequence().upper() - tokenized_seq = self.tokenizer.text_to_ids(sequence) - if self.prepend_bos: # in pretraining we use EOS to start new sequences. - tokens: list[int] = [self.tokenizer.eod] + tokenized_seq - else: - tokens: list[int] = tokenized_seq - loss_mask = torch.ones_like(torch.tensor(tokens, dtype=torch.long), dtype=torch.long) - if self.custom_loss_masker is not None: - custom_loss_mask = self.custom_loss_masker(tokens) - loss_mask &= custom_loss_mask - if self.prepend_bos: - loss_mask[0] = ( - 0 # mask the eos token which we use for causal offsetting. Later in predict we take the output - ) - # for the first [:-1] tokens which align with the sequence starting after the EOS. - return { - "tokens": torch.tensor(tokens, dtype=torch.long), - "position_ids": torch.arange(len(tokens), dtype=torch.long), - "seq_idx": torch.tensor(idx, dtype=torch.long), - "loss_mask": loss_mask, - } diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/preprocess.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/preprocess.py deleted file mode 100644 index d3fbfdc291..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/preprocess.py +++ /dev/null @@ -1,485 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Module containing data preprocessing and splitting functions for Evo2 in BioNeMo. - -It can also be utilized as a script to dump pre-processed data to JSON. -""" - -import argparse -import multiprocessing as mp -import os -import random -import time -from contextlib import contextmanager -from pathlib import Path -from threading import Semaphore -from typing import Optional - -import numpy as np -import torch -import yaml -from megatron.core.datasets.indexed_dataset import IndexedDatasetBuilder -from nemo.utils import logging - -from bionemo.evo2.data.tokenizer import Evo2Tokenizer -from bionemo.evo2.utils.config import Evo2PreprocessingConfig, Evo2TaxonomyLineage -from bionemo.noodles import back_transcribe_sequence, complement_sequence, reverse_sequence, transcribe_sequence -from bionemo.noodles.nvfaidx import NvFaidx - - -class Evo2Preprocessor: - """Data preprocessing class for Evo2.""" - - BIN = ".bin" - IDX = ".idx" - TRAIN = "train" - VAL = "val" - TEST = "test" - - def __init__(self, params: Evo2PreprocessingConfig | None = None): - """Initialize Evo2Preprocessor. - - Args: - params (Evo2PreprocessingConfig | None): Configuration parameters for preprocessing. - """ - self.tokenizer: Evo2Tokenizer = Evo2Tokenizer(params) - - @staticmethod - @contextmanager - def preprocessing_context_manager(seed: Optional[int] = None): - """Context manager for setting and restoring the random number generator state. - - Args: - seed (int | None): Seed for the random number generator. Defaults to None. - """ - # Track current state. - current_state = random.getstate() - try: - # Set random seed. - random.seed(seed) - yield seed - finally: - # Restore random state. - random.setstate(current_state) - - @staticmethod - def _get_output_filename( - config: Evo2PreprocessingConfig, ext: Optional[str] = None, split: Optional[str] = None, temp: bool = False - ) -> Path: - """Generate the output filename for the preprocessed data. - - Args: - config (Evo2PreprocessingConfig): Configuration object containing preprocessing settings. - ext (Optional[str]): File extension for the output file. Defaults to None. - split (Optional[str]): Data split type (e.g., 'train', 'val', 'test'). Defaults to None. - temp (bool): Flag indicating whether the file is temporary. Defaults to False. - - Returns: - Path: The constructed output file path. - """ - # Get output directory. Defaults to CWD. - output_dir = config.output_dir - if output_dir is None: - output_dir = Path.cwd() - # Pickup output file prefix. - config_prefix = "{}_{}".format(config.output_prefix, config.tokenizer_type.lower().replace(" ", "")) - output_filepath = Path(output_dir) / ( - config_prefix - + (f"_{split}" if split is not None else "") - + (ext if ext is not None else "") - + (".tmp" if temp else "") - ) - return output_filepath - - @staticmethod - def _subsequence_generator(sequence: str, subsequence_length: Optional[int] = None, offset: Optional[int] = None): - """Generate subsequences from a given sequence. - - Args: - sequence (str): The input sequence. - subsequence_length (int | None): Length of each subsequence. Defaults to the length of the sequence. - offset (int | None): Step size for generating subsequences. Defaults to subsequence_length. - - Yields: - str: Subsequences of the input sequence. - """ - subsequence_length = subsequence_length if subsequence_length is not None else len(sequence) - step_size = offset if offset is not None else subsequence_length - for i in range(0, len(sequence), step_size): - yield sequence[i : i + subsequence_length] - - @staticmethod - def _random_reverse_complement(seq: str, prob: float = 0.0, seed: Optional[int] = None): - """Randomly reverse complements a DNA sequence based on a given probability. - - Args: - seq (str): The DNA sequence to potentially reverse complement. - prob (float): The probability of reverse complementing the sequence. Defaults to 0.0. - seed (Optional[int]): The seed for the random number generator. Defaults to None. - - Returns: - str: The original or reverse complemented DNA sequence based on the probability. - """ - with Evo2Preprocessor.preprocessing_context_manager(seed): - if random.random() < prob: - return complement_sequence(reverse_sequence(seq)) - else: - return seq - - @staticmethod - def _reverse_complement_expansion(seq: str): - """Generate a list containing the original and reverse complemented sequence. - - Args: - seq (str): The input DNA sequence. - - Returns: - list[str]: List containing the original and reverse complemented sequence. - """ - return [seq, complement_sequence(reverse_sequence(seq))] - - @staticmethod - def _train_val_test_split(train_weight: float, val_weight: float, test_weight: float, seed: Optional[int] = None): - """Randomly assign a data point to train, validation, or test split based on provided weights. - - Args: - train_weight (float): The weight for the training split. - val_weight (float): The weight for the validation split. - test_weight (float): The weight for the test split. - seed (Optional[int]): The seed for the random number generator. Defaults to None. - - Returns: - str: The split assignment ('train', 'val', or 'test'). - - Raises: - ValueError: If the sum of the weights is zero or negative. - """ - with Evo2Preprocessor.preprocessing_context_manager(seed if seed is not None else None): - # Generate random number. - roll = random.random() - # Rectify and normalize split ratios. - total_weight = abs(train_weight) + abs(val_weight) + abs(test_weight) - if total_weight <= 0: - raise ValueError("Train-validation-test split proportions cannot be zero.") - train_split = abs(train_weight) / total_weight - test_split = abs(test_weight) / total_weight - split = "train" - if roll > train_split: - if roll < 1 - test_split: - split = "val" - else: - split = "test" - return split - - @staticmethod - def _construct_taxonomy_token( - lineage: Evo2TaxonomyLineage, dropout: float = 0.0, seed: Optional[int] = None - ) -> Optional[str]: - """Construct a special Taxonomy token for natural language prompting of DNA generation models. - - Args: - lineage (Evo2TaxonomyLineage): The taxonomy lineage information. - dropout (float): The probability of dropping out segments of the lineage. Defaults to 0.0. - seed (Optional[int]): The seed for the random number generator. Defaults to None. - - Returns: - Optional[str]: The constructed taxonomy token or None if lineage is None. - """ - # If dropout > 0, randomly drop out segments of the lineage for training on incomplete lineages. - with Evo2Preprocessor.preprocessing_context_manager(seed if seed is not None else None): - return ( - "|d__{};p__{};c__{};o__{};f__{};g__{};s__{}|".format( - lineage.domain if random.random() >= dropout else None, - lineage.phylum if random.random() >= dropout else None, - lineage.clazz if random.random() >= dropout else None, - lineage.order if random.random() >= dropout else None, - lineage.family if random.random() >= dropout else None, - lineage.genus if random.random() >= dropout else None, - lineage.species if random.random() >= dropout else None, - ) - if lineage is not None - else None - ) - - def preprocess_data(self, filepath: str, seqid: str, seq: str, seq_idx: int, config: Evo2PreprocessingConfig): - """Preprocess fasta datapaths. - - Args: - filepath (str): Path to the .fasta file. - seqid (str): Sequence ID. - seq (str): DNA sequence. - seq_idx (int): Sequence index. - config (Evo2PreprocessingConfig): Configuration object containing preprocessing settings. - - Returns: - tuple[list[dict], float]: Preprocessed data and the time taken for preprocessing. - """ - # Timing. - start = time.time() - # Retrieve taxonomy lineage string if SeqID has associated taxonomy data. - # Note: Better implemented as a suffix tree substring dictionary, but convenient - # for identifying a large amount of sequences with identical lineages. - # Slow for extremely large dictionaries of (SeqID Substr, Taxonomy) pairs. - lineage = None - for id, tax in config.taxonomy_data.items(): - # Taxonomy ID is a substring of Seq ID. - if id in seqid: - lineage = tax - break - - # Preprocess data. - preproc_data = [] - with self.preprocessing_context_manager( - config.seed + hash(filepath) + seq_idx if config.seed is not None else None - ): - # Randomly reverse complement the sequence. - seq = self._random_reverse_complement(seq, prob=config.random_reverse_complement) - seqs_to_parse = self._reverse_complement_expansion(seq) if config.embed_reverse_complement else [seq] - for seq in seqs_to_parse: - # Sequence Modifiers - if config.force_uppercase: - seq = seq.upper() - if config.transcribe == "transcribe": - seq = transcribe_sequence(seq) - elif config.transcribe == "back_transcribe": - seq = back_transcribe_sequence(seq) - if config.drop_empty_sequences and len(seq) == 0: - continue - if config.nnn_filter and "NNN" in seq.upper(): - continue - - # Construct taxonomy token with random dropout on the lineage categories per sequence. - taxonomy_token = self._construct_taxonomy_token(lineage, dropout=config.random_lineage_dropout) - - # Inject taxonomy lineage tokens every prompt_spacer_length tokens in the sequence. - # If the taxonomy lineage token is not provided, then just take the original sequence. - target_length = ( - config.prompt_spacer_length - len(taxonomy_token) if taxonomy_token is not None else None - ) - taxonomy_injected_sequence = [ - taxonomy_token + str(subseq) if taxonomy_token is not None else str(subseq) - for subseq in self._subsequence_generator(seq, target_length, target_length) - ] - - # Wrap and tokenize. - preproc_data_record = { - "text": "".join(taxonomy_injected_sequence), - } - preproc_data_record["tokens"] = self.tokenizer.tokenize( - preproc_data_record["text"], - use_ftfy=config.ftfy, - enforce_sample_length=config.enforce_sample_length, - append_eod=config.append_eod, - drop_empty_sequences=config.drop_empty_sequences, - ) - preproc_data.append(preproc_data_record) - end = time.time() - return preproc_data, end - start - - def preprocess_data_task(self, file_sequence_config): - """Wrapper function to unpack args for preprocess_data. - - Args: - file_sequence_config (tuple): Tuple containing arguments for preprocess_data. - - Returns: - tuple[list[dict], float]: Preprocessed data and the time taken for preprocessing. - """ - return self.preprocess_data(*file_sequence_config) - - @staticmethod - def _yield_sequences_from_files(config: Evo2PreprocessingConfig, semaphore: Semaphore): - """Iterator over sequences within multiple input documents. Arguments for multiprocessing tasks. - - Utilized to limit the amount of sequences streamed into memory. - - Args: - config (Evo2PreprocessingConfig): Configuration object containing preprocessing settings. - semaphore (Semaphore): Semaphore to limit the number of sequences in memory. - - Yields: - tuple: Arguments for preprocess_data. - """ - - def yielder(fname, semaphore): - # Read FASTA. - index = NvFaidx(fname) - for i, (seqid, sequence) in enumerate(index.items()): - semaphore.acquire() - # Yield filename and sequence within fasta. - yield str(fname), seqid, sequence, i, config - - for fname in config.datapaths: - semaphore.acquire() - yield from yielder(fname, semaphore) - - def preprocess_generator(self, preproc_config: Evo2PreprocessingConfig): - """Main function to preprocess data for Evo2. - - Args: - preproc_config (Evo2PreprocessingConfig): Configuration object containing preprocessing settings. - - Yields: - tuple[dict, float]: Preprocessed sequence data and the time taken for preprocessing. - """ - # Track which splits have been assigned - split_assignments = { - "train": preproc_config.train_split > 0, - "val": preproc_config.valid_split > 0, - "test": preproc_config.test_split > 0, - } - splits_needed = {k for k, v in split_assignments.items() if v} - - # Instantiate multiprocessing pool. Use semaphore to limit the amount of sequences to read into memory. - semaphore = Semaphore(preproc_config.preproc_concurrency + preproc_config.workers) - if preproc_config.workers > 1: - pool = mp.Pool(preproc_config.workers) - # Ordered imap for downstream seeded splitting. - preproc_tasks = pool.imap( - self.preprocess_data_task, - self._yield_sequences_from_files(preproc_config, semaphore), - chunksize=preproc_config.chunksize, - ) - else: - preproc_tasks = ( - self.preprocess_data_task(x) for x in self._yield_sequences_from_files(preproc_config, semaphore) - ) - - # Preprocess data and split results into train, test, and split. - with self.preprocessing_context_manager(preproc_config.seed if preproc_config.seed is not None else None): - for result, elapsed_time in preproc_tasks: - # Release semaphore for the task associated with the result. - semaphore.release() - # If we still need to ensure splits are assigned - if splits_needed: - # Force assign to a needed split - split = splits_needed.pop() - else: - # Regular random assignment - split = self._train_val_test_split( - preproc_config.train_split, preproc_config.valid_split, preproc_config.test_split - ) - for sequence in result: - sequence["split"] = split - yield sequence, elapsed_time - - def preprocess_offline(self, preproc_config: Evo2PreprocessingConfig): - """Offline data preprocessing script for Evo2. - - Args: - preproc_config (Evo2PreprocessingConfig): Configuration object containing preprocessing settings. - """ - # Validate if binaries have already been produced for the given config and overwrite is set to False. - if any( - self._get_output_filename(preproc_config, ext, split).is_file() - for ext, split in zip([self.BIN, self.IDX], [self.TRAIN, self.VAL, self.TEST]) - ): - if not preproc_config.overwrite: - # Skip this dataset! - logging.info( - f"Skipped overwriting (overwrite: False) existing preprocessed data: {preproc_config.output_prefix}" - ) - return - else: - logging.info( - f"Overwriting (overwrite: True) existing preprocessed data: {preproc_config.output_prefix}" - ) - - # Instantiate indexed data builders. - dataset_dtype = getattr(np, preproc_config.indexed_dataset_dtype) - temp_train_bin = self._get_output_filename(preproc_config, self.BIN, self.TRAIN, temp=True) - temp_val_bin = self._get_output_filename(preproc_config, self.BIN, self.VAL, temp=True) - temp_test_bin = self._get_output_filename(preproc_config, self.BIN, self.TEST, temp=True) - train_builder: IndexedDatasetBuilder = IndexedDatasetBuilder(bin_path=str(temp_train_bin), dtype=dataset_dtype) - val_builder: IndexedDatasetBuilder = IndexedDatasetBuilder(bin_path=str(temp_val_bin), dtype=dataset_dtype) - test_builder: IndexedDatasetBuilder = IndexedDatasetBuilder(bin_path=str(temp_test_bin), dtype=dataset_dtype) - logging.info(f"Created temporary binary datasets: {temp_train_bin} {temp_val_bin} {temp_test_bin}") - - # Preprocess data and split results into train, validation, or test. - avg_preproc_time = 0.0 - avg_index_time = 0.0 - count = 0 - for sequence, elapsed_time in self.preprocess_generator(preproc_config): - index_start_time = time.time() - if sequence["split"] == "train": - train_builder.add_item(torch.Tensor(sequence["tokens"])) - train_builder.end_document() - elif sequence["split"] == "val": - val_builder.add_item(torch.Tensor(sequence["tokens"])) - val_builder.end_document() - elif sequence["split"] == "test": - test_builder.add_item(torch.Tensor(sequence["tokens"])) - test_builder.end_document() - index_end_time = time.time() - # Update average preprocessing and indexing time. - avg_preproc_time = (avg_preproc_time * count + elapsed_time) / (count + 1) - avg_index_time = (avg_index_time * count + index_end_time - index_start_time) / (count + 1) - count += 1 - - # Report timing. - logging.info(f"Average preprocessing time per sequence: {avg_preproc_time}") - logging.info(f"Average indexing time per sequence: {avg_index_time}") - logging.info(f"Number of sequences processed: {count}") - - # Write preprocessed index data to disk. Rename temporary binaries to denote preprocessing completion. - train_builder.finalize(idx_path=str(self._get_output_filename(preproc_config, self.IDX, self.TRAIN))) - val_builder.finalize(idx_path=str(self._get_output_filename(preproc_config, self.IDX, self.VAL))) - test_builder.finalize(idx_path=str(self._get_output_filename(preproc_config, self.IDX, self.TEST))) - os.rename(temp_train_bin, self._get_output_filename(preproc_config, self.BIN, self.TRAIN)) - os.rename(temp_val_bin, self._get_output_filename(preproc_config, self.BIN, self.VAL)) - os.rename(temp_test_bin, self._get_output_filename(preproc_config, self.BIN, self.TEST)) - - -def parse_args(): - """Parse arguments for preprocessing.""" - parser = argparse.ArgumentParser(description="Preprocess FASTA files for training Evo2.") - parser.add_argument("-c", "--config", type=str, required=True, help="Path to data preprocessing config JSON.") - return parser.parse_args() - - -def main(): - """Main function to execute the preprocessing script. - - This function parses command-line arguments, reads the configuration file, - and initiates the preprocessing of data as specified in the configuration. - """ - # Parse arguments. - args = parse_args() - # Read config YAML. - with open(args.config, "r") as yaml_fs: - evo2_preproc_config_batch = yaml.safe_load(yaml_fs) - for config in evo2_preproc_config_batch: - start = time.time() - # Convert into Evo2PreprocessingConfig. - evo2_preproc_config = Evo2PreprocessingConfig(**config) - if evo2_preproc_config.output_dir is not None: - evo2_preproc_config.output_dir.mkdir(parents=True, exist_ok=True) - # Instantiate Evo2Preprocessor. - evo2_preprocessor = Evo2Preprocessor(evo2_preproc_config) - # Preprocess data specified in config. - evo2_preprocessor.preprocess_offline(evo2_preproc_config) - end = time.time() - logging.info( - f"Finished preprocessing {evo2_preproc_config.output_prefix} ({evo2_preproc_config.datapaths}) in {end - start:.3f} seconds with {evo2_preproc_config.workers} workers." - ) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.md b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.md deleted file mode 100644 index b151d0a8af..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.md +++ /dev/null @@ -1,282 +0,0 @@ -# Sharded Eden DataLoader Implementation - -## Overview - -The `sharded_eden_dataloader.py` implements a dataloader for genomic sequences that uses pre-computed data structures and SQLite databases for efficient data access. This implementation is designed to significantly reduce the computational overhead during training by moving expensive operations to a pre-processing phase. - -## Key Features - -### 1. Split-Specific Window Databases - -- **Sharded**: Uses separate pre-computed window databases for each split: - - `train_window_db_path`: SQLite database containing window mappings for training data - - `val_window_db_path`: SQLite database containing window mappings for validation data - - `test_window_db_path`: SQLite database containing window mappings for test data - -### 2. SQLite Database Storage - -- **Sharded**: Uses SQLite databases organized by sample: - - **Per-Sample Sequence Databases**: Each sample has its own SQLite file at `sequence_db_dir//glm_dataset_.sqlite` - - **Split-Specific Window Databases**: Pre-computed window mappings stored in separate databases for each data split - -### 3. Virtual Window Pre-computation - -- **Sharded**: Window mappings are pre-computed from Parquet files and stored in split-specific databases - -## Sequence ID Format - -Sequence IDs follow a specific format: `BCR__ECT-SAMPLE1__CT1-1` - -The sample ID can be extracted using: `extract_sample_id(sequence_id)` which implements `".".join(sequence_id.split("__")[1].split("-")[1:])` (returns `SAMPLE1`) - -## Database Schema - -### Per-Sample Sequence Database - -Each sample has its own SQLite file with the following schema: - -```sql -CREATE TABLE sequences ( - contig_id TEXT PRIMARY KEY, - nt_sequence TEXT NOT NULL -); -``` - -### Split-Specific Window Database - -Each split (train/validation/test) has its own window database: - -```sql -CREATE TABLE metadata ( - key TEXT PRIMARY KEY, - value INTEGER NOT NULL -); - -CREATE TABLE window_mappings ( - window_idx INTEGER PRIMARY KEY, - sequence_id TEXT NOT NULL, - window_in_seq_idx INTEGER NOT NULL -); -CREATE INDEX idx_sequence_id ON window_mappings(sequence_id); -``` - -The metadata table stores the `window_size` and `stride` parameters used during pre-computation. - -## Directory Structure - -``` -sequence_db_dir/ -├── SAMPLE1/ -│ └── glm_dataset_SAMPLE1.sqlite -├── SAMPLE2/ -│ └── glm_dataset_SAMPLE2.sqlite -├── SAMPLE3/ -│ └── glm_dataset_SAMPLE3.sqlite -└── ... - -Window databases (separate files): -├── train_windows.db -├── val_windows.db -└── test_windows.db -``` - -## Usage Example - -```python -from bionemo.evo2.run.sharded_eden_dataloader import ShardedEdenDataModule - -# Create the data module -data_module = ShardedEdenDataModule( - sequence_db_dir="path/to/sequence_db_dir", # Directory containing sample folders - train_window_db_path="path/to/train_windows.db", - val_window_db_path="path/to/val_windows.db", - test_window_db_path="path/to/test_windows.db", - seq_length=8192, - micro_batch_size=1, - global_batch_size=4, - num_workers=8, - rc_aug=True, - use_control_tags=True, -) - -# Use with PyTorch Lightning trainer -trainer = pl.Trainer(...) -trainer.fit(model, data_module) -``` - -## Pre-processing Workflow - -### 1. Create Sample Sequence Databases - -For each sample, create its SQLite database: - -```python -import sqlite3 -import os - - -def create_sample_database(sample_id, sequences, output_dir): - """Create SQLite database for a single sample.""" - # Create sample directory - sample_dir = os.path.join(output_dir, sample_id) - os.makedirs(sample_dir, exist_ok=True) - - # Create database - db_path = os.path.join(sample_dir, f"glm_dataset_{sample_id}.sqlite") - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - - # Create table - cursor.execute( - """ - CREATE TABLE sequences ( - contig_id TEXT PRIMARY KEY, - nt_sequence TEXT NOT NULL - ) - """ - ) - - # Insert sequences for this sample - for seq_id, sequence in sequences: - cursor.execute( - "INSERT INTO sequences (contig_id, nt_sequence) VALUES (?, ?)", - (seq_id, sequence), - ) - - conn.commit() - conn.close() - - -# Example usage -# Group sequences by sample_id -from collections import defaultdict - -sequences_by_sample = defaultdict(list) -for seq_id, sequence in all_sequences: # all_sequences is your data - sample_id = extract_sample_id(seq_id) - sequences_by_sample[sample_id].append((seq_id, sequence)) - -# Create database for each sample -for sample_id, sequences in sequences_by_sample.items(): - create_sample_database(sample_id, sequences, "path/to/sequence_db_dir") -``` - -### 2. Create Split Data Files - -Create Parquet files for each split containing sequence metadata: - -```python -import polars as pl - -# Create train split Parquet file -train_data = pl.DataFrame( - { - "contig_id": ["BCR__ECT-SAMPLE1__CT1-1", "BCR__ECT-SAMPLE1__CT1-2", ...], - "length": [1500, 2000, ...], # sequence lengths - } -) -train_data.write_parquet("train_split.parquet") - -# Similarly for validation and test splits -val_data = pl.DataFrame( - {"contig_id": ["BCR__ECT-SAMPLE2__CT1-1", ...], "length": [1800, ...]} -) -val_data.write_parquet("val_split.parquet") - -test_data = pl.DataFrame( - {"contig_id": ["BCR__ECT-SAMPLE3__CT1-1", ...], "length": [1600, ...]} -) -test_data.write_parquet("test_split.parquet") -``` - -### 3. Create Window Mappings Databases using CLI - -The package includes a CLI tool for pre-computing the window databases: - -```bash -# Pre-compute window mappings for training split -python -m bionemo.evo2.run.sharded_eden_dataloader precompute \ - train_split.parquet \ - train_windows.db \ - --window-size 8192 \ - --stride 7992 - -# Pre-compute window mappings for validation split -python -m bionemo.evo2.run.sharded_eden_dataloader precompute \ - val_split.parquet \ - val_windows.db \ - --window-size 8192 \ - --stride 7992 - -# Pre-compute window mappings for test split -python -m bionemo.evo2.run.sharded_eden_dataloader precompute \ - test_split.parquet \ - test_windows.db \ - --window-size 8192 \ - --stride 7992 -``` - -## Implementation Details - -### Key Components - -1. **ShardedEdenDataModule**: - - - Uses separate window databases for each split (train/val/test) - - Manages per-sample SQLite file paths - - Creates datasets with directory and database paths - - Handles distributed training setup with Megatron integration - -2. **ShardedEdenDataset**: - - - Automatically discovers sample SQLite files from directory structure - - Maps sequence IDs to appropriate sample databases using `extract_sample_id()` - - Pre-opens all database connections for performance - - Attaches window database to each sequence connection for efficient JOINs - - Implements sequence caching with connection pooling - - Maintains compatibility with original tokenization and formatting logic - - Optional window access logging for performance analysis - -3. **CLI Tool**: - - - `precompute`: Creates window databases from Parquet files - -### Advanced Features - -#### Window Access Logging - -Enable detailed logging of window access patterns: - -```python -dataset = ShardedEdenDataset( - # ... other parameters ... - log_windows=True, - log_dir="sequence_logs", -) -``` - -This creates CSV logs tracking which windows are accessed, useful for analyzing data loading patterns. - -#### Connection Management - -- All database connections are pre-opened during initialization for performance -- Database connections are pooled and reused per sample -- Sequence data is fetched on-demand using SQL SUBSTR for memory efficiency -- Position IDs are shared across instances to reduce memory usage -- Connections are properly closed when dataset is destroyed - -#### Metadata Validation - -The implementation validates that window databases were created with compatible parameters: - -- Checks stored `window_size` matches dataset `seq_length` -- Checks stored `stride` matches dataset `stride` -- Provides clear error messages for mismatches - -### Error Handling - -- Validates sample SQLite files exist during initialization -- Handles missing sequences gracefully with informative error messages -- Ensures proper cleanup of database connections -- Provides detailed debugging information for database issues -- Validates Parquet file schema during pre-computation diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.py deleted file mode 100644 index fc281ea593..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.py +++ /dev/null @@ -1,937 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# high performance implementation of the EdenDataModule, assuming some items are pre-computed + sharded fasta files and fasta index files. -# Contributed by: BaseCamp Research https://basecamp-research.com/ https://github.com/NVIDIA/bionemo-framework/pull/1091 -import argparse -import csv -import os -import sqlite3 -import time -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union - -import lightning.pytorch as pl -import numpy as np -import polars as pol -import torch -import torch.distributed as dist -from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning.data import WrappedDataLoader -from nemo.lightning.pytorch.plugins import MegatronDataSampler -from nemo.utils import logging -from nemo.utils.import_utils import safe_import -from torch.utils.data import Dataset, default_collate - -from bionemo.core.data.multi_epoch_dataset import ( - IdentityMultiEpochDatasetWrapper, - MultiEpochDatasetResampler, -) - - -# ----------------------------------------------------------------------------- -# Configurable column names -# ----------------------------------------------------------------------------- -# Column name for IDs in input data (e.g., parquet) and in shard SQLite tables -SEQUENCE_ID_COLUMN_NAME = "contig_id" -# Column name for sequence lengths in input data (e.g., parquet) -SEQUENCE_LENGTH_COLUMN_NAME = "length" -# Column name for nucleotide/amino-acid sequence in shard SQLite tables -SEQUENCE_COLUMN_NAME = "nt_sequence" - -_, HAVE_TE = safe_import("transformer_engine") - -if TYPE_CHECKING: - from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec - - -def extract_sample_id(sequence_id: str) -> str: - """Extract sample ID from sequence ID format: BCR__EXT-SAMPLE1__CT1-1.""" - parts = sequence_id.split("__")[1].split("-")[1:] - return ".".join(parts) - - -class ShardedEdenDataModule(pl.LightningDataModule): - """High-performance DataModule that uses pre-computed splits and SQLite databases. - - Key differences from EdenDataModule: - - Train/val/test splits are loaded from numpy array files - - Sequence data stored in per-sample SQLite databases - - Virtual window mappings pre-computed and stored in separate SQLite database - """ - - def __init__( - self, - sequence_db_dir: str, # Directory containing sample SQLite files - train_window_db_path: str, # Path to the pre-computed DB for the training split - val_window_db_path: str, # Path to the pre-computed DB for the validation split - test_window_db_path: str, # Path to the pre-computed DB for the test split - seq_length: int = 8192, - tokenizer: Optional["TokenizerSpec"] = None, - micro_batch_size: int = 1, - global_batch_size: int = 4, - rampup_batch_size: Optional[List[int]] = None, - num_workers: int = 8, - pin_memory: bool = True, - persistent_workers: bool = False, - create_attention_mask: bool = False, - vocab_file: Optional[str] = None, - merges_file: Optional[str] = None, - rc_aug: bool = False, - stride: int = 7992, - window_min_length_threshold: Optional[int] = None, - use_control_tags: bool = False, - seed: int = 42, - num_epochs: int = 1, - log_windows: bool = False, - log_dir: Optional[str] = None, - **kwargs, - ): - """Initialize the ShardedEdenDataModule. See sub-packages/bionemo-evo2/src/bionemo/evo2/data/sharded_eden_dataloader.md for how to prepare the input data.""" - super().__init__() - self.sequence_db_dir = sequence_db_dir - self.train_window_db_path = train_window_db_path - self.val_window_db_path = val_window_db_path - self.test_window_db_path = test_window_db_path - self.seq_length = seq_length - self.micro_batch_size = micro_batch_size - self.global_batch_size = global_batch_size - self.num_workers = num_workers - self.pin_memory = pin_memory - self.persistent_workers = persistent_workers - self.create_attention_mask = create_attention_mask or not HAVE_TE - self.rc_aug = rc_aug - self.stride = stride if stride is not None else 7992 - # Minimum effective window length used at precomputation time. If None or 0, disabled. - self.window_min_length_threshold = int(window_min_length_threshold) if window_min_length_threshold else 0 - self.use_control_tags = use_control_tags - self.init_global_step = 0 - self.seed = seed - self.num_epochs = num_epochs - self.log_windows = log_windows - self.log_dir = log_dir - - if tokenizer is None: - self.tokenizer = get_nmt_tokenizer( - "megatron", - "GPT2BPETokenizer", - vocab_file=vocab_file, - merges_file=merges_file, - ) - else: - self.tokenizer = tokenizer - - # Megatron sampler - self.data_sampler = MegatronDataSampler( - seq_len=self.seq_length, - micro_batch_size=self.micro_batch_size, - global_batch_size=self.global_batch_size, - rampup_batch_size=rampup_batch_size, - ) - - def build( - self, - trainer_max_steps: int, - trainer_val_check_interval: Union[int, float], - trainer_limit_val_batches: Union[int, float], - trainer_limit_test_batches: Union[int, float], - ): - """Build the datasets using pre-computed, split-specific window databases.""" - if not dist.is_initialized() or dist.get_rank() == 0: - print("Creating datasets from pre-computed, split-specific window databases.") - - # Create datasets wrapped with epoch-based resampler - self._train_ds = self._create_epoch_wrapped_sharded_eden_dataset( - window_db_path=self.train_window_db_path, - split="train", - shuffle=True, - ) - - self._validation_ds = self._create_epoch_wrapped_sharded_eden_dataset( - window_db_path=self.val_window_db_path, - split="validation", - shuffle=False, - ) - - self._test_ds = self._create_epoch_wrapped_sharded_eden_dataset( - window_db_path=self.test_window_db_path, - split="test", - shuffle=False, - ) - - if not dist.is_initialized() or dist.get_rank() == 0: - print( - f"Dataset windows: Train={len(self._train_ds)}, Val={len(self._validation_ds)}, Test={len(self._test_ds)}" - ) - - def setup(self, stage: str = "") -> None: - """Setup the data module.""" - assert hasattr(self, "trainer") and self.trainer is not None, ( - "Setup should be completed when trainer and config are attached." - ) - - self.build( - trainer_max_steps=self.trainer.max_steps, - trainer_val_check_interval=self.trainer.val_check_interval, - trainer_limit_val_batches=self.trainer.limit_val_batches, - trainer_limit_test_batches=self.trainer.limit_test_batches, - ) - - def train_dataloader(self) -> TRAIN_DATALOADERS: - """Get the train dataloader.""" - return self._create_dataloader(self._train_ds, mode="train") - - def val_dataloader(self) -> EVAL_DATALOADERS: - """Get the validation dataloader.""" - return self._create_dataloader(self._validation_ds, mode="validation") - - def test_dataloader(self) -> EVAL_DATALOADERS: - """Get the test dataloader.""" - return self._create_dataloader(self._test_ds, mode="test") - - def _create_dataloader(self, dataset, mode, **kwargs) -> WrappedDataLoader: - assert hasattr(self, "trainer") and self.trainer is not None, ( - "Trainer must be attached before creating dataloaders." - ) - self.init_global_step = self.trainer.global_step - self.data_sampler.init_global_step = self.init_global_step - dataloader = WrappedDataLoader( - mode=mode, - dataset=dataset, - num_workers=self.num_workers, - pin_memory=self.pin_memory, - persistent_workers=self.persistent_workers, - collate_fn=getattr(dataset, "collate_fn", default_collate), - **kwargs, - ) - return dataloader - - def _create_epoch_wrapped_sharded_eden_dataset( - self, - *, - window_db_path: str, - split: str, - shuffle: bool, - ) -> MultiEpochDatasetResampler: - """Instantiate `ShardedEdenDataset` and wrap it with `MultiEpochDatasetResampler`. - - By default, `num_epochs=1`, so the wrapped dataset length equals the base dataset length. - """ - base_dataset = ShardedEdenDataset( - tokenizer=self.tokenizer, - sequence_db_dir=self.sequence_db_dir, - window_db_path=window_db_path, - seq_length=self.seq_length, - create_attention_mask=self.create_attention_mask, - stride=self.stride, - window_min_length_threshold=self.window_min_length_threshold, - rc_aug=self.rc_aug, - use_control_tags=self.use_control_tags, - split=split, - log_windows=self.log_windows, - log_dir=self.log_dir, - ) - - wrapped = MultiEpochDatasetResampler( - IdentityMultiEpochDatasetWrapper(base_dataset), - num_epochs=self.num_epochs, - shuffle=shuffle, - seed=self.seed, - ) - return wrapped - - def state_dict(self) -> Dict[str, Any]: - """Called when saving a checkpoint.""" - consumed_samples = self.data_sampler.compute_consumed_samples(self.trainer.global_step - self.init_global_step) - return {"consumed_samples": consumed_samples} - - def load_state_dict(self, state_dict: Dict[str, Any]) -> None: - """Called when loading a checkpoint.""" - try: - from megatron.core.num_microbatches_calculator import ( - update_num_microbatches, - ) - except (ImportError, ModuleNotFoundError): - logging.warning("Megatron num_microbatches_calculator not found, using Apex version.") - from apex.transformer.pipeline_parallel.utils import update_num_microbatches - - consumed_samples = state_dict["consumed_samples"] - self.data_sampler.init_consumed_samples = consumed_samples - self.data_sampler.prev_consumed_samples = consumed_samples - - update_num_microbatches( - consumed_samples=consumed_samples, - consistency_check=False, - ) - self.data_sampler.if_first_step = 1 - - def reconfigure_limit_batches(self): - """Reconfigure trainer.limit_train_batches and trainer.limit_val_batches.""" - self._reconfigure_limit_batches(self.trainer.limit_train_batches, self._train_ds, "train") - self._reconfigure_limit_batches(self.trainer.limit_val_batches, self._validation_ds, "val") - - def _reconfigure_limit_batches(self, limit_batches, dataloader, mode): - """Reconfigure limit_batches for distributed training.""" - try: - from megatron.core.num_microbatches_calculator import get_num_microbatches - except (ImportError, ModuleNotFoundError): - logging.warning("Megatron num_microbatches_calculator not found, using Apex version.") - from apex.transformer.pipeline_parallel.utils import get_num_microbatches - - if isinstance(limit_batches, int): - limit_batches *= get_num_microbatches() - else: - assert isinstance(limit_batches, float) - if limit_batches == 0.0 or dataloader is None: - return - - dl_len_in_micro_batches = len(dataloader) - if len(dataloader) != float("inf"): - if limit_batches == 1.0: - limit_batches = dl_len_in_micro_batches - else: - limit_micro_batches = int(dl_len_in_micro_batches * limit_batches) - if limit_micro_batches == 0 and limit_batches > 0.0: - min_percentage = 1.0 / len(dataloader) - raise ValueError( - f"You requested to check {limit_batches} of the val_dataloader but" - f" {limit_batches} * {len(dataloader)} < 1. Please increase the" - f" `limit_val_batches` argument. Try at least" - f" `limit_val_batches={min_percentage}`" - ) - if limit_micro_batches < get_num_microbatches(): - limit_batches = get_num_microbatches() - else: - limit_batches = limit_batches - limit_batches % get_num_microbatches() - - if mode == "train": - self.trainer.limit_train_batches = limit_batches - else: - self.trainer.limit_val_batches = limit_batches - - -class ShardedEdenDataset(Dataset): - """High-performance Dataset that uses SQLite databases for sequence storage and window mapping. Assumes that the window_db_path points to a database pre-computed for a specific data split (e.g., train, validation, or test).""" - - def __init__( - self, - tokenizer, - sequence_db_dir: str, - window_db_path: str, - seq_length: int, - create_attention_mask: bool = False, - rc_aug: bool = False, - stride: Optional[int] = 7992, - window_min_length_threshold: Optional[int] = None, - use_control_tags: bool = False, - split: str = "train", - log_windows: bool = False, - log_dir: Optional[str] = None, - skip_stats: bool = True, - ) -> None: - """Initialize the ShardedEdenDataset.""" - super().__init__() - self.seq_length = seq_length - self.tokenizer = tokenizer - self.sequence_db_dir = sequence_db_dir - self.window_db_path = window_db_path - self.create_attention_mask = create_attention_mask - self.rc_aug = rc_aug - self.stride = stride if stride is not None else 7992 - self.window_min_length_threshold = int(window_min_length_threshold) if window_min_length_threshold else 0 - self.use_control_tags = use_control_tags - self.split = split - self.skip_stats = skip_stats - # Window access logging setup (lazy init in __getitem__) - self.log_windows = log_windows - # Remember desired log directory for lazy init in worker processes - self._log_dir = log_dir - - # Create mapping from sample_id to SQLite file path - self._create_sample_db_mapping() - - # Pre-open all database connections for performance - self._open_all_sequence_dbs() - - # Validates metadata and sets up the dataset - self._validate_and_setup_db() - - # Prepare control-tag IDs if needed - if self.use_control_tags: - self._prepare_control_tags() - - # Attention mask and position ids - if create_attention_mask: - self.attention_mask = torch.tril(torch.ones((seq_length, seq_length))).unsqueeze(0) < 0.5 - - # Shared position_ids for memory efficiency - if not hasattr(ShardedEdenDataset, "_position_ids") or ShardedEdenDataset._position_ids.size(0) != seq_length: - ShardedEdenDataset._position_ids = torch.arange(seq_length, dtype=torch.int64) - self.position_ids = ShardedEdenDataset._position_ids - - # Counter for periodic commits if logging is enabled - if self.log_windows: - self._log_counter = 0 - - def _open_all_sequence_dbs(self): - """Open all sequence database files ahead of time and attach the window database for efficient cross-database queries.""" - self.db_connections = {} - if not dist.is_initialized() or dist.get_rank() == 0: - print(f"Pre-opening {len(self.sample_db_mapping)} sequence database files...") - - for sample_id, db_path in self.sample_db_mapping.items(): - try: - # URI=true allows for read-only connections if needed and more options - conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) - self.db_connections[sample_id] = conn - except sqlite3.Error as e: - logging.error(f"Failed to open/attach database for sample {sample_id} at {db_path}: {e}") - raise - - def _create_sample_db_mapping(self): - """Create mapping from sample ID to SQLite file path.""" - self.sample_db_mapping = {} - - # Scan the directory for sample SQLite files - db_dir = Path(self.sequence_db_dir) - for sample_dir in db_dir.iterdir(): - if sample_dir.is_dir(): - sample_id = sample_dir.name - db_file = sample_dir / f"glm_dataset_{sample_id}.sqlite" - if db_file.exists(): - self.sample_db_mapping[sample_id] = str(db_file) - - if not self.sample_db_mapping: - raise ValueError(f"No SQLite files found in {self.sequence_db_dir}") - - if not dist.is_initialized() or dist.get_rank() == 0: - print(f"Found {len(self.sample_db_mapping)} sample SQLite files") - - def _validate_and_setup_db(self): - """Connects to the window database, validates its metadata, and computes the length of the dataset for the current split.""" - self.window_db_conn = sqlite3.connect(f"file:{self.window_db_path}?mode=ro", uri=True) - cursor = self.window_db_conn.cursor() - - # Validate metadata - try: - cursor.execute("SELECT key, value FROM metadata") - db_meta = dict(cursor.fetchall()) - - if "window_size" not in db_meta or "stride" not in db_meta: - raise ValueError("Database metadata is missing 'window_size' or 'stride' keys.") - - db_window_size = int(db_meta["window_size"]) - db_stride = int(db_meta["stride"]) - db_min_len_raw = db_meta.get("window_min_length_threshold") - db_min_len = int(db_min_len_raw) if db_min_len_raw is not None else None - - if db_window_size != self.seq_length or db_stride != self.stride: - raise ValueError( - f"Database metadata mismatch! " - f"DB created with window_size={db_window_size}, stride={db_stride}. " - f"Dataset configured with seq_length={self.seq_length}, stride={self.stride}. " - f"Please re-run pre-computation or check your config." - ) - - # Validate presence and value of window_min_length_threshold only if enabled - if self.window_min_length_threshold and self.window_min_length_threshold > 0: - if db_min_len is None: - raise ValueError( - "Database metadata is missing 'window_min_length_threshold'. " - "Please re-run the pre-computation script with an updated version to populate this key." - ) - if db_min_len != self.window_min_length_threshold: - raise ValueError( - f"Database metadata mismatch for window_min_length_threshold! " - f"DB created with window_min_length_threshold={db_min_len}. " - f"Dataset configured with window_min_length_threshold={self.window_min_length_threshold}. " - f"Please re-run pre-computation or align the configuration." - ) - else: - # Case: DB is pruned but runtime threshold is not set (> 0) - if db_min_len is not None and int(db_min_len) > 0: - raise ValueError( - f"Window DB indicates pruning was applied (window_min_length_threshold={db_min_len}), " - "but the current configuration does not set --window-min-length-threshold (> 0). " - "Please set the argument to match the DB or use an unpruned database." - ) - except sqlite3.OperationalError: - raise ValueError( - f"Could not find `metadata` table in {self.window_db_path}. " - "Please ensure the database was created with a recent version of the pre-computation script." - ) - - # Require modern metadata keys - if "total_windows" not in db_meta or "distinct_sequences" not in db_meta: - raise ValueError( - "Database metadata must contain 'total_windows' and 'distinct_sequences'. " - "Please re-run the pre-computation script to create an up-to-date window database." - ) - - # Read counts directly from metadata - self.length = int(db_meta["total_windows"]) - - if not dist.is_initialized() or dist.get_rank() == 0: - print(f"Found {self.length} windows for {self.split} split in {self.window_db_path}.") - - # Distinct sequences directly from metadata - self.distinct_sequences = int(db_meta["distinct_sequences"]) - if not dist.is_initialized() or dist.get_rank() == 0: - print(f"Found {self.distinct_sequences} distinct sequences.") - - def _prepare_control_tags(self): - """Prepare control tag IDs for sequences.""" - self.ctrl_ids_map = {} - - # Query the split-specific DB for the sequences it contains - cursor = self.window_db_conn.cursor() - unique_sequence_ids = [row[0] for row in cursor.execute("SELECT DISTINCT sequence_id FROM window_mappings")] - - # Create control tags for unique sequence IDs in this split - for seq_id in unique_sequence_ids: - # Extract meaningful part from sequence ID for control tag - ctrl_name = seq_id.split("__")[0] if "__" in seq_id else seq_id - self.ctrl_ids_map[seq_id] = self.tokenizer.text_to_ids(f"") - - def __len__(self) -> int: - """Return the length of the dataset.""" - return self.length - - def _get_db_connection(self, sample_id: str) -> sqlite3.Connection: - """Get a pre-opened database connection for a sample.""" - conn = self.db_connections.get(sample_id) - if conn is None: - # This should not happen if _open_all_sequence_dbs was called - # and the sample_id is valid. - raise ValueError(f"No pre-opened SQLite connection found for sample {sample_id}") - return conn - - def reverse_complement(self, seq: str) -> str: - """Compute reverse complement of a sequence.""" - cmap = {"A": "T", "C": "G", "G": "C", "T": "A", "N": "N"} - return "".join(cmap.get(b, b) for b in reversed(seq)) - - def __getitem__(self, idx: np.int64) -> Dict[str, torch.Tensor]: - """Get a single item from the dataset.""" - if idx >= self.length: - raise IndexError(f"Index {idx} out of range for dataset with length {self.length}") - - # The dataloader index `idx` is now the `window_idx` in this split-specific DB. - # Step 1: Get the sequence_id and other info from the window DB. - window_cursor = self.window_db_conn.cursor() - res = window_cursor.execute( - "SELECT sequence_id, window_in_seq_idx FROM window_mappings WHERE window_idx = ?", - (int(idx),), - ).fetchone() - - if res is None: - # run PRAGMA database_list; to check exactly which DBs are open - # and which ones are not - current_dbs = self.window_db_conn.execute("PRAGMA database_list;").fetchall() - - raise IndexError( - f"Window index {idx} which is a {type(idx)} was not found in the database {current_dbs}, which is unexpected." - ) - - sequence_id, window_in_seq_idx = res - - # Log window access if enabled - if self.log_windows: - # Ensure logger is initialised in the current process (e.g., after DataLoader forks) - if not hasattr(self, "_log_writer"): - self._init_window_logger(self._log_dir) - - # Derive sample_id for logging independent of DB connection logic - try: - sample_id_for_log = extract_sample_id(sequence_id) - except Exception: - sample_id_for_log = "unknown" - - # Synchronously write CSV row (no flush here; only on cleanup) - row = [ - int(idx), - sequence_id, - sample_id_for_log, - int(window_in_seq_idx), - int(self._rank), - int(time.time_ns()), - ] - self._log_writer.writerow(row) - self._log_file.flush() - - # if there is only one DB connection, use it directly - if len(self.db_connections) == 1: - conn = next(iter(self.db_connections.values())) - cursor = conn.cursor() - sample_id = None - else: - # otherwise, we need to get the sample_id from the sequence_id - # and get the DB connection for that sample - sample_id = extract_sample_id(sequence_id) - conn = self._get_db_connection(sample_id) - cursor = conn.cursor() - - # Calculate window position within sequence (0-based for Python, +1 for SQLite SUBSTR) - start_pos = window_in_seq_idx * self.stride - - # Build token window - ctrl_ids = self.ctrl_ids_map.get(sequence_id, []) if self.use_control_tags else [] - bos_id = self.tokenizer.bos_id - eos_id = self.tokenizer.eos_id - sep_id = self.tokenizer._sep_id - pad_id = self.tokenizer.pad_id - - header = [bos_id] + ctrl_ids + [sep_id] - footer = [eos_id] - special_tokens_count = len(header) + len(footer) - eff_len = self.seq_length - special_tokens_count - - # ------------------------------------------------------------------ - # Retrieve the subsequence directly in SQL to avoid loading the - # full contig into Python memory. - # SQLite SUBSTR is 1-indexed, so we add 1 to start_pos. - # ------------------------------------------------------------------ - subseq_query = ( - f"SELECT substr({SEQUENCE_COLUMN_NAME}, ?, ?) FROM sequences WHERE {SEQUENCE_ID_COLUMN_NAME} = ?" - ) - result = cursor.execute( - subseq_query, - (start_pos + 1, eff_len, sequence_id), - ).fetchone() - - if result is None or result[0] is None: - raise ValueError(f"Sequence ID {sequence_id} not found in database for sample {sample_id}") - - seq = result[0].upper() - - # Apply reverse complement augmentation if enabled - if self.rc_aug and np.random.rand() > 0.5: - seq = self.reverse_complement(seq) - - # Tokenize - token_ids = header + self.tokenizer.text_to_ids(seq) + footer - - # Pad/trim - if len(token_ids) < self.seq_length: - token_ids += [pad_id] * (self.seq_length - len(token_ids)) - else: - token_ids = token_ids[: self.seq_length] - - tokens = torch.tensor(token_ids, dtype=torch.int64) - - # Flatten ctrl_ids and create special_ids list - flat_ctrl_ids = [] - if isinstance(ctrl_ids, list): - for item in ctrl_ids: - if isinstance(item, list): - flat_ctrl_ids.extend(item) - else: - flat_ctrl_ids.append(item) - - special_ids_list = [bos_id, eos_id, sep_id, pad_id] + flat_ctrl_ids - special_ids = torch.tensor(special_ids_list, dtype=torch.int64) - - # Create labels for next token prediction - labels = tokens.clone() - labels[:-1] = tokens[1:] - labels[-1] = pad_id - - # Create loss mask - loss_mask = torch.ones(self.seq_length, dtype=torch.float) - loss_mask[torch.isin(labels, special_ids)] = 0 - - batch = { - "tokens": tokens, - "labels": labels, - "loss_mask": loss_mask, - "position_ids": self.position_ids, - } - if self.create_attention_mask: - batch["attention_mask"] = self.attention_mask - - return batch - - def collate_fn(self, batch): - """Collate a batch of items into a single dictionary.""" - return default_collate(batch) - - def __del__(self): - """Close all database connections when the dataset is destroyed.""" - # Close window mapping DB - if hasattr(self, "window_db_conn") and self.window_db_conn: - self.window_db_conn.close() - - # Close all sequence shard DBs - if hasattr(self, "db_connections"): - for conn in self.db_connections.values(): - conn.close() - - if hasattr(self, "_log_file") and self._log_file: - try: - self._log_file.flush() - except Exception: - pass - try: - self._log_file.close() - except Exception: - pass - - # ------------------------------------------------------------------ - # Logging helper methods - # ------------------------------------------------------------------ - - def _init_window_logger(self, log_dir: Optional[str] = None): - """Initialise CSV file for window access logging.""" - import uuid - - rank = dist.get_rank() if dist.is_initialized() else 0 - self._rank = rank - log_uuid = str(uuid.uuid4()) - base_dir = Path(log_dir) if log_dir else Path(os.getcwd()) - base_dir = base_dir.resolve() - base_dir.mkdir(parents=True, exist_ok=True) - split_tag = getattr(self, "split", "unknown") - csv_path = (base_dir / f"window_access_{split_tag}_rank{rank}_{log_uuid[:8]}.csv").resolve() - # raise an error if the file already exists - if csv_path.exists(): - raise FileExistsError( - f"File {csv_path} already exists, this should only happen on a uuid conflict and should be extremely rare" - ) - self._log_file_path = str(csv_path) - - # Open CSV file in append mode and write header - self._log_file = open(self._log_file_path, mode="a", newline="") - self._log_writer = csv.writer(self._log_file) - self._log_writer.writerow( - [ - "window_idx", - "sequence_id", - "sample_id", - "window_in_seq_idx", - "rank", - "access_ts", - ] - ) - - print(f"Window access logger initialised at {self._log_file_path}") - - -def compute_num_windows(seq_len: int, window_size: int = 8192, stride: int = 7992) -> int: - """Helper method to compute number of windows for a sequence.""" - if seq_len < window_size: - return 1 - else: - return 1 + (seq_len - window_size) // stride - - -def precompute_window_database( - split_parquet_file: str, - output_window_db: str, - window_size: int = 8192, - stride: int = 7992, - window_min_length_threshold: int = 0, -): - """Pre-compute window mappings for a split using a Parquet file. The Parquet file must contain ID and length columns as configured by `SEQUENCE_ID_COLUMN_NAME` and `SEQUENCE_LENGTH_COLUMN_NAME`. - - The output database will contain two tables: - 1. `metadata`: Stores the window_size and stride used for creation. - 2. `window_mappings`: A mapping of window_idx to sequence_id and the - relative index of that window within the sequence. - - Args: - split_parquet_file: Path to a Parquet file with ID and length columns. - output_window_db: Path to output window mapping database - window_size: Window size (default: 8192) - stride: Stride between windows (default: 7992) - window_min_length_threshold: Minimum length of windows to include (default: 0) - """ - print(f"Creating window database at {output_window_db} from {split_parquet_file}") - print( - f"Using window_size={window_size}, stride={stride}, window_min_length_threshold={window_min_length_threshold}" - ) - - # Load sequence data from Parquet file - try: - df = pol.read_parquet(split_parquet_file) - except Exception as e: - raise IOError(f"Failed to read Parquet file at {split_parquet_file}") from e - - # Validate columns - if SEQUENCE_ID_COLUMN_NAME not in df.columns or SEQUENCE_LENGTH_COLUMN_NAME not in df.columns: - raise ValueError( - f"Parquet file {split_parquet_file} must contain '" - f"{SEQUENCE_ID_COLUMN_NAME}' and '{SEQUENCE_LENGTH_COLUMN_NAME}' columns." - ) - - # Sort by ID to ensure deterministic window ordering - df = df.sort(SEQUENCE_ID_COLUMN_NAME) - - conn = sqlite3.connect(output_window_db) - cursor = conn.cursor() - - # ------------------------------------------------------------------ - # High-performance SQLite settings for bulk insert (100M+ rows) - # ------------------------------------------------------------------ - cursor.execute("PRAGMA journal_mode=OFF;") # disable rollback journal - cursor.execute("PRAGMA synchronous=OFF;") # no fsync - cursor.execute("PRAGMA locking_mode=EXCLUSIVE;") # single-writer, avoids lock churn - cursor.execute("PRAGMA temp_store=MEMORY;") # temp tables in RAM - cursor.execute("PRAGMA cache_size=-1048576;") # ~1 GB cache (negative => KiB) - # page_size and mmap_size can only be set before DB creation; assume defaults suffice. - - # Drop old tables if they exist - cursor.execute("DROP TABLE IF EXISTS window_mappings") - cursor.execute("DROP TABLE IF EXISTS metadata") - - # Create metadata table - cursor.execute(""" - CREATE TABLE metadata ( - key TEXT PRIMARY KEY, - value INTEGER NOT NULL - ) - """) - cursor.executemany( - "INSERT INTO metadata (key, value) VALUES (?, ?)", - [ - ("window_size", window_size), - ("stride", stride), - ( - "window_min_length_threshold", - int(window_min_length_threshold) if window_min_length_threshold else 0, - ), - ], - ) - - # Create window mappings table - cursor.execute(""" - CREATE TABLE window_mappings ( - window_idx INTEGER PRIMARY KEY, - sequence_id TEXT NOT NULL, - window_in_seq_idx INTEGER NOT NULL - ) - """) - conn.commit() - - total_sequences = 0 - global_window_idx = 0 - batch_size = 20000 - batch = [] - skipped_windows = 0 - - for seq_id, seq_len in df.select([SEQUENCE_ID_COLUMN_NAME, SEQUENCE_LENGTH_COLUMN_NAME]).iter_rows(): - num_windows = compute_num_windows(seq_len, window_size, stride) - - windows_added_for_seq = 0 - for i in range(num_windows): - # Determine effective window length at this index - start_pos = i * stride if seq_len >= window_size else 0 - remaining = max(0, seq_len - start_pos) - effective_window_len = min(window_size, remaining) - - # Skip windows that are shorter than threshold (if enabled) - if window_min_length_threshold and effective_window_len < window_min_length_threshold: - skipped_windows += 1 - continue - - batch.append((global_window_idx, seq_id, i)) - global_window_idx += 1 - windows_added_for_seq += 1 - - # Only count sequences that contributed at least one retained window - if windows_added_for_seq > 0: - total_sequences += 1 - - if len(batch) >= batch_size: - cursor.executemany( - "INSERT INTO window_mappings (window_idx, sequence_id, window_in_seq_idx) VALUES (?, ?, ?)", - batch, - ) - conn.commit() - batch = [] - print(f"Processed {global_window_idx} windows... (skipped {skipped_windows})") - - if batch: - cursor.executemany( - "INSERT INTO window_mappings (window_idx, sequence_id, window_in_seq_idx) VALUES (?, ?, ?)", - batch, - ) - conn.commit() - - print("Creating index on sequence_id for faster lookups...") - cursor.execute("CREATE INDEX IF NOT EXISTS idx_sequence_id ON window_mappings(sequence_id)") - - # Persist total number of windows in metadata for fast retrieval - cursor.execute( - "INSERT OR REPLACE INTO metadata (key, value) VALUES (?, ?)", - ("total_windows", int(global_window_idx)), - ) - # Persist number of distinct sequences as well - cursor.execute( - "INSERT OR REPLACE INTO metadata (key, value) VALUES (?, ?)", - ("distinct_sequences", int(total_sequences)), - ) - - conn.commit() - conn.close() - - print(f"Finished. Found {total_sequences} sequences and {global_window_idx} total windows.") - if window_min_length_threshold and skipped_windows > 0: - print(f"Skipped {skipped_windows} windows due to window_min_length_threshold={window_min_length_threshold}.") - - -def main(): - """CLI for sharded Eden dataloader utilities.""" - parser = argparse.ArgumentParser(description="Utilities for sharded Eden dataloader: precompute window mappings.") - subparsers = parser.add_subparsers(dest="command", help="Available commands") - - # Precompute subcommand - precompute_parser = subparsers.add_parser("precompute", help="Pre-compute window mappings from a Parquet file") - precompute_parser.add_argument( - "split_parquet_file", - type=str, - help="Path to a Parquet file with sequence_id and length columns.", - ) - precompute_parser.add_argument("output_window_db", type=str, help="Path to output window mapping database") - precompute_parser.add_argument("--window-size", type=int, default=8192, help="Window size (default: 8192)") - precompute_parser.add_argument( - "--stride", - type=int, - default=7992, - help="Stride between windows (default: 7992)", - ) - precompute_parser.add_argument( - "--window-min-length-threshold", - type=int, - default=0, - help=("If > 0, skip sequences shorter than this length when precomputing windows. Defaults to 0 (disabled)."), - ) - - args = parser.parse_args() - - if args.command == "precompute": - precompute_window_database( - args.split_parquet_file, - args.output_window_db, - args.window_size, - args.stride, - args.window_min_length_threshold, - ) - else: - parser.print_help() - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/tokenizer.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/tokenizer.py deleted file mode 100644 index 380c1a91f6..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/tokenizer.py +++ /dev/null @@ -1,79 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import ftfy -from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer - -from bionemo.evo2.utils.config import Evo2PreprocessingConfig - - -class Evo2Tokenizer: - """Tokenizer for Evo2.""" - - def __init__(self, params: Evo2PreprocessingConfig | None = None): - """Initialize the Evo2Tokenizer.""" - # Pass all NeMo2/Megatron-compliant parameters associated with config.Evo2PreprocessingConfig. - self.params: Evo2PreprocessingConfig = params if params is not None else Evo2PreprocessingConfig() - self.tokenizer: TokenizerSpec = get_nmt_tokenizer( - library=self.params.tokenizer_type.lower(), - vocab_file=str(self.params.vocab_file) if self.params.vocab_file is not None else None, - merges_file=str(self.params.merges_file) if self.params.merges_file is not None else None, - model_name=self.params.tokenizer_model_name, - tokenizer_model=self.params.pretrained_tokenizer_model, - special_tokens=self.params.special_tokens, - use_fast=self.params.fast_hf_tokenizer, - ) - - def tokenize( - self, - text: str | list[str], - use_ftfy: bool = False, - enforce_sample_length: None | int = None, - append_eod: bool = False, - drop_empty_sequences: bool = False, - ): - """Tokenize the input text data for Evo2.""" - if isinstance(text, str): - text = [text] - # Tokenize a document or batch of strings. - doc_ids = [] - for l, t in enumerate(text): - if use_ftfy: - t = ftfy.fix_text(t) - # Tokenize the string. - text_ids: list = self.tokenizer.text_to_ids(t) - if drop_empty_sequences and len(text_ids) == 0: - continue - # Append EOD token (EOD ID: 0) if appropriate. - eod_length = int(append_eod and l == len(text) - 1) - token_length = len(text_ids) + eod_length - text_ids += [0] * eod_length - if enforce_sample_length is not None: - # Pad shorter sequences (Pad ID: 1) and except excessive sequences. - if token_length > enforce_sample_length: - raise ValueError( - "Detected input text with a length greater than the maximum " - f"possible sample length of {enforce_sample_length}.)" - ) - else: - text_ids += [1] * (enforce_sample_length - token_length) - # Append to document. - doc_ids.append(text_ids) - return doc_ids diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/transcript_extraction.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/data/transcript_extraction.py deleted file mode 100644 index d61dc06c2e..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/data/transcript_extraction.py +++ /dev/null @@ -1,421 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import math -import re -import sys -from collections import defaultdict - -from nemo.utils import logging - -from bionemo.noodles import complement_sequence, reverse_sequence -from bionemo.noodles.nvfaidx import NvFaidx - - -def parse_gtf_attributes(attributes: str): - """Parses the attributes field of a GTF file line into a dictionary. - - Args: - attributes (str): The attributes field from a GTF file line. - - Returns: - dict: A dictionary of attribute key-value pairs. - """ - # Split on all semicolons that are not inside quotes - attributes = re.split(r';(?=(?:[^"]*"[^"]*")*[^"]*$)', attributes) - out = {} - for a in attributes: - if len(a) == 0: - continue - key = a.split()[0] - value = a.split('"')[1] - out[key] = value - return out - - -def extract_transcript_exons(gtf_path: str, only_longest_transcript: bool): - """Extracts transcript exons from a GTF file and optionally keeps only the longest transcript per gene. - - Args: - gtf_path (str): Path to the GTF file. - only_longest_transcript (bool): Whether to keep only the longest transcript per gene. - - Returns: - dict: A dictionary containing transcript and exon information. - """ - genes = defaultdict(set) - gene2transcripts = defaultdict(set) - transcripts = {} - exons = {} - exon2transcript = {} - transcript2gene = {} - transcript2exon = defaultdict(set) - skip_transcripts = set() - - gtf_fields = ["seqname", "source", "feature", "start", "end", "score", "strand", "frame", "attribute"] - with open(gtf_path) as infile: - for line in infile: - # skip header lines - if line.startswith("#"): - continue - line = line.strip().split("\t") - if len(line) < 9: - continue - - # parse the attributes into a dictionary - line = dict(zip(gtf_fields, line)) - attribs = parse_gtf_attributes(line["attribute"]) - - if line["feature"] == "gene": - contig, start, end, strand = line["seqname"], line["start"], line["end"], line["strand"] - start, end = int(line["start"]) - 1, int(line["end"]) - gene_id = attribs.get("gene_id", None) - if not gene_id: - continue - genes[gene_id].add((contig, start, end, strand)) - - elif line["feature"] == "exon": - contig, start, end, strand = line["seqname"], line["start"], line["end"], line["strand"] - start, end = int(line["start"]) - 1, int(line["end"]) - gene_id = attribs.get("gene_id", None) - if not gene_id: - continue - transcript_id = attribs["transcript_id"] - gene2transcripts[gene_id].add(transcript_id) - - # Skip exons that have already been handled and are likely errors - if transcript_id in skip_transcripts: - continue - exon_number = int(attribs["exon_number"]) - - exon_id = (gene_id, transcript_id, exon_number) - if exon_id in exons: - del exons[exon_id] - transcripts.pop(transcript_id, None) - if transcript_id in transcript2exon: - del transcript2exon[transcript_id] - skip_transcripts.add(transcript_id) - continue - - exons[exon_id] = {"seqname": contig, "start": start, "end": end, "strand": strand} - if exon_id in exon2transcript: - raise Exception("Exon Already Exists in exon2transcript") - exon2transcript[exon_id] = transcript_id - transcript2exon[transcript_id].add(exon_id) - - elif line["feature"] == "transcript": - contig, start, end, strand = line["seqname"], line["start"], line["end"], line["strand"] - start, end = int(line["start"]) - 1, int(line["end"]) - gene_id = attribs.get("gene_id", None) - if not gene_id: - continue - gbkey = attribs["gbkey"] - transcript_biotype = attribs["transcript_biotype"] - transcript_id = attribs["transcript_id"] - if transcript_id in skip_transcripts: - continue - - transcripts[transcript_id] = { - "seqname": contig, - "start": start, - "end": end, - "strand": strand, - "gbkey": gbkey, - "transcript_biotype": transcript_biotype, - } - transcript2gene[transcript_id] = gene_id - gene2transcripts[gene_id].add(transcript_id) - - if only_longest_transcript: - transcript_lengths = defaultdict(int) - for exon in exons: - transcript_lengths[exon[1]] += exons[exon]["end"] - exons[exon]["start"] - - keep_transcripts = {} - keep_exons = {} - keep_exon2transcript = {} - keep_transcript2gene = {} - keep_transcript2exon = defaultdict(set) - keep_skip_transcripts = set() - - for gene in gene2transcripts: - this_transcripts = gene2transcripts[gene] - this_transcript_lengths = [(transcript, transcript_lengths[transcript]) for transcript in this_transcripts] - longest_transcript = max(this_transcript_lengths, key=lambda x: x[1])[0] - keep_transcripts[longest_transcript] = dict(transcripts[longest_transcript]) - for exon in transcript2exon[longest_transcript]: - keep_exons[exon] = dict(exons[exon]) - keep_exon2transcript[exon] = longest_transcript - keep_transcript2exon[longest_transcript].add(exon) - keep_transcript2gene[longest_transcript] = gene - - transcripts = keep_transcripts - exons = keep_exons - exon2transcript = keep_exon2transcript - transcript2gene = keep_transcript2gene - transcript2exon = keep_transcript2exon - skip_transcripts = keep_skip_transcripts - - return { - "transcripts": transcripts, - "exons": exons, - "exon2transcript": exon2transcript, - "transcript2gene": transcript2gene, - "transcript2exon": transcript2exon, - } - - -def extract_default_transcript_sequences(transcript_info, fasta_records, output_file): - """Extracts default transcript sequences from the provided transcript information and writes them to an output file. - - Args: - transcript_info (dict): Dictionary containing transcript and exon information. - fasta_records (NvFaidx): Indexed FASTA records. - output_file (TextIO): File object to write the output sequences. - """ - for transcript_id in transcript_info["transcripts"]: - gene_id = transcript_info["transcript2gene"][transcript_id] - this_exons = sorted(transcript_info["transcript2exon"][transcript_id], key=lambda x: x[-1]) - - seqname = None - exon_qc_failed = False - if len(this_exons) > 1: - for i in range(1, len(this_exons)): - this_exon = this_exons[i] - prev_exon = this_exons[i - 1] - this_coords = transcript_info["exons"][this_exon] - prev_coords = transcript_info["exons"][prev_exon] - if this_coords["strand"] != prev_coords["strand"]: - exon_qc_failed = True - if this_coords["strand"] == "+" and this_coords["start"] < prev_coords["start"]: - exon_qc_failed = True - if this_coords["strand"] == "-" and this_coords["start"] > prev_coords["start"]: - exon_qc_failed = True - if this_coords["seqname"] != prev_coords["seqname"]: - exon_qc_failed = True - - if exon_qc_failed: - continue - - transcript_seq = "" - for exon in this_exons: - coords = transcript_info["exons"][exon] - if seqname is None: - seqname = coords["seqname"] - exon_seq = str(fasta_records[coords["seqname"]][coords["start"] : coords["end"]]) - if coords["strand"] == "-": - exon_seq = reverse_sequence(complement_sequence(exon_seq)) - transcript_seq += exon_seq - - print(f">{seqname}|{gene_id}|{transcript_id}\n{transcript_seq}", file=output_file) - - -def extract_stitched_transcript_sequences( - transcript_info, fasta_records, output_file, stitch_token="@", promoter_size=1024, intron_window=32, overlap=False -): - """Extracts stitched transcript sequences from the provided transcript information and writes them to an output file. - - The "stitched" word refers to the process of combining sequences from different regions of the genome to form a single, - continuous transcript sequence. - This includes: - Promoter Region: A specified number of base pairs (bp) upstream of the transcript start site. - Exons: The coding regions of the transcript. - Intron Windows: A specified number of bp from the neighboring introns around each exon. - - The stitch_token is used to denote the boundaries between - these regions in the stitched transcript sequences. - - Args: - transcript_info (dict): Dictionary containing transcript and exon information. - fasta_records (NvFaidx): Indexed FASTA records. - output_file (TextIO): File object to write the output sequences. - stitch_token (str, optional): Token to use for stitching sequences. Defaults to "@". - promoter_size (int, optional): Number of bp to include in the promoter region. Defaults to 1024. - intron_window (int, optional): Number of bp to include from neighboring introns. Defaults to 32. - overlap (bool, optional): Whether to allow overlap of neighboring intron windows. Defaults to False. - """ - for transcript_id in transcript_info["transcripts"]: - gene_id = transcript_info["transcript2gene"][transcript_id] - this_exons = sorted(transcript_info["transcript2exon"][transcript_id], key=lambda x: x[-1]) - - exon_qc_failed = False - if len(this_exons) > 1: - for i in range(1, len(this_exons)): - this_exon = this_exons[i] - prev_exon = this_exons[i - 1] - this_coords = transcript_info["exons"][this_exon] - prev_coords = transcript_info["exons"][prev_exon] - if this_coords["strand"] != prev_coords["strand"]: - exon_qc_failed = True - if this_coords["strand"] == "+" and this_coords["start"] < prev_coords["start"]: - exon_qc_failed = True - if this_coords["strand"] == "-" and this_coords["start"] > prev_coords["start"]: - exon_qc_failed = True - if this_coords["seqname"] != prev_coords["seqname"]: - exon_qc_failed = True - - if exon_qc_failed: - continue - - transcript_seq = "" - seqname = None - for i in range(len(this_exons)): - # Previous Exon - prev_exon = this_exons[i - 1] if i > 0 else None - prev_coords = transcript_info["exons"].get(prev_exon, None) - # Current Exon - cur_exon = this_exons[i] - cur_coords = transcript_info["exons"].get(cur_exon, None) - exon_number = cur_exon[-1] - if seqname is None: - seqname = cur_coords["seqname"] - # Next Exon - next_exon = this_exons[i + 1] if i < len(this_exons) - 1 else None - next_coords = transcript_info["exons"].get(next_exon, None) - # Extract the stitched spliced sequence without overlapping intron windows. - intron_window_left = ( - min(intron_window, math.floor(abs(cur_coords["start"] - prev_coords["end"]) / 2)) - if not overlap and prev_coords is not None - else intron_window - ) - intron_window_right = ( - min(intron_window, math.ceil(abs(next_coords["start"] - cur_coords["end"]) / 2)) - if not overlap and next_coords is not None - else intron_window - ) - if cur_coords["strand"] == "+" and exon_number == 1: - exon_start = cur_coords["start"] - promoter_size - exon_end = cur_coords["end"] + intron_window_right - elif cur_coords["strand"] == "-" and exon_number == 1: - exon_start = cur_coords["start"] - intron_window_left - exon_end = cur_coords["end"] + promoter_size - else: - exon_start = cur_coords["start"] - intron_window_left - exon_end = cur_coords["end"] + intron_window_right - exon_seq = str(fasta_records[cur_coords["seqname"]][exon_start:exon_end]) - if cur_coords["strand"] == "-": - exon_seq = stitch_token + reverse_sequence(complement_sequence(exon_seq)) - transcript_seq += exon_seq - - if stitch_token and len(stitch_token) > 0: - transcript_seq = transcript_seq[len(stitch_token) :] - - print(f">{seqname}|{gene_id}|{transcript_id}\n{transcript_seq}", file=output_file) - - -def run(args): - """Main function to run the transcript extraction process based on command line arguments. - - Args: - args (argparse.Namespace): Parsed command line arguments. - """ - with open(args.output_path, "w") if args.output_path is not None else sys.stdout as output_file: - if args.verbose: - logging.info("Indexing FASTA file...") - - fasta_index = NvFaidx(args.fasta_path) - - if args.transcript_type == "default": - if args.verbose: - logging.info("Extracting default transcripts...") - if args.only_longest_transcript: - logging.info("Only extracting the longest transcript per gene.") - else: - logging.info("Extracting all transcripts regardless of length.") - - elif args.transcript_type == "stitched": - if args.verbose: - logging.info("Extracting stitched transcripts...") - if args.only_longest_transcript: - logging.info("Only extracting the longest transcript per gene.") - else: - logging.info("Extracting all transcripts regardless of length.") - - transcript_info = extract_transcript_exons(args.gtf_path, args.only_longest_transcript) - - if args.transcript_type == "default": - extract_default_transcript_sequences(transcript_info, fasta_index, output_file) - elif args.transcript_type == "stitched": - extract_stitched_transcript_sequences( - transcript_info, - fasta_index, - output_file, - promoter_size=args.stitched_promoter, - intron_window=args.stitched_intron, - overlap=args.stitched_overlap, - ) - - -def parse_args(): - """Parses command line arguments for the transcript extraction script. - - Returns: - argparse.Namespace: Parsed command line arguments. - """ - ap = argparse.ArgumentParser(description="Extract spliced transcripts from a FASTA and GTF.") - ap.add_argument("--fasta-path", type=str, required=True, help="Path to FASTA file to extract transcripts from.") - ap.add_argument( - "--gtf-path", - type=str, - required=True, - help="Path to gene transfer format (GTF) file associated with the FASTA.", - ) - ap.add_argument("--output-path", type=str, default=None, help="Path to output FASTA file.") - ap.add_argument( - "--transcript-type", - type=str, - default="default", - choices=["default", "stitched"], - help="Type of transcript to extract from the GTF and FASTA files for splicing. 'Stitched' transcripts include 1024 bp of sequence from the promoter and 32 bp around each exon.", - ) - ap.add_argument( - "--stitched-promoter", - type=int, - default=1024, - help="Number of bp to include in the promoter region when --transcript-type=stitched is used. Defaults to 1024.", - ) - ap.add_argument( - "--stitched-intron", - type=int, - default=32, - help="Number of bp to include from neighboring introns when --transcript-type=stitched is used. Defaults to 32.", - ) - ap.add_argument( - "--stitched-overlap", - action="store_true", - help="Allow overlap of neighboring intron windows when --transcript-type=stitched is used. Defaults to False, i.e. prevents overlap by shortening the intron windows for a contiguous splice.", - ) - ap.add_argument( - "--only-longest-transcript", action="store_true", help="Only extract the longest transcript per gene." - ) - ap.add_argument("-v", "--verbose", action="store_true", help="Turn on verbose log messages.") - return ap.parse_args() - - -def main(): - """Entry point for the script. Parses arguments and runs the extraction process.""" - args = parse_args() - if args.verbose: - logging.info(args) - run(args) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/models/__init__.py deleted file mode 100644 index 345166f3c4..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from bionemo.evo2.models.llama import ( - LLAMA_MODEL_OPTIONS, - Eden11BConfig, - Eden18BConfig, - Eden21BConfig, - Eden24BConfig, - Eden27BConfig, - Eden28BConfig, - Eden35BConfig, - EdenConfig, -) -from bionemo.evo2.models.mamba import MAMBA_MODEL_OPTIONS, HybridMambaConfig8BEvo2Loss, MambaModel - - -__all__ = [ - "LLAMA_MODEL_OPTIONS", - "MAMBA_MODEL_OPTIONS", - "Eden11BConfig", - "Eden18BConfig", - "Eden21BConfig", - "Eden24BConfig", - "Eden27BConfig", - "Eden28BConfig", - "Eden35BConfig", - "EdenConfig", - "HybridMambaConfig8BEvo2Loss", - "MambaModel", -] diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/llama.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/models/llama.py deleted file mode 100644 index 675303c42c..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/llama.py +++ /dev/null @@ -1,238 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from dataclasses import dataclass -from typing import Optional - -import torch -from nemo.collections import llm -from nemo.collections.llm.gpt.model.llama import HFLlamaImporter, LlamaModel -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning import io -from nemo.lightning.pytorch.utils import dtype_from_hf - - -@dataclass -class EdenConfig(llm.Llama31Config8B): - """Eden-flavoured Llama-3.1 ~8B (keeps all Eden behaviors). Inherits from the llama 3.1 config for proper handling of RoPE when converting checkpoints.""" - - rotary_base: int = 500_000 - seq_length: int = 8192 - num_layers: int = 32 - hidden_size: int = 4096 - ffn_hidden_size: int = 14336 - num_attention_heads: int = 32 - - scale_factor: int = 1 - low_freq_factor: int = 1 - high_freq_factor: int = 4 - old_context_len: int = 8192 - init_method_std: float = 0.02 - embedding_init_method_std: Optional[float] = None - - -@dataclass -class Eden11BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~14B (keeps all Eden behaviors).""" - - # If you want long context like Eden-long, bump this; else inherit 8192. - seq_length: int = 8192 # or remove this line to keep 8192 - - # ~14B sizing (head_dim ≈ 128) - num_layers: int = 36 - hidden_size: int = 5120 - ffn_hidden_size: int = 13824 - num_attention_heads: int = 40 - num_query_groups: int = 8 # GQA (inherited value is also fine if already 8) - - -@dataclass -class Eden18BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~18B (keeps all Eden behaviors).""" - - # If you want long context like Eden-long, bump this; else inherit 8192. - seq_length: int = 8192 # or remove this line to keep 8192 - - # ~18B sizing (head_dim ≈ 128) - num_layers: int = 48 - hidden_size: int = 6144 - ffn_hidden_size: int = 16384 - num_attention_heads: int = 48 - num_query_groups: int = 8 # GQA (inherited value is also fine if already 8) - old_context_len: int = 8192 # or remove this line to keep 8192 - - -@dataclass -class Eden21BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~21B (keeps all Eden behaviors).""" - - seq_length: int = 8192 - - # ~21B sizing (head_dim = 128) - num_layers: int = 42 # 42 layers for 21B target - hidden_size: int = 7168 # 56 * 128 = 7168 for exact head_dim - ffn_hidden_size: int = 19456 # ~2.7x hidden_size - num_attention_heads: int = 56 # Divisible by 8 - num_query_groups: int = 8 # GQA - old_context_len: int = 8192 - - -@dataclass -class Eden24BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~8B (keeps all Eden behaviors).""" - - # If you want long context like Eden-long, bump this; else inherit 8192. - seq_length: int = 32768 # or remove this line to keep 8192 - - # ~8B sizing (head_dim ≈ 128) - num_layers: int = 46 - hidden_size: int = 6144 - ffn_hidden_size: int = 23296 - num_attention_heads: int = 48 - num_query_groups: int = 8 # GQA (inherited value is also fine if already 8) - old_context_len: int = 8192 - - -@dataclass -class Eden27BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~8B (keeps all Eden behaviors).""" - - # If you want long context like Eden-long, bump this; else inherit 8192. - seq_length: int = 32768 # or remove this line to keep 8192 - - # ~8B sizing (head_dim ≈ 128) - num_layers: int = 46 - hidden_size: int = 6656 - ffn_hidden_size: int = 23296 - num_attention_heads: int = 52 - num_query_groups: int = 8 # GQA (inherited value is also fine if already 8) - old_context_len: int = 8192 - - -@dataclass -class Eden28BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~28B (keeps all Eden behaviors).""" - - # If you want long context like Eden-long, bump this; else inherit 8192. - seq_length: int = 8192 # or remove this line to keep 8192 - - # ~8B sizing (head_dim ≈ 128) - num_layers: int = 48 - hidden_size: int = 6144 - ffn_hidden_size: int = 26368 - num_attention_heads: int = 48 - num_query_groups: int = 8 # GQA (inherited value is also fine if already 8) - old_context_len: int = 8192 # or remove this line to keep 8192 - - -@dataclass -class Eden35BConfig(EdenConfig): - """Eden-flavoured Llama-3.1 ~35B (keeps all Eden behaviors).""" - - seq_length: int = 8192 - - # ~35B sizing (head_dim ≈ 128) - num_layers: int = 64 - hidden_size: int = 7168 - ffn_hidden_size: int = 20480 - num_attention_heads: int = 56 - num_query_groups: int = 8 # GQA - old_context_len: int = 8192 - - -@io.model_importer(LlamaModel, "hf") -class HFEdenLlamaImporter(HFLlamaImporter): - """Importer for Eden-flavoured Llama models which just overrides the tokenizer and config classes from NeMo.""" - - @property - def config(self) -> EdenConfig: - """Create a NeMo LlamaConfig from the HF model config. - - Translates the HF configuration parameters to the equivalent NeMo - configuration. - - Returns: - LlamaConfig: NeMo configuration for Llama models - """ - from transformers import AutoConfig, GenerationConfig - - source = AutoConfig.from_pretrained(str(self)) - try: - generation_config = GenerationConfig.from_pretrained(str(self)) - except Exception: - generation_config = None - - def make_vocab_size_divisible_by(vocab_size): - base = 128 - while vocab_size % base != 0: - base //= 2 - return base - - cls = EdenConfig - scale_factor = source.rope_scaling.get("factor", 8.0) if source.rope_scaling is not None else 8.0 - - args = {} - - output = cls( - num_layers=source.num_hidden_layers, - hidden_size=source.hidden_size, - ffn_hidden_size=( - source.intermediate_size - if not getattr(source, "intermediate_size_mlp", None) - else source.intermediate_size_mlp - ), - num_attention_heads=source.num_attention_heads, - init_method_std=source.initializer_range, - layernorm_epsilon=source.rms_norm_eps, - num_query_groups=source.num_key_value_heads, - seq_length=source.max_position_embeddings, - rotary_base=source.rope_theta, - gated_linear_unit=True, - make_vocab_size_divisible_by=make_vocab_size_divisible_by(source.vocab_size), - share_embeddings_and_output_weights=getattr(source, "tie_word_embeddings", False), - fp16=(dtype_from_hf(source) == torch.float16), - bf16=(dtype_from_hf(source) == torch.bfloat16), - params_dtype=dtype_from_hf(source), - generation_config=generation_config, - vocab_size=source.vocab_size, - kv_channels=getattr(source, "head_dim", None), - scale_factor=scale_factor, - **args, - ) - - return output - - @property - def tokenizer(self): - """Override the tokenizer to use the Eden-flavoured tokenizer.""" - from bionemo.evo2.run.utils import patch_eden_tokenizer # avoid circular import - - tokenizer = get_nmt_tokenizer("byte-level") - patch_eden_tokenizer(tokenizer) - return tokenizer - - -LLAMA_MODEL_OPTIONS = { - "8B": lambda **kwargs: llm.Llama3Config8B(**kwargs), - "7B": lambda **kwargs: EdenConfig(**kwargs), - "11B": lambda **kwargs: Eden11BConfig(**kwargs), - "18B": lambda **kwargs: Eden18BConfig(**kwargs), - "21B": lambda **kwargs: Eden21BConfig(**kwargs), - "24B": lambda **kwargs: Eden24BConfig(**kwargs), - "27B": lambda **kwargs: Eden27BConfig(**kwargs), - "28B": lambda **kwargs: Eden28BConfig(**kwargs), - "35B": lambda **kwargs: Eden35BConfig(**kwargs), -} diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/mamba.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/models/mamba.py deleted file mode 100644 index 13cf721754..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/mamba.py +++ /dev/null @@ -1,389 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from dataclasses import dataclass -from typing import Callable - -import megatron.core.models.mamba.mamba_model -import torch -import torch.nn.functional as F -from megatron.core import parallel_state -from megatron.core.inference.model_inference_wrappers.gpt.gpt_inference_wrapper import GPTInferenceWrapper -from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig -from megatron.core.transformer.spec_utils import ModuleSpec -from megatron.core.utils import WrappedTensor, deprecate_inference_params -from nemo.collections.llm.gpt.model.base import GPTModel, gpt_data_step -from nemo.collections.llm.gpt.model.megatron.hyena.hyena_utils import make_upper_case, reweighted_cross_entropy -from nemo.collections.llm.gpt.model.ssm import ( - NemotronHConfigBase, -) -from nemo.lightning import get_vocab_size -from typing_extensions import override - -from bionemo.evo2.utils.loss.embedding_variance import SquaredErrorTargetedVarianceLoss - - -logger = logging.getLogger(__name__) - - -def mamba_forward_step(model, batch) -> torch.Tensor: - """Forward step function for Mamba models, similar to hyena_forward_step. - - Args: - model: The Mamba model - batch: Dictionary containing input batch data - - Returns: - torch.Tensor: Output from the model forward pass - """ - forward_args = { - "input_ids": batch["tokens"], - "position_ids": batch["position_ids"], - "labels": batch["labels"], - "loss_mask": batch["loss_mask"], - } - forward_args["attention_mask"] = None - return model(**forward_args) - - -class MambaModel(GPTModel): - """Mamba model that extends GPTModel for integration with NeMo. - - Note that the loss calculation is handled by CustomMCoreMambaModel instead. - """ - - @override - def get_inference_wrapper( - self, params_dtype, inference_batch_times_seqlen_threshold, inference_max_seq_length=8192 - ) -> GPTInferenceWrapper: - """Gets the inference wrapper for the Mamba model.""" - from megatron.core.models.mamba import MambaModel as MCoreMambaModel - - # Find MCoreMambaModel instance - mcore_model = self.module - while mcore_model: - if isinstance(mcore_model, (MCoreMambaModel, Evo2StyleMCoreMambaModel)): - break - mcore_model = getattr(mcore_model, "module", None) - if mcore_model is None or not isinstance(mcore_model, (MCoreMambaModel, Evo2StyleMCoreMambaModel)): - raise ValueError("Mamba model instance not found in the model structure.") - - vocab_size = None - if self.tokenizer is not None: - vocab_size = self.tokenizer.vocab_size - elif hasattr(self.config, "vocab_size"): - vocab_size = self.config.vocab_size - else: - raise ValueError("Unable to find vocab size.") - - inference_wrapper_config = InferenceWrapperConfig( - hidden_size=mcore_model.config.hidden_size, - params_dtype=params_dtype, - inference_batch_times_seqlen_threshold=inference_batch_times_seqlen_threshold, - padded_vocab_size=vocab_size, - inference_max_seq_length=inference_max_seq_length, - ) - - model_inference_wrapper = GPTInferenceWrapper(mcore_model, inference_wrapper_config) - return model_inference_wrapper - - @override - def forward( - self, - input_ids: torch.Tensor, - position_ids: torch.Tensor, - attention_mask: torch.Tensor | None = None, - labels: torch.Tensor | None = None, - decoder_input: torch.Tensor | None = None, - inference_context=None, - packed_seq_params=None, - inference_params=None, - runtime_gather_output: bool | None = None, - loss_mask: torch.Tensor | None = None, - ) -> torch.Tensor: - """Forward pass that delegates to CustomMCoreMambaModel, which handles loss calculation.""" - extra_kwargs = {"packed_seq_params": packed_seq_params} if packed_seq_params is not None else {} - output_tensor = self.module( - input_ids, - position_ids, - attention_mask, - decoder_input=decoder_input, - labels=labels, # Pass labels to the Megatron module - inference_params=inference_params, - inference_context=inference_context, - runtime_gather_output=runtime_gather_output, - loss_mask=loss_mask, # Pass loss_mask to the Megatron module - **extra_kwargs, - ) - - # Return whatever CustomMCoreMambaModel.forward returns - # (logits during inference, loss during training) - return output_tensor - - -# Custom MCoreMambaModel with reweighted loss calculation -class Evo2StyleMCoreMambaModel(megatron.core.models.mamba.mamba_model.MambaModel): - """Custom version of MCoreMambaModel that implements reweighted loss calculation. - - Note that this is similar to the HyenaModel for uppercase/lowercase handling. - """ - - def __init__(self, *args, **kwargs): - """Initializes `Evo2StyleMCoreMambaModel` with unique parameters for the Evo2 variant of `MCoreMambaModel`.""" - super().__init__(*args, **kwargs) - if self.config.use_targeted_variance_loss: - if not hasattr(self.config, "embedding_init_method_std"): - logger.warning("embedding_init_method_std is not supported in this config, please upgrade Megatron-LM") - # 1.0 is the suggested value for embedding_init_method_std from the - # [Spike No More](https://arxiv.org/abs/2312.16903) paper. - embedding_init_method_std: float = getattr(self.config, "embedding_init_method_std", 1.0) - self.targeted_variance_loss = SquaredErrorTargetedVarianceLoss( - loss_coeff=self.config.targeted_variance_loss_loss_coeff, - var_target=embedding_init_method_std**2, - ) - - @override - def forward( - self, - input_ids: torch.Tensor, - position_ids: torch.Tensor, - attention_mask: torch.Tensor, - decoder_input: torch.Tensor | None = None, - labels: torch.Tensor | None = None, - inference_context=None, - runtime_gather_output: bool | None = None, - *, - inference_params=None, - loss_mask: torch.Tensor | None = None, - ): - """Forward pass with custom loss calculation for uppercase/lowercase reweighting. - - Note that this mimics the behavior in hyena_model.py lines 273-292. - - Forward function of the Mamba model. This function passes the input tensors - through the embedding layer, and then the decoder and finally into the post - processing layer (optional). - - It either returns the Loss values if labels are given or the final hidden units - """ - # If decoder_input is provided (not None), then input_ids and position_ids are ignored. - # Otherwise, apply embedding layer on input_ids and position_ids to get decoder_input. - - inference_context = deprecate_inference_params(inference_context, inference_params) - - # Decoder embedding. - if decoder_input is not None: - pass - elif self.pre_process: - decoder_input = self.embedding(input_ids=input_ids, position_ids=position_ids) - else: - # intermediate stage of pipeline - # decoder will get hidden_states from encoder.input_tensor - decoder_input = None - - rotary_pos_emb = None - if self.position_embedding_type == "rope": - rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len( - inference_context, self.decoder, decoder_input, self.config - ) - rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len) - - # Wrap decoder_input to allow the decoder (MambaBlock) to delete the - # reference held by this caller function, enabling early garbage collection - # for inference. - if inference_context is not None and not self.training: - decoder_input = WrappedTensor(decoder_input) - - # The following assert will currently fail when running inference. - # Commented out for now. - # TODO (duncan/rwaleffe): (1) confirm that the externally-generated - # attention mask is not needed and is ignored by the model in - # inference mode, (2) reduce the size of the externally-generated - # attention mask to prevent CPU OOM (as we did for training), (3) - # force the attention mask passed to the model in inference mode to - # be None, so this assert will succeed. - # assert attention_mask is None, "The attention mask is ignored and should be set to None" - - # Run decoder. - hidden_states = self.decoder( - hidden_states=decoder_input, - attention_mask=attention_mask, - inference_context=inference_context, - rotary_pos_emb=rotary_pos_emb, - ) - - if not self.post_process: - return hidden_states - - # logits and loss - output_weight = None - if self.share_embeddings_and_output_weights: - output_weight = self.shared_embedding_or_output_weight() - - if ( - not self.training - and inference_context is not None - and inference_context.materialize_only_last_token_logits - ): - hidden_states = hidden_states[-1, :, :].unsqueeze(0) - - logits, _ = self.output_layer(hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output) - - if labels is None: - # [s b h] => [b s h] - return logits.transpose(0, 1).contiguous() - - # Apply reweighted loss calculation for uppercase/lowercase handling - labels, lowercase_mask = make_upper_case(labels) - loss = self.compute_language_model_loss(labels, logits) - normalize_per_batch = True if self.config.to_upper == "normalized_weighted" else False - loss = reweighted_cross_entropy( - loss, - (labels, loss_mask, lowercase_mask), - lowercase_weight=self.config.lowercase_loss_reweighting, - normalize_per_batch=normalize_per_batch, - ) - if self.training and self.config.use_targeted_variance_loss: - # Only use this in training, not validation etc. - var_loss = self.targeted_variance_loss(self.embedding.word_embeddings.weight) - loss += var_loss - return loss - - -def mamba_no_weight_decay_cond(name, param, exclude_embeddings: bool = False): - """Condition for no weight decay for Mamba parameters. - - Note that this follows the same pattern as in the original Mamba implementation. - """ - # Mamba-specific parameters that should not have weight decay - if ( - name.endswith("dt_bias") - or name.endswith("A_log") - or name.endswith("D") - or ("embedding" in name and exclude_embeddings) - or getattr(param, "_no_weight_decay", False) - ): - no_wd = True - # All other parameters - use default MCore behavior: - # Do not regularize biases and norm parameters - # (See megatron.core.optimizer._get_pram_groups) - # TODO exclude embeddings - else: - no_wd = name.endswith(".bias") or len(param.shape) == 1 - return no_wd - - -def mamba_no_weight_decay_cond_with_embeddings(name, param): - """Condition for no weight decay for Mamba parameters with embeddings. - - Note that this follows the same pattern as in the original Mamba implementation but also skips WD on embeddings. - """ - return mamba_no_weight_decay_cond(name, param, exclude_embeddings=True) - - -@dataclass -class HybridMambaConfig8BEvo2Loss(NemotronHConfigBase): - """Config for 8B hybrid Mamba model.""" - - hybrid_override_pattern: str = "M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M-" - num_layers: int = 52 - seq_length: int = 8192 - hidden_size: int = 4096 - mamba_ssm_ngroups: int = 8 - mamba_state_dim: int = 128 - mamba_head_dim: int = 64 - ffn_hidden_size: int = 21504 - num_attention_heads: int = 32 - init_method_std: float = 0.014 - num_query_groups: int = 8 - make_vocab_size_divisible_by: int = 128 - tokenizer_library: str = "byte-level" # Use Evo2 tokenizer - tokenizer_name: str = None - masked_softmax_fusion: bool = True - apply_query_key_layer_scaling: bool = False - persist_layer_norm: bool = True - attention_softmax_in_fp32: bool = False - vocab_size: int = 512 - first_last_layers_bf16: bool = True - is_hybrid_model: bool = True - forward_step_fn: Callable = mamba_forward_step - data_step_fn: Callable = gpt_data_step - # Set a reasonable default for to_upper to match HyenaModel behavior - to_upper: str = "normalized_weighted" - # Set lowercase loss reweighting factor - lowercase_loss_reweighting: float = 1.0 - activation_func: Callable = lambda x: torch.square(F.relu(x)) # lambda x: torch.pow(F.relu(x), 2) - # The trainer is responsible for using this when initializing the optimizer state: - # opt = MegatronOptimizerModule(opt_config, sched, no_weight_decay_cond=model_config.hyena_no_weight_decay_cond_fn) - hyena_no_weight_decay_cond_fn: Callable = mamba_no_weight_decay_cond - spike_no_more_embedding_init: bool = False # TODO: remove this. - layernorm_embeddings: bool = False - # If set to true, use targeted variance loss which encourages the word embedding weight variances - # to be close to a target value (1.0). - use_targeted_variance_loss: bool = False - targeted_variance_loss_loss_coeff: float = 0.1 - share_embeddings_and_output_weights: bool = False - - def __post_init__(self): - """Post-init logic for Evo2 to enable backwards compatibility with old configs.""" - # Specific post_init logic for Evo2 to enable backwards compatibility with old configs. - if not hasattr(self, "embedding_init_method_std"): - raise ValueError("embedding_init_method_std is not supported in this config, please upgrade Megatron-LM") - if self.spike_no_more_embedding_init and self.embedding_init_method_std is None: - logger.warning( - "spike_no_more_embedding_init is deprecated, please set " - "embedding_init_method_std=[desired_stdev] in the future. To get the old behavior set to 1.0. " - "For now setting to 1.0." - ) - self.embedding_init_method_std = 1.0 - # Continue with the remaining post-init logic defined in NemotronHConfigBase and/or TransformerConfig. - super().__post_init__() - - @override - def configure_model( - self, tokenizer, pre_process=None, post_process=None, vp_stage: int | None = None - ) -> Evo2StyleMCoreMambaModel: - """Configures the model for training or inference.""" - mamba_stack_spec = self.mamba_stack_spec - if not isinstance(mamba_stack_spec, ModuleSpec): - mamba_stack_spec = mamba_stack_spec() - - assert getattr(self, "virtual_pipeline_model_parallel_size", None) is None and vp_stage is None, ( - "Virtual pipeline model parallelism is temporarily unsupported in SSM/Mamaba " - "models due to upstream MCore MambaModel API dependency" - ) - return Evo2StyleMCoreMambaModel( - self, - mamba_stack_spec=mamba_stack_spec, - vocab_size=get_vocab_size(self, tokenizer.vocab_size, self.make_vocab_size_divisible_by), - max_sequence_length=self.seq_length, - hybrid_attention_ratio=self.hybrid_attention_ratio, - hybrid_mlp_ratio=self.hybrid_mlp_ratio, - hybrid_override_pattern=self.hybrid_override_pattern, - position_embedding_type=self.position_embedding_type, - rotary_percent=self.rotary_percent, - rotary_base=self.rotary_base, - seq_len_interpolation_factor=self.seq_len_interpolation_factor, - pre_process=pre_process or parallel_state.is_pipeline_first_stage(), - post_process=post_process or parallel_state.is_pipeline_last_stage(), - share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, - ) - - -# Dictionary mapping model size names to config classes -MAMBA_MODEL_OPTIONS: dict[str, type[NemotronHConfigBase]] = { - "hybrid_mamba_8b": HybridMambaConfig8BEvo2Loss, -} diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/peft.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/models/peft.py deleted file mode 100644 index 8479acfdca..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/models/peft.py +++ /dev/null @@ -1,279 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from copy import deepcopy -from typing import List, Optional - -import lightning.pytorch as pl -from nemo.collections.llm.fn.mixin import FNMixin -from nemo.collections.llm.peft.lora import LoRA -from nemo.utils import logging -from torch import nn - - -class Evo2LoRA(LoRA): - """LoRA adapter specifically for Evo2/Hyena models.""" - - def __init__( - self, - peft_ckpt_path: Optional[str] = None, - freeze_modules: List[str] = ["encoder", "embedding"], - target_modules: List[str] = [ - "linear_qkv", - "linear_proj", - "linear_fc1", - "linear_fc2", - "short_filter", # Short convolution filters - "hyena_filter", # Hyena layer filters - "positional_encoding", # ROPE or other position encodings - ], - *args, - **kwargs, - ): - """Initialize the LoRA Adapter for Evo2. - - Args: - peft_ckpt_path: Path to pre-trained LoRA checkpoint. - freeze_modules: List of module names to freeze (Evo2-specific defaults). - target_modules: Modules to apply LoRA to (uses Evo2 defaults if None). - *args: placeholder. - **kwargs: - dim: LoRA rank dimension. - alpha: LoRA scaling parameter. - dropout: Dropout rate for LoRA layers. - dropout_position: Where to apply dropout ('pre' or 'post'). - lora_A_init_method: Initialization for A matrix ('xavier', 'uniform', 'normal'). - lora_B_init_method: Initialization for B matrix ('zero', 'normal'). - """ - """Initialize the LoRA Adapter for Evo2.""" - super().__init__(target_modules=target_modules, *args, **kwargs) - self.freeze_modules = freeze_modules - self.peft_ckpt_path = peft_ckpt_path - - # CRITICAL: Set model_transform to self - # The callback system expects this attribute - self.model_transform = self - - def setup(self, trainer, pl_module, stage): - """Setup callback - properly initialize transform.""" - super().setup(trainer, pl_module, stage) - - logging.info(f"Will attempt to apply to model if matches: \n{self.target_modules}") - - # Ensure model_transform is set - if not hasattr(self, "model_transform") or self.model_transform is None: - self.model_transform = self - - # Pass checkpoint path to wrapped IO if available - if hasattr(self, "wrapped_io") and self.peft_ckpt_path: - self.wrapped_io.adapter_ckpt_path = self.peft_ckpt_path - - def on_predict_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - """Event hook. - - Apply transformations for prediction if needed. - - Args: - trainer: The trainer object. - pl_module: The LightningModule object. - """ - self._maybe_apply_transform(trainer) - - def adapter_key_filter(self, key: str) -> bool: - """Filter state dict keys to identify adapter parameters. - - Args: - key: State dict key to check - - Returns: - bool: True if key corresponds to an adapter parameter - """ - if isinstance(key, tuple): - return key[1].requires_grad - - if "_extra_state" in key: - return False - - # Check if it's an adapter parameter or not in freeze list - return ( - (not any(substring in key for substring in self.freeze_modules)) - or ".adapter." in key - or key.endswith(".adapters") - or "lora_A" in key - or "lora_B" in key - ) - - def __call__(self, model: nn.Module) -> nn.Module: - """Apply LoRA transformations to the model. - - Override to avoid fn.walk compatibility issues. - """ - # First, manually freeze specified modules - self._apply_selective_freeze(model) - - # Then apply LoRA transformations - self._apply_lora_transform(model) - - # THEN freeze ALL base model parameters - # This must happen AFTER LoRA is applied - self._freeze_base_model_parameters(model) - - # Log summary - self._log_lora_summary(model) - - return model - - def _apply_selective_freeze(self, model: nn.Module, prefix=""): - """Manually walk model and freeze specified modules.""" - for name, child in model.named_children(): - full_name = f"{prefix}.{name}" if prefix else name - - # Check if this module should be frozen - if name in self.freeze_modules: - logging.info(f"Freezing module: {full_name}") - for param in child.parameters(): - param.requires_grad = False - - # Recursively apply to children - self._apply_selective_freeze(child, full_name) - - def _freeze_base_model_parameters(self, model: nn.Module): - """Freeze all parameters except LoRA adapters and critical layers.""" - logging.info("\nFreezing base model parameters...") - frozen_count = 0 - kept_trainable = [] - - for name, param in model.named_parameters(): - # Keep LoRA/adapter parameters trainable - if any(adapter_term in name for adapter_term in ["adapter", "lora_A", "lora_B", "lora"]): - param.requires_grad = True - kept_trainable.append(name) - # CRITICAL: Keep output layer trainable to maintain gradient flow - elif "output_layer" in name or "lm_head" in name: - param.requires_grad = True - kept_trainable.append(name) - logging.info(f" Keeping output layer trainable: {name}") - # CRITICAL: Keep final layer norm trainable - elif "final_norm" in name or ("decoder" in name and "norm" in name and "24" in name): - param.requires_grad = True - kept_trainable.append(name) - logging.info(f" Keeping final norm trainable: {name}") - else: - param.requires_grad = False - frozen_count += 1 - - logging.info(f"Froze {frozen_count} parameter tensors") - logging.info(f"Kept {len(kept_trainable)} parameters trainable") - - def _apply_lora_transform(self, model: nn.Module, prefix=""): - """Apply LoRA with better tracking.""" - # Get all modules in a flat list first - modules_to_transform = [] - - for name, module in model.named_modules(): - # Skip if has children (not a leaf module) - if list(module.children()): - continue - - # Check if this matches our target modules - module_type = name.split(".")[-1] if "." in name else name - if module_type in self.target_modules: - modules_to_transform.append((name, module)) - - logging.info(f"\nFound {len(modules_to_transform)} modules to apply LoRA to") - - # Apply transformations - for full_name, module in modules_to_transform: - # Get parent and attribute name - parts = full_name.split(".") - parent = model - for part in parts[:-1]: - parent = getattr(parent, part) - - # Apply transform - attr_name = parts[-1] - transformed = self.transform(module, name=attr_name, prefix="") - - if transformed is not module: - setattr(parent, attr_name, transformed) - logging.info(f"Applied LoRA to: {full_name}") - - # Verify LoRA was applied - if hasattr(transformed, "adapter") or hasattr(transformed, "lora_A"): - logging.info(f" ✓ LoRA adapter confirmed on {full_name}") - - def selective_freeze(self, m: nn.Module, name=None, prefix=None): - """Selectively freeze modules based on freeze_modules list. - - Args: - m: Module to potentially freeze. - name: Name of the module. - prefix: Prefix for the module name. - - Returns: - nn.Module: The module (frozen or not). - """ - if name in self.freeze_modules: - FNMixin.freeze(m) - logging.info(f"Freezing module: {prefix}.{name}" if prefix else f"Freezing module: {name}") - - return m - - # Deepcopy compatibility - def __deepcopy__(self, memo): - """Custom deepcopy to handle unpickleable objects.""" - # Create a new instance with the same parameters - cls = self.__class__ - result = cls.__new__(cls) - - # Copy all attributes except problematic ones - memo[id(self)] = result - for k, v in self.__dict__.items(): - if k not in ["_metadata", "_fields"]: # Skip dataclass internals - try: - setattr(result, k, deepcopy(v, memo)) - except Exception: - # If deepcopy fails, just use the original reference - setattr(result, k, v) - - return result - - def __getstate__(self): - """Prepare object for pickling.""" - state = self.__dict__.copy() - # Remove unpickleable entries - state.pop("_metadata", None) - state.pop("_fields", None) - return state - - def __setstate__(self, state): - """Restore object from pickle.""" - self.__dict__.update(state) - - # Debug module - def _log_lora_summary(self, model: nn.Module): - """Log a summary of LoRA modifications.""" - total_params = sum(p.numel() for p in model.parameters()) - trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - adapter_params = sum(p.numel() for n, p in model.named_parameters() if p.requires_grad and "adapter" in n) - - logging.info(f"\n{'=' * 50}") - logging.info("LoRA Summary:") - logging.info(f" Total parameters: {total_params:,}") - logging.info(f" Trainable parameters: {trainable_params:,}") - logging.info(f" Adapter parameters: {adapter_params:,}") # Changed from "LoRA parameters" - logging.info(f" Percentage trainable: {100 * trainable_params / total_params:.2f}%") - logging.info(f" Percentage adapters: {100 * adapter_params / total_params:.2f}%") - logging.info(f"{'=' * 50}\n") diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/run/__init__.py deleted file mode 100644 index 9981337fda..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/infer.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/run/infer.py deleted file mode 100644 index 6f1db52e9d..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/infer.py +++ /dev/null @@ -1,236 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import sys -import time -from typing import Literal, Optional - -import nemo.lightning as nl -import torch -from megatron.core.inference.common_inference_params import CommonInferenceParams -from megatron.core.inference.inference_request import InferenceRequest -from nemo.collections.llm import inference -from nemo.utils import logging - - -CheckpointFormats = Literal["torch_dist", "zarr"] - - -def parse_args(): - """Parse arguments for Evo2 inference.""" - ap = argparse.ArgumentParser() - - # generation args: - default_prompt = ( - "|d__Bacteria;" - + "p__Pseudomonadota;" - + "c__Gammaproteobacteria;" - + "o__Enterobacterales;" - + "f__Enterobacteriaceae;" - + "g__Escherichia;" - + "s__Escherichia|" - ) - ap.add_argument( - "--prompt", - type=str, - default=default_prompt, - help="Prompt to generate text from Evo2. Defaults to a phylogenetic lineage tag for E coli.", - ) - ap.add_argument( - "--ckpt-dir", type=str, required=True, help="Path to checkpoint directory containing pre-trained Evo2 model." - ) - ap.add_argument("--temperature", type=float, default=1.0, help="Temperature during sampling for generation.") - ap.add_argument("--top-k", type=int, default=0, help="Top K during sampling for generation.") - ap.add_argument("--top-p", type=float, default=0.0, help="Top P during sampling for generation.") - ap.add_argument("--max-new-tokens", type=int, default=1024, help="Maximum number of tokens to generate.") - ap.add_argument("--seed", type=int, default=None, help="Random seed for generation.") - # compute args: - ap.add_argument("--tensor-parallel-size", type=int, default=1, help="Order of tensor parallelism. Defaults to 1.") - ap.add_argument( - "--pipeline-model-parallel-size", type=int, default=1, help="Order of pipeline parallelism. Defaults to 1." - ) - ap.add_argument( - "--context-parallel-size", type=int, default=1, help="Order of context parallelism. Defaults to 1." - ) - # output args: - ap.add_argument( - "--output-file", - type=str, - default=None, - help="Output file containing the generated text produced by the Evo2 model. If not provided, the output will be logged.", - ) - # extra: - ap.add_argument( - "--ckpt-format", - type=str, - choices=["torch_dist", "zarr"], - default="torch_dist", - help="Specify checkpoint format to use. Defaults to 'torch_dist', as 'zarr' is deprecated.", - ) - ap.add_argument( - "--fp8", - action="store_true", - default=False, - help="Whether to use vortex style FP8. Defaults to False.", - ) - ap.add_argument( - "--flash-decode", - action="store_true", - default=False, - help="Whether to use flash decode. Defaults to True.", - ) - return ap.parse_args() - - -def infer( - prompt: str, - ckpt_dir: str, - temperature: float, - top_k: int, - top_p: float, - max_new_tokens: int, - tensor_parallel_size: int, - pipeline_model_parallel_size: int, - context_parallel_size: int, - output_file: Optional[str] = None, - ckpt_format: CheckpointFormats = "torch_dist", - seed: Optional[int] = None, - vortex_style_fp8: bool = False, - flash_decode: bool = False, - return_log_probs: bool = False, -) -> list[InferenceRequest]: - """Inference workflow for Evo2. - - Args: - prompt (str): Prompt to generate text from Evo2. - ckpt_dir (str): Path to checkpoint directory containing pre-trained Evo2 model. - temperature (float): Temperature during sampling for generation. - top_k (int): Top K during sampling for generation. - top_p (float): Top P during sampling for generation. - max_new_tokens (int): Maximum number of tokens to generate. - tensor_parallel_size (int): Order of tensor parallelism. - pipeline_model_parallel_size (int): Order of pipeline parallelism. - context_parallel_size (int): Order of context parallelism. - output_file (str): Output file containing the generated text produced by the Evo2 model. - ckpt_format (CheckpointFormats): Checkpoint format to use. - seed (int): Random seed for generation. - vortex_style_fp8 (bool): Whether to use vortex style FP8. - flash_decode (bool): Whether to use flash decode. - return_log_probs (bool): Whether to return log probabilities. - - Returns: - None - """ - model_parallel_size = tensor_parallel_size * pipeline_model_parallel_size * context_parallel_size - if model_parallel_size > torch.cuda.device_count(): - raise ValueError( - f"Requested model parallel size {model_parallel_size} is greater than the " - f"number of available CUDA devices {torch.cuda.device_count()}" - ) - # Create PTL trainer. - trainer = nl.Trainer( - accelerator="gpu", - devices=model_parallel_size, - strategy=nl.MegatronStrategy( - tensor_model_parallel_size=tensor_parallel_size, - pipeline_model_parallel_size=pipeline_model_parallel_size, - context_parallel_size=context_parallel_size, - pipeline_dtype=torch.bfloat16, - ckpt_load_optimizer=False, # Needs to be false for a normal model checkpoint. - ckpt_save_optimizer=False, - ckpt_async_save=False, - save_ckpt_format=ckpt_format, - ckpt_load_strictness="log_all", - ), - log_every_n_steps=1, - limit_val_batches=10, - num_sanity_val_steps=0, - plugins=nl.MegatronMixedPrecision( - precision="bf16-mixed", - params_dtype=torch.bfloat16, - ), - ) - inference_wrapped_model, mcore_tokenizer = inference.setup_model_and_tokenizer( - path=ckpt_dir, - trainer=trainer, - params_dtype=torch.bfloat16, - inference_batch_times_seqlen_threshold=8192, # TODO - inference_max_seq_length=8192, # TODO - recompute_granularity=None, - recompute_num_layers=None, - recompute_method=None, - vortex_style_fp8=vortex_style_fp8, - flash_decode=flash_decode, - enable_flash_decode=flash_decode, - ) - t0 = time.perf_counter_ns() - # TODO: fix return type in NeMo inference.generate (it is a list[InferenceRequest] not a dict) - results: list[InferenceRequest] = inference.generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=[prompt], - random_seed=seed, - inference_params=CommonInferenceParams( - temperature=temperature, - top_k=top_k, - top_p=top_p, - return_log_probs=return_log_probs, - num_tokens_to_generate=max_new_tokens, - ), - ) - dt = (time.perf_counter_ns() - t0) / 1e9 # seconds - tokens_per_sec = (len(results[0].generated_text) + 1) / dt # +1 for the prompt - - print(f"Inference time: {dt} seconds, {tokens_per_sec} tokens/sec", file=sys.stderr) - if torch.distributed.get_rank() == 0: - if output_file is None: - logging.info(results) - else: - with open(output_file, "w") as f: - f.write(f"{results[0]}\n") - - return results - - -def main(): - """Main function for Evo2 inference.""" - # Parse args. - args = parse_args() - infer( - prompt=args.prompt, - ckpt_dir=args.ckpt_dir, - temperature=args.temperature, - top_k=args.top_k, - top_p=args.top_p, - max_new_tokens=args.max_new_tokens, - tensor_parallel_size=args.tensor_parallel_size, - pipeline_model_parallel_size=args.pipeline_model_parallel_size, - context_parallel_size=args.context_parallel_size, - output_file=args.output_file, - ckpt_format=args.ckpt_format, - seed=args.seed, - vortex_style_fp8=args.fp8, # Vortex only applied FP8 to some layers. - flash_decode=args.flash_decode, - ) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/predict.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/run/predict.py deleted file mode 100644 index 5f89472a49..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/predict.py +++ /dev/null @@ -1,712 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import functools -import tempfile -from pathlib import Path -from typing import Any, Literal - -import nemo.lightning as nl -import torch -from lightning.pytorch import LightningDataModule -from megatron.core import parallel_state -from megatron.core.enums import Fp8Recipe -from megatron.core.tensor_parallel.mappings import _gather_along_last_dim -from megatron.core.utils import get_batch_on_this_cp_rank -from nemo.collections.llm.gpt.data.megatron.hyena.evo2_dataset import Evo2Dataset -from nemo.collections.llm.gpt.model.base import GPTModel, get_packed_seq_params -from nemo.collections.llm.gpt.model.hyena import HYENA_MODEL_OPTIONS, HyenaModel -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning import NeMoLogger -from nemo.lightning.data import WrappedDataLoader -from nemo.utils import logging as logger -from torch import Tensor - -from bionemo.evo2.data.fasta_dataset import SimpleFastaDataset -from bionemo.evo2.models.llama import LLAMA_MODEL_OPTIONS - -# Add import for Mamba models -from bionemo.evo2.models.mamba import MAMBA_MODEL_OPTIONS, MambaModel -from bionemo.evo2.models.peft import Evo2LoRA -from bionemo.evo2.run.utils import infer_model_type, patch_eden_tokenizer -from bionemo.llm.data import collate -from bionemo.llm.lightning import LightningPassthroughPredictionMixin -from bionemo.llm.utils.callbacks import PredictionWriter - - -CheckpointFormats = Literal["torch_dist", "zarr"] - -SHUFFLE_MESSAGE = ( - "Per token log probabilities are not supported when using context parallelism. The results will be " - "zigzag shuffled along the sequence dimension. Raise a feature request if you need this and do " - "not want to manually do the unshuffling yourself. You need to undo the shuffling that happened in " - "`megatron.core.utils.get_batch_on_this_cp_rank`." -) - - -def parse_args(): - """Parse arguments for Evo2 inference.""" - ap = argparse.ArgumentParser() - ap.add_argument("--num-nodes", type=int, default=1, help="Number of nodes to use for prediction, defaults to 1.") - ap.add_argument( - "--devices", - type=int, - help="Number of devices to use for prediction, defaults to tensor_model_parallel_size * pipeline_model_parallel_size * context_parallel_size.", - ) - ap.add_argument( - "--eden-tokenizer", - action="store_true", - help="Patch the tokenizer to work with the one used in training the Eden model.", - ) - ap.add_argument("--fasta", type=Path, required=True, help="Fasta path from which to generate logit predictions.") - ap.add_argument("--ckpt-dir", type=Path, required=True, help="NeMo2 checkpoint directory for inference.") - ap.add_argument("--min-length", type=int, required=False, help="Minimum sequence length for padding.") - ap.add_argument("--prepend-bos", action="store_true", help="Prepend BOS token to sequences. Defaults to False.") - ap.add_argument( - "--mask-phylogenetic-tags", - action="store_true", - help="Mask phylogenetic tags in loss computation. Defaults to False.", - ) - ap.add_argument("--tensor-parallel-size", type=int, default=1, help="Order of tensor parallelism. Defaults to 1.") - ap.add_argument( - "--pipeline-model-parallel-size", - type=int, - choices=[1], - default=1, - help="Order of pipeline parallelism. Defaults to 1 and currently only 1 is supported.", - ) - ap.add_argument( - "--context-parallel-size", type=int, default=1, help="Order of context parallelism. Defaults to 1." - ) - ap.add_argument( - "--fp8-recipe", - type=str, - default="delayed", - choices=list(Fp8Recipe.__members__.keys()), - help="FP8 recipe to use for FP8 tensors in the forward and backward pass. Note that some recipes are only " - "supported by certain architectures. For example 'mxfp8' requires at least blackwell, and 'blockwise' is only " - "implemented for hopper (but not blackwell). 'tensorwise' and 'delayed' are currently supported by all " - "architectures, but 'tensorwise' is preferred over 'delayed' which is the default for historical reasons.", - ) - ap.add_argument( - "--no-sequence-parallel", - action="store_true", - help="When using TP, skip sequence parallelism. Otherwise sequence parallelism is used whenever tensor " - "parallelism is used. sequence parallelism should save a small amount of GPU memory so it's on" - " by default.", - ) - ap.add_argument("--micro-batch-size", type=int, default=1, help="Batch size for prediction. Defaults to 1.") - ap.add_argument( - "--write-interval", - type=str, - default="epoch", - choices=["epoch", "batch"], - help="Interval to write predictions to disk. If doing very large predictions, you may want to set this to 'batch'.", - ) - ap.add_argument( - "--model-size", - type=str, - default="7b_arc_longcontext", - choices=sorted( - list(HYENA_MODEL_OPTIONS.keys()) + list(MAMBA_MODEL_OPTIONS.keys()) + list(LLAMA_MODEL_OPTIONS.keys()) - ), - help="Model size to use. Defaults to '7b_arc_longcontext'.", - ) - # output args: - ap.add_argument( - "--output-dir", - type=Path, - default=None, - help="Output dir that will contain the generated text produced by the Evo2 model. If not provided, the output will be logged.", - ) - ap.add_argument( - "--files-per-subdir", - type=int, - help="Number of files to write to each subdirectory. If provided, subdirectories with N files each will be created. Ignored unless --write-interval is 'batch'.", - ) - ap.add_argument( - "--full-fp8", - action="store_true", - help="Use full FP8 precision (faster but less accurate) rather than vortex style which " - "only applies FP8 to the projection layer of the hyena mixer, when using FP8.", - ) - ap.add_argument("--fp8", action="store_true", help="Use FP8 precision. Defaults to BF16.") - # extra: - ap.add_argument( - "--ckpt-format", - type=str, - choices=["torch_dist", "zarr"], - default="torch_dist", - help="Specify checkpoint format to use. Defaults to 'torch_dist', as 'zarr' is deprecated.", - ) - ap.add_argument( - "--output-log-prob-seqs", action="store_true", help="Output log probability of sequences. Defaults to False." - ) - ap.add_argument( - "--log-prob-collapse-option", - choices=["sum", "mean", "per_token"], - default="mean", - help="How to collapse the log probabilities across the sequence dimension.", - ) - ap.add_argument( - "--hybrid-override-pattern", - type=str, - help="Override the hybrid override pattern in the config (specifies hyena layer ordering and type).", - ) - ap.add_argument( - "--num-layers", type=int, help="If set, override the number of layers specified in the requested config." - ) - ap.add_argument( - "--seq-len-interpolation-factor", - type=int, - help="If set, override the sequence length interpolation factor specified in the requested config. If you " - "know a model was trained with a specific interpolation factor for ROPE, provide it here, it can make a big " - "difference in accuracy.", - ) - ap.add_argument( - "--lora-checkpoint-path", - type=Path, - required=False, - default=None, - help="Path to the lora states to restore from.", - ) - return ap.parse_args() - - -def _gather_along_cp_dim(input_, seq_dim: int = 1): - """Gather tensors and concatenate along the last dimension.""" - world_size = parallel_state.get_context_parallel_world_size() - # Bypass the function if we are using only 1 GPU. - if world_size == 1: - return input_ - - dim_size = list(input_.size()) - dim_size[0] = dim_size[0] * world_size - - output = torch.empty(dim_size, dtype=input_.dtype, device=torch.cuda.current_device()) - # TODO: handle zigzag packing here. Currently this just gathers along ranks, but if you want to see the sequence in - # the original order you need to undo the zigzag packing that happens in - # `megatron.core.utils.get_batch_on_this_cp_rank`. - torch.distributed.all_gather_into_tensor( - output, input_.contiguous(), group=parallel_state.get_context_parallel_group() - ) - tensor_list = output.chunk(world_size, dim=0) - output = torch.cat(tensor_list, dim=seq_dim).contiguous() - - return output - - -def _to_cpu(inputs: dict[str, Tensor]) -> dict[str, Tensor]: - return {k: v.cpu() for k, v in inputs.items()} - - -def _identity(inputs: dict[str, Tensor]) -> dict[str, Tensor]: - return inputs - - -class BasePredictor(LightningPassthroughPredictionMixin): - """Base predictor for GPT-style models.""" - - def __init__( - self, - *args, - output_log_prob_seqs: bool = False, - include_tokens_with_logprob_seqs: bool = False, - log_prob_collapse_option: Literal["sum", "mean", "per_token"] = "mean", - **kwargs, - ): - """Initialize the base predictor with arguments needed for writing predictions.""" - super().__init__(*args, **kwargs) - self.output_log_prob_seqs = output_log_prob_seqs - self.log_prob_collapse_option = log_prob_collapse_option - self.include_tokens_with_logprob_seqs = include_tokens_with_logprob_seqs - self.shuffle_warning_raised = False - - def predict_step( - self, batch, batch_idx: int | None = None, to_cpu: bool = True - ) -> Tensor | dict[str, Tensor] | None: - """Alias for forward_step, also log the pad mask since sequences may not all have the same length.""" - if len(batch) == 0: - return - assert self.training is False, "predict_step should be called in eval mode" - with torch.no_grad(): - forward_out = self.forward_step(batch) - if not parallel_state.is_pipeline_last_stage(): - return None - # Reminder: the model's predictions for input i land at output i+1. To get everything to align, we prepend the - # EOS token to the input sequences and take the outputs for all but the first token. - forward_out_tp_gathered = _gather_along_last_dim( - forward_out, group=parallel_state.get_tensor_model_parallel_group() - ) - - forward_out_gathered = _gather_along_cp_dim(forward_out_tp_gathered) - loss_mask_gathered = _gather_along_cp_dim(batch["loss_mask"]) - tokens_gathered = _gather_along_cp_dim(batch["tokens"]) - cp_group_size = max(parallel_state.get_context_parallel_world_size(), 1) - assert self.tokenizer.vocab_size == forward_out_gathered.shape[-1] - to_cpu_fn = _to_cpu if to_cpu else _identity - if self.output_log_prob_seqs: - if self.log_prob_collapse_option == "per_token" and cp_group_size > 1 and not self.shuffle_warning_raised: - logger.warning(SHUFFLE_MESSAGE) - self.shuffle_warning_raised = True - softmax_logprobs = torch.log_softmax(forward_out_gathered, dim=-1) - softmax_logprobs = softmax_logprobs[:, :-1] - input_ids = tokens_gathered[:, 1:] - if softmax_logprobs.shape[1] != input_ids.shape[1]: - raise RuntimeError( - f"Softmax logprobs shape {softmax_logprobs.shape} does not match input ids shape {input_ids.shape}" - ) - - logprobs = torch.gather( - softmax_logprobs, # Gather likelihoods... - 2, # along the vocab dimension... - input_ids.unsqueeze(-1), # using the token ids to index. - ).squeeze(-1) - log_prob_per_token = logprobs * loss_mask_gathered[:, 1:].float() - if self.log_prob_collapse_option == "per_token": - return to_cpu_fn( - { - "log_probs_seqs": log_prob_per_token, - "seq_idx": batch["seq_idx"], - "loss_mask": loss_mask_gathered[:, 1:], - } - ) - else: - log_prob_seqs = torch.sum(log_prob_per_token, dim=1) - if self.log_prob_collapse_option == "mean": - log_prob_seqs = log_prob_seqs / torch.clamp(loss_mask_gathered[:, 1:].float().sum(dim=-1), min=1.0) - return to_cpu_fn({"log_probs_seqs": log_prob_seqs, "seq_idx": batch["seq_idx"]}) - else: - # If the user wants to match back to logits, then they will need to do the offsetting logic themselves. - if cp_group_size > 1 and not self.shuffle_warning_raised: - logger.warning(SHUFFLE_MESSAGE) - self.shuffle_warning_raised = True - logprob_seqs_result = { - "token_logits": forward_out_gathered, - "pad_mask": loss_mask_gathered, - "seq_idx": batch["seq_idx"], - } - if self.include_tokens_with_logprob_seqs: - logprob_seqs_result["tokens"] = tokens_gathered - # Note, to match up tokens with logprobs, you need to offset by 1. Eg something like this: - # shifted_token_logits = token_logits[:, :-1] - # shifted_pad_mask = pad_mask[:, 1:] - # shifted_tokens = tokens[:, 1:] - return to_cpu_fn(logprob_seqs_result) - - -class HyenaPredictor(BasePredictor, HyenaModel): - """A predictor for the Hyena model. This adds in the predict step and the passthrough method.""" - - def configure_model(self, *args, **kwargs) -> None: - """Configure the model.""" - super().configure_model(*args, **kwargs) - self.trainer.strategy._init_model_parallel = True - - -class MambaPredictor(BasePredictor, MambaModel): - """Mamba model for prediction with additional metrics.""" - - -class LlamaPredictor(BasePredictor, GPTModel): - """Llama model for prediction with additional metrics.""" - - -def hyena_predict_forward_step(model, batch) -> torch.Tensor: - """Performs a forward step for the Hyena model. - - Args: - model: The Hyena model - batch: Dictionary containing input batch data with keys: - - tokens: Input token IDs - - position_ids: Position IDs - - labels: Labels for loss computation - - loss_mask: Mask for loss computation - - Returns: - torch.Tensor: Output from the model forward pass - """ - forward_args = { - "input_ids": batch["tokens"], - "position_ids": batch["position_ids"], - # "labels": batch["labels"], - # "loss_mask": batch["loss_mask"], - } - - forward_args["attention_mask"] = None - if "cu_seqlens" in batch: - forward_args["packed_seq_params"] = get_packed_seq_params(batch) - return model(**forward_args) - - -def hyena_predict_data_step(dataloader_iter) -> dict[str, torch.Tensor]: - """Data step for the Hyena model prediction. Modified from the original gpt data step to include the seq_idx.""" - from megatron.core import parallel_state - - # Based on: https://github.com/NVIDIA/Megatron-LM/blob/main/pretrain_gpt.py#L87 - # https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py#L828-L842 - - batch = next(dataloader_iter) - - _batch: dict - if isinstance(batch, tuple) and len(batch) == 3: - _batch = batch[0] - else: - _batch = batch - - required_device_keys = set() - required_host_keys = set() - - required_device_keys.add("attention_mask") - if "cu_seqlens" in _batch: - required_device_keys.add("cu_seqlens") - required_host_keys.add("cu_seqlens_argmin") - required_host_keys.add("max_seqlen") - - if parallel_state.is_pipeline_first_stage(): - required_device_keys.update(("tokens", "position_ids")) - include_seq_idx = False - if parallel_state.is_pipeline_last_stage(): - include_seq_idx = True - required_device_keys.update(("labels", "tokens", "loss_mask")) - - _batch_required_keys = {} - for key, val in _batch.items(): - if key in required_device_keys: - _batch_required_keys[key] = val.cuda(non_blocking=True) - elif key in required_host_keys: - _batch_required_keys[key] = val.cpu() - else: - _batch_required_keys[key] = None - - # slice batch along sequence dimension for context parallelism - output = get_batch_on_this_cp_rank(_batch_required_keys) - if include_seq_idx: - output["seq_idx"] = _batch["seq_idx"].cuda(non_blocking=True) - return output - - -class PredictDataModule(LightningDataModule): - """Create a dataloader for prediction.""" - - def __init__( - self, - dataset: torch.utils.data.Dataset, - batch_size: int = 1, - tokenizer=None, - min_length: int | None = None, - ): - """Create a dataloader for prediction.""" - super().__init__() - self.dataset = dataset - self.batch_size = batch_size - self.tokenizer = tokenizer - self.min_length = min_length - default_pad_id = 0 - self.pad_token_id = getattr(tokenizer, "pad_id", default_pad_id) if tokenizer is not None else default_pad_id - - def setup(self, stage: str | None = None) -> None: - """Set up the dataloader.""" - pass - - def predict_dataloader(self): - """Create a dataloader for prediction.""" - # need to use this to communicate that we are in predict mode and safe to not drop last batch - return WrappedDataLoader( - mode="predict", - dataset=self.dataset, - batch_size=self.batch_size, - num_workers=8, - shuffle=False, - drop_last=False, - collate_fn=functools.partial( - collate.padding_collate_fn, - padding_values={"tokens": self.pad_token_id, "position_ids": self.pad_token_id, "loss_mask": False}, - min_length=self.min_length, - max_length=None, - ), - ) - - -def predict( - fasta_path: Path, - ckpt_dir: str, - output_dir: Path, - tensor_parallel_size: int, - pipeline_model_parallel_size: int, - context_parallel_size: int, - num_nodes: int = 1, - devices: int | None = None, - eden_tokenizer: bool = False, - model_size: str = "7b", - ckpt_format: CheckpointFormats = "torch_dist", - fp8: bool = False, - full_fp8: bool = False, - fp8_recipe: str = "delayed", - work_dir: Path | None = None, - micro_batch_size: int = 1, - output_log_prob_seqs: bool = False, - log_prob_collapse_option: Literal["sum", "mean", "per_token"] = "mean", - write_interval: Literal["epoch", "batch"] = "epoch", - prepend_bos: bool = False, - no_sequence_parallel: bool = False, - hybrid_override_pattern: str | None = None, - num_layers: int | None = None, - seq_len_interpolation_factor: int | None = None, - files_per_subdir: int | None = None, - lora_checkpoint_path: Path | None = None, - mask_phylogenetic_tags: bool = False, - min_length: int | None = None, - extra_callbacks: list | None = None, # use this for making testing the predict loop easier. -): - """Inference workflow for Evo2. - - Returns: - None - """ - if fp8 and not full_fp8 and fp8_recipe != "delayed": - logger.warning( - "fp8_recipe is ignored when using fp8 and not full_fp8 since it is set inside of the layer " - "config to match vortex style FP8." - ) - if work_dir is None: - work_dir = Path(tempfile.mkdtemp()) - if files_per_subdir is None and write_interval == "batch": - logger.warning( - "--files-per-subdir is not set with --write-interval batch, will write all predictions to a " - "single directory. This may cause problems if you are predicting on a very large dataset." - ) - sequence_parallel = tensor_parallel_size > 1 and not no_sequence_parallel - output_dir.mkdir(parents=True, exist_ok=True) # Make sure the output directory exists, files will be written here. - model_parallel_size = tensor_parallel_size * pipeline_model_parallel_size * context_parallel_size - if devices is None: - devices = model_parallel_size - world_size = num_nodes * devices - if world_size % model_parallel_size != 0: - raise ValueError( - f"world_size must be divisible by model_parallel_size, got {world_size} and" - f" {model_parallel_size}. Please set --num-nodes and --devices such that num_nodes * devices is divisible " - "by model_parallel_size, which is TP * CP * PP." - ) - global_batch_size = micro_batch_size * world_size // model_parallel_size - - callbacks = [ - PredictionWriter( - output_dir=output_dir, - write_interval=write_interval, - batch_dim_key_defaults={"token_logits": 0}, - seq_dim_key_defaults={"token_logits": 1}, - files_per_subdir=files_per_subdir, - save_all_model_parallel_ranks=False, # only write one copy of predictions. - ) - ] - if extra_callbacks is not None: - callbacks.extend(extra_callbacks) - - # The following two config options are really only used for testing, but may also be useful for getting output from - # specific layers of the model. - config_modifiers_init: dict[str, Any] = { - "distribute_saved_activations": False if sequence_parallel and tensor_parallel_size > 1 else True, - } - if hybrid_override_pattern is not None: - config_modifiers_init["hybrid_override_pattern"] = hybrid_override_pattern - if num_layers is not None: - config_modifiers_init["num_layers"] = num_layers - if seq_len_interpolation_factor is not None: - config_modifiers_init["seq_len_interpolation_factor"] = seq_len_interpolation_factor - - tokenizer = get_nmt_tokenizer("byte-level") - if eden_tokenizer: - patch_eden_tokenizer(tokenizer) - - model_type = infer_model_type(model_size) - - # Select model config based on model type - if model_type == "hyena": - if model_size not in HYENA_MODEL_OPTIONS: - raise ValueError(f"Invalid model size for Hyena: {model_size}") - config = HYENA_MODEL_OPTIONS[model_size]( - forward_step_fn=hyena_predict_forward_step, - data_step_fn=hyena_predict_data_step, - # Only use vortex style FP8 in the model config if using FP8 and not full FP8. This will only apply FP8 to - # the projection layer of the hyena mixer. - vortex_style_fp8=fp8 and not full_fp8, - **config_modifiers_init, - ) - - if lora_checkpoint_path: - model_transform = Evo2LoRA(peft_ckpt_path=str(lora_checkpoint_path)) - callbacks.append(model_transform) - else: - model_transform = None - - model = HyenaPredictor( - config, - tokenizer=tokenizer, - output_log_prob_seqs=output_log_prob_seqs, - log_prob_collapse_option=log_prob_collapse_option, - model_transform=model_transform, - ) - elif model_type == "mamba": # mamba - if model_size not in MAMBA_MODEL_OPTIONS: - raise ValueError(f"Invalid model size for Mamba: {model_size}") - config = MAMBA_MODEL_OPTIONS[model_size]( - forward_step_fn=hyena_predict_forward_step, # Can reuse the same forward steps - data_step_fn=hyena_predict_data_step, - **config_modifiers_init, - ) - - model = MambaPredictor( - config, - tokenizer=tokenizer, - output_log_prob_seqs=output_log_prob_seqs, - log_prob_collapse_option=log_prob_collapse_option, - ) - elif model_type == "llama": - if model_size not in LLAMA_MODEL_OPTIONS: - raise ValueError(f"Invalid model size for Llama: {model_size}") - config = LLAMA_MODEL_OPTIONS[model_size]( - forward_step_fn=hyena_predict_forward_step, - data_step_fn=hyena_predict_data_step, - **config_modifiers_init, - ) - model = LlamaPredictor( - config, - tokenizer=tokenizer, - output_log_prob_seqs=output_log_prob_seqs, - log_prob_collapse_option=log_prob_collapse_option, - ) - else: - # This shouldn't be possible to reach. - raise ValueError(f"Invalid model type: {model_type}.") - - # Create PTL trainer. - trainer = nl.Trainer( - accelerator="gpu", - num_nodes=num_nodes, - devices=devices, - strategy=nl.MegatronStrategy( - drop_last_batch=False, - tensor_model_parallel_size=tensor_parallel_size, - pipeline_model_parallel_size=pipeline_model_parallel_size, - context_parallel_size=context_parallel_size, - pipeline_dtype=torch.bfloat16, - ckpt_load_optimizer=False, # Needs to be false for a normal model checkpoint. - ckpt_save_optimizer=False, - ckpt_async_save=False, - sequence_parallel=sequence_parallel, - save_ckpt_format=ckpt_format, - ckpt_load_strictness="log_all", - setup_optimizers=False, - store_optimizer_states=False, - configure_optimizers=False, - data_sampler=nl.MegatronDataSampler( - micro_batch_size=micro_batch_size, - global_batch_size=global_batch_size, - seq_len=8192, - output_log=False, # this is needed for predict step to work - ), - ), - log_every_n_steps=1, - limit_val_batches=10, - num_sanity_val_steps=0, - callbacks=callbacks, - plugins=nl.MegatronMixedPrecision( - precision="bf16-mixed", - params_dtype=torch.bfloat16, - # Only use FP8 in this plugin when using full FP8 precision and FP8. - # Otherwise use vortex_style_fp8 in the model config. - fp8_recipe=fp8_recipe, - fp8="hybrid" if fp8 and full_fp8 else None, - fp8_amax_history_len=16 if fp8 and full_fp8 else 1, - fp8_amax_compute_algo="max" if fp8 and full_fp8 else "most_recent", - ), - ) - - nemo_logger = NeMoLogger(log_dir=str(work_dir)) - nemo_logger.setup(trainer, resume_if_exists=True) - resume = nl.AutoResume( - resume_if_exists=True, - resume_ignore_no_checkpoint=False, - resume_past_end=False, - resume_from_path=str(ckpt_dir), - restore_config=None, - ) - - resume.setup(trainer, model) # this pulls weights from the starting checkpoint. - - if mask_phylogenetic_tags: - - def custom_loss_masker(tokens): - # Run the evo2 dataset mask_phylogenetic_tags function - return Evo2Dataset.mask_phylogenetic_tags( - tokens, - Evo2Dataset.TAG_BOUNDS, - Evo2Dataset.TAG_CHARS, - tokenizer.eod if tokenizer is not None else Evo2Dataset.DEFAULT_EOD, - Evo2Dataset.MAX_TAG_LEN, - ) - else: - custom_loss_masker = None - - dataset = SimpleFastaDataset(fasta_path, tokenizer, prepend_bos=prepend_bos, custom_loss_masker=custom_loss_masker) - datamodule = PredictDataModule(dataset, batch_size=micro_batch_size, tokenizer=tokenizer, min_length=min_length) - trainer.predict(model, datamodule=datamodule) # TODO return_predictions=False - dataset.write_idx_map( - output_dir - ) # Finally write out the index map so we can match the predictions to the original sequences. - - -def main(): - """Entrypoint for Evo2 prediction (single inference step, no new tokens).""" - args = parse_args() - predict( - num_nodes=args.num_nodes, - devices=args.devices, - fasta_path=args.fasta, - ckpt_dir=args.ckpt_dir, - tensor_parallel_size=args.tensor_parallel_size, - pipeline_model_parallel_size=args.pipeline_model_parallel_size, - context_parallel_size=args.context_parallel_size, - output_dir=args.output_dir, - model_size=args.model_size, - ckpt_format=args.ckpt_format, - fp8=args.fp8, - full_fp8=args.full_fp8, - fp8_recipe=args.fp8_recipe, - micro_batch_size=args.micro_batch_size, - output_log_prob_seqs=args.output_log_prob_seqs, - log_prob_collapse_option=args.log_prob_collapse_option, - prepend_bos=args.prepend_bos, - no_sequence_parallel=args.no_sequence_parallel, - hybrid_override_pattern=args.hybrid_override_pattern, - seq_len_interpolation_factor=args.seq_len_interpolation_factor, - num_layers=args.num_layers, - files_per_subdir=args.files_per_subdir, - write_interval=args.write_interval, - lora_checkpoint_path=args.lora_checkpoint_path, - mask_phylogenetic_tags=args.mask_phylogenetic_tags, - min_length=args.min_length, - eden_tokenizer=args.eden_tokenizer, - ) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py deleted file mode 100644 index fc24aa13c1..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/train.py +++ /dev/null @@ -1,1124 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import os -from pathlib import Path -from typing import List, Optional - -# TODO add back support for slurm resilience. -# import nvidia_resiliency_ext.ptl_resiliency as res_module -import torch -from lightning.pytorch.callbacks import LearningRateMonitor, RichModelSummary -from megatron.core.distributed import DistributedDataParallelConfig -from megatron.core.enums import Fp8Recipe -from megatron.core.optimizer import OptimizerConfig -from nemo import lightning as nl -from nemo.collections import llm -from nemo.collections.llm.gpt.data import MockDataModule, PreTrainingDataModule -from nemo.collections.llm.gpt.data.megatron.hyena.config import parse_dataset_config -from nemo.collections.llm.gpt.data.megatron.hyena.evo2_dataset import Evo2Dataset, Evo2DatasetPadEodLossMask -from nemo.collections.llm.gpt.model.hyena import HYENA_MODEL_OPTIONS -from nemo.collections.llm.recipes.tp_overlap_configs.userbuffers import ( - userbuffers_bf16_h100_h8192_tp4_mbs1_seqlen8192, - userbuffers_fp8_h100_h8192_tp4_mbs1_seqlen8192, -) -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning.pytorch import callbacks as nl_callbacks -from nemo.lightning.pytorch.callbacks.flops_callback import FLOPsMeasurementCallback -from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback -from nemo.lightning.pytorch.optim import CosineAnnealingScheduler -from nemo.lightning.pytorch.optim.megatron import MegatronOptimizerModule -from nemo.lightning.pytorch.strategies.utils import RestoreConfig -from nemo.utils import logging as logger -from nemo.utils.exp_manager import TimingCallback - -from bionemo.evo2.data.sharded_eden_dataloader import ShardedEdenDataModule -from bionemo.evo2.models.llama import LLAMA_MODEL_OPTIONS -from bionemo.evo2.models.mamba import MAMBA_MODEL_OPTIONS, MambaModel, mamba_no_weight_decay_cond_with_embeddings -from bionemo.evo2.models.peft import Evo2LoRA -from bionemo.evo2.run.utils import infer_model_type, patch_eden_tokenizer -from bionemo.evo2.utils.callbacks import GarbageCollectAtInferenceTime, _FirstBatchCudaSync -from bionemo.evo2.utils.config import hyena_no_weight_decay_cond_with_embeddings -from bionemo.evo2.utils.logging.callbacks import TEVCallback -from bionemo.llm.utils.datamodule_utils import infer_global_batch_size -from bionemo.llm.utils.logger_utils import WandbConfig, setup_nemo_lightning_logger - - -torch._dynamo.config.suppress_errors = True - -# Force first batch to run with CUDA_LAUNCH_BLOCKING enabled to avoid CUDA asynchronous initialization -# race condition in TE LayerNormLinear. This is unset after the first batch. -# See https://github.com/NVIDIA/bionemo-framework/issues/1301 for more details. -os.environ.setdefault("CUDA_LAUNCH_BLOCKING", "1") - - -def parse_args(args: Optional[List[str]] = None) -> argparse.Namespace: - """Parse arguments for Evo2 model training.""" - parser = argparse.ArgumentParser( - description=( - "Train an Evo2/Hyena-family model using NeMo 2.0.\n\n" - "Choose exactly one data source:\n" - " - --dataset-config: blended/weighted dataset YAML.\n" - " - --mock-data: synthetic mock data for testing/debugging.\n" - " - --fasta-data: single FASTA file input (requires --fasta-file).\n" - " - --sharded-eden-data: pre-sharded SQLite sequence DBs + precomputed windows per split\n" - " (requires --sequence-db-dir, --train-window-db, --val-window-db, --test-window-db)." - ), - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - data_group = parser.add_mutually_exclusive_group(required=True) - - data_group.add_argument( - "-d", - "--dataset-config", - type=str, - help="Path to the blended / weighted training dataset configuration YAML. Mutually exclusive with " - "--mock-data, --fasta-data and --sharded-eden-data.", - ) - data_group.add_argument( - "--mock-data", - action="store_true", - help="Use synthetic mock data for quick testing/debugging. Mutually exclusive with --dataset-config, --fasta-data and --sharded-eden-data.", - ) - - data_group.add_argument( - "--fasta-data", - action="store_true", - help=( - "Train on a single FASTA file (EdenDataModule). Requires --fasta-file. Mutually exclusive with " - "--dataset-config, --mock-data and --sharded-eden-data." - ), - ) - - data_group.add_argument( - "--sharded-eden-data", - action="store_true", - help=( - "Train on pre-sharded SQLite sequence databases with precomputed windows per split " - "(ShardedEdenDataModule). Requires: --sequence-db-dir, --train-window-db, --val-window-db, --test-window-db. " - "Mutually exclusive with --dataset-config, --mock-data and --fasta-data." - ), - ) - - # Dataset configuration (unified) - parser.add_argument( - "--fasta-file", - type=str, - help=( - "Absolute path to FASTA file containing training data. Required when using --fasta-data; " - "ignored otherwise." - ), - ) - parser.add_argument( - "--sequence-db-dir", - type=str, - help=( - "Directory containing per-sample SQLite databases with sequences. Required with --sharded-eden-data; " - "ignored otherwise." - ), - ) - parser.add_argument( - "--train-window-db", - type=str, - help=( - "Path to the precomputed training split windows SQLite database. Required with --sharded-eden-data; " - "ignored otherwise." - ), - ) - parser.add_argument( - "--val-window-db", - type=str, - help=( - "Path to the precomputed validation split windows SQLite database. Required with --sharded-eden-data; " - "ignored otherwise." - ), - ) - parser.add_argument( - "--test-window-db", - type=str, - help=( - "Path to the precomputed test split windows SQLite database. Required with --sharded-eden-data; " - "ignored otherwise." - ), - ) - parser.add_argument( - "--dataset-num-epochs", - type=int, - default=1, - help=( - "When using --sharded-eden-data, wrap each split with a MultiEpochDatasetResampler over this many epochs. " - "Default 1 means each split length equals its base dataset length." - ), - ) - parser.add_argument( - "--stride", - type=int, - default=7992, - help=( - "Stride between adjacent windows used by ShardedEdenDataModule. Must match the stride used when " - "precomputing the windows databases. Ignored for other data modes." - ), - ) - parser.add_argument( - "--window-min-length-threshold", - type=int, - default=0, - help=( - "If > 0, prune windows shorter than this effective length during precomputation and require matching " - "value in the window DB metadata. Defaults to 0 (disabled)." - ), - ) - parser.add_argument( - "--log-windows", - action="store_true", - default=False, - help=("Enable window access logging for ShardedEdenDataset (applies only to --sharded-eden-data)."), - ) - parser.add_argument( - "--window-log-dir", - type=str, - default=None, - help=("Directory for window-access logging SQLite files (applies only to --sharded-eden-data)."), - ) - parser.add_argument( - "--rc-aug", - action="store_true", - default=False, - help=("Enable reverse-complement augmentation (applies only to --sharded-eden-data)."), - ) - parser.add_argument( - "--dataset-dir", - type=str, - help="Absolute path to the dataset directory. Defaults to using the absolute or relative paths (dataset_prefix) specified in the dataset config YAML. Only used with --dataset-config.", - ) - - parser.add_argument("--num-nodes", type=int, default=1, help="Number of nodes to use for training, defaults to 1.") - parser.add_argument("--devices", type=int, default=1, help="Number of devices to use for training, defaults to 1.") - parser.add_argument("--seq-length", type=int, default=8192, help="Training sequence length") - parser.add_argument( - "--tensor-parallel-size", type=int, default=1, help="Order of tensor parallelism. Defaults to 1." - ) - parser.add_argument( - "--pipeline-model-parallel-size", type=int, default=1, help="Order of pipeline parallelism. Defaults to 1." - ) - parser.add_argument( - "--context-parallel-size", type=int, default=1, help="Order of context parallelism. Defaults to 1." - ) - parser.add_argument( - "--create-tensorboard-logger", action="store_true", default=False, help="Create a tensorboard logger." - ) - parser.add_argument("--wandb-entity", type=str, default=None, help="The team posting this run") - parser.add_argument("--wandb-project", type=str, default=None, help="Wandb project name ") - parser.add_argument("--wandb-tags", nargs="+", type=str, default=None, help="Tags associated with this run") - parser.add_argument( - "--wandb-group", type=str, default=None, help="A unique string shared by all runs in a given group" - ) - parser.add_argument( - "--wandb-job-type", - type=str, - default=None, - help="A unique string representing a type of run, which is useful when you're grouping runs together into larger experiments using group.", - ) - parser.add_argument( - "--wandb-run-name", - type=str, - default=None, - help="A unique string representing the name of the wandb run. If not provided, the name will be generated from the model and training specifications.", - ) - - parser.add_argument( - "--wandb-id", type=str, default=None, help="Sets the version, mainly used to resume a previous run" - ) - parser.add_argument( - "--wandb-anonymous", action="store_true", help="Enable or explicitly disable anonymous logging" - ) - parser.add_argument( - "--wandb-log-model", action="store_true", help="Save checkpoints in wandb dir to upload on W&B servers" - ) - parser.add_argument("--wandb-offline", action="store_true", help="Use wandb in offline mode") - parser.add_argument("--sequence-parallel", action="store_true", help="Set to enable sequence parallelism.") - parser.add_argument("--fp8", action="store_true", help="Set to enable FP8") - parser.add_argument("--micro-batch-size", type=int, default=1, help="Micro-batch size for data-parallel training.") - parser.add_argument( - "--global-batch-size", - type=int, - default=None, - help="Global batch size for training. If set to None, infer it from the TP, CP, and PP parameters.", - ) - parser.add_argument( - "--grad-acc-batches", type=int, default=1, help="Number of batches to accumulate gradients over." - ) - parser.add_argument( - "--max-steps", - type=int, - help="Number of training optimizer update steps. This controls the total number of steps as well as the " - "shape of the learning rate curve.", - default=500000, - ) - parser.add_argument( - "--constant-steps", - type=int, - help="Number of steps to keep the learning rate constant at minimum after annealing. This controls the " - "shape of the learning rate curve.", - default=80000, - ) - parser.add_argument( - "--early-stop-on-step", - type=int, - help="Stop training on this step, if set. This may be useful for testing or debugging purposes.", - ) - parser.add_argument( - "--val-check-interval", type=int, help="Number of steps between validation measurements and model checkpoints." - ) - parser.add_argument("--grad-reduce-in-fp32", action="store_true", default=False, help="Gradient reduce in FP32.") - parser.add_argument( - "--fp8-wgrad", - action="store_true", - default=False, - help="Faster option that is maybe less accurate (TBD) when using fp8.", - ) - parser.add_argument("--use-megatron-comm-overlap-llama3-8k", action="store_true", default=False) - parser.add_argument( - "--tp-comm-overlap-backend", - type=str, - choices=["nccl", "mpi", "gloo"], - default="nccl", - help="TP communication backend to use. Defaults to 'nccl'.", - ) - parser.add_argument("--align-param-gather", action="store_true", default=False) - parser.add_argument( - "--model-size", - type=str, - choices=sorted( - list(HYENA_MODEL_OPTIONS.keys()) + list(MAMBA_MODEL_OPTIONS.keys()) + list(LLAMA_MODEL_OPTIONS.keys()) - ), - default="7b", - help="Model size/configuration to use. Options depend on the selected model-type.", - ) - parser.add_argument( - "--add-bias-output", - action="store_true", - default=False, - help="Add bias to the output layer to enable learning a simple prior.", - ) - parser.add_argument( - "--result-dir", type=Path, required=False, default=Path("./results"), help="Path to the result directory." - ) - parser.add_argument("--experiment-name", type=str, required=False, default="evo2", help="Name of the experiment.") - - parser.add_argument( - "--limit-val-batches", - type=int, - default=20, - help="Number of validation steps", - ) - parser.add_argument( - "--limit-test-batches", - type=int, - help="Number of test steps (sometimes useful for getting around megatron errors of too few samples). Defaults " - "to the same as limit_val_batches.", - ) - parser.add_argument( - "--log-every-n-steps", - type=int, - default=1, - required=False, - help="Number of steps between logging.", - ) - parser.add_argument( - "--ckpt-dir", - type=str, - default=None, - help="Directory to restore an initial checkpoint from. Use this for supervised fine-tuning.", - ) - parser.add_argument( - "--use-precision-aware-optimizer", - action="store_true", - default=False, - help="Use precision aware optimizer that stores main weights in FP32 when doing mixed precision training.", - ) - parser.add_argument( - "--bf16-main-grads", - action="store_true", - default=False, - help="Use bf16 for main gradients, only use this with --use-precision-aware-optimizer.", - ) - parser.add_argument("--wd", type=float, default=0.01, help="Weight decay for optimizer.") - parser.add_argument( - "--adam-beta1", - type=float, - default=0.9, - help="Adam optimizer beta1 parameter.", - ) - parser.add_argument( - "--adam-beta2", - type=float, - default=0.95, - help="Adam optimizer beta2 parameter.", - ) - parser.add_argument( - "--adam-eps", - type=float, - default=1e-8, - help="Adam optimizer epsilon parameter. The inverse of this value (1/eps) represents the maximum adaptive learning rate per parameter.", - ) - parser.add_argument( - "--restore-optimizer-from-ckpt", - action="store_true", - help="Restore optimizer state from initial checkpoint. Defaults to False.", - ) - parser.add_argument( - "--average-in-collective", - action="store_true", - default=False, - help="Avaerage optimizer state in collective rather than dividing by dp size and summing.", - ) - parser.add_argument("--seed", type=int, default=1234, help="Set random seed for training.") - parser.add_argument("--workers", type=int, default=8, help="Number of workers to use for data loading.") - parser.add_argument( - "--gc-interval", - type=int, - default=0, - help="Set to a value > 0 if you want to synchronize garbage collection, will do gc every gc-interval steps.", - ) - parser.add_argument( - "--enable-preemption", - action="store_true", - default=False, - help="Enable preemption hooks. If enabled this will save a checkpoint whenever slurm exits.", - ) - parser.add_argument( - "--ckpt-async-save", - action="store_true", - default=False, - ) - parser.add_argument( - "--ckpt-format", - type=str, - choices=["torch_dist", "zarr"], - default="torch_dist", - help="Specify checkpoint format to use. Defaults to 'torch_dist', as 'zarr' is deprecated. Only use if " - "resuming training from a zarr checkpoint.", - ) - parser.add_argument( - "--eod-pad-in-loss-mask", - action="store_true", - default=False, - help="Do not predict EOD/Pad tokens (typical default, but not default in original evo2).", - ) - parser.add_argument( - "--cross-entropy-loss-fusion", - action="store_true", - default=False, - help="Use the faster, but maybe less accurate fused form of cross entropy, " - "which also has bf16 grads internally.", - ) - parser.add_argument( - "--no-fp32-residual-connection", - action="store_true", - default=False, - help="If set, turn off fp32 residual connections which may be faster but may impact accuracy.", - ) - parser.add_argument( - "--debug-ddp-parity-freq", - type=int, - default=0, - help="Set to value > 0 to debug DDP weight parity between ranks.", - ) - parser.add_argument( - "--hybrid-override-pattern", - type=str, - help="Override the hybrid override pattern in the config (specifies hyena layer ordering and type).", - ) - parser.add_argument( - "--num-layers", type=int, help="If set, override the number of layers specified in the requested config." - ) - parser.add_argument( - "--create-tflops-callback", - action="store_true", - default=False, - help="Enable tflops calculation callback for Hyena / Evo2. Defaults to False.", - ) - parser.add_argument( - "--log-parameters-and-shapes", - action="store_true", - default=False, - help="Log training parameters shapes and dtypes for debugging.", - ) - parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.") - parser.add_argument("--min-lr", type=float, default=3e-5, help="Min learning rate in cosine annealing.") - parser.add_argument("--warmup-steps", type=int, default=2500, help="Number of warmup steps in cosine annealing") - parser.add_argument( - "--fp8-recipe", - type=str, - default="delayed", - choices=list(Fp8Recipe.__members__.keys()), - help="FP8 recipe to use for FP8 tensors in the forward and backward pass. Note that some recipes are only " - "supported by certain architectures. For example 'mxfp8' requires at least blackwell, and 'blockwise' is only " - "implemented for hopper (but not blackwell). 'tensorwise' and 'delayed' are currently supported by all " - "architectures, but 'tensorwise' is preferred over 'delayed' which is the default for historical reasons.", - ) - # NSYS profiling/tooling arguments - parser.add_argument( - "--nsys-profiling", - action="store_true", - default=False, - help="Enable targeted `nsys` profiling on the training loop for a defined step range. To actually get profiling" - " output you must run the whole program with `nsys`. For example: " - " `nsys profile -s none -o output_report_name -t cuda,nvtx --force-overwrite true " - "--capture-range=cudaProfilerApi --capture-range-end=stop [regular python command here]`", - ) - # start, end, rank - parser.add_argument( - "--nsys-start-step", - type=int, - required=False, - default=0, - help="Start nsys profiling after this step.", - ) - parser.add_argument( - "--spike-no-more-embedding-init", - action="store_true", - default=False, - help="If set, the embeddings are initialized with a Normal(0, 1.0) distribution rather " - "than the default Normal(0, 0.02). This may help avoid loss spiking during training. Consider using this with " - "--no-weight-decay-embeddings to avoid shrinking the embeddings to 0 by skipping weight decay on these layers, " - "or with --use-targeted-variance-loss to maintain a 1.0 variance during training even with weight decay. This " - "also turns off shared weights between embeddings and outputs.", - ) - parser.add_argument( - "--no-weight-decay-embeddings", - action="store_true", - default=False, - help="If set, do not apply weight decay to the embeddings.", - ) - parser.add_argument( - "--use-targeted-variance-loss", - action="store_true", - default=False, - help="Use targeted variance loss.", - ) - parser.add_argument( - "--nsys-end-step", - type=int, - required=False, - help="End nsys profiling after this step.", - ) - parser.add_argument( - "--no-renormalize-loss", - action="store_true", - default=False, - help="Do not renormalize the loss weights.", - ) - parser.add_argument( - "--mamba-lowercase-loss-weight", - type=float, - default=0.1, - help="Loss weight for the Mamba model for lowercase bases, if you are using a Mamba model. " - "Default is 0.1 like the Evo2 paper. Set to 1.0 to disable differential loss weighting.", - ) - # rank as list of integers - parser.add_argument( - "--nsys-ranks", - type=int, - nargs="+", - required=False, - default=[0], - help="Enable nsys profiling for these ranks.", - ) - parser.add_argument( - "--activation-checkpoint-recompute-num-layers", - type=int, - help="If set, override the default value set in the config.", - ) - parser.add_argument( - "--disable-checkpointing", - action="store_false", - default=True, - dest="create_checkpoint_callback", - help="Disable creating a ModelCheckpoint callback.", - ) - parser.add_argument( - "--clip-grad", - type=float, - default=1.0, - help="Grad clip value. Note that when using DDP this may need to be inflated.", - ) - parser.add_argument( - "--seq-len-interpolation-factor", - type=float, - help="Adjusts the linear scaling of ROPE (Rotary Position Embedding) for context extension. " - "Set this factor relative to your base context length e.g., for an original context length of 8192 and " - "an extended context length of 524288, use 524288/8192 = 64.", - ) - parser.add_argument( - "--overlap-param-gather", - action="store_true", - default=False, - help="Overlap the parameter gather with the optimizer step. This is currently disabled due to a NeMo bug " - "when using DDP. Making this an option defaulting to False is a temporary solution until the bug is fixed.", - ) - parser.add_argument( - "--overlap-grad-reduce", - action="store_true", - default=False, - help="Overlap the gradient reduce with the optimizer step.", - ) - parser.add_argument( - "--hidden-dropout", - type=float, - default=0.0, - help="Dropout probability for the hyena layers", - ) - parser.add_argument( - "--ffn-hidden-size", - type=int, - default=None, - help="FFN hidden size for the hyena layers", - ) - parser.add_argument( - "--log-num-zeros-in-grad", - action="store_true", - default=False, - help="Log the number of zeros in the gradient.", - ) - parser.add_argument( - "--attention-dropout", - type=float, - default=0.0, - help="Dropout probability for the attention layers.", - ) - parser.add_argument( - "--use-subquadratic_ops", - action="store_true", - help="Use subquadratic_ops for improved performance.", - ) - parser.add_argument( - "--save-top-k", - type=int, - default=5, - help="Number of best checkpoints to keep. Set to -1 to save all checkpoints.", - ) - parser.add_argument( - "--metric-to-monitor-for-checkpoints", - type=str, - default="val_loss", - help="Metric to monitor for checkpoints.", - ) - parser.add_argument( - "--save-last-checkpoint", - action="store_true", - default=True, - help="Save the last checkpoint.", - ) - parser.add_argument( - "--no-save-last-checkpoint", - action="store_false", - dest="save_last_checkpoint", - default=True, - help="Disable saving the last checkpoint.", - ) - parser.add_argument("--lora-finetune", action="store_true", help="Use LoRA fine-tuning", default=False) - parser.add_argument("--lora-checkpoint-path", type=str, default=None, help="LoRA checkpoint path") - parser.add_argument( - "--no-calculate-per-token-loss", - action="store_true", - default=False, - help="Calculate a simpler mean across the microbatch of the loss prior to DDP reduction rather than the global" - " per-token mean loss. Use this if speed is critical and if you do not need token masking in your loss.", - ) - parser.add_argument( - "--no-check-for-nan-in-grad", - action="store_true", - default=False, - help="Skip checking for NaNs in gradients. Only use this for debugging purposes.", - ) - parser.add_argument( - "--garbage-collect-at-inference", - action="store_true", - default=False, - help="Enable CUDA memory cleanup before validation to prevent initialization errors.", - ) - parser.add_argument( - "--lora-alpha", - type=int, - default=None, - help="Alpha parameter for LoRA fine-tuning.", - ) - parser.add_argument( - "--lora-dim", - type=int, - default=None, - help="Dim parameter for LoRA fine-tuning.", - ) - - recompute_group = parser.add_mutually_exclusive_group(required=False) - recompute_group.add_argument("--no-activation-checkpointing", action="store_true", default=False) - recompute_group.add_argument("--selective-activation-checkpointing", action="store_true", default=False) - return parser.parse_args(args=args) - - -def train(args: argparse.Namespace) -> nl.Trainer: - """Main function to run Evo2 training.""" - tokenizer = get_nmt_tokenizer( - "byte-level", - ) - - # Infer global batch size. - global_batch_size = args.global_batch_size - if global_batch_size is None: - global_batch_size = infer_global_batch_size( - micro_batch_size=args.micro_batch_size, - num_nodes=args.num_nodes, - devices=args.devices, - accumulate_grad_batches=args.grad_acc_batches, - tensor_model_parallel_size=args.tensor_parallel_size, - pipeline_model_parallel_size=args.pipeline_model_parallel_size, - context_model_parallel_size=args.context_parallel_size, - ) - if args.mock_data: - data_module = MockDataModule( - seq_length=args.seq_length, - micro_batch_size=args.micro_batch_size, - global_batch_size=global_batch_size, - num_train_samples=args.max_steps * global_batch_size, - num_val_samples=args.limit_val_batches * global_batch_size, - num_test_samples=1, - num_workers=args.workers, - tokenizer=tokenizer, - ) - elif args.fasta_data: - raise NotImplementedError("Fasta data is not supported yet. Need to add EdenDataModule") - # data_module = EdenDataModule( - # fasta_file=args.fasta_file, - # seq_length=args.seq_length, - # micro_batch_size=args.micro_batch_size, - # global_batch_size=global_batch_size, - # num_workers=args.workers, - # tokenizer=tokenizer, - # seed=args.seed, - # ) - elif args.sharded_eden_data: - # Validate required arguments for sharded data - if not args.sequence_db_dir or not args.train_window_db or not args.val_window_db or not args.test_window_db: - raise ValueError( - "--sequence-db-dir, --train-window-db, --val-window-db, and --test-window-db are required when using --sharded-eden-data." - ) - logger.info(f"Patching the tokenizer for compatibility with Eden model training: {tokenizer}") - patch_eden_tokenizer(tokenizer) # Eden tokenizer uses different IDs for BOS, EOS, SEP, and PAD than default. - data_module = ShardedEdenDataModule( - sequence_db_dir=args.sequence_db_dir, - train_window_db_path=args.train_window_db, - val_window_db_path=args.val_window_db, - test_window_db_path=args.test_window_db, - seq_length=args.seq_length, - tokenizer=tokenizer, - micro_batch_size=args.micro_batch_size, - global_batch_size=global_batch_size, - num_workers=args.workers, - rc_aug=args.rc_aug, - stride=args.stride, - window_min_length_threshold=args.window_min_length_threshold, - seed=args.seed, - num_epochs=args.dataset_num_epochs, - log_windows=args.log_windows, - log_dir=args.window_log_dir, - ) - else: - blended_dataset_config = parse_dataset_config( - dataset_config_path=args.dataset_config, dataset_path=args.dataset_dir - ) - dataset_cls = Evo2DatasetPadEodLossMask if args.eod_pad_in_loss_mask else Evo2Dataset - # Instantiate pre-training module. - data_module = PreTrainingDataModule( - paths=blended_dataset_config, - dataset_cls=dataset_cls, - seq_length=args.seq_length, - micro_batch_size=args.micro_batch_size, - global_batch_size=global_batch_size, - seed=args.seed, - num_workers=args.workers, - tokenizer=tokenizer, - eod_mask_loss=args.eod_pad_in_loss_mask, - ) - if args.no_activation_checkpointing: - activation_checkpointing_args = { - "recompute_granularity": None, - "recompute_method": None, - "recompute_num_layers": None, - } - elif args.selective_activation_checkpointing: - activation_checkpointing_args = { - "recompute_granularity": "selective", - "recompute_method": None, - "recompute_num_layers": None, - } - else: - if args.activation_checkpoint_recompute_num_layers is not None: - activation_checkpointing_args = { - "recompute_num_layers": args.activation_checkpoint_recompute_num_layers, - } - else: - activation_checkpointing_args = {} - # Retrieve model config. - config_modifiers_init = { - "calculate_per_token_loss": not args.no_calculate_per_token_loss, # override megatron internal behavior. - "tp_comm_overlap": args.use_megatron_comm_overlap_llama3_8k, - "seq_length": args.seq_length, - "hidden_dropout": args.hidden_dropout, - "attention_dropout": args.attention_dropout, - "to_upper": "weighted" if args.no_renormalize_loss else "normalized_weighted", - "distribute_saved_activations": False if args.sequence_parallel else True, - "cross_entropy_loss_fusion": args.cross_entropy_loss_fusion, - "fp32_residual_connection": not args.no_fp32_residual_connection, - **activation_checkpointing_args, - } - if args.add_bias_output: - config_modifiers_init["add_bias_output"] = args.add_bias_output - if args.spike_no_more_embedding_init: - config_modifiers_init["embedding_init_method_std"] = 1.0 - # When using spike_no_more_embedding_init, we don't want to share embeddings and outputs. - config_modifiers_init["share_embeddings_and_output_weights"] = False - if args.ffn_hidden_size: - config_modifiers_init["ffn_hidden_size"] = args.ffn_hidden_size - if args.use_targeted_variance_loss: - config_modifiers_init["use_targeted_variance_loss"] = True - if args.use_subquadratic_ops: - config_modifiers_init["use_subquadratic_ops"] = True - if args.hybrid_override_pattern: - config_modifiers_init["hybrid_override_pattern"] = args.hybrid_override_pattern - if args.num_layers: - config_modifiers_init["num_layers"] = args.num_layers - - model_type = infer_model_type(args.model_size) - - # Create model based on selected model type - if model_type == "hyena": - if args.model_size not in HYENA_MODEL_OPTIONS: - raise ValueError(f"Invalid model size for Hyena: {args.model_size}") - model_config = HYENA_MODEL_OPTIONS[args.model_size](**config_modifiers_init) - if args.no_weight_decay_embeddings: - # Override the default weight decay condition for Hyena with our bionemo version that also excludes - # embeddings - model_config.hyena_no_weight_decay_cond_fn = hyena_no_weight_decay_cond_with_embeddings - # Lora adaptors configuration - lora_transform = None - if args.lora_finetune: - lora_kwargs = { - k: v - for k, v in { - "alpha": args.lora_alpha, - "dim": args.lora_dim, - }.items() - if v is not None - } - - lora_transform = Evo2LoRA(peft_ckpt_path=args.lora_checkpoint_path, **lora_kwargs) - - model = llm.HyenaModel(model_config, tokenizer=data_module.tokenizer, model_transform=lora_transform) - elif model_type == "mamba": # mamba - if args.no_weight_decay_embeddings: - config_modifiers_init["hyena_no_weight_decay_cond_fn"] = mamba_no_weight_decay_cond_with_embeddings - config_modifiers_init["lowercase_loss_reweighting"] = args.mamba_lowercase_loss_weight - if args.model_size not in MAMBA_MODEL_OPTIONS: - raise ValueError(f"Invalid model size for Mamba: {args.model_size}") - model_config = MAMBA_MODEL_OPTIONS[args.model_size](**config_modifiers_init) - model = MambaModel(model_config, tokenizer=data_module.tokenizer) - elif model_type == "llama": - config_modifiers_init.pop("to_upper") # llama model does not handle custom loss renormalization settings. - model_config = LLAMA_MODEL_OPTIONS[args.model_size](**config_modifiers_init) - model = llm.LlamaModel(model_config, tokenizer=data_module.tokenizer) - - # Setup callbacks. - callbacks = [ - RichModelSummary(max_depth=4), - LearningRateMonitor(), - TimingCallback(), - TEVCallback(), - ] - - callbacks.append(_FirstBatchCudaSync()) - - if args.garbage_collect_at_inference: - callbacks.append(GarbageCollectAtInferenceTime()) - - if args.lora_finetune: - callbacks.append(lora_transform) - if args.enable_preemption: - callbacks.append(nl_callbacks.PreemptionCallback()) - if args.debug_ddp_parity_freq > 0: - callbacks.append(nl_callbacks.DdpParityChecker(interval=args.debug_ddp_parity_freq)) - if args.log_parameters_and_shapes: - callbacks.append(nl_callbacks.ParameterDebugger()) - if args.create_tflops_callback: - # Add callback that logs the tera-FLOPS per second per GPU during training. - flop_meas_callback = FLOPsMeasurementCallback( - model_config, - data_module, - "hyena", - ) - callbacks.append(flop_meas_callback) - - # TODO(@cye): Add this back when it works with 24.12. - # if args.straggler_detection: - # callbacks.append( - # res_module.StragglerDetectionCallback( - # report_time_interval=300, - # calc_relative_gpu_perf=True, - # calc_individual_gpu_perf=True, - # num_gpu_perf_scores_to_print=5, - # gpu_relative_perf_threshold=0.7, - # gpu_individual_perf_threshold=0.7, - # stop_if_detected=True, - # enable_ptl_logging=True, - # ) - # ) - if args.use_megatron_comm_overlap_llama3_8k: - # Pick the floating point appropriate config. - if args.fp8: - tp_comm_overlap_cfg = userbuffers_fp8_h100_h8192_tp4_mbs1_seqlen8192 - else: - tp_comm_overlap_cfg = userbuffers_bf16_h100_h8192_tp4_mbs1_seqlen8192 - callbacks.append( - MegatronCommOverlapCallback( - tp_comm_overlap=model_config.tp_comm_overlap, - tp_comm_overlap_cfg=tp_comm_overlap_cfg, - tp_comm_bootstrap_backend=args.tp_comm_overlap_backend, - wgrad_deferral_limit=22, # default from NeMo - overlap_param_gather_with_optimizer_step=False, # Currently disabled due to an issue with checkpointing. - align_param_gather=args.align_param_gather, - ) - ) - - if args.gc_interval > 0: - callbacks.append( - nl_callbacks.GarbageCollectionCallback( - gc_interval_train=args.gc_interval, gc_interval_val=args.gc_interval - ) - ) - if args.nsys_profiling: - if args.nsys_end_step is None: - nsys_end_step = args.max_steps - else: - nsys_end_step = args.nsys_end_step - callbacks.append( - nl_callbacks.NsysCallback( - start_step=args.nsys_start_step, end_step=nsys_end_step, ranks=args.nsys_ranks, gen_shape=True - ) - ) - # Average in collective is only supported when per-token loss is not calculated. - average_in_collective = args.average_in_collective and args.no_calculate_per_token_loss - wandb_run_name = ( - f"evo2-size-{args.model_size}-TP{args.tensor_parallel_size}-" - f"PP{args.pipeline_model_parallel_size}-CP{args.context_parallel_size}" - f"-GBS{global_batch_size}-MBS{args.micro_batch_size}-SkipLossRenorm{args.no_renormalize_loss}" - f"-NOAC{args.no_activation_checkpointing}-SELAC{args.selective_activation_checkpointing}" - f"-ACRNL{model_config.recompute_num_layers}" - f"-PAT{getattr(model_config, 'hybrid_override_pattern', 'None')}" - f"-F32R{model_config.fp32_residual_connection}" - f"-FCE{model_config.cross_entropy_loss_fusion}" - f"-AIC{average_in_collective}" - f"-PTL{not args.no_calculate_per_token_loss}" - f"-PEOD{args.eod_pad_in_loss_mask}" - f"-BO{args.add_bias_output}" - f"-GCLP{args.clip_grad}" - f"-HDO{args.hidden_dropout}" - f"-ADO{args.attention_dropout}" - f"-LR{args.lr}-MINLR{args.min_lr}-WUSTEPS{args.warmup_steps}-CONSTSTEPS{args.constant_steps}-WD{args.wd}" - f"-GRFP32{args.grad_reduce_in_fp32}-FP8WG{args.fp8_wgrad and args.fp8}" - f"-B1{args.adam_beta1}-B2{args.adam_beta2}-EPS{args.adam_eps}" - f"-PAO{args.use_precision_aware_optimizer}" - f"-B16MG{args.bf16_main_grads}" - f"-EWD{args.no_weight_decay_embeddings}-SNI{args.spike_no_more_embedding_init}" - f"-OGR{args.overlap_grad_reduce}-OPG{args.overlap_param_gather}" - f"-TVL{args.use_targeted_variance_loss}" - f"-NODES{args.num_nodes}-FP8{args.fp8}" - ) - if model_type == "mamba": - # Include this setting for mamba models. - wandb_run_name += f"-LLW{args.mamba_lowercase_loss_weight}" - elif model_type == "llama": - wandb_run_name += f"-LLAMA{args.model_size}" - - wandb_config: Optional[WandbConfig] = ( - None - if args.wandb_project is None - else WandbConfig( - offline=args.wandb_offline, - project=args.wandb_project, - name=args.wandb_run_name if args.wandb_run_name is not None else wandb_run_name, - entity=args.wandb_entity, - tags=args.wandb_tags, - group=args.wandb_group, - job_type=args.wandb_job_type, - id=args.wandb_id, - anonymous=args.wandb_anonymous, - log_model=args.wandb_log_model, - ) - ) - nemo_logger = setup_nemo_lightning_logger( - root_dir=args.result_dir, - name=args.experiment_name, - initialize_tensorboard_logger=args.create_tensorboard_logger, - wandb_config=wandb_config, - ) - - # Ensure window logging directory lives under the run directory - if args.sharded_eden_data and args.log_windows: - window_log_leaf = Path(args.window_log_dir).name if args.window_log_dir else "window_logs" - window_log_dir = Path(nemo_logger.save_dir) / window_log_leaf - try: - window_log_dir.mkdir(parents=True, exist_ok=True) - except Exception: - pass - # Propagate to data module (datasets are built later during setup) - if isinstance(data_module, ShardedEdenDataModule): - data_module.log_dir = str(window_log_dir) - - if args.create_checkpoint_callback: - checkpoint_path = str(Path(nemo_logger.save_dir) / "checkpoints") - checkpoint_callback = nl_callbacks.ModelCheckpoint( - dirpath=checkpoint_path, - save_last=args.save_last_checkpoint, - monitor=args.metric_to_monitor_for_checkpoints, - save_top_k=args.save_top_k, - every_n_train_steps=args.val_check_interval, - always_save_context=True, - filename="{epoch}-{step}-{consumed_samples}", - save_weights_only=False, - save_optim_on_train_end=True, - save_context_on_train_end=True, - ) - callbacks.append(checkpoint_callback) - - # Note: `nl.AutoResume` is only created if a `ModelCheckpoint` exists, because `nl.AutoResume.setup()` - # expects the trainer to have a `checkpoint_callback` set. See: https://github.com/NVIDIA/NeMo/blob/29c230b8a3352bef2128ba2d226a327d52d05be3/nemo/lightning/resume.py#L128 - # - # In principle, this shouldn't be a constraint — it should be possible to create `nl.AutoResume` even if - # checkpointing is not enabled. - auto_resume = nl.AutoResume( - resume_if_exists=True, - resume_ignore_no_checkpoint=True, - resume_past_end=False, - resume_from_directory=checkpoint_path, - restore_config=( - RestoreConfig( - path=args.ckpt_dir, - load_model_state=True, - load_optim_state=args.restore_optimizer_from_ckpt, - ) - if args.ckpt_dir - else None - ), - ) - else: - auto_resume = None - - ddp: DistributedDataParallelConfig = DistributedDataParallelConfig( - check_for_nan_in_grad=not args.no_check_for_nan_in_grad, - overlap_grad_reduce=args.overlap_grad_reduce, - overlap_param_gather=args.overlap_param_gather, # Verify that this works using - grad_reduce_in_fp32=args.grad_reduce_in_fp32, - align_param_gather=args.align_param_gather, - average_in_collective=average_in_collective, - ) - # Initialize Megatron Strategy and Trainer. - strategy = nl.MegatronStrategy( - ddp=ddp, - tensor_model_parallel_size=args.tensor_parallel_size, - pipeline_model_parallel_size=args.pipeline_model_parallel_size, - context_parallel_size=args.context_parallel_size, - pipeline_dtype=torch.bfloat16, - sequence_parallel=args.sequence_parallel, - ckpt_load_optimizer=True, - ckpt_save_optimizer=True, - ckpt_async_save=args.ckpt_async_save, - save_ckpt_format=args.ckpt_format, - ckpt_load_strictness="log_all", # or rebasing to https://github.com/NVIDIA/NeMo/pull/11988/files#diff-7667eae242a8ef776bff78cd08e79bc81df4896a450f0a781f6ed317a3dfb7ffR139 - fp8_recipe=None, - ) - trainer = nl.Trainer( - devices=args.devices, - num_nodes=args.num_nodes, - max_steps=args.max_steps if args.early_stop_on_step is None else args.early_stop_on_step, - accelerator="gpu", - strategy=strategy, - callbacks=callbacks, - log_every_n_steps=args.log_every_n_steps, - limit_val_batches=args.limit_val_batches, - limit_test_batches=args.limit_test_batches if args.limit_test_batches is not None else args.limit_val_batches, - num_sanity_val_steps=0, - use_distributed_sampler=False, - plugins=nl.MegatronMixedPrecision( - precision="bf16-mixed", - fp8_recipe=args.fp8_recipe, - params_dtype=torch.bfloat16, - grad_reduce_in_fp32=args.grad_reduce_in_fp32, - fp8="hybrid" if args.fp8 else None, - fp8_amax_history_len=16 if args.fp8 else 1, - fp8_amax_compute_algo="max" if args.fp8 else "most_recent", - fp8_wgrad=args.fp8 - and ( - args.fp8_wgrad or args.use_megatron_comm_overlap_llama3_8k - ), # faster and less accurate when set to True, and MUST be True if using TP communication overlap - ), - val_check_interval=args.val_check_interval, - enable_checkpointing=args.create_checkpoint_callback, - ) - - # Optimizer and scheduler setup - opt_config = OptimizerConfig( - optimizer="adam", - lr=args.lr, - adam_beta1=args.adam_beta1, - adam_beta2=args.adam_beta2, - weight_decay=args.wd, - clip_grad=args.clip_grad, - adam_eps=args.adam_eps, - use_distributed_optimizer=True, - log_num_zeros_in_grad=args.log_num_zeros_in_grad, - use_precision_aware_optimizer=args.use_precision_aware_optimizer, - main_grads_dtype=torch.bfloat16 if args.bf16_main_grads else torch.float32, - bf16=True, - fp8_recipe=None, - ) - - sched = CosineAnnealingScheduler( - max_steps=trainer.max_steps, - warmup_steps=args.warmup_steps, - min_lr=args.min_lr, - constant_steps=args.constant_steps, - ) - # This is where the no weight decay condition is applied to the optimizer state. - opt = MegatronOptimizerModule( - opt_config, sched, no_weight_decay_cond=getattr(model_config, "hyena_no_weight_decay_cond_fn", None) - ) - llm.train(model, data_module, trainer, log=nemo_logger, resume=auto_resume, optim=opt, tokenizer="data") - - return trainer - - -def main(): - """Parsing args and running evo2 training.""" - args = parse_args() - train(args=args) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/utils.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/run/utils.py deleted file mode 100644 index 6e8163f328..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/run/utils.py +++ /dev/null @@ -1,52 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Utility functions for Evo2 run functions.""" - -from typing import Literal - -from nemo.collections.llm.gpt.model.hyena import HYENA_MODEL_OPTIONS - -from bionemo.evo2.models.llama import LLAMA_MODEL_OPTIONS -from bionemo.evo2.models.mamba import MAMBA_MODEL_OPTIONS - - -def patch_eden_tokenizer(tokenizer): - """Patch the Eden tokenizer to work with the Evo2 tokenizer.""" - bos_id, eos_id, sep_id, pad_id = 1, 2, 3, 0 - - # Patch the private attrs so tokenizer.bos_id/.eos_id/.pad_id work - tokenizer._bos_id = bos_id - tokenizer._eos_id = eos_id - tokenizer._sep_id = sep_id - tokenizer._pad_id = pad_id - - -def infer_model_type(model_size: str) -> Literal["hyena", "mamba", "llama"]: - """Infer the model type from the model size.""" - all_keys = set(HYENA_MODEL_OPTIONS.keys()) | set(MAMBA_MODEL_OPTIONS.keys()) | set(LLAMA_MODEL_OPTIONS.keys()) - if len(all_keys) != len(HYENA_MODEL_OPTIONS.keys()) + len(MAMBA_MODEL_OPTIONS.keys()) + len( - LLAMA_MODEL_OPTIONS.keys() - ): - raise ValueError( - "Duplicate model sizes found in HYENA_MODEL_OPTIONS, MAMBA_MODEL_OPTIONS, and LLAMA_MODEL_OPTIONS." - ) - if model_size in HYENA_MODEL_OPTIONS: - return "hyena" - elif model_size in MAMBA_MODEL_OPTIONS: - return "mamba" - elif model_size in LLAMA_MODEL_OPTIONS: - return "llama" - else: - raise ValueError(f"Invalid model size: {model_size}") diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/__init__.py deleted file mode 100644 index 9981337fda..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/callbacks.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/callbacks.py deleted file mode 100644 index 8b83c2230d..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/callbacks.py +++ /dev/null @@ -1,60 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import os - -import torch -from lightning.pytorch import Callback - - -class _FirstBatchCudaSync(Callback): - # TEMPORARY CALLBACK. Remove once bug is fixed. - # First batch CUDA sync callback: adds barriers for the first training batch to avoid race condition - # See https://github.com/NVIDIA/bionemo-framework/issues/1301 for more details. - def __init__(self): - self._done = False - - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): - if not self._done and torch.cuda.is_available(): - torch.cuda.synchronize() - - def on_after_backward(self, trainer, pl_module): - if not self._done and torch.cuda.is_available(): - torch.cuda.synchronize() - - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): - if not self._done and torch.cuda.is_available(): - torch.cuda.synchronize() - # Unset blocking for subsequent batches - os.environ.pop("CUDA_LAUNCH_BLOCKING", None) - self._done = True - - -class GarbageCollectAtInferenceTime(Callback): - """Callback to clean up CUDA memory before validation to prevent initialization errors.""" - - def on_validation_start(self, trainer, pl_module) -> None: - """Clean up CUDA memory before validation to prevent initialization errors.""" - if torch.cuda.is_available(): - try: - torch.cuda.empty_cache() - torch.cuda.synchronize() - current_device = torch.cuda.current_device() - torch.cuda.set_device(current_device) - torch.cuda.synchronize() - gc.collect() - except Exception as e: - print(f"Warning: CUDA cleanup failed: {e}") diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/README.md b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/README.md deleted file mode 100644 index a7e8e20e66..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Evo2 Checkpoint Conversion Library - -This library contains helper scripts for converting checkpoint formats for Evo2. - -## Converting ZeRO-1 / PyTorch Checkpoints to NeMo2 Checkpoints - -To convert a single PyTorch or ZeRO-1 checkpoints (`.pt`) into NeMo2 format, run the following command: - -``` -python sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py --model-path --output-dir --model-size --ckpt-format -``` - -where `--model-size` can be set to `7b` or `40b` (or their `_arc_1m` variants with modified GLU dimensions) and `--ckpt-format` can be set to `torch_dist` or `zarr`. - -The NeMo2 checkpoint should have the following structure for `torch_dist`: - -``` -default--val_loss=2.3738-epoch=0-consumed_samples=800.0-last -├── context -│ ├── io.json -│ └── model.yaml -└── weights - ├── __*_*.distcp - ├── common.pt - └── metadata.json -``` - -and the following structure for `zarr`: - -``` -interleaved_hyena_7b_fix_shape -├── context -│ ├── io.json -│ └── model.yaml -└── weights - ├── common.pt - ├── metadata.json - └── # Example: module.decoder.layers.0.mixer.dense - └── shard_*_*.pt -``` - -## Converting ZeRO-1 MP{N} to ZeRO-1 MP1 - -To convert sharded (MP>1) ZeRO-1 checkpoints to un-sharded (MP1) checkpoints (or any order of model parallelism) compatible with the `convert_to_nemo.py` conversion script, you can run the following command: - -``` -python sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_checkpoint_model_parallel_evo2.py --source_dir --output_dir --mp_size -``` - -ZeRO-1 checkpoints should have the following structure: - -``` -arc_7b_tp8_pretrained_ckpt/global_step199400 -└── mp_rank_*_model_states.pt -``` - -## Converting ZeRO-3 to ZeRO-1 - -To convert ZeRO-3 checkpoints into ZeRO-1 checkpoints, run the following command: - -``` -python sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_zero3_to_zero1.py --overwrite --mp_size -``` - -ZeRO-3 checkpoints should have the following structure: - -``` -arc_40b_zero3_w32_mp8_test_notfinal_ckpt/global_step1 -├── bf16_zero_pp_rank_*_mp_rank_*_optim_states.pt -├── configs -│ ├── 40b_test_chkpt.yml -│ └── opengenome.yml -└── zero_pp_rank_*_mp_rank_*_model_states.pt -``` diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/__init__.py deleted file mode 100644 index 9981337fda..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_checkpoint_model_parallel_evo2.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_checkpoint_model_parallel_evo2.py deleted file mode 100644 index 7edb5d76c6..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_checkpoint_model_parallel_evo2.py +++ /dev/null @@ -1,403 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This script converts (potentially sharded) ZeRo1 checkpoint parameters to the desired level of model tensor parallelism for the Evo 2 architecture. - -It only supports Zero-1 checkpoints and does not convert any optimizer state, -only the parameters. - -Usage: - python convert_checkpoint_model_parallel_evo2.py \ - --input-checkpoint-dir /path/to/input/checkpoint/global_step1000 \ - --output-checkpoint-dir /path/to/output/checkpoint_mp2/global_step1000 \ - --output-model-parallelism 2 -""" - -import argparse -import os -import re -from collections import OrderedDict -from glob import glob -from pathlib import Path -from typing import List, Optional, Set, Union - -import torch -from nemo.utils import logging -from params import EVO2_PARAMS, Param - - -DEVICE = "cpu" -DEFAULT_PARAM_PATTERN = r"sequential\.\d+\.(.+)" - - -def get_args(): - """Parse command-line arguments.""" - parser = argparse.ArgumentParser( - description="Convert checkpoint parameters to desired model parallelism.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "--source_dir", - type=str, - required=True, - help="Path to the input checkpoint directory containing ZeRo1 checkpoint shards, i.e. mp_rank_*_model_states.pt.", - ) - parser.add_argument( - "--glob-pattern", - type=str, - default="mp_rank_*_model_states.pt", - required=False, - help="Filename pattern to glob for ZeRo1 checkpoint shards.", - ) - parser.add_argument( - "--output_dir", - type=str, - required=True, - help="Path to the output checkpoint directory to dump the --mp_size converted model checkpoint (ZeRo1).", - ) - parser.add_argument("--mp_size", type=int, required=True, help="Desired output model parallelism to convert to.") - parser.add_argument( - "--exclude-extra", - action="store_true", - help="Exclude extra states in the conversion. Default to False, i.e. include extra states.", - ) - parser.add_argument("--verbose", action="store_true", help="Print more information about the conversion.") - args = parser.parse_args() - return args - - -def concatenate_tensors_across_shards( - tensor_name: str, - data_shards: List[OrderedDict[str, torch.Tensor]], - partition_dim: int, - hidden_dim: Optional[int] = None, - verbose: bool = False, -) -> torch.Tensor: - """Concatenate tensor shards across multiple shards. - - Args: - tensor_name (str): Name of the tensor to concatenate. - data_shards (List[OrderedDict[str, torch.Tensor]]): List of data shards containing tensors. - partition_dim (int): Dimension along which to partition the tensor. - hidden_dim (int, optional): Hidden dimension of the tensor. Defaults to None. - verbose (bool, optional): Whether to print detailed information. Defaults to False. - - Returns: - torch.Tensor: Concatenated tensor. - """ - # Retrieve tensor shards. - tensors = [shard["module"][tensor_name] for shard in data_shards] - - # Check shape of tensors without tensor parallelism, i.e. stored in all shards of the checkpoint. - if partition_dim is None: - for i, tensor in enumerate(tensors): - if not torch.allclose(tensors[0], tensor): - logging.info( - f"WARNING: Synchronized params differ for param {tensor_name}: abs max diff = {(tensors[0] - tensor).abs().max()}." - ) - # Get the distribution of tensors[0] and tensor. - if verbose: - ref_tensor = tensors[0].flatten().to(torch.float32) - ref_min, ref_max = ref_tensor.min(), ref_tensor.max() - - q = torch.tensor([0.25, 0.5, 0.75], device=ref_tensor.device) - ref_quantiles = ref_tensor.quantile(q) - logging.info(f"rank0 tensor: min={ref_min}, max={ref_max} quantiles={ref_quantiles}") - - target_tensor = tensor.flatten().to(torch.float32) - target_min, target_max = target_tensor.min(), target_tensor.max() - target_quantiles = target_tensor.quantile(q) - logging.info(f"rank{i} tensor: min={target_min}, max={target_max} quantiles={target_quantiles}") - - logging.info(f"rank0 tensor distribution:\n {ref_tensor.histc(100, min=ref_min, max=ref_max)}") - logging.info(f"rank{i} distribution:\n {target_tensor.histc(100, min=ref_min, max=ref_max)}") - - logging.info(f"tensor {tensor_name} not partitioned, returning rank0 tensor {tensors[0].shape}") - return tensors[0] - # Check for sharding across the hidden dimension. - elif partition_dim == hidden_dim: - raise ValueError(f"Detected sharding for {tensor_name} across hidden dimension at index {hidden_dim}.") - - # Check that the tensors have a consistent hidden dimension. - expected_dim = None - if hidden_dim is not None: - for tensor in tensors: - if expected_dim is None: - # Store expected hidden dimension for all tensors. - expected_dim = tensor.shape[hidden_dim] - if not tensor.shape[hidden_dim] == expected_dim: - raise ValueError(f"Tensor {tensor_name} has invalid hidden shape {tensor.shape}.") - - # Concatenate shards. - return torch.cat(tensors, dim=partition_dim) - - -def split_tensor_across_shards( - data_shards: List[OrderedDict], - tensor: torch.Tensor, - tensor_name: str, - partition_dim: int, -) -> None: - """Split a tensor across multiple shards. - - Args: - data_shards (List[OrderedDict]): List of data shards to store the split tensors. - tensor (torch.Tensor): Tensor to split. - tensor_name (str): Name of the tensor. - partition_dim (int): Dimension along which to partition the tensor. - """ - if partition_dim is None: - # No sharding. Synchronize weights across all shards. - for data_shard in data_shards: - data_shard["module"][tensor_name] = tensor - data_shard["param_shapes"][tensor_name] = tensor.shape - else: - # Split the tensor along the partition dimension across shards. - n_shards = len(data_shards) - if tensor.shape[partition_dim] % n_shards != 0: - raise ValueError( - f"Cannot shard {tensor_name} of dimension {tensor.shape[partition_dim]} across {n_shards} evenly." - ) - for chunk, data_shard in zip( - torch.chunk(tensor, chunks=n_shards, dim=partition_dim), - data_shards, - ): - data_shard["module"][tensor_name] = chunk.clone() - data_shard["param_shapes"][tensor_name] = chunk.shape - - -def format_output_filename(shard: int) -> str: - """Format the output filename for a given shard index. - - Args: - shard (int): Shard index. - - Returns: - str: Formatted output filename. - """ - return f"mp_rank_{str(shard).zfill(2)}_model_states.pt" - - -def check_params( - detected: List[str], - expected: Union[Set[str], List[str]], - buffers: Set[str], - param_pattern: str = DEFAULT_PARAM_PATTERN, - verbose: bool = False, -): - """Check that all model parameters are expected. - - Args: - detected (List[str]): Detected model parameters names. - expected (Set[str]): Expected model parameters names. - buffers (Set[str]): Set of buffer names. - param_pattern (str, optional): Regex pattern to match parameter names. Defaults to DEFAULT_PARAM_PATTERN. - verbose (bool, optional): Whether to print detailed information. Defaults to False. - """ - # Expected model parameters. - expected = set(expected) if not isinstance(expected, set) else expected - # Detected model parameters. - model_param_names = [] - for k in detected: - match = re.search(param_pattern, k) - if match is not None: - model_param_names.append(match.group(1)) - else: - logging.info(f"Could not match {k}") - detected_param_set = set(model_param_names) - if verbose: - logging.info("Detected Params:\n {detected_params}".format(detected_params="\n ".join(detected_param_set))) - - # Log unexpected model parameters. - missing_params = expected - detected_param_set - extra_params = detected_param_set - expected - extra_params = [param for param in extra_params if param not in buffers] - extra_params = [param for param in extra_params if not param.endswith("._extra_state")] - if len(extra_params) > 0: - logging.info(f"WARNING: detected extra params: {extra_params}") - if len(missing_params) > 0: - logging.info(f"WARNING: missing params: {missing_params}") - if not (extra_params or missing_params): - logging.info("No missing or extra params detected!") - - -def convert_model_weights( - input_data_shards: List[OrderedDict], - output_data_shards: List[OrderedDict], - model_parameter_names: List[str], - param_list: List[Param], - verbose: bool = False, - exclude_extra: bool = False, -): - """Convert model weights from input model parallelism to output model parallelism. - - Args: - input_data_shards (List[OrderedDict]): List of input data shards. - output_data_shards (List[OrderedDict]): List of output data shards. - model_parameter_names (List[str]): List of model parameter names. - param_list (List[Param]): List of parameter information. - verbose (bool, optional): Whether to print detailed information. Defaults to False. - exclude_extra (bool, optional): Whether to exclude extra states in the conversion. Defaults to False. - """ - logging.info( - f"Converting {len(model_parameter_names)} parameters from {len(input_data_shards)} input shards to {len(output_data_shards)} output shards..." - ) - converted = 0 - skipped = 0 - for model_parameter in model_parameter_names: - if args.verbose: - logging.info(f"Processing {model_parameter}...") - - # Ignore FP8 extra state. - if model_parameter.endswith("._extra_state"): - if "extra_state" in model_parameter: - logging.info(f"Ignoring {model_parameter} -> contains extra state.") - skipped += 1 - continue - - # Get the partition dimension and hidden dimension of each parameter. - param_info = None - for param in param_list: - if ".".join(model_parameter.split(".")[2:]) == param.name: - if param_info is None: - param_info = param - else: - raise ValueError( - f"Found more than one matching model parallelism parameter for {model_parameter}: {param_info}, {param}" - ) - if param_info is None: - raise ValueError(f"Could not find {model_parameter} among known parameters.") - - # Concatenate shards. - concatenated_tensor = concatenate_tensors_across_shards( - model_parameter, input_data_shards, param_info.partition_dim, param_info.hidden_dim, verbose=verbose - ) - # Split into shards. - split_tensor_across_shards( - output_data_shards, - concatenated_tensor, - model_parameter, - param_info.partition_dim, - ) - converted += 1 - logging.info(f"Converted {converted} of {len(model_parameter_names)} parameters (skipped {skipped} params).") - num_params = len(output_data_shards[0]["module"]) - logging.info(f"Total Params: {num_params}") - if not all(num_params == len(shard["module"]) for shard in output_data_shards): - raise ValueError("Shards have different number of parameters, which is not permitted in model parallelism.") - - if not exclude_extra: - logging.info("Adding extra states from rank0 input shard...") - rank0_model = input_data_shards[0]["module"] - for k in rank0_model.keys(): - for i, output_shard in enumerate(output_data_shards): - if k not in output_shard["module"]: - if i == 0: - logging.info(f"Adding {k} to output shards.") - output_shard["module"][k] = rank0_model[k] - new_params = len(output_data_shards[0]["module"]) - num_params - logging.info(f"Added {new_params} extra states, total params: {num_params + new_params}") - if not all(num_params + new_params == len(shard["module"]) for shard in output_data_shards): - raise ValueError("Shards have different number of parameters after adding extra states.") - - for shard_idx, output_data_shard in enumerate(output_data_shards): - output_path = Path(output_data_shard["output_dir"]) / format_output_filename(shard_idx) - torch.save( - output_data_shard, - output_path, - ) - logging.info(f"Converted checkpoint saved to: {output_path}") - - -def convert_zero1_model_parallel_checkpoint( - source_dir: str, - output_dir: str, - glob_pattern: str = "mp_rank_*_model_states.pt", - model_parallel: int = 8, - param_list: List[Param] = EVO2_PARAMS, - exclude_extra_params: bool = False, - verbose: bool = False, -): - """Convert sharded ZeRo1 checkpoint to desired model parallelism. - - Args: - source_dir (str): Path to the input checkpoint directory. - output_dir (str): Path to the output checkpoint directory. - glob_pattern (str): Filename pattern to glob for ZeRo1 checkpoint shards. Defaults to "mp_rank_*_model_states.pt". - model_parallel (int): Desired output model parallelism. Defaults to 8. - param_list (List[Param]): List of parameter information. Defaults to EVO2_PARAMS. - exclude_extra_params (bool): Whether to exclude extra states in the conversion. Defaults to False. - verbose (bool): Whether to print detailed information. Defaults to False. - """ - # Argument validation. - if not os.path.exists(source_dir): - raise ValueError(f"Input checkpoint dir ({source_dir}) not found.") - os.makedirs(output_dir, exist_ok=True) - logging.info(f"Converting checkpoint from {source_dir} to {output_dir}") - - # Identify all checkpoint model path files. - parameter_paths = sorted(glob(f"{source_dir}/{glob_pattern}")) - if len(parameter_paths) == 0: - raise ValueError(f"No parameter files found in {source_dir}") - - # Load all shards from the ZeRo1 checkpoint. - input_data_shards = [torch.load(path, map_location=DEVICE) for path in parameter_paths] - buffers = {buf for x in input_data_shards for buf in x.get("buffer_names", [])} - - # Initialize output MP shards. - output_data_shards = [ - { - "module": OrderedDict(), - "param_shapes": OrderedDict(), - "dp_world_size": input_data_shards[0]["dp_world_size"], - "output_dir": output_dir, - } - for _ in range(model_parallel) - ] - model_parameter_names = input_data_shards[0]["module"].keys() - - # Check no missing or extra params - check_params( - detected=list(model_parameter_names), - expected={param.name for param in param_list}, - buffers=buffers, - verbose=verbose, - ) - # Convert the checkpoint - convert_model_weights( - input_data_shards, - output_data_shards, - model_parameter_names, - param_list, - verbose=verbose, - exclude_extra=exclude_extra_params, - ) - logging.info("Done!") - - -if __name__ == "__main__": - args = get_args() - convert_zero1_model_parallel_checkpoint( - args.source_dir, - args.output_dir, - args.glob_pattern, - args.mp_size, - EVO2_PARAMS, - args.exclude_extra, - args.verbose, - ) diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py deleted file mode 100644 index 1772044053..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_to_nemo.py +++ /dev/null @@ -1,81 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse - -from nemo.collections.llm.gpt.model.hyena import ( - HYENA_MODEL_OPTIONS, - HuggingFaceSavannaHyenaImporter, - PyTorchHyenaImporter, -) - -from bionemo.evo2.models.llama import LLAMA_MODEL_OPTIONS, HFEdenLlamaImporter -from bionemo.evo2.run.utils import infer_model_type - - -def parse_args(): - """Parse command-line arguments.""" - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-path", - type=str, - required=True, - help="Path to the Evo2 un-sharded (MP1) model checkpoint file, or a Hugging Face model name. Any model " - "from the Savanna Evo2 family is supported such as 'hf://arcinstitute/savanna_evo2_1b_base'.", - ) - parser.add_argument("--output-dir", type=str, required=True, help="Output directory path for the converted model.") - parser.add_argument( - "--use-subquadratic_ops", - action="store_true", - help="The checkpoint being converted should use subquadratic_ops.", - ) - parser.add_argument( - "--model-size", - type=str, - choices=sorted(set(HYENA_MODEL_OPTIONS.keys()) | set(LLAMA_MODEL_OPTIONS.keys())), - required=True, - help="Model architecture to use, choose between 1b, 7b, 40b, or test (a sub-model of 4 layers, " - "less than 1B parameters). '*_arc_longcontext' models have GLU / FFN dimensions that support 1M " - "context length when trained with TP>>8. Note that Mamba models are not supported for conversion yet.", - ) - return parser.parse_args() - - -def main(): - """Convert a PyTorch Evo2 model checkpoint to a NeMo model checkpoint.""" - args = parse_args() - model_type = infer_model_type(args.model_size) - if model_type == "hyena": - config_modifiers_init = {} - if args.use_subquadratic_ops: - config_modifiers_init["use_subquadratic_ops"] = True - evo2_config = HYENA_MODEL_OPTIONS[args.model_size](**config_modifiers_init) - if args.model_path.startswith("hf://"): - importer = HuggingFaceSavannaHyenaImporter(args.model_path.lstrip("hf://"), model_config=evo2_config) - else: - importer = PyTorchHyenaImporter(args.model_path, model_config=evo2_config) - elif model_type == "llama": - importer = HFEdenLlamaImporter(args.model_path) - else: - raise ValueError(f"Importer model type: {model_type}.") - importer.apply(args.output_dir) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_zero3_to_zero1.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_zero3_to_zero1.py deleted file mode 100644 index 9f122a90e9..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/convert_zero3_to_zero1.py +++ /dev/null @@ -1,147 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -#!/usr/bin/env python - -import argparse -import os -import time -from multiprocessing import Pool -from typing import List, Optional - -import zero3_conversion_lib -from zero3_conversion_lib import get_elapsed, process_single_rank - - -def convert_zero_checkpoint_to_fp32_state_dict( - checkpoint_dir: str, - output_dir: str, - tag: Optional[str] = None, - exclude_frozen_parameters: bool = False, - mp_size: int = 8, - overwrite: bool = False, - num_workers: int = 1, - ranks_to_process: Optional[List[int]] = None, -): - """Converts a DeepSpeed Zero-3 checkpoint to a PyTorch FP32 state_dict. - - Args: - checkpoint_dir (str): Path to the desired checkpoint folder. - output_dir (str): Directory to save the PyTorch FP32 state_dict output files. - tag (Optional[str]): Checkpoint tag used as a unique identifier or sub-directory that contains the checkpoint. - exclude_frozen_parameters (bool): Whether to exclude frozen parameters. - mp_size (int): Model parallel size of the source checkpoint. - overwrite (bool): Whether to overwrite existing MP shards. - num_workers (int): Number of workers to use for processing. - ranks_to_process (Optional[List[int]]): List of ranks to process. - - Raises: - FileNotFoundError: If the checkpoint directory does not exist. - """ - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) if tag is not None else checkpoint_dir - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - output_dir = os.path.join(output_dir, tag) if tag is not None else output_dir - if not os.path.exists(output_dir): - os.makedirs(output_dir, exist_ok=True) - - num_workers = min(num_workers, mp_size) - - if ranks_to_process is not None: - ranks_to_process = list(ranks_to_process) - assert len(ranks_to_process) <= mp_size, f"Expected {mp_size} ranks to process, got {len(ranks_to_process)}" - assert all(0 <= r < mp_size for r in ranks_to_process), ( - f"Expected ranks to be in range [0, {mp_size}), got {ranks_to_process}" - ) - else: - ranks_to_process = list(range(mp_size)) - - print(f"Processing ranks: {ranks_to_process}", flush=True) - - start = time.time() - if num_workers > 1: - with Pool(num_workers) as p: - p.starmap( - process_single_rank, - [(i, ds_checkpoint_dir, output_dir, overwrite, exclude_frozen_parameters) for i in ranks_to_process], - ) - else: - for i in ranks_to_process: - process_single_rank(i, ds_checkpoint_dir, output_dir, overwrite, exclude_frozen_parameters) - - total_time = get_elapsed(time.time() - start) - print(f"All done!\n-> Total time: {total_time}\n-> All outputs written to {os.path.abspath(output_dir)}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "checkpoint_dir", type=str, help="path to the desired checkpoint folder, e.g., path/checkpoint-12" - ) - parser.add_argument( - "output_dir", - type=str, - help="directory to the pytorch fp32 state_dict output files(e.g. path/checkpoint-12-output/)", - ) - parser.add_argument("--overwrite", action="store_true", help="Overwrite existing MP shards") - parser.add_argument( - "-t", - "--tag", - type=str, - default=None, - help="Checkpoint tag used as a unique identifier or sub-directory that contains the checkpoint, e.g. 'global_step1' or 'latest'.", - ) - parser.add_argument("--exclude_frozen_parameters", action="store_true", help="exclude frozen parameters") - parser.add_argument("-d", "--debug", action="store_true", help="enable debug") - parser.add_argument("--mp_size", required=True, type=int, help="Model parallel size of source checkpoint") - parser.add_argument("--rank_start", default=None, type=int, help="Start rank to process") - parser.add_argument("--rank_end", default=None, type=int, help="End rank to process") - parser.add_argument("--num_workers", default=1, type=int, help="Number of workers to use for processing") - args = parser.parse_args() - - if args.rank_start is not None: - if args.rank_end is None: - args.rank_end = args.mp_size - 1 - else: - assert args.rank_end < args.mp_size, "Expected end rank to be less than mp_size" - - assert args.rank_start < args.rank_end, "Expected start rank to be less than end rank" - assert args.rank_start >= 0, "Expected start rank to be greater than 0" - args.ranks_to_process = list(range(args.rank_start, args.rank_end + 1)) - else: - args.ranks_to_process = list(range(args.mp_size)) - - print("Args:") - for k, v in args.__dict__.items(): - print(f" {k}: {v}", flush=True) - print("") - zero3_conversion_lib.debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict( - args.checkpoint_dir, - args.output_dir, - tag=args.tag, - exclude_frozen_parameters=args.exclude_frozen_parameters, - mp_size=args.mp_size, - overwrite=args.overwrite, - num_workers=args.num_workers, - ranks_to_process=args.ranks_to_process, - ) diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/evo2_remove_optimizer.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/evo2_remove_optimizer.py deleted file mode 100644 index 724bcd3b25..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/evo2_remove_optimizer.py +++ /dev/null @@ -1,194 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import logging -from pathlib import Path -from typing import Type - -from nemo.collections.llm.gpt.model.base import GPTModel -from nemo.collections.llm.gpt.model.hyena import ( - HyenaModel, -) -from nemo.lightning import io, teardown - -from bionemo.evo2.models.mamba import MambaModel - - -def parse_args(): - """Parse command-line arguments.""" - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-path", - type=str, - required=True, - help="Path to the Evo2 un-sharded (MP1) model checkpoint file, or a Hugging Face model name. Any model " - "from the Savanna Evo2 family is supported such as 'hf://arcinstitute/savanna_evo2_1b_base'.", - ) - parser.add_argument("--output-dir", type=str, required=True, help="Output directory path for the converted model.") - parser.add_argument( - "--model-type", - type=str, - choices=["hyena", "mamba", "llama"], - default="hyena", - help="Model architecture to use, choose between 'hyena', 'mamba', or 'llama'.", - ) - return parser.parse_args() - - -class _OptimizerRemoverBase: - MODEL_CLS: Type - - """Base class for optimizer remover importers.""" - - def __new__(cls, path: str | Path, model_config=None): - """Creates a new importer instance. - - Args: - path: Path to the PyTorch model - model_config: Optional model configuration - - Returns: - PyTorchHyenaImporter instance - """ - instance = super().__new__(cls, path) - if model_config is None: - model_config = io.load_context(path, subpath="model.config") - instance.model_config = model_config - return instance - - def init(self): - """Initializes a new HyenaModel instance. - - Returns: - HyenaModel: Initialized model - """ - return self.MODEL_CLS(self.config, tokenizer=self.tokenizer) - - def get_source_model(self): - """Returns the source model.""" - model, _ = self.nemo_load(self) - return model - - def apply(self, output_path: Path, checkpoint_format: str = "torch_dist", **kwargs) -> Path: - """Applies the model conversion from PyTorch to NeMo format. - - Args: - output_path: Path to save the converted model - checkpoint_format: Format for saving checkpoints - **kwargs: Additional keyword arguments to pass to the nemo_setup and nemo_save methods - - Returns: - Path: Path to the saved NeMo model - """ - source = self.get_source_model() - - target = self.init() - trainer = self.nemo_setup(target, ckpt_async_save=False, save_ckpt_format=checkpoint_format, **kwargs) - source.to(self.config.params_dtype) - target.to(self.config.params_dtype) - self.convert_state(source, target) - self.nemo_save(output_path, trainer, **kwargs) - - logging.info(f"Converted Hyena model to Nemo, model saved to {output_path}") - - teardown(trainer, target) - del trainer, target - - return output_path - - def convert_state(self, source, target): - """Converts the state dictionary from source format to target format. - - Args: - source: Source model state - target: Target model - - Returns: - Result of applying state transforms - """ - mapping = {k: k for k in source.module.state_dict().keys()} - return io.apply_transforms( - source, - target, - mapping=mapping, - ) - - @property - def tokenizer(self): - """Gets the tokenizer for the model. - - Returns: - Tokenizer instance - """ - from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer - - tokenizer = get_nmt_tokenizer( - library=getattr(self.model_config, "tokenizer_library", "byte-level"), - ) - - return tokenizer - - @property - def config(self): - """Gets the model configuration. - - Returns: - HyenaConfig: Model configuration - """ - return self.model_config - - -@io.model_importer(HyenaModel, "pytorch") -class HyenaOptimizerRemover(_OptimizerRemoverBase, io.ModelConnector["HyenaModel", HyenaModel]): - """Removes the optimizer state from a nemo2 format model checkpoint.""" - - MODEL_CLS = HyenaModel - - -@io.model_importer(GPTModel, "pytorch") -class LlamaOptimizerRemover(_OptimizerRemoverBase, io.ModelConnector["GPTModel", GPTModel]): - """Removes the optimizer state from a nemo2 format model checkpoint.""" - - MODEL_CLS = GPTModel - - -@io.model_importer(MambaModel, "pytorch") -class MambaOptimizerRemover(_OptimizerRemoverBase, io.ModelConnector["MambaModel", MambaModel]): - """Removes the optimizer state from a nemo2 format model checkpoint.""" - - MODEL_CLS = MambaModel - - -def main(): - """Convert a PyTorch Evo2 model checkpoint to a NeMo model checkpoint.""" - args = parse_args() - if args.model_type == "hyena": - optimizer_remover = HyenaOptimizerRemover(args.model_path) - elif args.model_type == "mamba": - optimizer_remover = MambaOptimizerRemover(args.model_path) - elif args.model_type == "llama": - optimizer_remover = LlamaOptimizerRemover(args.model_path) - else: - raise ValueError(f"Invalid model type: {args.model_type}.") - optimizer_remover.apply(args.output_dir) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/nemo2_to_hf.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/nemo2_to_hf.py deleted file mode 100644 index 588b917399..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/nemo2_to_hf.py +++ /dev/null @@ -1,49 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse - -from nemo.collections.llm.gpt.model.llama import HFLlamaExporter -from nemo.collections.llm.gpt.model.nemotron import HFNemotronExporter - - -def parse_args(): - """Parse command-line arguments.""" - parser = argparse.ArgumentParser() - parser.add_argument( - "--model-type", type=str, required=True, help="Model type to convert.", choices=["hyena", "mamba", "llama"] - ) - parser.add_argument("--model-path", type=str, required=True, help="Model path to convert.") - parser.add_argument("--output-dir", type=str, required=True, help="Output directory path for the converted model.") - return parser.parse_args() - - -def main(): - """Convert a NeMo2 Evo2 model checkpoint to a Hugging Face model checkpoint.""" - args = parse_args() - model_type = args.model_type - if model_type == "hyena": - raise ValueError("Hyena models are not supported for conversion to Hugging Face yet.") - elif model_type == "mamba": - exporter = HFNemotronExporter(args.model_path) - elif model_type == "llama": - exporter = HFLlamaExporter(args.model_path) - else: - raise ValueError(f"Invalid model type: {model_type}.") - exporter.apply(args.output_dir) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/params.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/params.py deleted file mode 100644 index 818fde3f3d..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/params.py +++ /dev/null @@ -1,63 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from dataclasses import dataclass - - -@dataclass -class Param: - """A dataclass representing a parameter in a checkpoint. - - Attributes: - name (str): The name of the parameter in the checkpoint. - partition_dim (int): The dimension index that gets sharded. `None` for no sharding. - hidden_dim (int): The hidden dimension index. `None` for no hidden dimension. - """ - - name: str # Name of the parameter in the checkpoint. - partition_dim: int # The dimension index that gets sharded. `None` for no sharding. - hidden_dim: int # The hidden dimension index. `None` for no hidden dimension. - - -EVO2_PARAMS = [ - # Only layer_00. - Param("word_embeddings.weight", 0, 1), # torch.Size([64, 8192]) - Param("input_layernorm.weight", None, 0), # torch.Size([8192]) - Param("post_attention_layernorm.weight", None, 0), # torch.Size([8192]) - Param("pre_mlp_layernorm.weight", None, 0), # torch.Size([8192]) - Param("outer_mlp_layernorm.weight", None, 0), # torch.Size([8192]) - Param("mixer.dense_projection.weight", 0, 1), # torch.Size([3072, 8192]), - Param("mixer.hyena_proj_conv.short_conv_weight", 0, None), # torch.Size([3072, 3]), - Param("mixer.mixer.conv_bias", 0, None), # torch.Size([1024]), - Param("mixer.mixer.filter.decay", 0, None), # torch.Size([64, 8192]), - Param("mixer.mixer.filter.gamma", 0, None), # torch.Size([1024, 16]), - Param("mixer.mixer.filter.h", 0, None), # torch.Size([64, 8192]), - Param("mixer.mixer.filter.p", 0, None), # torch.Size([1024, 16]), - Param("mixer.mixer.filter.R", 0, None), # torch.Size([1024, 16]), - Param("mixer.mixer.filter.t", None, 0), # torch.Size([1, 1, seqlen]), - Param("mixer.mixer.short_conv.short_conv_weight", 0, None), # torch.Size([64, 1, 7]), - Param("mixer.rotary_emb.inv_freq", None, None), # torch.Size([64]) - Param("mixer.dense.weight", 1, 0), # torch.Size([8192, 2048]), - Param("mixer.dense.bias", None, 0), # torch.Size([8192]) - Param("mlp.w1.weight", 0, 1), # torch.Size([2736, 8192]), - Param("mlp.w2.weight", 0, 1), # torch.Size([2736, 8192]), - Param("mlp.w3.weight", 1, 0), # torch.Size([8192, 2736]), - # Only last layer. - Param("norm.weight", None, 0), # torch.Size([8192]), -] diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/zero3_conversion_lib.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/zero3_conversion_lib.py deleted file mode 100644 index a2d984aac0..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/checkpoint/zero3_conversion_lib.py +++ /dev/null @@ -1,699 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Helper utility for converting ZeRO3 and ZeRO2 checkpoints to PyTorch.""" - -import glob -import math -import os -import re -import time -from collections import OrderedDict -from dataclasses import dataclass -from typing import Any, Dict, Iterable, List, Set - -import psutil -import torch -from tqdm import tqdm - - -BUFFER_NAMES = "buffer_names" -DS_VERSION = "ds_version" -FP32_FLAT_GROUPS = "fp32_flat_groups" -FROZEN_PARAM_FRAGMENTS = "frozen_param_fragments" -FROZEN_PARAM_SHAPES = "frozen_param_shapes" -OPTIMIZER_STATE_DICT = "optimizer_state_dict" -PARAM_SHAPES = "param_shapes" -PARTITION_COUNT = "partition_count" -SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups" -ZERO_STAGE = "zero_stage" -EXTRA_STATE = "._extra_state" - - -@dataclass -class ZeroModelState: - """A dataclass representing the state of a ZeRO model. - - Attributes: - buffers (Dict): Buffers in the model state. - extra_states (Dict): Extra states in the model state. - param_shapes (List): Shapes of the parameters. - shared_params (List): Shared parameters in the model state. - ds_version (int): Version of the DeepSpeed checkpoint. - frozen_param_shapes (Dict): Shapes of the frozen parameters. - frozen_param_fragments (Dict): Fragments of the frozen parameters. - """ - - buffers: Dict - extra_states: Dict - param_shapes: List - shared_params: List - ds_version: int - frozen_param_shapes: Dict - frozen_param_fragments: Dict - - -debug = 0 -device = torch.device("cpu") - - -def profile_memory_decorator(func: Iterable): - """A decorator to profile memory usage of a function. - - Args: - func (Iterable): The function to be decorated. - - Returns: - wrapper: The decorated function with memory profiling. - """ - - def profile_memory(): - pid = os.getpid() - process = psutil.Process(pid) - memory_info = process.memory_info() - print_pid(f"{pid}: RSS = {memory_info.rss / 1024**2:.2f} MB") - - def wrapper(*args, **kwargs): - profile_memory() - func(*args, **kwargs) - profile_memory() - - return wrapper - - -def print_pid(msg: str): - """Prints the process ID along with a message. - - Args: - msg (str): The message to be printed. - """ - pid = os.getpid() - print(f"{pid=}:{msg}") - - -def atoi(text: str): - """Converts a string to an integer if it is a digit, otherwise returns the string. - - Args: - text (str): The text to be converted. - - Returns: - int or str: The converted integer or the original string. - """ - return int(text) if text.isdigit() else text - - -def natural_keys(text: str): - """Sorts a list in human order. - - Args: - text (str): The text to be sorted. - - Returns: - list: The sorted list. - - Note: - alist.sort(key=natural_keys) sorts in human order. - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - """ - return [atoi(c) for c in re.split(r"(\d+)", text)] - - -def get_checkpoint_files(checkpoint_dir: str, glob_pattern: str): - """Retrieves checkpoint files from a directory based on a glob pattern. - - Args: - checkpoint_dir (str): The directory to search for checkpoint files. - glob_pattern (str): The glob pattern to match files. - - Returns: - list: A sorted list of checkpoint files. - - Raises: - FileNotFoundError: If no files matching the glob pattern are found. - """ - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_model_files_by_rank(checkpoint_dir: str, rank: int): - """Retrieves model files for a specific rank from a checkpoint directory. - - Args: - checkpoint_dir (str): The directory to search for model files. - rank (int): The rank to search for. - - Returns: - list: A list of model files for the specified rank. - """ - return get_checkpoint_files(checkpoint_dir, f"*mp_rank_{rank:02}_model_states.pt") - - -def get_optim_files_by_rank(checkpoint_dir: str, rank: int): - """Retrieves optimizer files for a specific rank from a checkpoint directory. - - Args: - checkpoint_dir (str): The directory to search for optimizer files. - rank (int): The rank to search for. - - Returns: - list: A list of optimizer files for the specified rank. - """ - return get_checkpoint_files(checkpoint_dir, f"*mp_rank_{rank:02}_optim_states.pt") - - -def create_ds_output_path(rank: int): - """Creates the output path for a DeepSpeed checkpoint. - - Args: - rank (int): The rank to create the output path for. - - Returns: - str: The output path for the DeepSpeed checkpoint. - """ - return f"mp_rank_{rank:02}_model_states.pt" - - -def create_zero3_model_state_path(dp_rank: int, mp_rank: int): - """Creates the path for a ZeRO3 model state file. - - Args: - dp_rank (int): The data parallel rank. - mp_rank (int): The model parallel rank. - - Returns: - str: The path for the ZeRO3 model state file. - """ - return f"zero_pp_rank_{dp_rank}_mp_rank_{mp_rank:02}_model_states.pt" - - -def create_zero3_optim_state_path(dp_rank: int, mp_rank: int): - """Creates the path for a ZeRO3 optimizer state file. - - Args: - dp_rank (int): The data parallel rank. - mp_rank (int): The model parallel rank. - - Returns: - str: The path for the ZeRO3 optimizer state file. - """ - return f"bf16_zero_pp_rank_{dp_rank}_mp_rank_{mp_rank:02}_optim_states.pt" - - -def get_model_state_file(checkpoint_dir: str, zero_stage: int): - """Retrieves the model state file from a checkpoint directory based on the ZeRO stage. - - Args: - checkpoint_dir (str): The directory to search for the model state file. - zero_stage (int): The ZeRO stage to search for. - - Returns: - str: The path to the model state file. - - Raises: - FileNotFoundError: If the directory or model state file is not found. - ValueError: If the ZeRO stage is not supported. - """ - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - else: - raise ValueError(f"Unsupported zero stage {zero_stage}. Expected 1, 2, or 3") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def parse_model_states(files: Set[str]): - """Parses model state files and returns a list of ZeroModelState objects. - - Args: - files (Set[str]): A set of file paths to parse. - - Returns: - List[ZeroModelState]: A list of parsed ZeroModelState objects. - - Raises: - ValueError: If a file is not a model state checkpoint. - """ - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print_pid("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - - extra_states = {k: v for k, v in state_dict["module"].items() if k.endswith(EXTRA_STATE)} - - # collect parameters that are included in param_shapes - param_shapes = state_dict[PARAM_SHAPES] - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print_pid(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = ZeroModelState( - buffers=buffers, - extra_states=extra_states, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments, - ) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files: Set[str], ds_checkpoint_dir: str): - """Parses optimizer state files and returns the ZeRO stage, world size, and fp32 flat groups. - - Args: - files (Set[str]): A set of file paths to parse. - ds_checkpoint_dir (str): The directory containing the DeepSpeed checkpoint. - - Returns: - tuple: A tuple containing the ZeRO stage, world size, and fp32 flat groups. - - Raises: - ValueError: If a file is not a ZeRO checkpoint or if the number of files does not match the expected world size. - """ - total_files = len(files) - state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dict[OPTIMIZER_STATE_DICT] = { - FP32_FLAT_GROUPS: state_dict[OPTIMIZER_STATE_DICT][FP32_FLAT_GROUPS], - ZERO_STAGE: state_dict[OPTIMIZER_STATE_DICT][ZERO_STAGE], - PARTITION_COUNT: state_dict[OPTIMIZER_STATE_DICT][PARTITION_COUNT], - } - state_dicts.append(state_dict) - - if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint( - ds_checkpoint_dir: str, rank: int, exclude_frozen_parameters: bool = False -): - """Returns the fp32 state dictionary reconstructed from a ZeRO checkpoint. - - Args: - ds_checkpoint_dir (str): Path to the DeepSpeed checkpoint folder. - rank (int): The rank to process. - exclude_frozen_parameters (bool): Whether to exclude frozen parameters. - - Returns: - OrderedDict: The reconstructed fp32 state dictionary. - """ - print_pid(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - # optim_files = get_optim_files(ds_checkpoint_dir) - # zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - - optim_files = get_optim_files_by_rank(ds_checkpoint_dir, rank=rank) - optim_files_check = get_checkpoint_files(ds_checkpoint_dir, f"bf16*_{rank:02d}_optim_states.pt") - assert set(optim_files) == set(optim_files_check), f"Expected {optim_files_check}, got {optim_files}" - # check ordering as well - for f1, f2 in zip(optim_files, optim_files_check): - assert os.path.basename(f1) == os.path.basename(f2), ( - f"Found mismatching optim files for rank {rank}: {os.path.basename(f1)} != {os.path.basename(f2)}" - ) - print_pid(f" -> Optim files for rank {rank}: {len(optim_files)}") - - if debug: - print_pid(f"{optim_files=}") - - if os.environ.get("ZERO3_CONVERSION_DEBUG", "0") == "1": - breakpoint() - - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - assert len(optim_files) == world_size, f"Expected {world_size} optim files, got {len(optim_files)}" - if debug: - print_pid( - f" -> rank{rank} stage: {zero_stage} {world_size=} {len(fp32_flat_groups)=} {fp32_flat_groups.shape=}" - ) - - model_files = get_model_files_by_rank(ds_checkpoint_dir, rank=rank) - model_files_check = get_checkpoint_files(ds_checkpoint_dir, f"zero_*_mp_rank_{rank:02d}_model_states.pt") - assert set(model_files) == set(model_files_check), f"Expected {model_files_check}, got {model_files}" - - for f1, f2 in zip(model_files, model_files_check): - assert os.path.basename(f1) == os.path.basename(f2), ( - f"Found mismatching optim files for rank {rank}: {os.path.basename(f1)} != {os.path.basename(f2)}" - ) - print_pid(f" -> Model files for rank {rank}: {len(model_files)}") - - assert len(optim_files) == len(model_files), ( - f"Expected same number of optim and model files: {len(optim_files)} != {len(model_files)}" - ) - assert len(optim_files) > 0, f"Expected at least one optim file, got {len(optim_files)}" - - zero_model_states = parse_model_states(model_files) - print_pid(f"Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}") - - return _get_fp32_state_dict_from_zero3_checkpoint( - world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters - ) - - -def zero3_partitioned_param_info(unpartitioned_numel: int, world_size: int): - """Returns the partitioned and padding number of elements for a parameter. - - Args: - unpartitioned_numel (int): The number of elements in the unpartitioned parameter. - world_size (int): The world size. - - Returns: - tuple: A tuple containing the partitioned number of elements and the padding number of elements. - """ - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict: Dict[str, Any], world_size: int, zero_model_states: List[ZeroModelState]): - """Merges frozen parameters into the state dictionary. - - Args: - state_dict (Dict[str, Any]): The state dictionary to update. - world_size (int): The world size. - zero_model_states (List[ZeroModelState]): The list of ZeroModelState objects. - - Returns: - None - """ - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print_pid(f"rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}") - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print_pid(f"Frozen params: Have {avail_numel} numels to process.") - print_pid(f"Frozen params: Need {wanted_numel} numels in {wanted_params} params") - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print_pid( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print_pid(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -# @profile_memory_decorator -def _zero3_merge_trainable_params( - state_dict: Dict[str, Any], - world_size: int, - fp32_flat_groups: List[torch.Tensor], - zero_model_states: List[ZeroModelState], -): - """Merges trainable parameters into the state dictionary. - - Args: - state_dict (Dict[str, Any]): The state dictionary to update. - world_size (int): The world size. - fp32_flat_groups (List[torch.Tensor]): The list of fp32 flat groups. - zero_model_states (List[ZeroModelState]): The list of ZeroModelState objects. - - Returns: - None - """ - if os.environ.get("ZERO3_CONVERSION_DEBUG", "0") == "1": - breakpoint() - - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print_pid(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print_pid(f"Trainable params: Have {avail_numel} numels to process.") - print_pid(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - pid = os.getpid() - for name, shape in tqdm(param_shapes.items(), desc=f"{pid=}: Gathering Sharded Weights"): - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - # NOTE: partitioned_numel includes padding, padding applies if unpartitioned_numel is not divisible by world_size - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print_pid( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = ( - torch.cat(tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), 0) - .narrow(0, 0, unpartitioned_numel) - .view(shape) - ) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print_pid(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint( - world_size: int, - fp32_flat_groups: List[torch.Tensor], - zero_model_states: List[ZeroModelState], - exclude_frozen_parameters: bool, -): - """Returns the fp32 state dictionary reconstructed from a ZeRO3 checkpoint. - - Args: - world_size (int): The world size. - fp32_flat_groups (List[torch.Tensor]): The list of fp32 flat groups. - zero_model_states (List[ZeroModelState]): The list of ZeroModelState objects. - exclude_frozen_parameters (bool): Whether to exclude frozen parameters. - - Returns: - OrderedDict: The reconstructed fp32 state dictionary. - """ - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print_pid(f"added {len(buffers)} buffers") - - # extra state (e.g., fp8) - extra_states = zero_model_states[0].extra_states - state_dict.update(extra_states) - if debug: - print_pid(f"added {len(extra_states)} extra_states") - - if not exclude_frozen_parameters: - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_elapsed(t: float): - """Converts elapsed time in seconds to a formatted string. - - Args: - t (float): The elapsed time in seconds. - - Returns: - str: The formatted elapsed time as a string. - """ - minutes = t // 60 - seconds = t % 60 - if minutes > 0: - total_time = f"{minutes:.0f}min{seconds:.0f}s" - else: - total_time = f"{seconds:.1f}s" - return total_time - - -def process_single_rank( - rank: int, - ds_checkpoint_dir: str, - output_dir: str, - overwrite: bool = False, - exclude_frozen_parameters: bool = False, -): - """Processes a single rank to gather and save the state dictionary. - - Args: - rank (int): The rank to process. - ds_checkpoint_dir (str): Path to the DeepSpeed checkpoint folder. - output_dir (str): Directory to save the output. - overwrite (bool): Whether to overwrite existing files. Default is False. - exclude_frozen_parameters (bool): Whether to exclude frozen parameters. Default is False. - """ - print_pid(f"Gathering rank {rank} state_dict...") - - start = time.time() - output_path = os.path.join(output_dir, create_ds_output_path(rank)) - if os.path.exists(output_path) and not overwrite: - print_pid(f"Output path {output_path} exists, skipping") - return - - print_pid(f" -> Gathering data parallel partitions for mp rank {rank}...") - - if os.environ.get("ZERO3_CONVERSION_DEBUG", "0") == "1": - breakpoint() - - state_dict = _get_fp32_state_dict_from_zero_checkpoint( - ds_checkpoint_dir=ds_checkpoint_dir, rank=rank, exclude_frozen_parameters=exclude_frozen_parameters - ) - print_pid(f" -> Done processing rank {rank} state_dict, gathered {len(state_dict)} params") - - checkpoint = { - "module": state_dict, - "param_shapes": OrderedDict(), - "dp_world_size": 1, - } - - for param, value in state_dict.items(): - if isinstance(value, torch.Tensor): - checkpoint["param_shapes"][param] = value.shape - - print_pid(f" -> Saving mp rank {rank} checkpoint to {output_path}") - torch.save(checkpoint, f"{output_path}") - - total_time = get_elapsed(time.time() - start) - print_pid(f" -> rank {rank} took {total_time}") diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/config.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/config.py deleted file mode 100644 index 2e9d14fe20..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/config.py +++ /dev/null @@ -1,101 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path -from typing import Literal - -from nemo.collections.llm.gpt.model.megatron.hyena.hyena_utils import hyena_no_weight_decay_cond -from pydantic import BaseModel - - -def hyena_no_weight_decay_cond_with_embeddings(name, param): - """Condition for no weight decay for Hyena parameters with embeddings.""" - if "embedding" in name: - return True - return hyena_no_weight_decay_cond(name, param) - - -class Evo2TaxonomyLineage(BaseModel): - """Pydantic model class that defines the source lineage of a DNA sequence.""" - - domain: None | str = None - phylum: None | str = None - clazz: None | str = None - order: None | str = None - family: None | str = None - genus: None | str = None - species: None | str = None - - -class Evo2PreprocessingConfig(BaseModel): - """Pydantic model class specifying the configuration schema for a preprocessed IndexedDataset (.bin, .idx).""" - - # Paths - datapaths: list[Path] = [] - output_dir: None | Path = None - output_prefix: None | str = None - # Random Datasplit - train_split: float = 0.7 - valid_split: float = 0.2 - test_split: float = 0.1 - # Overwrite existing binaries. Otherwise, skip already preprocessed datasets. - overwrite: bool = False - # Raw Preprocessing Transforms - embed_reverse_complement: bool = False - random_reverse_complement: float = 0.0 - random_lineage_dropout: float = 0.0 - transcribe: None | Literal["transcribe", "back_transcribe"] = None - force_uppercase: bool = False - indexed_dataset_dtype: str = "uint8" - # Tokenization Transforms - append_eod: bool = True - enforce_sample_length: None | int = None - ftfy: bool = False - # NeMo Tokenizer Configuration - tokenizer_type: Literal[ - "Byte-Level", - "HuggingFace", - "SentencePiece", - "Regex", - "Megatron", - "Tiktoken", - ] = "Byte-Level" - vocab_file: None | Path = None - vocab_size: None | int = 512 - merges_file: None | Path = None - tokenizer_model_name: None | str = None - pretrained_tokenizer_model: None | str = None - special_tokens: None | dict[str, str] = {} - fast_hf_tokenizer: bool = False - # Compute Configuration - # NOTE: If preprocessing a large amount of short individual sequences (< 1000 bp), do NOT use - # multiprocessing (workers > 1) because sequence-level parallel IPC will dominate the preprocessing time! - workers: int = 1 - preproc_concurrency: int = 100000 - chunksize: int = 1 - # Filters - drop_empty_sequences: bool = False - nnn_filter: bool = False - # RNG - seed: None | int = None - # Evo2 Taxonomic Lineage Tags - # SeqID Sub-String Indexing: "ABC" will have taxonomy data from "A". - taxonomy_data: dict[str, Evo2TaxonomyLineage] = {} - # Periodicity of injecting phylogenetic lineage tags in the sequence prior to tokenization. - prompt_spacer_length: int = 131072 diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/callbacks.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/callbacks.py deleted file mode 100644 index fab38cb305..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/logging/callbacks.py +++ /dev/null @@ -1,114 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright The Lightning AI team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from lightning.pytorch import Callback -from megatron.core import parallel_state -from megatron.core.tensor_parallel.mappings import _gather_along_last_dim - - -class TEVCallback(Callback): - """Callback for logging TEV statistics before each optimizer step. - - This callback handles different parallelism strategies: - - Pipeline Parallelism: Only computes on first pipeline stage - - Tensor Parallelism: Gathers embedding shards across TP ranks - - Context Parallelism: Gathers across CP ranks - - Data Parallelism: Only logs on rank 0 of each model parallel group - """ - - @torch.no_grad() - def on_before_optimizer_step(self, trainer, pl_module, optimizer) -> None: - """Called before each optimizer step during training. - - This method calculates and logs Token Embedding Variance (TEV) statistics: - 1. Gets embedding parameter only on pipeline rank 0 (where embeddings live) - 2. Gathers embedding shards across tensor and context parallel ranks - 3. Calculates the token embedding variance (TEV) - 4. Logs the mean and standard deviation of TEV values only on data parallel rank 0 - - Args: - trainer: The Lightning trainer instance - pl_module: The current Lightning module being trained - optimizer: The optimizer being used - - Note: - The callback assumes embeddings live on pipeline rank 0, which is the standard - configuration in Megatron-LM. - """ - # Only compute on pipeline rank 0 where embeddings live - if not parallel_state.is_pipeline_first_stage(): - return - - # Get all named parameters from the model - named_params = dict(pl_module.named_parameters()) - - # Find all parameter keys containing 'embed' - embed_keys = [key for key in named_params.keys() if "embed" in key] - - # Validate we have exactly one embedding layer - if len(embed_keys) == 0: - raise ValueError("No embed keys found.") - if len(embed_keys) > 1: - raise ValueError("Multiple embed keys found.") - - # Get the embedding parameter - embed = named_params[embed_keys[0]] - - # If using tensor parallelism, gather embedding shards - if parallel_state.get_tensor_model_parallel_world_size() > 1: - embed = _gather_along_last_dim(embed, group=parallel_state.get_tensor_model_parallel_group()) - - # If using context parallelism, gather across context parallel ranks - if parallel_state.get_context_parallel_world_size() > 1: - world_size = parallel_state.get_context_parallel_world_size() - dim_size = list(embed.size()) - dim_size[0] = dim_size[0] * world_size - - output = torch.empty(dim_size, dtype=embed.dtype, device=torch.cuda.current_device()) - torch.distributed.all_gather_into_tensor( - output, embed.contiguous(), group=parallel_state.get_context_parallel_group() - ) - embed = output - - # Calculate token embedding variance (TEV) - # First center the embeddings by subtracting the mean - # Then calculate the mean squared deviation (variance) - # Finally take the square root to get standard deviation - tev = torch.sqrt(torch.mean(torch.pow(embed - embed.mean(dim=0), 2), dim=0)) - - # Calculate statistics of the TEV values - tev_mean = torch.mean(tev).item() - tev_sd = torch.std(tev).item() - - # Only log on data parallel rank 0 to avoid duplicate logging - if parallel_state.get_data_parallel_rank() == 0: - # Log the TEV statistics - pl_module.log("tev_mean", tev_mean, on_step=True, on_epoch=False, sync_dist=False) - pl_module.log("tev_sd", tev_sd, on_step=True, on_epoch=False, sync_dist=False) diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/__init__.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/embedding_variance.py b/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/embedding_variance.py deleted file mode 100644 index 0a3f38fa16..0000000000 --- a/sub-packages/bionemo-evo2/src/bionemo/evo2/utils/loss/embedding_variance.py +++ /dev/null @@ -1,192 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# This assumes you have a parallel_state module similar to Megatron-LM's. -# If not, you'll need to pass tp_world_size and tp_group directly if they -# are obtained differently. -# from megatron.core import parallel_state # Example import - - -import torch -import torch.distributed -from megatron.core import parallel_state -from torch.autograd import Function - - -class SquaredErrorTargetedVarianceLossFunction(Function): - """This loss function is used to calculate the loss based on the squared difference between the global mean of per-word variances and target.""" - - @staticmethod - def forward(ctx, we_weight: torch.Tensor, loss_coeff: float, var_target: float) -> torch.Tensor: - """Calculates a loss based on the squared difference between the global mean of per-word variances and target. - - Assumes vocab-parallel sharding for we_weight (dim 0 is sharded). - - Args: - ctx (torch.autograd.FunctionContext): Context object for backward pass. - we_weight (torch.Tensor): Local shard of embedding weights (V_local, H). - loss_coeff (float): Loss coefficient. - var_target (float): Targeted variance for the embedding weights. - - Returns: - torch.Tensor: Scalar loss value. - - weights - """ - if not we_weight.is_floating_point(): - we_weight = we_weight.float() - - V_local, H = we_weight.shape # V_local: words on this rank, H: embedding dim - - # Save dimensions for backward pass - ctx.H_embedding_dim = H - ctx.V_local_word_count = V_local - ctx.loss_coeff = loss_coeff - ctx.var_target = var_target - - # Handle H=0 edge case (embedding dimension is zero) - if H == 0: - ctx.is_H_dim_zero = True - # Mean variance is 0 if H=0. Loss is based on (0 - VAR_TARGET)^2. - loss_value = loss_coeff * (0.0 - var_target) ** 2 - final_loss_tensor = torch.tensor(loss_value, device=we_weight.device, dtype=we_weight.dtype) - # Save we_weight for shape, None for we_mean_per_word and V_final (as they are not well-defined or zero) - ctx.save_for_backward(we_weight, None, None) - return final_loss_tensor - ctx.is_H_dim_zero = False - - # Get TP info (assuming parallel_state is globally accessible) - # Ensure parallel_state is imported and available in the execution scope. - # from some_module import parallel_state # Make sure this is accessible - tp_world_size = parallel_state.get_tensor_model_parallel_world_size() or 1 - tp_group = parallel_state.get_tensor_model_parallel_group() # Can be None - ctx.tp_world_size_val = tp_world_size - - # 1. Per-word mean (across embedding dimension H) - # Shape: (V_local, 1) - we_mean_per_word = we_weight.mean(dim=1, keepdim=True) - - # 2. Per-word variance (across embedding dimension H) - # we_sq_diffs_per_word shape: (V_local, H) - we_sq_diffs_per_word = (we_weight - we_mean_per_word) ** 2 - # we_var_per_word_local shape: (V_local,) (biased variance) - we_var_per_word_local = we_sq_diffs_per_word.mean(dim=1, keepdim=False) - - # 3. Mean of these per-word variances *on this local rank* - # v_local_mean_of_vars shape: scalar tensor - v_local_mean_of_vars = torch.tensor(0.0, device=we_weight.device, dtype=we_weight.dtype) - if V_local > 0: # Avoid NaN from mean of empty tensor if V_local is 0 - v_local_mean_of_vars = we_var_per_word_local.mean(dim=0, keepdim=False) - - # 4. Globally average these local mean variances - # V_final_globally_avg_var is the V in the loss formula L = alpha*(V-T)^2 - V_final_globally_avg_var = v_local_mean_of_vars.clone() - if tp_world_size > 1: - # Computes V_final = (1/tp_world_size) * sum(v_local_mean_of_vars from each rank) - V_final_globally_avg_var /= tp_world_size - torch.distributed.all_reduce(V_final_globally_avg_var, group=tp_group, op=torch.distributed.ReduceOp.SUM) - - # 5. Calculate final loss: LOSS_COEFF * (V_final - VAR_TARGET)^2 - final_loss = loss_coeff * (V_final_globally_avg_var - var_target) ** 2 - - # Save tensors needed for gradient computation in backward - ctx.save_for_backward(we_weight, we_mean_per_word, V_final_globally_avg_var) - # Other necessary scalars (H, V_local, tp_world_size) are already on ctx. - - return final_loss - - @staticmethod - def backward(ctx, grad_output: torch.Tensor) -> tuple[torch.Tensor, None, None]: - """Backward pass for the SquaredErrorTargetedVarianceLossFunction.""" - we_weight, we_mean_per_word, V_final_saved = ctx.saved_tensors - - # Handle H=0 edge case (gradient is zero) - if getattr(ctx, "is_H_dim_zero", False): - return torch.zeros_like(we_weight), None, None # Grad for we_weight only - - H = ctx.H_embedding_dim - V_local = ctx.V_local_word_count - tp_world_size = ctx.tp_world_size_val - loss_coeff = ctx.loss_coeff - var_target = ctx.var_target - - # Handle V_local=0 edge case (no words on this rank, so no gradient) - if V_local == 0: - return torch.zeros_like(we_weight), None, None # Grad for we_weight only - - # Chain rule: d(TotalLoss)/dw = d(TotalLoss)/d(final_loss) * d(final_loss)/dw - # grad_output is d(TotalLoss)/d(final_loss) - - # 1. Calculate d(final_loss) / d(V_final_saved) - # final_loss = LOSS_COEFF * (V_final_saved - VAR_TARGET)**2 - # dL_dV_final is d(final_loss) / d(V_final_saved) - dL_dV_final = loss_coeff * 2.0 * (V_final_saved - var_target) - - # grad_V_final is d(TotalLoss) / d(V_final_saved) - grad_V_final = grad_output * dL_dV_final # Scalar - - # 2. Propagate gradient from V_final_saved to v_local_mean_of_vars (on current rank) - # V_final_saved = (1/tp_world_size) * sum_k(v_local_mean_of_vars_k) - # So, d(V_final_saved) / d(v_local_mean_of_vars_current_rank) = 1 / tp_world_size - # grad_v_local_mean is d(TotalLoss) / d(v_local_mean_of_vars_current_rank) - grad_v_local_mean = grad_V_final * (1.0 / tp_world_size) # Scalar - - # 3. Propagate gradient from v_local_mean_of_vars to we_var_per_word_local_i - # v_local_mean_of_vars = mean(we_var_per_word_local) = (1/V_local) * sum_i(we_var_per_word_local_i) - # So, d(v_local_mean_of_vars) / d(we_var_per_word_local_i) = 1 / V_local - # The coefficient to apply for the next step of chain rule: - # This is grad_v_local_mean scaled by (1/V_local) - # This represents d(TotalLoss)/d(we_var_per_word_local_i), assuming it's uniform. - coeff_for_per_word_var_grad = grad_v_local_mean * (1.0 / V_local) # Scalar - - # 4. Propagate gradient from we_var_per_word_local_i to we_weight_ik - # we_var_per_word_local_i = (1/H) * sum_k (we_weight_ik - we_mean_per_word_i[0])^2 - # d(we_var_per_word_local_i) / d(we_weight_ik) = (2/H) * (we_weight_ik - we_mean_per_word_i[0]) - # The term (we_weight_ik - we_mean_per_word_i[0]) is (we_weight - we_mean_per_word) - - # Combine coefficients for the (we_weight - we_mean_per_word) term: - # This is coeff_for_per_word_var_grad * (2/H) - final_scalar_coefficient = coeff_for_per_word_var_grad * (2.0 / H) - - grad_we_weight = final_scalar_coefficient * (we_weight - we_mean_per_word) - - # The forward function only takes we_weight as a tensor input requiring grad, the other two inputs - # are floats and do not get gradients. - return grad_we_weight, None, None - - -class SquaredErrorTargetedVarianceLoss(torch.nn.Module): - """Applies a loss that will encourage variance of some parameter to be close to var_target.""" - - def __init__(self, loss_coeff: float = 0.1, var_target: float = 1.0): - """Applies a loss that will encourage variance of some parameter to be close to var_target. - - Args: - loss_coeff: Loss coefficient. Defaults to 0.1. - var_target: targetted variance for the embedding weights. Defaults to 1.0. - """ - super().__init__() - self.loss_coeff = loss_coeff - self.var_target = var_target - - def forward(self, we_weight: torch.Tensor) -> torch.Tensor: - """Applies the loss to the embedding weights with the user requested loss coefficient and targeted variance. - - Args: - we_weight: Embedding weights. - - Returns: - torch.Tensor: Loss value. - """ - return SquaredErrorTargetedVarianceLossFunction.apply(we_weight, self.loss_coeff, self.var_target) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/conftest.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/conftest.py deleted file mode 100644 index be73d4a69f..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/conftest.py +++ /dev/null @@ -1,61 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# conftest.py -import gc - -import pytest -import torch - -from bionemo.testing.torch import get_device_and_memory_allocated - - -def pytest_sessionstart(session): - """Called at the start of the test session.""" - if torch.cuda.is_available(): - torch.cuda.reset_peak_memory_stats() - print( - f""" - sub-packages/bionemo-evo2/tests/bionemoe/evo2: Starting test session - {get_device_and_memory_allocated()} - """ - ) - - -def pytest_sessionfinish(session, exitstatus): - """Called at the end of the test session.""" - if torch.cuda.is_available(): - print( - f""" - sub-packages/bionemo-evo2/tests/bionemoe/evo2: Test session complete - {get_device_and_memory_allocated()} - """ - ) - - -@pytest.fixture(autouse=True) -def cleanup_after_test(): - """Clean up GPU memory after each test.""" - yield - if torch.cuda.is_available(): - torch.cuda.empty_cache() - gc.collect() - - -def pytest_addoption(parser: pytest.Parser): - """Pytest configuration for bionemo.evo2.run tests. Adds custom command line options for dataset paths.""" - parser.addoption("--dataset-dir", action="store", default=None, help="Path to preprocessed dataset directory") - parser.addoption("--training-config", action="store", default=None, help="Path to training data config YAML file") diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/cds_prompts.csv b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/cds_prompts.csv deleted file mode 100644 index bd8146634f..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/cds_prompts.csv +++ /dev/null @@ -1,4 +0,0 @@ -Sequence,Name,Percent -ATGAGTCAGAATACGCTGAAAGTTCATGATTTAAATGAAGATGCGGAATTTGATGAGAACGGAGTTGAGGTTTTTGACGAAAAGGCCTTAGTAGAACAGGAACCCAGTGATAACGATTTGGCCGAAGAGGAACTGTTATCGCAGGGAGCCACACAGCGTGTGTTGGACGCGACTCAGCTTTACCTTGGTGAGATTGGTTATTCACCACTGTTAACGGCCGAAGAAGAAGTTTATTTTGCGCGTCGCGCACTGCGTGGAGATGTCGCCTCTCGCCGCCGGATGATCGAGAGTAACTTGCGTCTGGTGGTAAAAATTGCCCGCCGTTATGGCAATCGTGGTCTGGCGTTGCTGGACCTTATCGAAGAGGGCAACCTGGGGCTGATCCGCGCGGTAGAGAAGTTTGACCCGGAACGTGGTTTCCGCTTCTCAACATACGCAACCTGGTGGATTCGCCAGACGATTGAACGGGCGATTATGAACCAAACCCGTACTATTCGTTTGCCGATTCACATCGTAAAGGAGCTGAACGTTTACCTGCGAACCGCACGTGAGTTGTCCCATAAGCTGGACCATGAACCAAGTGCGGAAGAGATCGCAGAGCAACTGGATAAGCCAGTTGATGACGTCAGCCGTATGCTTCGTCTTAACGAGCGCATTACCTCGGTAGACACCCCGCTGGGTGGTGATTCCGAAAAAGCGTTGCTGGACATCCTGGCCGATGAAAAAGAGAACGGTCCGGAAGATACCACGCAAGATGACGATATGAAGCAGAGCATCGTCAAATGGCTGTTCGAGCTGAACGCCAAACAGCGTGAAGTGCTGGCACGTCGATTCGGTTTGCTGGGGTACGAAGCGGCAACACTGGAAGATGTAGGTCGTGAAATTGGCCTCACCCGTGAACGTGTTCGCCAGATTCAGGTTGAAGGCCTGCGCCGTTTGCGCGAAATCCTGCAAACGCAGGGGCTGAATATCGAAGCGCTGTTCCGCGAGTAA,rpoS_NC_000913.3_cds_NP_417221.1_2701,100% -ATGAGCGACCTTGCGAGAGAAATTACACCGGTCAACATTGAGGAGGAGCTGAAGAGCTCCTATCTGGATTATGCGATGTCGGTCATTGTTGGCCGTGCGCTGCCGGATGTCCGAGATGGCCTGAAGCCGGTACACCGTCGCGTACTTTACGCCATGAACGTATTGGGCAATGACTGGAACAAAGCCTATAAAAAATCTGCCCGTGTCGTTGGTGACGTAATCGGTAAATACCATCCCCACGGCGATTCCGCAGTGTATGACACCATCGTTCGTATGGCGCAGCCATTCTCGCTGCGTTACATGCTGGTGGATGGTCAGGGTAACTTCGGTTCTATTGACGGCGACTCCGCGGCGGCAATGCGTTATACGGAGATCCGTCTGGCGAAAATCGCCCACGAACTGATGGCCGATCTCGAAAAAGAGACGGTGGATTTCGTGGATAACTATGACGGTACGGAAAAAATTCCGGACGTCATGCCGACCAAAATTCCGAATCTGCTGGTGAACGGTTCTTCCGGTATCGCAGTAGGTATGGCGACGAATATCCCGCCGCACAACCTGACGGAAGTGATTAACGGCTGCCTGGCGTATATCGACAACGAAGACATCAGCATTGAAGGGCTGATGGAACATATTCCGGGGCCGGACTTCCCGACCGCCGCGATCATCAACGGTCGTCGTGGTATCGAAGAAGCCTACCGCACCGGTCGTGGCAAAGTGTACATTCGCGCCCGCGCGGAAGTTGAAGCTGACGCCAAAACGGGCCGTGAAACCATCATCGTCCATGAAATTCCCTATCAGGTGAACAAAGCGCGCCTGATCGAGAAAATCGCCGAGCTGGTGAAAGATAAACGCGTGGAAGGCATCAGCGCGCTGCGTGACGAATCCGACAAAGACGGGATGCGCATCGTGATTGAAGTGAAACGCGATGCGGTGGGCGAGGTGGTGCTTAATAATCTCTACTCCCAGACCCAGCTACAGGTTTCCTTCGGTATTAACATGGTGGCGCTGCATCACGGCCAGCCGAAGATCATGAACCTGAAAGATATCATTTCAGCGTTCGTGCGCCACCGCCGTGAAGTGGTGACGCGTCGGACTATTTTTGAACTGCGTAAAGCCCGTGACCGTGCGCATATCCTTGAAGCTCTGGCGATTGCGCTGGCCAACATCGACCCGATTATCGAACTGATTCGCCGCGCGCCAACGCCGGCGGAAGCAAAAGCGGCGCTGATTTCGCGTCCGTGGGATCTGGGCAACGTTGCTGCGATGCTGGAGCGCGCTGGTGATGACGCCGCGCGTCCGGAATGGCTGGAGCCAGAATTTGGCGTGCGTGACGGTCAGTACTACCTGACTGAACAGCAGGCGCAGGCGATTCTGGATCTGCGTTTGCAGAAACTGACCGGCCTGGAGCATGAAAAACTGCTCGACGAATACAAAGAGCTGCTGGAGCAGATTGCTGAATTGCTGCACATTCTGGGCAGCGCCGATCGCCTGATGGAAGTGATCCGCGAAGAGATGGAGTTAATTCGCGATCAGTTCGGCGATGAGCGTCGTACCGAAATCACCGCCAACAGCGCCGATATTAATATCGAAGATCTGATTAGCCAGGAAGATGTTGTCGTGACGCTGTCTCACCAGGGTTACGTCAAATATCAACCGCTGACAGATTACGAAGCGCAACGTCGTGGTGGGAAAGGTAAATCTGCCGCGCGTATTAAAGAAGAAGACTTTATCGACCGCCTGCTGGTGGCTAACACCCATGACACCATCCTCTGCTTCTCCAGCCGGGGCCGTCTGTACTGGATGAAGGTCTATCAGCTGCCGGAAGCCAGCCGCGGCGCGCGCGGTCGTCCGATCGTCAACCTGCTGCCGCTGGAAGCCAACGAACGTATCACCGCGATTCTGCCGGTTCGTGAGTATGAAGAAGGCGTCAACGTCTTTATGGCGACCGCCAGCGGTACCGTGAAGAAAACGGCGCTGACCGAATTCAGCCGTCCGCGTTCCGCCGGTATTATCGCGGTGAACCTCAACGACGGCGACGAGCTGATTGGCGTTGACCTGACTTCTGGTTCTGACGAAGTCATGCTGTTCTCGGCCGCGGGTAAAGTGGTGCGCTTCAAAGAAGACGCCGTCCGTGCGATGGGGCGTACCGCGACCGGTGTGCGCGGTATTAAGCTGGCGGGAGACGATAAAGTCGTCTCTCTGATCATCCCACGCGGCGAAGGCGCTATTCTGACCGTAACGCAAAACGGCTACGGGAAGCGTACCGCAGCGGACGAGTACCCGACCAAGTCTCGTGCGACGCAGGGCGTTATCTCTATCAAAGTGACCGAGCGCAACGGTTCCGTTGTCGGTGCGGTACAGGTAGACGATTGCGACCAGATCATGATGATCACGGATGCCGGTACTCTGGTGCGTACCCGTGTGTCCGAGATCAGCGTAGTGGGACGTAATACCCAGGGCGTTATCCTTATCCGCACGGCGGAAGATGAAAACGTGGTGGGTCTGCAACGCGTTGCTGAACCGGTAGATGACGAAGAACTCGACGCTATCGACGGCAGCGTGGCGGAAGGGGATGAGGATATCGCCCCGGAAGCGGAAAGCGATGACGACGTTGCGGATGACGCTGACGAGTAA,gyrA_NC_003197.2_cds_NP_461214.1_2209,100% -ATGGACTCTATCGTCGGCGACGCAATTGACGAGGCCGAGGCCGAGGACATGGGGGATGAGTCGGCTCAGGTCGACGGCGCGGCCAACATCAACCGGTCCGGGACGATGACTGACGACGAACTGAAAGCGGTTCTCAAAGACCTCCAGACCAACATCACGGTGGTCGGGTGCGGCGGTGCCGGCGGTAACACCGTCAACCGGATGCACGAGGAGGGAATCAAGGGGGCGAAGCTCGTCGCCGCCAACACCGACGTGCAGCACCTCGTGGAAATCGGGGCCGATACGAAGATTCTCATGGGCGAGCAGAAGACCCAAGGCCGCGGCGCGGGCTCGCTCCCGCAGGTCGGTGAGGAGGCCGCCCTCGAATCCCAAGAGGAGATTTACGACGCCATCGAGGGCTCCGACATGGTGTTCGTCACCGCCGGACTCGGCGGCGGCACCGGCACCGGTTCGGCTCCCGTCGTCGCCAAGGCGGCCCGCGAGTCGGGCGCGCTCACCATCGCCATCGTCACGACGCCCTTTACGGCCGAAGGCGAGGTACGACGGACGAACGCCGAGGCCGGTCTCGAACGGCTCCGCGACGTGTCGGACACGGTCATCGTCGTCCCGAACGACCGCCTGCTCGACGCCGTGGGCAAACTCCCCGTCCGGCAGGCGTTCAAGGTCTCCGACGAGGTGCTGATGCGCTCGGTCAAGGGCATCACCGAACTCATCACTAAGCCCGGTCTCGTCAACCTCGACTTCGCCGACGTGAAGACCGTCATGGAGCGCGGCGGCGTCGCCATGATCGGTCTCGGCGAGTCCGACTCCGAGTCCAAGGCTCAGGAGTCCGTCAAGTCCGCCCTCCGCTCGCCGCTTCTTGACGTGGACATCTCCGGCGCGAACTCCGCGCTCGTCAACGTCACCGGCGGTTCGGACATGAGCATCGAGGAGGCCGAGGGCGTCGTCGAGGAGATTTACGACCGCATCGACCCCGACGCGCGCATCATCTGGGGGACCTCCGTCGACGACGAACTCGAAGGCATGATGCGGACGATGATCGTCGTCACCGGCGTCGAGTCGCCCCAAATCTACGGCCGCAACGGCGAGGCACAGGCGCACGCCGAAGAGCGTCTCGAAGACATCGACTACGTCGAGTAG,ftsZ_NC_013967.1_cds_WP_004044352.1_564,100% diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/prompts.csv b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/prompts.csv deleted file mode 100644 index 134a9a39a1..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/prompts.csv +++ /dev/null @@ -1,5 +0,0 @@ -Sequence,Name,Percent -GAATAGGAACAGCTCCGGTCTACAGCTCCCAGCGTGAGCGACGCAGAAGACGGTGATTTCTGCATTTCCATCTGAGGTACCGGGTTCATCTCACTAGGGAGTGCCAGACAGTGGGCGCAGGCCAGTGTGTGTGCGCACCGTGCGCGAGCCGAAGCAGGGCGAGGCATTGCCTCACCTGGGAAGCGCAAGGGGTCAGGGAGTTCCCTTTCCGAGTCAAAGAAAGGGGTGATGGACGCACCTGGAAAATCGGGTCACTCCCACCCGAATATTGCGCTTTTCAGACCGGCTTAAGAAACGGCGCACCACGAGACTATATCCCACACCTGGCTCAGAGGGTCCTACGCCCACGGAATCTCGCTGATTGCTAGCACAGCAGTCTGAGATCAAACTGCAAGGCGGCAACGAGGCTGGGGGAGGGGCGCCCGCCATTGCCCAGGCTTGCTTAGGTAAACAAAGCAGCCGGGAAGCTCGAACTGGGTGGAGCCCACCACAGCTCAAGGAGGCCTGCCTGCCTCTGTAGGCTCCACCTCTGGGGGCAGGGCACAGACAAACAAAAAGGCAGCAGTAACCTCTGCAGACTTAAGTGTCCCTGTCTGACAGCTTTGAAGAGAGCAGTGGTTCTCCCAGCACGCAGCTGGAGATCTGAGAACGGGCAGACTGCCTCCTCAAGTGGGTCCCTGACCCCTGACCCCCGAGCAGCCTAACTGGGAGGCACCCCCCAGCAGGGGCACACTGACACCTCACACGGCAGGGTATTCCAACAGACCTGCAGCTGAGGGTCCTGTCTGTTAGAAGGAAAACTAACAACCAGAAAGGACATCTACACCGAAAACCCATCTGTACATCACCATCATCAAAGACCAAAAGTAGATAAAACCACAAAGATGGGGAAAAAACAGAACAGAAAAACTGGAAACTCTAAAACGCAGAGCGCCTCTCCTCCTCCAAAGGAACGCAGTTCCTCACCAGCAACAGAACAAAGCTGGATGGAGAATGATTTTGACGAGCTGAGAGAAGAAGGCTTCAGACGATCAAATTACTCTGAGCTACGGGAGGACATTCAAACCAAAGGCAAAGAAGTTGAAAACTTTGAAAAAAATTTAGAAGAATGTATAACTAGAATAACCAATACAGAGAAGTGCTTAAAGGAGCTGATGGAGCTGAAAACCAAGGCTCGAGAACTACGTGAAGAATGCAGAAGCCTCAGGAGCCGATGCGATCAACTGGAAGAAAGGGTATCAGCAATGGAAGATGAAATGAATGAAATGAAGCGAGAAGGGAAGTTTAGAGAAAAAAGAATAAAAAGAAATGAGCAAAGCCTCCAAGAAATATGGGACTATGTGAAAAGACCAAATCTACGTCTGATTGGTGTACCTGAAAGTGATGTGGAGAATGGAACCAAGTTGGAAAACACTCTGCAGGATATTATCCAGGAGAACTTCCCCAATCTAGCAAGGCAGGCCAACGTTCAGATTCAGGAAATACAGAGAACGCCACAAAGATACTCCTCGAGAAGAGCAACTCCAAGACACATAATTGTCAGATTCACCAAAGTTGAAATGAAGGAAAAAATGTTAAGGGCAGCCAGAGAGAAAGGTCGGGTTACCCTCAAAGGGAAGCCTATCAGACTAACAGCAGATCTCTCGGCAGAAACCCTACAAGCCAGAAGAGAGTGGGGGCCAATATTCAACATTCTTAAAGAAAAGAATTTTCAACCCAGAATTTCATTTCCAGCCAAACTAAGCTTCATAAGTGAAGGAGAAAGAAAATACTTTACAGACAAGCAAATGCTGAGAGATTTTGTCACCACCAGGCCTACCCTAAAAGAGCTCCTGAAGGAAGCACTAAACATGGAAAGGAACAACCGGTACCAGCCGCTGCAAAATCATGCCAAAATGTAAAGACCATCGAGACTAGGAAGAAACTGCATCAACTAATGAGCAAAATCACCAGCTAACATCATAATGACAGGATCAAATTCACACATAACAATATTAACTTTAAATATAAATGGACTAAATTCTGCAATTAAAAGACACAGACTGGCAAGTTGGATAAAGAGTCAAGACCCATCAGTGTGCTGTATTCAGGAAACCCATCTCATGTGCAGAGACACACATAGGCTCAAAATAAAAGGATGGAGGAAGATCTACCAAGCAAATGGAAAACAAAAAAGGCAGGGGTTGCAATCCTAGTCTCTGATAAAACAGACTTTAAACCAACAAAGATCAAAAGAGACAAAGAAGGCCATTACATAATGGTAAAGGGATCAATTCAACAAGAGGAGCTAACTATCCTAAATATTTATGCACCCAATACAGGAGCACCCAGATTCATAAAGCAAGTCCTGAGTGACCTACAAAGAGACTTAGACTCCCACACATTAATAATGGGAGACTTTAACACCCCACTGTCAATATTAGACAGATCAACGAGACAGAAAGTCAACAAGGATACCCAGGAATTGAACTCAGCTCTGCACCAAGCAGACCTAATAGACATCTACAGAACTCTCCACCCCAAATCAACAGAATATACATTTTTTTCAGCACCACACCACACCTATTCCAAAATCGACCACATAGTTGGAAGTAAAGCTCTCCTCAGCAAATGTAAAAGAACAGAAATTATAACAAACTATCTCTCAGACCACAGTGCAATCAAACTAGAACTCAGGATTAAGAATCTCACTCAAAGCCGCTCAACTACATGGAAACTGAACAACCTGCTCCTGAATGACTACTGGGTACATAACGAAATGAAGGCAGAAATAAAGATGTTCTTTGAAACCAACGAGAACAAAGACACCACATACCAGAATCTCTGGGACGCATTCAAAGCAGTGTGTAGAGGGAAATTTATAGCACTAAATGCCTACAAGAGAAAGCAGGAAAGATCCAAAATTGACACCCTAACATCACAATTAAAAGAACTAGAAAAGCAAGAGCAAACACATTCAAAAGCTAGCAGAAGGCAAGAAATAACTAAAATCAGAGCAGAACTGAAGGAAATAGAGACACAAAAAACCCTTCAAAAAATCAATGAATCCAGGAGCTGGTTTTTTGAAAGGATCAACAAAATTGATAGACCGCTAGCAAGACTAATAAAGAAAAAAAGAGAGAAGAATCAAATAGACACAATAAAAAATGATAAAGGGGATATCACCACCGATCCCACAGAAATACAAACTACCATCAGAGAATACTACAAACACCTCTACGCAAATAAACTAGAAAATCTAGAAGAAATGGATACATTCCTCGACACATACACTCTCCCAAGACTAAAACAGGAAGAAGTTGAATCTCTGAATGGACCAATAACAGGCTCTGAAATTGTGGCAATAATCAATAGTTTACCAACCAAAAAGAGTCCAGGACCAGATGGATTCACAGCCGAATTCTACCAGAGGTACAAGGAGGAACTGGTACCATTCCTTCTGAAACTATTCCAATCAATAGAAAAAGAGGGAATCCTCCCTAACTCATTTTATGAGGCCAGCATCATTCTGATACCAAAGCCGGGCAGAGACACAACCAAAAAAGAGAATTTTAGACCAATATCCTTGATGAACATTGATGCAAAAATCCTCAATAAAATACTGGCAAACCGAATCCAGCAGCACATCAAAAAGCTTATCCACCATGATCAAGTGGGCTTCATCCCTGGGATGCAAGGCTGGTTCAATATACGCAAATCAATAAATGTAATCCAGCATATAAACAGAGCCAAAGACAAAAACCACATGATTATCTCAATAGATGCAGAAAAAGCCTTTGACAAAATTCAACAACCCTTCATGCTAAAAACTCTCAATAAATTAGGTATTGATGGGACGTATTTCAAAATAATAAGAGCTATCTATGACAAACCCACAGCCAATATCATACTGAATGGGCAAAAACTGGAAGCATTCCCTTTGAAAACTGGCACAAGACAGGGATGCCCTCTCTCACCGCTCCTATTCAACATAGTGTTGGAAGTTCTGGCCAGGGCAATCAGGCAGGAGAAGGAAATAAAGGGTATTCAATTAGGAAAAGAGGAAGTCAAATTGTCCCTGTTTGCAGACGACATGATTGTTTATCTAGAAAACCCCATTGTCTCAGCCCAAAATCTCCTTAAGCTGATAAGCAACTTCAGCAAAGTCTCAGGATACAAAATCAATGTACAAAAATCACAAGCATTCTTATACACCAACAACAGACAAACAGAGAGCCAAATCATGGGTGAACTCCCATTCACAATTGCTTCAAAGAGGATAAAATACCTAAGAATCCAACTTACAAGGGATGTGAAGGACCTCTTCAAGGAGAACTACAAACCACTGCTCAAGGAAATAAAAGAGGACACAAACAAATGGAAGAACATTCCATGCTCATGGGTAGGAAGAATCAATATCGTGAAAATGGCCATACTGCCCAAGGTAATTTACAGATTCAATGCCATCCCCATCAAGCTACCAATGACTTTCTTCACAGAATTGGAAAAAACTACTTTAAAGTTCATATGGAACCAAAAAAGAGCCCGCATTGCCAAGTCAATCCTAAGCCAAAAGAACAAAGCTGGAGGCATCACACTACCTTACTTCAAACTATACTACAAGGCTACAGTAACCAAAACAGCATGGTACTGGTACCAAAACAGAGATATAGATCAATGGAACAGAACAGAGCCCTCAGAAATAATGCCACATATCTACAACTATCTGATCTTTGACAAACCTGAGAAAAACAAGCAATGGGGAAAGGATTCCCTATTTAATAAATGGTGCTGGGAAAACTGGCTAGCCATATGTAGAAAGCTGAAACTGGATCTCTTCCTTACACCTTTATACAAAAATCAATTCAAGATGGATTAAAGATTTAAACGTTAAACCTAAAACCATAAAAACCCTAGAAGAAAACCTAGGCATTACCATTCAGGACATAGGCGTGGGCAAGGACTTCATGTCCAAAACACCAAAAGCAATGGCAACAAAAGACAAAATTGACAAATGGGATCTAATTAAACTAAAGAGCTTCTGCACAGCAAAAGAAACTACCATCAGAGTGAACAGGCAACCTACAACATGGGAGAAAATTTTCGCAACCTACTCATCTGACAAAGGGCTAATATCCAGAATCTACAATGAACTCAAACAAATTTACAAGAAAAAAACAAACAACCCCATCAAAAAGTGGGCGAAGGACATGAACAGACACTTCTCAAAAGAAGACATTTATGCAGCCAAAAAACACATGAAGAAATGCTCATCATCACTGGCCATCAGAGAAATGCAAATCAAAACCACTATGAGATATCATCTCACACCAGTTAGAATGGCAATCATTAAAAAGTCAGGAAACAACAGGTGCTGGAGAGGATGCGGAGAAATAGGAACACTTTTACACTGTTGGTGGGACTGTAAACTAGTTCAACCATTGTGGAAGTCAGTGTGGCGATTCCTCAGGGATCTAGAACTAGAAATACCATTTGACCCAGCCATCCCATTACTGGGTATATACCCAGAGGACTATAAATCATGCTGCTATAAAGACACATGCACTCGTATGTTTATTGCGGCACTATTCACAATAGCAAAAACTTGGAACCAACCCAAATGTCCAACAATGATAGACTGGATTAAGAAAATGTGGCACATATACACCATGGAATATTATGCAGCCATAAAAAATGATGAGTTCATATCCTTTGTAGGGACATGGATGAAATTGGAAACCATCATTCTCAGTAAACTATCGCAAGAACAAAAAACCAAACACCGCATATTCTCACTCATAGGTGGGAATTGAACAATGAGATCACATGGACACAGGAAGGGGAATATCACACTCTGGGGACTGTGGTGGGGTCGGGGGAGGGGGGAGGGGTAGCATTGGGAGATATACCTAATGCTAGATGACACATTAGTGGGTGCAGCGCACCAGCATGGCACATGTATACATATGTAACTAACCTGCACAATGTGCACATGTACCCTAAAACTTAGAGTATAATTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGATCACACCACTGCACTCCAGCCTGGGTGTCAAAGCGAGACCCTGTCTCAGGAAAAAAAAAAAAAAAAAAAAAAAAAGGCTTAATTGATTGAACCAGATTCGAGAAAACAGTGCTAAATTATAATTTTCTCAATACTGTAAATATTTTTCAATCTTCAGCTTCATTAACTTCTATAATTGAAATTATCCCAATTATTACCTGACATGTACTAAAATTCCCTAAAATGGATCTTGAGTAACATTTTCACAGTACGATAATTTTTCTCTCTGTATATATTTATATAGTCACATATATGCACATACATTATACAAGCATTACTTTTCTATAACTGTAAGGTCAGAATTTGAAGTTGTGTTTTCTTTATCTTTTTATTTCCAATACTTGGCATCAAGTTGATATTCATTAGAAGTAAAGGAGGAAGGAAATGAATAATCTTCAGATACTAAGAACATTACACTTAAATTATTATTAAATCTAATTTGCATTCTCATATATGGCTTAGCT,L1RE2,100% -GACACCATCGAATGGCGCAAAACCTTTCGCGGTATGGCATGATAGCGCCCGGAAGAGAGTCAATTCAGGGTGGTGAATGTGAAACCAGTAACGTTATACGATGTCGCAGAGTATGCCGGTGTCTCTTATCAGACCGTTTCCCGCGTGGTGAACCAGGCCAGCCACGTTTCTGCGAAAACGCGGGAAAAAGTGGAAGCGGCGATGGCGGAGCTGAATTACATTCCCAACCGCGTGGCACAACAACTGGCGGGCAAACAGTCGTTGCTGATTGGCGTTGCCACCTCCAGTCTGGCCCTGCACGCGCCGTCGCAAATTGTCGCGGCGATTAAATCTCGCGCCGATCAACTGGGTGCCAGCGTGGTGGTGTCGATGGTAGAACGAAGCGGCGTCGAAGCCTGTAAAGCGGCGGTGCACAATCTTCTCGCGCAACGCGTCAGTGGGCTGATCATTAACTATCCGCTGGATGACCAGGATGCCATTGCTGTGGAAGCTGCCTGCACTAATGTTCCGGCGTTATTTCTTGATGTCTCTGACCAGACACCCATCAACAGTATTATTTTCTCCCATGAAGACGGTACGCGACTGGGCGTGGAGCATCTGGTCGCATTGGGTCACCAGCAAATCGCGCTGTTAGCGGGCCCATTAAGTTCTGTCTCGGCGCGTCTGCGTCTGGCTGGCTGGCATAAATATCTCACTCGCAATCAAATTCAGCCGATAGCGGAACGGGAAGGCGACTGGAGTGCCATGTCCGGTTTTCAACAAACCATGCAAATGCTGAATGAGGGCATCGTTCCCACTGCGATGCTGGTTGCCAACGATCAGATGGCGCTGGGCGCAATGCGCGCCATTACCGAGTCCGGGCTGCGCGTTGGTGCGGATATCTCGGTAGTGGGATACGACGATACCGAAGACAGCTCATGTTATATCCCGCCGTCAACCACCATCAAACAGGATTTTCGCCTGCTGGGGCAAACCAGCGTGGACCGCTTGCTGCAACTCTCTCAGGGCCAGGCGGTGAAGGGCAATCAGCTGTTGCCCGTCTCACTGGTGAAAAGAAAAACCACCCTGGCGCCCAATACGCAAACCGCCTCTCCCCGCGCGTTGGCCGATTCATTAATGCAGCTGGCACGACAGGTTTCCCGACTGGAAAGCGGGCAGTGAGCGCAACGCAATTAATGTGAGTTAGCTCACTCATTAGGCACCCCAGGCTTTACACTTTATGCTTCCGGCTCGTATGTTGTGTGGAATTGTGAGCGGATAACAATTTCACACAGGAAACAGCTATGACCATGATTACGGATTCACTGGCCGTCGTTTTACAACGTCGTGACTGGGAAAACCCTGGCGTTACCCAACTTAATCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGCGCTGGAGTGCGATCTTCCTGAGGCCGATACTGTCGTCGTCCCCTCAAACTGGCAGATGCACGGTTACGATGCGCCCATCTACACCAACGTAACCTATCCCATTACGGTCAATCCGCCGTTTGTTCCCACGGAGAATCCGACGGGTTGTTACTCGCTCACATTTAATGTTGATGAAAGCTGGCTACAGGAAGGCCAGACGCGAATTATTTTTGATGGCGTTAACTCGGCGTTTCATCTGTGGTGCAACGGGCGCTGGGTCGGTTACGGCCAGGACAGTCGTTTGCCGTCTGAATTTGACCTGAGCGCATTTTTACGCGCCGGAGAAAACCGCCTCGCGGTGATGGTGCTGCGTTGGAGTGACGGCAGTTATCTGGAAGATCAGGATATGTGGCGGATGAGCGGCATTTTCCGTGACGTCTCGTTGCTGCATAAACCGACTACACAAATCAGCGATTTCCATGTTGCCACTCGCTTTAATGATGATTTCAGCCGCGCTGTACTGGAGGCTGAAGTTCAGATGTGCGGCGAGTTGCGTGACTACCTACGGGTAACAGTTTCTTTATGGCAGGGTGAAACGCAGGTCGCCAGCGGCACCGCGCCTTTCGGCGGTGAAATTATCGATGAGCGTGGTGGTTATGCCGATCGCGTCACACTACGTCTGAACGTCGAAAACCCGAAACTGTGGAGCGCCGAAATCCCGAATCTCTATCGTGCGGTGGTTGAACTGCACACCGCCGACGGCACGCTGATTGAAGCAGAAGCCTGCGATGTCGGTTTCCGCGAGGTGCGGATTGAAAATGGTCTGCTGCTGCTGAACGGCAAGCCGTTGCTGATTCGAGGCGTTAACCGTCACGAGCATCATCCTCTGCATGGTCAGGTCATGGATGAGCAGACGATGGTGCAGGATATCCTGCTGATGAAGCAGAACAACTTTAACGCCGTGCGCTGTTCGCATTATCCGAACCATCCGCTGTGGTACACGCTGTGCGACCGCTACGGCCTGTATGTGGTGGATGAAGCCAATATTGAAACCCACGGCATGGTGCCAATGAATCGTCTGACCGATGATCCGCGCTGGCTACCGGCGATGAGCGAACGCGTAACGCGAATGGTGCAGCGCGATCGTAATCACCCGAGTGTGATCATCTGGTCGCTGGGGAATGAATCAGGCCACGGCGCTAATCACGACGCGCTGTATCGCTGGATCAAATCTGTCGATCCTTCCCGCCCGGTGCAGTATGAAGGCGGCGGAGCCGACACCACGGCCACCGATATTATTTGCCCGATGTACGCGCGCGTGGATGAAGACCAGCCCTTCCCGGCTGTGCCGAAATGGTCCATCAAAAAATGGCTTTCGCTACCTGGAGAGACGCGCCCGCTGATCCTTTGCGAATACGCCCACGCGATGGGTAACAGTCTTGGCGGTTTCGCTAAATACTGGCAGGCGTTTCGTCAGTATCCCCGTTTACAGGGCGGCTTCGTCTGGGACTGGGTGGATCAGTCGCTGATTAAATATGATGAAAACGGCAACCCGTGGTCGGCTTACGGCGGTGATTTTGGCGATACGCCGAACGATCGCCAGTTCTGTATGAACGGTCTGGTCTTTGCCGACCGCACGCCGCATCCAGCGCTGACGGAAGCAAAACACCAGCAGCAGTTTTTCCAGTTCCGTTTATCCGGGCAAACCATCGAAGTGACCAGCGAATACCTGTTCCGTCATAGCGATAACGAGCTCCTGCACTGGATGGTGGCGCTGGATGGTAAGCCGCTGGCAAGCGGTGAAGTGCCTCTGGATGTCGCTCCACAAGGTAAACAGTTGATTGAACTGCCTGAACTACCGCAGCCGGAGAGCGCCGGGCAACTCTGGCTCACAGTACGCGTAGTGCAACCGAACGCGACCGCATGGTCAGAAGCCGGGCACATCAGCGCCTGGCAGCAGTGGCGTCTGGCGGAAAACCTCAGTGTGACGCTCCCCGCCGCGTCCCACGCCATCCCGCATCTGACCACCAGCGAAATGGATTTTTGCATCGAGCTGGGTAATAAGCGTTGGCAATTTAACCGCCAGTCAGGCTTTCTTTCACAGATGTGGATTGGCGATAAAAAACAACTGCTGACGCCGCTGCGCGATCAGTTCACCCGTGCACCGCTGGATAACGACATTGGCGTAAGTGAAGCGACCCGCATTGACCCTAACGCCTGGGTCGAACGCTGGAAGGCGGCGGGCCATTACCAGGCCGAAGCAGCGTTGTTGCAGTGCACGGCAGATACACTTGCTGATGCGGTGCTGATTACGACCGCTCACGCGTGGCAGCATCAGGGGAAAACCTTATTTATCAGCCGGAAAACCTACCGGATTGATGGTAGTGGTCAAATGGCGATTACCGTTGATGTTGAAGTGGCGAGCGATACACCGCATCCGGCGCGGATTGGCCTGAACTGCCAGCTGGCGCAGGTAGCAGAGCGGGTAAACTGGCTCGGATTAGGGCCGCAAGAAAACTATCCCGACCGCCTTACTGCCGCCTGTTTTGACCGCTGGGATCTGCCATTGTCAGACATGTATACCCCGTACGTCTTCCCGAGCGAAAACGGTCTGCGCTGCGGGACGCGCGAATTGAATTATGGCCCACACCAGTGGCGCGGCGACTTCCAGTTCAACATCAGCCGCTACAGTCAACAGCAACTGATGGAAACCAGCCATCGCCATCTGCTGCACGCGGAAGAAGGCACATGGCTGAATATCGACGGTTTCCATATGGGGATTGGTGGCGACGACTCCTGGAGCCCGTCAGTATCGGCGGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAAAAAACACAAACTTTTGGATGTTCGGTTTATTCTTTTTCTTTTACTTTTTTATCATGGGAGCCTACTTCCCGTTTTTCCCGATTTGGCTACATGACATCAACCATATCAGCAAAAGTGATACGGGTATTATTTTGCCGCTATTTCTCTGTTCTCGCTATTATTCCAACCGCTGTTTGGTCTGCTTTCTGACAAACTCGGGCTGCGCAAATACCTGCTGTGGATTATTACCGGCATGTTAGTGATGTTTGCGCCGTTCTTTATTTTTATCTTCGGGCCACTGTTACAATACAACATTTTAGTAGGATCGATTGTTGGTGGTATTTATCTAGGCTTTTGTTTTAACGCCGGTGCGCCAGCAGTAGAGGCATTTATTGAGAAAGTCAGCCGTCGCAGTAATTTCGAATTTGGTCGCGCGCGGATGTTTGGCTGTGTTGGCTGGGCGCTGTGTGCCTCGATTGTCGGCATCATGTTCACCATCAATAATCAGTTTGTTTTCTGGCTGGGCTCTGGCTGTGCACTCATCCTCGCCGTTTTACTCTTTTTCGCCAAAACGGATGCGCCCTCTTCTGCCACGGTTGCCAATGCGGTAGGTGCCAACCATTCGGCATTTAGCCTTAAGCTGGCACTGGAACTGTTCAGACAGCCAAAACTGTGGTTTTTGTCACTGTATGTTATTGGCGTTTCCTGCACCTACGATGTTTTTGACCAACAGTTTGCTAATTTCTTTACTTCGTTCTTTGCTACCGGTGAACAGGGTACGCGGGTATTTGGCTACGTAACGACAATGGGCGAATTACTTAACGCCTCGATTATGTTCTTTGCGCCACTGATCATTAATCGCATCGGTGGGAAAAACGCCCTGCTGCTGGCTGGCACTATTATGTCTGTACGTATTATTGGCTCATCGTTCGCCACCTCAGCGCTGGAAGTGGTTATTCTGAAAACGCTGCATATGTTTGAAGTACCGTTCCTGCTGGTGGGCTGCTTTAAATATATTACCAGCCAGTTTGAAGTGCGTTTTTCAGCGACGATTTATCTGGTCTGTTTCTGCTTCTTTAAGCAACTGGCGATGATTTTTATGTCTGTACTGGCGGGCAATATGTATGAAAGCATCGGTTTCCAGGGCGCTTATCTGGTGCTGGGTCTGGTGGCGCTGGGCTTCACCTTAATTTCCGTGTTCACGCTTAGCGGCCCCGGCCCGCTTTCCCTGCTGCGTCGTCAGGTGAATGAAGTCGCTTAAGCAATCAATGTCGGATGCGGCGCGACGCTTATCCGACCAACATATCATAACGGAGTGATCGCATTGAACATGCCAATGACCGAAAGAATAAGAGCAGGCAAGCTATTTACCGATATGTGCGAAGGCTTACCGGAAAAAAGACTTCGTGGGAAAACGTTAATGTATGAGTTTAATCACTCGCATCCATCAGAAGTTGAAAAAAGAGAAAGCCTGATTAAAGAAATGTTTGCCACGGTAGGGGAAAACGCCTGGGTAGAACCGCCTGTCTATTTCTCTTACGGTTCCAACATCCATATAGGCCGCAATTTTTATGCAAATTTCAATTTAACCATTGTCGATGACTACACGGTAACAATCGGTGATAACGTACTGATTGCACCCAACGTTACTCTTTCCGTTACGGGACACCCTGTACACCATGAATTGAGAAAAAACGGCGAGATGTACTCTTTTCCGATAACGATTGGCAATAACGTCTGGATCGGAAGTCATGTGGTTATTAATCCAGGCGTCACCATCGGGGATAATTCTGTTATTGGCGCGGGTAGTATCGTCACAAAAGACATTCCACCAAACGTCGTGGCGGCTGGCGTTCCTTGTCGGGTTATTCGCGAAATAAACGACCGGGATAAGCACTATTATTTCAAAGATTATAAAGTTGAATCGTCAGTTTAAATTATAAAAATTGCCTGATACGCTGCGCTTATCAGGCCTACAAGTTCAGCGATCTACATTAGCCGCATCCGGCATGAACAAAGCGCAGGAACAAGCGTCGCATCATGCCTCTTTGACCCACAGCTGCGGAAAACGTACTGGTGCAAAACGCAGGGTTATGATCATCAGCCCAACGACGCACAGCGCATGAAATGCCCAGTCCATCAGGTAATTGCCGCTGATACTACGCAGCACGCCAGAAAACCACGGGGCAAGCCCGGCGATGATAAAACCGATTCCCTGCATAAACGCCACCAGCTTGCCAGCAATAGCCGGTTGCACAGAGTGATCGAGCGCCAGCAGCAAACAGAGCGGAAACGCGCCGCCCAGACCTAACCCACACCATCGCCCACAATACCGGCAATTGCATCGGCAGCCAGATAAAGCCGCAGAACCCCACCAGTTGTAACACCAGCGCCAGCATTAACAGTTTGCGCCGATCCTGATGGCGAGCCATAGCAGGCATCAGCAAAGCTCCTGCGGCTTGCCCAAGCGTCATCAATGCCAGTAAGGAACCGCTGTACTGCGCGCTGGCACCAATCTCAATATAGAAAGCGGGTAACCAGGCAATCAGGCTGGCGTAACCGCCGTTAATCAGACCGAAGTAAACACCCAGCGTCCACGCGCGGGGAGTGAATACCACGCGAACCGGAGTGGTTGTTGTCTTGTGGGAAGAGGCGACCTCGCGGGCGCTTTGCCACCACCAGGCAAAGAGCGCAACAACGGCAGGCAGCGCCACCAGGCGAGTGTTTGATACCAGGTTTCGCTATGTTGAACTAACCAGGGCGT,ECOLAC,100% -GTTAATGTAGCTTAAAACAAAAGCAAGGTACTGAAAATACCTAGACGAGTATATCCAACTCCATAAACAACAAAGGTTTGGTCCCGGCCTTCTTATTGGTTACTAGGAAACTTATACATGCAAGTATCCGCCCGCCAGTGAATACGCCTTCTAAATCATCACTGATCAAAGAGAGCTGGCATCAAGCACACACCCCAAGTGTAGCTCATGACGTCTCGCCTAGCCACACCCCCACGGGAAACAGCAGTAGTAAATATTTAGCAATTAACAAAAGTTAGACTAAGTTATCCTAATAAAGGACTGGTCAATTTCGTGCCAGCAACCGCGGCCATACGATTAGTCCAAATTAATAAGCATACGGCGTAAAGCGTATTAGAAGAATTAAAAAAATAAAGTTAAATCTTATACTAGCTGTTTAAAGCTCAAGATAAGACATAAATAGCCTACGAAAGTGACTTTAATAATCCTAAACATACGATAGCTAGGGTACAAACTGAGATTAGATACCTCACTATGCCTAGCCCTAAACTTTGATAGCTACCTTTACAAAGCTATCCGCCAGAGAACTACTAGCCAGAGCTTAAAACTTAAAGGACTTGGCGGTGCTTTATATCCACCTAGGGGAGCCTGTCTCGTAACCGATGAACCCCGATACACCTTACCGTCACTTGCTAATTCAGTCCATATACCACCATCTTCAGCAAACCCCTATAGGGCACAAAAGTGAGCTTAATCATAACCCATGAAAAAGTTAGGCCGAGGTGTCGCCTACGTGACGGTCAAAGATGGGCTACATTTTCTATTATAGAATAGACAAACGGATACCACTCTGAAATGGGTGGTTGAAGGCGGATTTAGTAGTAAACTAAGAATAGAGAGCTTAATTGAACAAGGCCATGAAGCGCGTACACACCGCCCGTCACTCTCCTCAAGTACCTCCACATCAAACAATCATATTACAGATTTAAACAAATACAAGAGGAGACAAGTCGTAACAAGGTAAGCGTACTGGAAAGTGTGCTTGGGTAACTCAAAGTGTAGCTTAACAAAAAGCATCTGGCTTACACCTAGAAGACCTCATTCACAATGATCACTTTGAACTAAATCTAGCCCTACCAACCTTACACCCAACTCTCACACTACATTAAATTAAAACATTCATTTATCAAAAAGTATAGGAGATAGAAATTTCACTAAGGCGCAATAGAGATAGTACCGCAAGGGAATGATGAAAGATAATTTAATAGTAAAAAATAGCAAGGATTAACCCCTTTACCTTTTGCATAATGAATTAACTAGAAAAATCTGACAAAGAGAACTACAGCCAGAAACCCCGAAATCAGACGAGCTATCTGATAGTAATCCCCAGGATCAATTCATCTATGTGGCAAAATAGTGAAAAAACTTACAGATAGAGGTGAAATACCAATCGAGCCTGATGATAGCTGGTTGTCCAGAAATAGAATTTCAGTTCTACCTAAAACTTACCACAAAAACAAAATAATTCCAATGTAAGTTTTAGAGATATTCAAAAGGGGTACAGCTCTTTTGACCAAGGATACAACCTTGATTAGCGAGTAAATTCACCATTAATTTCATAGTTGGCTTGGAAGCAGCCATCAATTAAGAAAGCGTTAAAGCTCAACAACCAACCAAACTAAAAAATCCCAAGAATTAATTAATGATCTCCTAAACATAATACTGGACTAATCTATATAAATAGAAGAAATAATGTTAGTATAAGTAATAAGAAGTATTTCTCCCTGCATAAGCTTATATCAGATCGGATGCCCACTGATAGTTAACAATCAAATAATTAAATACAAAAATAAAACCTTTATTACACCAATTGTTAACCCAACACAGGCATGCTTAAGGGAAAGATTAAAAGAAGGAAAAGGAACTCGGCAAACATAAACCCCGCCTGTTTACCAAAAACATCACCTCGAGCATTACTAGTATTCGAGGCACTGCCTGCCCAGTGACCAAGTGTTAAACGGCCGCGGTACTCTGACCGTGCAAAGGTAGCATAATCATTTGTTCCTTAATTAGGGACTTGTATGAACGGCCACACGAGGGTTTAACTGTCTCTTTCCTCTAATCAATGAAATTGACCTTCTCGTGAAGAGGCGAGAATAAACATATAAGACGAGAAGACCCTATGGAGCTTAAATTAACTAATTTAATTGCTATCCTATAAATCTACAAGATACAACTAAACAGCATAATAAATTAACAATTTTGGTTGGGGTGACCTCGGAGAAGAAAAAAACCTCCGAACGATATTATAATTCAGACTTTACAAGTCAAGATTCACTAATCGCTTATTGACCCAATACTTGATCAACGGAACAAGTTACCCTAGGGATAACAGCGCAATCCTACTCTAGAGTCCCTATCGACAGCAGGGTTTACGACCTCGATGTTGGATCAGGACATCCTAATGGTGCAGCCGCTATTAAGGGTTCGTTTGTTCAACGATTAAAGTCCTACGTGATCTGAGTTCAGACCGGAGCAATCCAGGTCGGTTTCTATCTATAGTTTATTTATTCCAGTACGAAAGGACAGAAAAAATGAGGCCAATCTTACCAAGACGCCTTCAGCTAAATTTATGAATAAATCTCAATCTAGATAAGCTAAACCACCCAATCCAAGAACAGGATTTGTTAAGATAGCAAAAATTGGTTACTGCATAAAACTTAAGCTTTTACTTACGGAGGTTCAACTCCTCTTCTTAACAATGTTCTTGATTAATGTCCTAACAGTAACCTTGCCTATCCTTCTAGCAGTAGCCTTCCTCACCTTAGTTGAACGAAAGGCCTTAGGCTACATACAACTTCGTAAAGGCCCCAATGTAGTAGGACCCTACGGTCTTCTTCAACCTATCGCAGATGCAATCAAGCTATTTACCAAAGAACCCGTCTATCCACAAACCTCCTCAAAATTCCTATTTACCATTGCCCCAATTCTAGCCCTAACCTTAGCCCTAACTGTATGAGCTCCTCTTCCAATACCATATCCCCTAATTAACTTAAATCTAAGCCTATTATTTATTCTCGCAATATCAAGTCTGATAGTTTACTCCATCCTATGATCAGGCTGAGCATCAAATTCAAAATACGCCCTCATAGGAGCCCTACGAGCAGTAGCCCAAACCATCTCCTATGAAGTCTCTATAACAACTATTATCTTATCAATAGTACTAATAAATGGGTCCTTTACACTAACCGCCTTCGCTACAACACAAGAACATCTATGACTAATCTTTCCTATATGACCCCTAATAATAATATGATTTACATCAACTTTAGCAGAAACTAACCGAGCTCCATTTGATTTAACCGAAGGAGAATCAGAACTAGTTTCCGGCTTCAACGTCGAATATTCAGCTGGCCCTTTCGCCCTATTTTTTATAGCCGAATACGCTAACATTATCATAATAAATGCCCTTACTGTAATTCTATTTATAGGAACCTCTTGTAACCCCCAAATACCAGAAATTAGCACCATCAACTTTGTCGTAAAAACTATAATCTTAACTATCTGCTTTTTATGAGTACGAGCATCTTACCCACGATTCCGATACGACCAACTAATATATCTCCTCTGAAAAAATTTTCTTCCACTAACTCTAGCCCTATGCATGTGACACATCTCAATCTTGATTTCACTAGCATGCATCCCACCACAAGCATAGAAATATGTCTGACAAAAGAATTACTTTGATAGAGTAAATTATAGAGGTCTAAACCCTCTTATTTCTAGAATTGCAGGAATCGAACCTAAACTCGAGAATTCAAAAATCTCAGTGCTACCAATTACACCATATCCTACTAGTAAGGTCAGCTAAATTAAGCTATCGGGCCCATACCCCGAAAATGTCGGATTACACCCCTCCCATACTAATAAACCCACTAGCCCTTAGCCTAATCCTAACAACACTACTCGCAGGAACACTAATTACTATAATAAGCTCCCATTGACTAACAGCCTGAATAGGACTAGAAATGAACATACTTACTATAATTCCTATTCTAATAAAGACAACCAATCCACGATCCACAGAAGCCGCCACGAAATACTTTATAACCCAAGCCACAGCATCCATAATACTTATAATAGCTTTAACAATTAATCTAATATACTCAGGACAATGATCAATTATAAAAATAACCAACCCTGTAGCATCAAATGTAGCATTAATAGCTCTAATAACCAAACTAGGCTCAGCCCCATTCCACTTCTGAGTCCCAGAAGTAACGCAAGGAGTCGAACTCACATCAGGAATGATCTTATTAACCTGACAAAAATTAGCACCATTATCTCTATTATATCAAATAGCCACCTACACCAACACCAACCTAATCTATCTTTCTGGCCTACTTTCAATCCTAATTGGAGGATGAGGAGGCCTAAACCAAACACAACTACGAAAAATCTTAGCCTACTCTTCAATCTCCCATATAGGCTGAATACTCATTATTTTACCCTTTAACCCTACCCTTACTCTCCTGAACTTAGCCATTTACATTTTACTAACACTATCCATCTTTATAATCCTAGCAAATACCCTCACAACTTCAATATCATCCCTAACTCTAATATGAAACAAAACACCTGCAATAACCATTATACTTATAACTACCTTGCTATCCTTAGGAGGACTACCTCCACTTTCAGGATTCACACCTAAATGACTTATAATCCATGAACTGACTAAAAACAACAGTATTATCATACCACTAACCATAGCCATTATAACACTACTAAACATGTACTTCTACATACGACTAATCTACTACTCATCACTCACAATTCTTCCATCTACAAATAATATGAAAATAACCTGACAATTTACTAGTACTAAACACACAATAATATTACCGACTCTAATCACCCTATCTAACATACTACTTCCCCTAACCCCAATAATTTCAATGCTAGAATAGGAATTTAGGTTAAAACAGACCAAGAGCCTTCAAAGCCCTAAGTAAGTGCATTATACTTAACTCCTGAAATAAGGACTGCAAGATATCACCTTACATCAACTGAATGCAAATCAGACGCTTTAACTAAACTAAGCCCTTCTAGATTGGAGGGCTTCAATCCCACGAAAATCTTAGTTAACAACTAAACACCCTAGCTAACTGGCTTCAATCTACTTCTCCCGCCTTGAAGGAGGGGGAAAAAGGCGGGAGAAGCCCCGGCAGAATTGAAGCTGCTTTTTTCGAATTTGCAGTTCGACATGTTTACACTTTCAAGGCCTGGTAAAAAGAGATTACTCTCTGTGGTTAGATTTACAGTCTAATACTTACTCAGCCATTTTACCTATGTTTGCTAACCGCTGACTATATTCAACAAACCACAAAGACATTGGGACACTGTATCTATTATTTGGTGCTTGAGCTGGTATAGTAGGGACTGCTTTTAGTATCCTAATTCGGGCAGAACTAGGTCAACCAGGCTCTCTTCTTGGAGACGACCAAATCTATAATGTTATTGTCACAGCACACGCCTTTGTAATAATCTTCTTTATAGTTATGCCAATTATAATTGGAGGCTTTGGAAACTGATTAATTCCACTTATAATCGGAGCACCTGATATAGCTTTTCCTCGAATAAACAATATGAGTTTTTGACTACTACCTCCATCTTTCCTACTACTCTTGGCATCTTCCATAGTAGAAGCTGGGACAGGCACTGGTTGGACCGTATACCCTCCCCTGGCAGGAAACCTAGCCCATGCAGGAGCTTCTGTAGATTTAACTATTTTTTCACTTCACCTTGCAGGAGTATCCTCTATTTTAAGTGCAATTAATTTTATCACTACCATCATTAACATAAAACCTCCAGCTATGTCTCAATATCACATACCCTTATTTGTATGGTCCATTTTAGTTACAGCCGTCCTTCTTCTTCTATCCCTCCCAGTTCTAGCAGCAGGTATTACAATATTATTAACGGACCGCAACCTCAATACTACTTTCTTTGACCCTGCAGGAGGAGGAGACCCAATTCTATACCAACACCTATTCTGGTTTTTTGGACACCCTGAAGTCTATATTCTAATTCTCCCAGGATTTGGAATAGTTTCTCATATCGTTACGTACTACTCAGGGAAAAAAGAACCCTTCGGTTATATAGGAATAGTATGGGCTAT,NC_007596.2Mammuthusprimigeniusmitochondrion,30% -GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTCGTCTGGGGGGTATGCACGCGATAGCATTGCGAGACGCTGGAGCCGGAGCACCCTATGTCGCAGTATCTGTCTTTGATTCCTGCCTCATCCTATTATTTATCGCACCTACGTTCAATATTACAGGCGAACATACTTACTAAAGTGTGTTAATTAATTAATGCTTGTAGGACATAATAATAACAATTGAATGTCTGCACAGCCACTTTCCACACAGACATCATAACAAAAAATTTCCACCAAACCCCCCCTCCCCCGCTTCTGGCCACAGCACTTAAACACATCTCTGCCAAACCCCAAAAACAAAGAACCCTAACACCAGCCTAACCAGATTTCAAATTTTATCTTTTGGCGGTATGCACTTTTAACAGTCACCCCCCAACTAACACATTATTTTCCCCTCCCACTCCCATACTACTAATCTCATCAATACAACCCCCGCCCATCCTACCCAGCACACACACACCGCTGCTAACCCCATACCCCGAACCAACCAAACCCCAAAGACACCCCCCACAGTTTATGTAGCTTACCTCCTCAAAGCAATACACTGAAAATGTTTAGACGGGCTCACATCACCCCATAAACAAATAGGTTTGGTCCTAGCCTTTCTATTAGCTCTTAGTAAGATTACACATGCAAGCATCCCCGTTCCAGTGAGTTCACCCTCTAAATCACCACGATCAAAAGGAACAAGCATCAAGCACGCAGCAATGCAGCTCAAAACGCTTAGCCTAGCCACACCCCCACGGGAAACAGCAGTGATTAACCTTTAGCAATAAACGAAAGTTTAACTAAGCTATACTAACCCCAGGGTTGGTCAATTTCGTGCCAGCCACCGCGGTCACACGATTAACCCAAGTCAATAGAAGCCGGCGTAAAGAGTGTTTTAGATCACCCCCTCCCCAATAAAGCTAAAACTCACCTGAGTTGTAAAAAACTCCAGTTGACACAAAATAGACTACGAAAGTGGCTTTAACATATCTGAACACACAATAGCTAAGACCCAAACTGGGATTAGATACCCCACTATGCTTAGCCCTAAACCTCAACAGTTAAATCAACAAAACTGCTCGCCAGAACACTACGAGCCACAGCTTAAAACTCAAAGGACCTGGCGGTGCTTCATATCCCTCTAGAGGAGCCTGTTCTGTAATCGATAAACCCCGATCAACCTCACCACCTCTTGCTCAGCCTATATACCGCCATCTTCAGCAAACCCTGATGAAGGCTACAAAGTAAGCGCAAGTACCCACGTAAAGACGTTAGGTCAAGGTGTAGCCCATGAGGTGGCAAGAAATGGGCTACATTTTCTACCCCAGAAAACTACGATAGCCCTTATGAAACTTAAGGGTCGAAGGTGGATTTAGCAGTAAACTAAGAGTAGAGTGCTTAGTTGAACAGGGCCCTGAAGCGCGTACACACCGCCCGTCACCCTCCTCAAGTATACTTCAAAGGACATTTAACTAAAACCCCTACGCATTTATATAGAGGAGACAAGTCGTAACATGGTAAGTGTACTGGAAAGTGCACTTGGACGAACCAGAGTGTAGCTTAACACAAAGCACCCAACTTACACTTAGGAGATTTCAACTTAACTTGACCGCTCTGAGCTAAACCTAGCCCCAAACCCACTCCACCTTACTACCAGACAACCTTAGCCAAACCATTTACCCAAATAAAGTATAGGCGATAGAAATTGAAACCTGGCGCAATAGATATAGTACCGCAAGGGAAAGATGAAAAATTATAACCAAGCATAATATAGCAAGGACTAACCCCTATACCTTCTGCATAATGAATTAACTAGAAATAACTTTGCAAGGAGAGCCAAAGCTAAGACCCCCGAAACCAGACGAGCTACCTAAGAACAGCTAAAAGAGCACACCCGTCTATGTAGCAAAATAGTGGGAAGATTTATAGGTAGAGGCGACAAACCTACCGAGCCTGGTGATAGCTGGTTGTCCAAGATAGAATCTTAGTTCAACTTTAAATTTGCCCACAGAACCCTCTAAATCCCCTTGTAAATTTAACTGTTAGTCCAAAGAGGAACAGCTCTTTGGACACTAGGAAAAAACCTTGTAGAGAGAGTAAAAAATTTAACACCCATAGTAGGCCTAAAAGCAGCCACCAATTAAGAAAGCGTTCAAGCTCAACACCCACTACCTAAAAAATCCCAAACATATAACTGAACTCCTCACACCCAATTGGACCAATCTATCACCCTATAGAAGAACTAATGTTAGTATAAGTAACATGAAAACATTCTCCTCCGCATAAGCCTGCGTCAGATTAAAACACTGAACTGACAATTAACAGCCCAATATCTACAATCAACCAACAAGTCATTATTACCCTCACTGTCAACCCAACACAGGCATGCTCATAAGGAAAGGTTAAAAAAAGTAAAAGGAACTCGGCAAATCTTACCCCGCCTGTTTACCAAAAACATCACCTCTAGCATCACCAGTATTAGAGGCACCGCCTGCCCAGTGACACATGTTTAACGGCCGCGGTACCCTAACCGTGCAAAGGTAGCATAATCACTTGTTCCTTAAATAGGGACCTGTATGAATGGCTCCACGAGGGTTCAGCTGTCTCTTACTTTTAACCAGTGAAATTGACCTGCCCGTGAAGAGGCGGGCATAACACAGCAAGACGAGAAGACCCTATGGAGCTTTAATTTATTAATGCAAACAGTACCTAACAAACCCACAGGTCCTAAACTACCAAACCTGCATTAAAAATTTCGGTTGGGGCGACCTCGGAGCAGAACCCAACCTCCGAGCAGTACATGCTAAGACTTCACCAGTCAAAGCGAACTACTATACTCAATTGATCCAATAACTTGACCAACGGAACAAGTTACCCTAGGGATAACAGCGCAATCCTATTCTAGAGTCCATATCAACAATAGGGTTTACGACCTCGATGTTGGATCAGGACATCCCGATGGTGCAGCCGCTATTAAAGGTTCGTTTGTTCAACGATTAAAGTCCTACGTGATCTGAGTTCAGACCGGAGTAATCCAGGTCGGTTTCTATCTACNTTCAAATTCCTCCCTGTACGAAAGGACAAGAGAAATAAGGCCTACTTCACAAAGCGCCTTCCCCCGTAAATGATATCATCTCAACTTAGTATTATACCCACACCCACCCAAGAACAGGGTTTGTTAAGATGGCAGAGCCCGGTAATCGCATAAAACTTAAAACTTTACAGTCAGAGGTTCAATTCCTCTTCTTAACAACATACCCATGGCCAACCTCCTACTCCTCATTGTACCCATTCTAATCGCAATGGCATTCCTAATGCTTACCGAACGAAAAATTCTAGGCTATATACAACTACGCAAAGGCCCCAACGTTGTAGGCCCCTACGGGCTACTACAACCCTTCGCTGACGCCATAAAACTCTTCACCAAAGAGCCCCTAAAACCCGCCACATCTACCATCACCCTCTACATCACCGCCCCGACCTTAGCTCTCACCATCGCTCTTCTACTATGAACCCCCCTCCCCATACCCAACCCCCTGGTCAACCTCAACCTAGGCCTCCTATTTATTCTAGCCACCTCTAGCCTAGCCGTTTACTCAATCCTCTGATCAGGGTGAGCATCAAACTCAAACTACGCCCTGATCGGCGCACTGCGAGCAGTAGCCCAAACAATCTCATATGAAGTCACCCTAGCCATCATTCTACTATCAACATTACTAATAAGTGGCTCCTTTAACCTCTCCACCCTTATCACAACACAAGAACACCTCTGATTACTCCTGCCATCATGACCCTTGGCCATAATATGATTTATCTCCACACTAGCAGAGACCAACCGAACCCCCTTCGACCTTGCCGAAGGGGAGTCCGAACTAGTCTCAGGCTTCAACATCGAATACGCCGCAGGCCCCTTCGCCCTATTCTTCATAGCCGAATACACAAACATTATTATAATAAACACCCTCACCACTACAATCTTCCTAGGAACAACATATGACGCACTCTCCCCTGAACTCTACACAACATATTTTGTCACCAAGACCCTACTTCTAACCTCCCTGTTCTTATGAATTCGAACAGCATACCCCCGATTCCGCTACGACCAACTCATACACCTCCTATGAAAAAACTTCCTACCACTCACCCTAGCATTACTTATATGATATGTCTCCATACCCATTACAATCTCCAGCATTCCCCCTCAAACCTAAGAAATATGTCTGATAAAAGAGTTACTTTGATAGAGTAAATAATAGGAGCTTAAACCCCCTTATTTCTAGGACTATGAGAATCGAACCCATCCCTGAGAATCCAAAATTCTCCGTGCCACCTATCACACCCCATCCTAAAGTAAGGTCAGCTAAATAAGCTATCGGGCCCATACCCCGAAAATGTTGGTTATACCCTTCCCGTACTAATTAATCCCCTGGCCCAACCCGTCATCTACTCTACCATCTTTGCAGGCACACTCATCACAGCGCTAAGCTCGCACTGATTTTTTACCTGAGTAGGCCTAGAAATAAACATGCTAGCTTTTATTCCAGTTCTAACCAAAAAAATAAACCCTCGTTCCACAGAAGCTGCCATCAAGTATTTCCTCACGCAAGCAACCGCATCCATAATCCTTCTAATAGCTATCCTCTTCAACAATATACTCTCCGGACAATGAACCATAACCAATACTACCAATCAATACTCATCATTAATAATCATAATAGCTATAGCAATAAAACTAGGAATAGCCCCCTTTCACTTCTGAGTCCCAGAGGTTACCCAAGGCACCCCTCTGACATCCGGCCTGCTTCTTCTCACATGACAAAAACTAGCCCCCATCTCAATCATATACCAAATCTCTCCCTCACTAAACGTAAGCCTTCTCCTCACTCTCTCAATCTTATCCATCATAGCAGGCAGTTGAGGTGGATTAAACCAAACCCAGCTACGCAAAATCTTAGCATACTCCTCAATTACCCACATAGGATGAATAATAGCAGTTCTACCGTACAACCCTAACATAACCATTCTTAATTTAACTATTTATATTATCCTAACTACTACCGCATTCCTACTACTCAACTTAAACTCCAGCACCACGACCCTACTACTATCTCGCACCTGAAACAAGCTAACATGACTAACACCCTTAATTCCATCCACCCTCCTCTCCCTAGGAGGCCTGCCCCCGCTAACCGGCTTTTTGCCCAAATGGGCCATTATCGAAGAATTCACAAAAAACAATAGCCTCATCATCCCCACCATCATAGCCACCATCACCCTCCTTAACCTCTACTTCTACCTACGCCTAATCTACTCCACCTCAATCACACTACTCCCCATATCTAACAACGTAAAAATAAAATGACAGTTTGAACATACAAAACCCACCCCATTCCTCCCCACACTCATCGCCCTTACCACGCTACTCCTACCTATCTCCCCTTTTATACTAATAATCTTATAGAAATTTAGGTTAAATACAGACCAAGAGCCTTCAAAGCCCTCAGTAAGTTGCAATACTTAATTTCTGTAACAGCTAAGGACTGCAAAACCCCACTCTGCATCAACTGAACGCAAATCAGCCACTTTAATTAAGCTAAGCCCTTACTAGACCAATGGGACTTAAACCCACAAACACTTAGTTAACAGCTAAGCACCCTAATCAACTGGCTTCAATCTACTTCTCCCGCCGCCGGGAAAAAAGGCGGGAGAAGCCCCGGCAGGTTTGAAGCTGCTTCTTCGAATTTGCAATTCAATATGAAAATCACCTCGGAGCTGGTAAAAAGAGGCCTAACCCCTGTCTTTAGATTTACAGTCCAATGCTTCACTCAGCCATTTTACCTCACCCCCACTGATGTTCGCCGACCGTTGACTATTCTCTACAAACCACAAAGACATTGGAACACTATACCTATTATTCGGCGCATGAGCTGGAGTCCTAGGCACAGCTCTAAGCCTCCTTATTCGAGCCGAGCTGGGCCAGCCAGGCAACCTTCTAGGTAACGACCACATCTACAACGTTATCGTCACAGCCCATGCATTTGTAATAATCTTCTTCATAGTAATACCCATCATAATCGGAGGCTTTGGCAACTGACTAGTTCCCCTAATAATCGGTGCCCCCGATATGGCGTTTCCCCGCATAAACAACATAAGCTTCTGACTCTTACCTCCCTCTCTCCTACTCCTGCTCGCATCTGCTATAGTGGAGGCCGGAGCAGGAACAGGTTGAACAGTCTACCCTCCCTTAGCAGGGAACTACTCCCACCCTGGAGCCTCCGTAGACCTAACCATCTTCTCCTTACACCTAGCAGGTGTCTCCTCTATCTTAGGGGCCATCAATTTCATCACAACAATTATCAATATAAAACCCCCTGCCATAACCCAATACCAAACGCCCCTCTTCGTCTGATCCGTCCTAATCACAGCAGTCCTACTTCTCCTATCTCTCCCAGTCCTAGCTGCTGGCATCACTATACTACTAACAGACCGCAACCTCAACACCACCTTCTTCGACCCCGCCGGAGGAGGAGACCCCATTCTATACCAACACCTATTCTGATTTTTCGGTCACCCTGAAGTTTATATTCTTATCCTACCAGGCTTCGGAATAATCTCCCATATTGTAACTTACTACTCCGGAAAAAAAGAACCATTTGGATACATAGGTATGGTCTGAGCTATGATATCAATTGGCTTCCTAGGGTTTATCGTGTGAGCACACCATATATTTACAGTAGGAATAGACGTAGACACACGAGCATATTTCACCTCCGCTACCATAATCATCGCTATCCCCACCGGCGTCAAAGTATTTAGCTGACTCGCCACACTCCACGGAAGCAATATGAAATGATCTGCTGCAGTGCTCTGAGCCCTAGGATTCATCTTTCTTTTCACCGTAGGTGGCCTGACTGGCATTGTATTAGCAAACTCATCACTAGACATCGTACTACACGACACGTACTACGTTGTAGCCCACTTCCACTATGTCCTATCAATAGGAGCTGTATTTGCCATCATAGGAGGCTTCATTCACTGATTTCCCCTATTCTCAGGCTACACCCTAGACCAAACCTACGCCAAAATCCATTTCACTATCATATTCATCGGCGTAAATCTAACTTTCTTCCCACAACACTTTCTCGGCCTATCCGGAATGCCCCGACGTTACTCGGACTACCCCGATGCATACACCACATGAAACATCCTATCATCTGTAGGCTCATTCATTTCTCTAACAGCAGTAATATTAATAATTTTCATGATTTGAGAAGCCTTCGCTTCGAAGCGAAAAGTCCTAATAGTAGAAGAACCCTCCATAAACCTGGAGTGACTATATGGATGCCCCCCACCCTACCACACATTCGAAGAACCCGTATACATAAAATCTAGACAAAAAAGGAAGGAATCGAACCCCCCAAAGCTGGTTTCAAGCCAACCCCATGGCCTCCATGACTTTTTCAAAAAGGTATTAGAAAAACCATTTCATAACTTTGTCAAAGTTAAATTATAGGCTAAATCCTATATATCTTAATGGCACATGCAGCGCAAGTAGGTCTACAAG,NC_012920.1_homosapiens_mitochondrion,50% diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_fasta_dataset.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_fasta_dataset.py deleted file mode 100644 index b18d011a61..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_fasta_dataset.py +++ /dev/null @@ -1,89 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path - -import pytest -import torch -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer - -from bionemo.evo2.data.fasta_dataset import SimpleFastaDataset -from bionemo.testing.data.fasta import create_fasta_file - - -@pytest.fixture -def fasta_dataset(tmp_path: Path) -> None: - """Fixture to create a SimpleFastaDataset for testing.""" - test_fasta_file_path = create_fasta_file(tmp_path / "test.fasta", num_sequences=10, sequence_length=100) - tokenizer = get_nmt_tokenizer("byte-level") - return SimpleFastaDataset(test_fasta_file_path, tokenizer) - - -def test_simple_fasta_dataset_initialization(fasta_dataset: SimpleFastaDataset) -> None: - """Test initialization of SimpleFastaDataset.""" - # Check dataset length - assert len(fasta_dataset) == 10, "Dataset length should match number of sequences" - - # Check seqids - assert len(fasta_dataset.seqids) == 10, "Seqids should match number of sequences" - - -def test_simple_fasta_dataset_getitem(fasta_dataset: SimpleFastaDataset) -> None: - """Test __getitem__ method of SimpleFastaDataset.""" - # Test first item - item = fasta_dataset[0] - - # Check keys - expected_keys = {"tokens", "position_ids", "seq_idx", "loss_mask"} - assert set(item.keys()) == expected_keys, "Item should have correct keys" - - # Check token type - assert isinstance(item["tokens"], torch.Tensor), "Tokens should be a torch.Tensor" - assert item["tokens"].dtype == torch.long, "Tokens should be long dtype" - - # Check position_ids - assert isinstance(item["position_ids"], torch.Tensor), "Position IDs should be a torch.Tensor" - assert item["position_ids"].dtype == torch.long, "Position IDs should be long dtype" - - # Validate sequence index - assert isinstance(item["seq_idx"], torch.Tensor), "Seq_idx should be a torch.Tensor" - assert item["seq_idx"].item() == 0, "First item should have seq_idx 0" - - -def test_simple_fasta_dataset_write_idx_map(fasta_dataset: SimpleFastaDataset, tmp_path: Path) -> None: - """Test write_idx_map method of SimpleFastaDataset.""" - # Create output directory - output_dir = tmp_path / "output" - output_dir.mkdir(parents=True, exist_ok=True) - - # Write index map - fasta_dataset.write_idx_map(output_dir) - - # Check if file was created - idx_map_file = output_dir / "seq_idx_map.json" - assert idx_map_file.exists(), "seq_idx_map.json should be created" - - import json - - with open(idx_map_file, "r") as f: - idx_map = json.load(f) - - assert len(idx_map) == 10, "Index map should have an entry for each sequence" - for idx, seqid in enumerate(fasta_dataset.seqids): - assert idx_map[seqid] == idx, f"Index for {seqid} should match" diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_preprocess.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_preprocess.py deleted file mode 100644 index 1a412e2525..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_preprocess.py +++ /dev/null @@ -1,89 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path - -from bionemo.evo2.data.preprocess import Evo2Preprocessor -from bionemo.evo2.utils.config import Evo2PreprocessingConfig -from bionemo.testing.data.fasta import create_fasta_file - - -def create_preprocessing_config( - tmp_path: Path, sample_data_path: Path, output_prefix: str = "test_alu_uint8_distinct" -) -> Evo2PreprocessingConfig: - """Creates a preprocessing configuration with test settings.""" - config_dict = { - "datapaths": [str(sample_data_path)], - "output_dir": str(tmp_path), - "output_prefix": output_prefix, - "train_split": 0.6, - "valid_split": 0.2, - "test_split": 0.2, - "overwrite": True, - "embed_reverse_complement": True, - "random_reverse_complement": 0.0, - "random_lineage_dropout": 0.0, - "include_sequence_id": False, - "transcribe": "back_transcribe", - "indexed_dataset_dtype": "uint8", - "tokenizer_type": "Byte-Level", - "vocab_file": None, - "vocab_size": None, - "merges_file": None, - "pretrained_tokenizer_model": None, - "special_tokens": None, - "fast_hf_tokenizer": True, - "append_eod": True, - "enforce_sample_length": None, - "ftfy": False, - "workers": 1, - "preproc_concurrency": 100000, - "chunksize": 25, - "drop_empty_sequences": True, - "nnn_filter": True, - } - return Evo2PreprocessingConfig(**config_dict) - - -def test_preprocessor_creates_expected_files(tmp_path: Path) -> None: - """Verifies that preprocessing creates all expected output files.""" - test_fasta_file_path = create_fasta_file(tmp_path / "test.fasta", num_sequences=10, sequence_length=10000) - output_dir = tmp_path / "processed_data" - output_dir.mkdir(parents=True, exist_ok=True) - preprocessing_config = create_preprocessing_config( - tmp_path / "processed_data", test_fasta_file_path, output_prefix="test_alu_uint8_distinct" - ) - preprocessor = Evo2Preprocessor(preprocessing_config) - preprocessor.preprocess_offline(preprocessing_config) - - # Check that all expected files exist - output_dir = Path(preprocessing_config.output_dir) - prefix = preprocessing_config.output_prefix - expected_files = [ - output_dir / Path(prefix + "_byte-level_" + split + suffix) - for suffix in [".bin", ".idx"] - for split in ["train", "val", "test"] - ] - for file_path in expected_files: - assert file_path.exists(), f"Expected file {file_path} was not created" - assert file_path.stat().st_size > 0, f"File {file_path} is empty" - - # Check that no unexpected files were created - all_files = [f for f in output_dir.iterdir() if f.is_file()] - assert set(all_files) == set(expected_files), "Unexpected files were created" diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_sharded_eden_dataset.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_sharded_eden_dataset.py deleted file mode 100644 index 2986c977ad..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_sharded_eden_dataset.py +++ /dev/null @@ -1,545 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sqlite3 -import tempfile -from pathlib import Path -from unittest.mock import Mock, patch - -import numpy as np -import polars as pol -import pytest -import torch - -from bionemo.evo2.data.sharded_eden_dataloader import ( - ShardedEdenDataModule, - ShardedEdenDataset, - extract_sample_id, - precompute_window_database, -) - - -@pytest.fixture -def temp_dir(): - """Create a temporary directory for test data.""" - with tempfile.TemporaryDirectory() as tmp_dir: - yield tmp_dir - - -@pytest.fixture -def sample_sequences(): - """Create dummy sample sequences for testing.""" - return [ - ("BCR__ECT-SAMPLE1__CT1-1", "ATCGATCGATCGATCG" * 1000), # 16000 bases - ("BCR__ECT-SAMPLE1__CT1-2", "GCTAGCTAGCTAGCTA" * 800), # 12800 bases - ("BCR__ECT-SAMPLE2__CT1-1", "TAGCTAGCTAGCTAGC" * 1200), # 19200 bases - ("BCR__ECT-SAMPLE2__CT1-2", "CGATCGATCGATCGA" * 600), # 9600 bases - ("BCR__ECT-SAMPLE3__CT1-1", "ATCGATCGATCGATCG" * 700), # 11200 bases - ] - - -@pytest.fixture -def sequence_db_dir(temp_dir, sample_sequences): - """Create sample SQLite databases for testing.""" - db_dir = Path(temp_dir) / "sequence_db_dir" - db_dir.mkdir(exist_ok=True) - - # Group sequences by sample - sequences_by_sample = {} - for seq_id, sequence in sample_sequences: - sample_id = extract_sample_id(seq_id) - if sample_id not in sequences_by_sample: - sequences_by_sample[sample_id] = [] - sequences_by_sample[sample_id].append((seq_id, sequence)) - - # Create database for each sample - for sample_id, sequences in sequences_by_sample.items(): - sample_dir = db_dir / sample_id - sample_dir.mkdir(exist_ok=True) - - db_path = sample_dir / f"glm_dataset_{sample_id}.sqlite" - conn = sqlite3.connect(str(db_path)) - cursor = conn.cursor() - - # Create table - cursor.execute(""" - CREATE TABLE sequences ( - contig_id TEXT PRIMARY KEY, - nt_sequence TEXT NOT NULL - ) - """) - - # Insert sequences - for seq_id, sequence in sequences: - cursor.execute("INSERT INTO sequences (contig_id, nt_sequence) VALUES (?, ?)", (seq_id, sequence)) - - conn.commit() - conn.close() - - return str(db_dir) - - -@pytest.fixture -def train_parquet(temp_dir, sample_sequences): - """Create training split Parquet file.""" - # Use first 3 sequences for training - train_data = pol.DataFrame( - { - "contig_id": [seq[0] for seq in sample_sequences[:3]], - "length": [len(seq[1]) for seq in sample_sequences[:3]], - } - ) - - parquet_path = Path(temp_dir) / "train_split.parquet" - train_data.write_parquet(str(parquet_path)) - return str(parquet_path) - - -@pytest.fixture -def val_parquet(temp_dir, sample_sequences): - """Create validation split Parquet file.""" - # Use last 2 sequences for validation - val_data = pol.DataFrame( - { - "contig_id": [seq[0] for seq in sample_sequences[3:]], - "length": [len(seq[1]) for seq in sample_sequences[3:]], - } - ) - - parquet_path = Path(temp_dir) / "val_split.parquet" - val_data.write_parquet(str(parquet_path)) - return str(parquet_path) - - -@pytest.fixture -def test_parquet(temp_dir, sample_sequences): - """Create test split Parquet file.""" - # Use middle sequence for testing - test_data = pol.DataFrame({"contig_id": [sample_sequences[2][0]], "length": [len(sample_sequences[2][1])]}) - - parquet_path = Path(temp_dir) / "test_split.parquet" - test_data.write_parquet(str(parquet_path)) - return str(parquet_path) - - -@pytest.fixture -def window_dbs(temp_dir, train_parquet, val_parquet, test_parquet): - """Create window databases for all splits.""" - train_db = Path(temp_dir) / "train_windows.db" - val_db = Path(temp_dir) / "val_windows.db" - test_db = Path(temp_dir) / "test_windows.db" - - # Pre-compute window databases - precompute_window_database(train_parquet, str(train_db), window_size=8192, stride=7992) - precompute_window_database(val_parquet, str(val_db), window_size=8192, stride=7992) - precompute_window_database(test_parquet, str(test_db), window_size=8192, stride=7992) - - return {"train": str(train_db), "val": str(val_db), "test": str(test_db)} - - -def test_extract_sample_id(): - """Test sample ID extraction from sequence IDs.""" - assert extract_sample_id("BCR__ECT-SAMPLE1__CT1-1") == "SAMPLE1" - assert extract_sample_id("BCR__ECT-SAMPLE2__CT1-2") == "SAMPLE2" - assert extract_sample_id("BCR__ECT-SAMPLE3__CT1-1") == "SAMPLE3" - - -def test_precompute_window_database(temp_dir, train_parquet): - """Test window database pre-computation.""" - output_db = Path(temp_dir) / "test_windows.db" - - precompute_window_database(train_parquet, str(output_db), window_size=8192, stride=7992) - - # Verify database was created - assert output_db.exists() - - # Check database contents - conn = sqlite3.connect(str(output_db)) - cursor = conn.cursor() - - # Check metadata - cursor.execute("SELECT key, value FROM metadata") - metadata = dict(cursor.fetchall()) - - assert metadata["window_size"] == 8192 - assert metadata["stride"] == 7992 - assert "total_windows" in metadata - assert "distinct_sequences" in metadata - - # Check window mappings - cursor.execute("SELECT COUNT(*) FROM window_mappings") - window_count = cursor.fetchone()[0] - assert window_count > 0 - - conn.close() - - -def test_sharded_eden_dataset_initialization(sequence_db_dir, window_dbs): - """Test ShardedEdenDataset initialization.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] # Dummy token IDs - - # Create dataset - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=False, - use_control_tags=False, - split="train", - ) - - # Verify dataset properties - assert dataset.seq_length == 8192 - assert dataset.stride == 7992 - assert dataset.split == "train" - assert len(dataset) > 0 # Should have some windows - - # Verify database connections - assert hasattr(dataset, "db_connections") - assert len(dataset.db_connections) > 0 - - # Verify window database connection - assert hasattr(dataset, "window_db_conn") - assert dataset.window_db_conn is not None - - # Clean up - dataset.__del__() - - -@patch("bionemo.evo2.data.sharded_eden_dataloader.get_nmt_tokenizer") -def test_sharded_eden_datamodule_initialization(mock_get_tokenizer, sequence_db_dir, window_dbs): - """Test ShardedEdenDataModule initialization.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - mock_get_tokenizer.return_value = mock_tokenizer - - # Create data module - data_module = ShardedEdenDataModule( - sequence_db_dir=sequence_db_dir, - train_window_db_path=window_dbs["train"], - val_window_db_path=window_dbs["val"], - test_window_db_path=window_dbs["test"], - seq_length=8192, - micro_batch_size=1, - global_batch_size=4, - num_workers=0, # Use 0 for testing - rc_aug=False, - use_control_tags=False, - ) - - # Verify data module properties - assert data_module.sequence_db_dir == sequence_db_dir - assert data_module.train_window_db_path == window_dbs["train"] - assert data_module.val_window_db_path == window_dbs["val"] - assert data_module.test_window_db_path == window_dbs["test"] - assert data_module.seq_length == 8192 - assert data_module.micro_batch_size == 1 - assert data_module.global_batch_size == 4 - assert data_module.num_workers == 0 - - # Verify tokenizer was created - assert data_module.tokenizer is not None - - # Verify data sampler was created - assert data_module.data_sampler is not None - assert data_module.data_sampler.seq_len == 8192 - assert data_module.data_sampler.micro_batch_size == 1 - assert data_module.data_sampler.global_batch_size == 4 - - -def test_dataset_getitem(sequence_db_dir, window_dbs): - """Test dataset item retrieval.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - - # Create dataset - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=False, - use_control_tags=False, - split="train", - ) - - # Get first item - item = dataset[np.int64(0)] - - # Verify item structure - assert isinstance(item, dict) - assert "tokens" in item - assert "labels" in item - assert "loss_mask" in item - assert "position_ids" in item - - # Verify tensor shapes - assert item["tokens"].shape == (8192,) - assert item["labels"].shape == (8192,) - assert item["loss_mask"].shape == (8192,) - assert item["position_ids"].shape == (8192,) - - # Verify data types - assert item["tokens"].dtype == torch.int64 - assert item["labels"].dtype == torch.int64 - assert item["loss_mask"].dtype == torch.float32 - assert item["position_ids"].dtype == torch.int64 - - # Clean up - dataset.__del__() - - -def test_dataset_with_control_tags(sequence_db_dir, window_dbs): - """Test dataset with control tags enabled.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [100, 101] # Control tag IDs - - # Create dataset with control tags - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=False, - use_control_tags=True, # Enable control tags - split="train", - ) - - # Verify control tags were prepared - assert hasattr(dataset, "ctrl_ids_map") - assert len(dataset.ctrl_ids_map) > 0 - - # Get first item - item = dataset[np.int64(0)] - - # Verify item contains control tags - assert "tokens" in item - assert "labels" in item - assert "loss_mask" in item - - # Clean up - dataset.__del__() - - -def test_dataset_with_attention_mask(sequence_db_dir, window_dbs): - """Test dataset with attention mask creation.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - - # Create dataset with attention mask - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=True, # Enable attention mask - stride=7992, - rc_aug=False, - use_control_tags=False, - split="train", - ) - - # Verify attention mask was created - assert hasattr(dataset, "attention_mask") - assert dataset.attention_mask.shape == (1, 8192, 8192) - - # Get first item - item = dataset[np.int64(0)] - - # Verify attention mask is included - assert "attention_mask" in item - assert item["attention_mask"].shape == (1, 8192, 8192) - - # Clean up - dataset.__del__() - - -def test_dataset_reverse_complement(sequence_db_dir, window_dbs): - """Test reverse complement functionality.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - - # Create dataset with reverse complement augmentation - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=True, # Enable reverse complement - use_control_tags=False, - split="train", - ) - - # Test reverse complement method - test_seq = "ATCG" - rc_seq = dataset.reverse_complement(test_seq) - assert rc_seq == "CGAT" - - # Test with N bases - test_seq_with_n = "ATCN" - rc_seq_with_n = dataset.reverse_complement(test_seq_with_n) - assert rc_seq_with_n == "NGAT" - - # Clean up - dataset.__del__() - - -def test_dataset_collate_fn(sequence_db_dir, window_dbs): - """Test dataset collate function.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - - # Create dataset - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=False, - use_control_tags=False, - split="train", - ) - - # Create a batch of items - batch = [dataset[np.int64(0)], dataset[np.int64(1)]] if len(dataset) > 1 else [dataset[np.int64(0)]] - - # Test collate function - collated = dataset.collate_fn(batch) - - # Verify collated structure - assert isinstance(collated, dict) - assert "tokens" in collated - assert "labels" in collated - assert "loss_mask" in collated - assert "position_ids" in collated - - # Verify batch dimension - assert collated["tokens"].dim() == 2 - assert collated["tokens"].shape[0] == len(batch) - - # Clean up - dataset.__del__() - - -def test_window_min_length_threshold(temp_dir, train_parquet): - """Test window database creation with length threshold.""" - output_db = Path(temp_dir) / "threshold_windows.db" - - # Create database with length threshold - precompute_window_database( - train_parquet, - str(output_db), - window_size=8192, - stride=7992, - window_min_length_threshold=10000, # Only windows >= 10000 bases - ) - - # Verify database was created - assert output_db.exists() - - # Check metadata - conn = sqlite3.connect(str(output_db)) - cursor = conn.cursor() - - cursor.execute("SELECT key, value FROM metadata") - metadata = dict(cursor.fetchall()) - - assert metadata["window_min_length_threshold"] == 10000 - - conn.close() - - -def test_dataset_length_and_iteration(sequence_db_dir, window_dbs): - """Test dataset length and basic iteration.""" - # Mock tokenizer - mock_tokenizer = Mock() - mock_tokenizer.bos_id = 1 - mock_tokenizer.eos_id = 2 - mock_tokenizer._sep_id = 3 - mock_tokenizer.pad_id = 0 - mock_tokenizer.text_to_ids.return_value = [10, 11, 12] - - # Create dataset - dataset = ShardedEdenDataset( - tokenizer=mock_tokenizer, - sequence_db_dir=sequence_db_dir, - window_db_path=window_dbs["train"], - seq_length=8192, - create_attention_mask=False, - stride=7992, - rc_aug=False, - use_control_tags=False, - split="train", - ) - - # Test length - dataset_len = len(dataset) - assert dataset_len > 0 - - # Test iteration (just first few items) - for i in range(min(3, dataset_len)): - item = dataset[np.int64(i)] - assert isinstance(item, dict) - assert "tokens" in item - - # Clean up - dataset.__del__() diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_tokenizer.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_tokenizer.py deleted file mode 100644 index dc0742862d..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/data/test_tokenizer.py +++ /dev/null @@ -1,240 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pytest - -from bionemo.evo2.data.tokenizer import Evo2Tokenizer -from bionemo.evo2.utils.config import Evo2PreprocessingConfig - - -@pytest.fixture -def tokenizer() -> Evo2Tokenizer: - return Evo2Tokenizer(Evo2PreprocessingConfig()) - - -def test_tokenizer_handles_long_dna_sequence(tokenizer: Evo2Tokenizer) -> None: - """ - Verifies tokenizer correctly processes a long DNA sequence into expected token IDs. - This sequence excerpt was pulled from mmseqs_results_rep_seq_distinct.fasta. - """ - sequence = "TACACCTATATTTTTTAAGGTATGTAAACATCTACTTTTAGTGATACTAACAAAAATATAGAATAATAATTAGTGTTTTTGTATATTAATGTATGGGTAGGATCACAAATAAATTACGAAACCTTTTCCTATAATATTATAA" - tokens = tokenizer.tokenize(sequence) - expected_tokens = [ - [ - 84, - 65, - 67, - 65, - 67, - 67, - 84, - 65, - 84, - 65, - 84, - 84, - 84, - 84, - 84, - 84, - 65, - 65, - 71, - 71, - 84, - 65, - 84, - 71, - 84, - 65, - 65, - 65, - 67, - 65, - 84, - 67, - 84, - 65, - 67, - 84, - 84, - 84, - 84, - 65, - 71, - 84, - 71, - 65, - 84, - 65, - 67, - 84, - 65, - 65, - 67, - 65, - 65, - 65, - 65, - 65, - 84, - 65, - 84, - 65, - 71, - 65, - 65, - 84, - 65, - 65, - 84, - 65, - 65, - 84, - 84, - 65, - 71, - 84, - 71, - 84, - 84, - 84, - 84, - 84, - 71, - 84, - 65, - 84, - 65, - 84, - 84, - 65, - 65, - 84, - 71, - 84, - 65, - 84, - 71, - 71, - 71, - 84, - 65, - 71, - 71, - 65, - 84, - 67, - 65, - 67, - 65, - 65, - 65, - 84, - 65, - 65, - 65, - 84, - 84, - 65, - 67, - 71, - 65, - 65, - 65, - 67, - 67, - 84, - 84, - 84, - 84, - 67, - 67, - 84, - 65, - 84, - 65, - 65, - 84, - 65, - 84, - 84, - 65, - 84, - 65, - 65, - ] - ] - assert expected_tokens == tokens - - -def test_tokenizer_processes_pipe_delimited_sequence(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer correctly handles pipe-delimited sequences with info tags.""" - tokens = tokenizer.tokenize("|info|ATG|info|ATG|") - expected_tokens = [[124, 105, 110, 102, 111, 124, 65, 84, 71, 124, 105, 110, 102, 111, 124, 65, 84, 71, 124]] - assert expected_tokens == tokens - - -def test_tokenizer_drops_empty_sequences(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer removes empty sequences when drop_empty_sequences is True.""" - tokens = tokenizer.tokenize(["A", "", "T"], drop_empty_sequences=True) - expected_tokens = [[65], [84]] - assert expected_tokens == tokens - - -def test_tokenizer_appends_eod_token(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer correctly appends end-of-document token.""" - tokens = tokenizer.tokenize(["ATCG"], append_eod=True) - expected_tokens = [[65, 84, 67, 71, 0]] - assert expected_tokens == tokens - - -def test_tokenizer_pads_sequence_to_required_length(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer correctly pads sequence to specified length.""" - tokens = tokenizer.tokenize(["ATCG"], enforce_sample_length=10) - expected_tokens = [[65, 84, 67, 71, 1, 1, 1, 1, 1, 1]] - assert expected_tokens == tokens - - -def test_tokenizer_raises_error_for_invalid_length(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer raises ValueError when sequence exceeds enforced length.""" - with pytest.raises(ValueError): - tokenizer.tokenize(["ATCGATCGATCG"], enforce_sample_length=4) - - -def test_tokenizer_fixes_unicode_with_ftfy(tokenizer: Evo2Tokenizer) -> None: - """Verifies tokenizer correctly processes broken unicode characters using ftfy.""" - tokens = tokenizer.tokenize("✠ATCG", use_ftfy=True) - expected_tokens = [[226, 156, 160, 65, 84, 67, 71]] - assert expected_tokens == tokens - - -def test_tokenizer_processes_special_characters(tokenizer: Evo2Tokenizer) -> None: - """ - Evo2_Dataset uses specific ASCII encodings for specific characters: - CONTROL_TAGS: ClassVar[list[int]] = [64, 35] # '@' tag for splice splits/windows, '#' for contig splits - TAG_BOUNDS = 124 # start and end delim: '|' - TAG_CHARS: ClassVar[set[int]] = {95, 59, 32} # chars only found in control tags: _, ;, space - DEFAULT_EOD = 0 - This test verifies tokenizer correctly handles these special characters. - """ - special_chars = "".join(["@", "#", "|", "_", ";", " "]) - tokens = tokenizer.tokenize(special_chars, append_eod=True) - expected_tokens = [[64, 35, 124, 95, 59, 32, 0]] - assert expected_tokens == tokens diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/__init__.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/__init__.py deleted file mode 100644 index 4c0c148742..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/test_llama.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/test_llama.py deleted file mode 100644 index c57f6f18f6..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/models/test_llama.py +++ /dev/null @@ -1,117 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -import subprocess - -import pytest -import torch -from transformers import AutoModelForCausalLM - -from bionemo.core.data.load import load - - -@pytest.fixture(scope="module") -def eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814(): - """Test data for Evo2 llama inference. - - Returns: - tree - . - ├── per_layer_activations - │ └── activations_rank000_dl00_batch000000.pt - ├── predictions__rank_0__dp_rank_0.pt - ├── ribosomal_rrna_highly_conserved_PMC4140814.fasta - └── seq_idx_map.json - - 1 directory, 4 files - """ - return load("evo2_llama/eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814:1.0") - - -@pytest.fixture(scope="module") -def llama_7b_8k_og2(): - return load("evo2_llama/7B-8k-og2:1.0") - - -@pytest.mark.skipif(os.environ.get("BIONEMO_DATA_SOURCE") != "pbss", reason="Test data is not available on NGC") -def test_golden_values_llama( - tmp_path, eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814, llama_7b_8k_og2 -): - fasta_path = ( - eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814 - / "ribosomal_rrna_highly_conserved_PMC4140814.fasta" - ) - gold_values_path = ( - eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814 / "predictions__rank_0__dp_rank_0.pt" - ) - output_dir = tmp_path / "predictions_llama" - prediction_cmd = ( - f"predict_evo2 --fasta {fasta_path} --ckpt-dir {llama_7b_8k_og2} --output-dir {output_dir} --model-size 7B" - ) - subprocess.run(prediction_cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - predictions = torch.load(output_dir / "predictions__rank_0__dp_rank_0.pt", weights_only=True) - gold_values = torch.load(gold_values_path, weights_only=True) - assert predictions["token_logits"].shape == gold_values["token_logits"].shape - torch.testing.assert_close(predictions["token_logits"], gold_values["token_logits"], atol=0.5, rtol=0) - - -@pytest.mark.skipif(os.environ.get("BIONEMO_DATA_SOURCE") != "pbss", reason="Test data is not available on NGC") -def test_checkpoint_conversion( - tmp_path, eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814, llama_7b_8k_og2 -): - target_dir = tmp_path / "llama_7b_8k_og2" - convert_cmd = f"evo2_nemo2_to_hf --model-type llama --model-path {llama_7b_8k_og2} --output-dir {target_dir}" - subprocess.run(convert_cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - assert target_dir.exists() - assert target_dir.is_dir() - hf_model = AutoModelForCausalLM.from_pretrained( - target_dir, - torch_dtype=torch.bfloat16, - local_files_only=True, # Force loading from local path, not HF Hub - use_cache=False, # Disable use_cache to get the correct forward pass outside of generate. - ).eval() - # # Add hooks to capture inputs/outputs for forward pass - # activations = {} - # def capture_hook(name): - # def hook(module, input, output): - # # if not isinstance(input, torch.Tensor): - # # input = None - # # if not isinstance(output, torch.Tensor): - # # output = None - # activations[name] = { - # 'input': input, - # 'output': output - # } - # return hook - # # Register hooks on key layers - # for name, module in hf_model.named_modules(): - # module.register_forward_hook(capture_hook(name)) - fasta_path = ( - eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814 - / "ribosomal_rrna_highly_conserved_PMC4140814.fasta" - ) - with open(fasta_path, "r") as f: - fasta_seq = f.readlines()[1].strip() - input_ids = torch.tensor([ord(c) for c in fasta_seq]).unsqueeze(0) # add batch dimension - with torch.no_grad(): - outputs = hf_model(input_ids) - gold_values_path = ( - eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814 / "predictions__rank_0__dp_rank_0.pt" - ) - gold_values = torch.load(gold_values_path, weights_only=True) - assert outputs.logits.shape == gold_values["token_logits"].shape - torch.testing.assert_close(outputs.logits, gold_values["token_logits"].to(dtype=torch.bfloat16), atol=0.5, rtol=0) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/__init__.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/common.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/common.py deleted file mode 100644 index 92ebd07afc..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/common.py +++ /dev/null @@ -1,71 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def small_training_cmd( - path, - max_steps, - val_check, - global_batch_size: int | None = None, - devices: int = 1, - additional_args: str = "", -): - """Command for training.""" - cmd = ( - f"train_evo2 --mock-data --result-dir {path} --devices {devices} " - "--model-size 1b_nv --num-layers 4 --hybrid-override-pattern SDH* --limit-val-batches 1 " - "--no-activation-checkpointing --add-bias-output --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} " - f"--seq-length 16 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args} " - f"{'--global-batch-size ' + str(global_batch_size) if global_batch_size is not None else ''}" - ) - return cmd - - -def small_training_finetune_cmd( - path, - max_steps, - val_check, - prev_ckpt, - devices: int = 1, - global_batch_size: int | None = None, - create_tflops_callback: bool = True, - additional_args: str = "", -): - """Command for finetuning.""" - cmd = ( - f"train_evo2 --mock-data --result-dir {path} --devices {devices} " - "--model-size 1b_nv --num-layers 4 --hybrid-override-pattern SDH* --limit-val-batches 1 " - "--no-activation-checkpointing --add-bias-output --create-tensorboard-logger " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} " - f"--seq-length 16 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args} --ckpt-dir {prev_ckpt} " - f"{'--create-tflops-callback' if create_tflops_callback else ''} " - f"{'--global-batch-size ' + str(global_batch_size) if global_batch_size is not None else ''}" - ) - return cmd - - -def predict_cmd(ckpt_dir: str, output_dir: str, fasta_file_path: str, additional_args: str = ""): - """Command fro predict.""" - cmd = ( - f"predict_evo2 --fasta {fasta_file_path} --ckpt-dir {ckpt_dir} --output-dir {output_dir} " - "--model-size 1b_nv --num-layers 4 --hybrid-override-pattern SDH* --tensor-parallel-size 1 " - f"--pipeline-model-parallel-size 1 --context-parallel-size 1 {additional_args}" - ) - - return cmd diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_finetune.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_finetune.py deleted file mode 100644 index 8036b81fc1..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_finetune.py +++ /dev/null @@ -1,202 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -import pytest - -from bionemo.testing.subprocess_utils import run_command_in_subprocess - -from .common import small_training_cmd, small_training_finetune_cmd - - -def extract_val_losses(log_text: str, n: int): - """ - Extracts validation losses every n-th occurrence (starting at 0). - Iteration index is derived by counting val_loss appearances. - - Args: - log_text (str): The log output as a string. - n (int): Interval of occurrences (e.g., n=5 -> get val_loss at 0, 5, 10...). - - Returns: - List of tuples: (step, validation_loss_value). - """ - # Regex to capture val_loss values - pattern = re.compile(r"val_loss: ([0-9.]+)") - - results = [] - for idx, match in enumerate(pattern.finditer(log_text)): - if idx % n == 0: # take every n-th val_loss occurrence - results.append((idx, float(match.group(1)))) - - return results - - -@pytest.mark.timeout(2048) # Optional: fail if the test takes too long. -@pytest.mark.slow -@pytest.mark.parametrize("with_peft", [True, False]) -def test_train_evo2_finetune_runs(tmp_path, with_peft: bool): - """ - This test runs the `train_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - """ - num_steps = 25 - val_steps = 10 - global_batch_size = 128 - - # Note: The command assumes that `train_evo2` is in your PATH. - command = small_training_cmd( - tmp_path / "pretrain", - max_steps=num_steps, - val_check=val_steps, - global_batch_size=global_batch_size, - additional_args=" --lr 0.1 ", - ) - stdout_pretrain: str = run_command_in_subprocess(command=command, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" not in stdout_pretrain - - log_dir = tmp_path / "pretrain" / "evo2" - checkpoints_dir = log_dir / "checkpoints" - tensorboard_dir = log_dir / "dev" - - # Check if logs dir exists - assert log_dir.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps * global_batch_size}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist." - - event_files = list(tensorboard_dir.rglob("events.out.tfevents*")) - assert len(event_files) == 1, f"No or multiple TensorBoard event files found under {tensorboard_dir}" - - val_losses = extract_val_losses(stdout_pretrain, val_steps) - - for i in range(1, len(val_losses)): - assert val_losses[i][1] <= val_losses[i - 1][1], ( - f"Validation loss increased at step {val_losses[i][0]}: {val_losses[i][1]} > {val_losses[i - 1][1]}" - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir}" - assert len(matching_subfolders) == 1, "Only one checkpoint subfolder should be found." - if with_peft: - result_dir = tmp_path / "lora_finetune" - additional_args = "--lora-finetune --lr 0.1 " - else: - result_dir = tmp_path / "finetune" - additional_args = " --lr 0.1 " - - command_finetune = small_training_finetune_cmd( - result_dir, - max_steps=num_steps, - val_check=val_steps, - global_batch_size=global_batch_size, - prev_ckpt=matching_subfolders[0], - create_tflops_callback=not with_peft, - additional_args=additional_args, - ) - stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune - - log_dir_ft = result_dir / "evo2" - checkpoints_dir_ft = log_dir_ft / "checkpoints" - tensorboard_dir_ft = log_dir_ft / "dev" - - # Check if logs dir exists - assert log_dir_ft.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps * global_batch_size}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders_finetune = [ - p for p in checkpoints_dir_ft.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders_finetune, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir_ft}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir_ft.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files_ft = list(tensorboard_dir_ft.rglob("events.out.tfevents*")) - assert len(event_files_ft) == 1, f"No or multiple TensorBoard event files found under {tensorboard_dir_ft}" - - val_losses_ft = extract_val_losses(stdout_finetune, val_steps) - - # Check that each validation loss is less than or equal to the previous one - for i in range(1, len(val_losses_ft)): - assert val_losses_ft[i][1] <= val_losses_ft[i - 1][1], ( - f"Validation loss increased at step {val_losses_ft[i][0]}: {val_losses_ft[i][1]} > {val_losses_ft[i - 1][1]}" - ) - - assert len(matching_subfolders_finetune) == 1, "Only one checkpoint subfolder should be found." - - # With LoRA, test resuming from a saved LoRA checkpoint - if with_peft: - result_dir = tmp_path / "lora_finetune_resume" - - # Resume from LoRA checkpoint - command_resume_finetune = small_training_finetune_cmd( - result_dir, - max_steps=num_steps, - val_check=val_steps, - global_batch_size=global_batch_size, - prev_ckpt=matching_subfolders[0], - create_tflops_callback=False, - additional_args=f"--lora-finetune --lora-checkpoint-path {matching_subfolders_finetune[0]} --lr 0.1 ", - ) - stdout_finetune: str = run_command_in_subprocess(command=command_resume_finetune, path=str(tmp_path)) - - log_dir_ft = result_dir / "evo2" - checkpoints_dir_ft = log_dir_ft / "checkpoints" - tensorboard_dir_ft = log_dir_ft / "dev" - - # Check if logs dir exists - assert log_dir_ft.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist." - - # Recursively search for files with tensorboard logger - event_files_ft = list(tensorboard_dir_ft.rglob("events.out.tfevents*")) - assert len(event_files_ft) == 1, f"No or multiple TensorBoard event files found under {tensorboard_dir_ft}" - - val_losses_ft = extract_val_losses(stdout_finetune, val_steps) - - # Check that each validation loss is less than or equal to the previous one - for i in range(1, len(val_losses_ft)): - assert val_losses_ft[i][1] <= val_losses_ft[i - 1][1], ( - f"Validation loss increased at step {val_losses_ft[i][0]}: {val_losses_ft[i][1]} > {val_losses_ft[i - 1][1]}" - ) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_infer.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_infer.py deleted file mode 100644 index a0de81b4d4..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_infer.py +++ /dev/null @@ -1,79 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pytest -import torch - -from bionemo.core.data.load import load -from bionemo.evo2.run.infer import infer -from bionemo.testing.megatron_parallel_state_utils import clean_parallel_state_context -from bionemo.testing.torch import check_fp8_support - - -RANDOM_SEED = 42 - - -@pytest.mark.parametrize("fast", [True, False]) -def test_run_infer(fast: bool): - # Create PTL trainer. - tensor_parallel_size = 1 - pipeline_model_parallel_size = 1 - context_parallel_size = 1 - temperature = 1.0 - top_k = 0 - top_p = 0.0 - max_new_tokens = 1 - - # Generation args. - default_prompt = ( - "|d__Bacteria;" - + "p__Pseudomonadota;" - + "c__Gammaproteobacteria;" - + "o__Enterobacterales;" - + "f__Enterobacteriaceae;" - + "g__Escherichia;" - + "s__Escherichia|" - ) - try: - checkpoint_path = load("evo2/1b-8k:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - - is_fp8_supported, _, _ = check_fp8_support(torch.cuda.current_device()) - - with clean_parallel_state_context(): - infer( - prompt=default_prompt, - ckpt_dir=checkpoint_path, - temperature=temperature, - top_k=top_k, - top_p=top_p, - max_new_tokens=max_new_tokens, - tensor_parallel_size=tensor_parallel_size, - pipeline_model_parallel_size=pipeline_model_parallel_size, - context_parallel_size=context_parallel_size, - vortex_style_fp8=is_fp8_supported, - flash_decode=fast, - ) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_inference.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_inference.py deleted file mode 100644 index c1d205c732..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_inference.py +++ /dev/null @@ -1,104 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import nemo.lightning as nl -import torch -from megatron.core.inference.common_inference_params import CommonInferenceParams -from nemo.collections.llm import generate - -from bionemo.core.data.load import load -from bionemo.testing.megatron_parallel_state_utils import clean_parallel_state_context - - -RANDOM_SEED = 42 - - -def test_infer_model_generates_expected_single_token_output(): - # Create PTL trainer. - TENSOR_PARALLEL_SIZE = 1 - PIPELINE_MODEL_PARALLEL_SIZE = 1 - CONTEXT_PARALLEL_SIZE = 1 - NUM_GPUS = 1 - NUM_NODES = 1 - - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=TENSOR_PARALLEL_SIZE, - pipeline_model_parallel_size=PIPELINE_MODEL_PARALLEL_SIZE, - context_parallel_size=CONTEXT_PARALLEL_SIZE, - pipeline_dtype=torch.bfloat16, - ckpt_load_optimizer=False, # Needs to be false for a normal model checkpoint. - ckpt_save_optimizer=False, - ckpt_async_save=False, - save_ckpt_format="torch_dist", - ckpt_load_strictness="log_all", - ) - trainer = nl.Trainer( - accelerator="gpu", - num_nodes=NUM_NODES, - devices=NUM_GPUS, - strategy=strategy, - log_every_n_steps=1, - limit_val_batches=10, - num_sanity_val_steps=0, - plugins=nl.MegatronMixedPrecision( - precision="bf16-mixed", - params_dtype=torch.bfloat16, - ), - ) - - prompt = ( - "|d__Bacteria;" - + "p__Pseudomonadota;" - + "c__Gammaproteobacteria;" - + "o__Enterobacterales;" - + "f__Enterobacteriaceae;" - + "g__Escherichia;" - + "s__Escherichia|" - ) - temperature = 1.0 - top_k = 0 - top_p = 0.0 - max_new_tokens = 1 - try: - checkpoint_path = load("evo2/1b-8k:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - with clean_parallel_state_context(): - results = generate( - path=checkpoint_path, - prompts=[prompt], - trainer=trainer, - inference_params=CommonInferenceParams( - temperature, - top_k, - top_p, - return_log_probs=False, - num_tokens_to_generate=max_new_tokens, - ), - random_seed=RANDOM_SEED, - text_only=True, - ) - - assert isinstance(results, list) - assert results == ["T"] diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_predict.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_predict.py deleted file mode 100644 index 75319464e9..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_predict.py +++ /dev/null @@ -1,506 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import json -import os -import subprocess -import sys -import tempfile -from pathlib import Path - -import lightning as pl -import pytest -import torch -from lightning.fabric.plugins.environments.lightning import find_free_network_port - -from bionemo.core.data.load import load -from bionemo.evo2.run.predict import predict -from bionemo.llm.lightning import batch_collator -from bionemo.testing.data.fasta import ALU_SEQUENCE, create_fasta_file -from bionemo.testing.megatron_parallel_state_utils import clean_parallel_state_context -from bionemo.testing.subprocess_utils import run_command_in_subprocess -from bionemo.testing.torch import check_fp8_support - -from .common import predict_cmd, small_training_finetune_cmd - - -def is_a6000_gpu() -> bool: - # Check if any of the visible GPUs is an A6000 - for i in range(torch.cuda.device_count()): - device_name = torch.cuda.get_device_name(i) - if "A6000" in device_name: - return True - return False - - -@pytest.fixture(scope="module") -def checkpoint_1b_8k_bf16_path() -> Path: - try: - checkpoint_path = load("evo2/1b-8k-bf16:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - yield checkpoint_path - - -@pytest.fixture(scope="module") -def checkpoint_7b_1m_path() -> Path: - try: - checkpoint_path = load("evo2/7b-1m:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - yield checkpoint_path - - -def test_predict_does_not_instantiate_optimizer(tmp_path: Path, checkpoint_1b_8k_bf16_path: Path): - output_dir = tmp_path / "test_output" - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file( - fasta_file_path, - 1, - sequence_lengths=[512], - repeating_dna_pattern=ALU_SEQUENCE, - ) - - class AssertNoOptimizerCallback(pl.Callback): - def on_predict_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx=0): - assert not trainer.optimizers, ( - f"Optimizer should not be instantiated for prediction, got {trainer.optimizers}" - ) - trainer_model_opt = getattr(trainer.model, "optim", None) - assert trainer_model_opt is None or not trainer_model_opt.state_dict(), ( - f"Model optimizer found, got {trainer_model_opt} with state {trainer_model_opt.state_dict()}" - ) - - with clean_parallel_state_context(): - predict( - fasta_path=fasta_file_path, - ckpt_dir=str(checkpoint_1b_8k_bf16_path), - output_dir=output_dir, - tensor_parallel_size=1, - pipeline_model_parallel_size=1, - context_parallel_size=1, - num_nodes=1, - devices=1, - model_size="1b", - ckpt_format="torch_dist", - fp8=False, - full_fp8=False, - work_dir=tmp_path, - micro_batch_size=1, - output_log_prob_seqs=True, - log_prob_collapse_option="mean", - write_interval="epoch", - prepend_bos=False, - no_sequence_parallel=False, - hybrid_override_pattern="SDH*", - num_layers=4, - seq_len_interpolation_factor=None, - files_per_subdir=None, - lora_checkpoint_path=None, - extra_callbacks=[ - AssertNoOptimizerCallback(), - ], # use this for making testing the loop easier. - ) - - -@pytest.mark.parametrize( - "ddp,pp,wi", - [ - pytest.param(1, 1, "epoch", id="ddp=1,pp=1,wi=epoch"), - pytest.param(2, 1, "epoch", id="ddp=2,pp=1,wi=epoch"), - pytest.param(2, 1, "batch", id="ddp=2,pp=1,wi=batch"), - pytest.param( - 1, - 2, - "epoch", - id="ddp=1,pp=2,wi=epoch", - marks=pytest.mark.skip("Pipeline parallelism test currently hangs."), - ), - ], -) -def test_predict_evo2_runs( - tmp_path, - ddp: int, - pp: int, - wi: str, - checkpoint_1b_8k_bf16_path: Path, - num_sequences: int = 5, - target_sequence_lengths: list[int] = [3149, 3140, 1024, 3148, 3147], -): - """ - This test runs the `predict_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - - Since it's the full output this does not support CP, so we only test with TP=1. We also want coverage of the - case where the sequence lengths are different and not necessarily divisible by CP. - """ - world_size = ddp * pp - if world_size > torch.cuda.device_count(): - pytest.skip(f"World size {world_size} is less than the number of GPUs {torch.cuda.device_count()}") - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file( - fasta_file_path, num_sequences, sequence_lengths=target_sequence_lengths, repeating_dna_pattern=ALU_SEQUENCE - ) - # Create a mock data directory. - # a local copy of the environment - env = dict(**os.environ) - if is_a6000_gpu(): - # Fix hanging issue on A6000 GPUs with multi-gpu tests - env["NCCL_P2P_DISABLE"] = "1" - - # Build the command string. - # Note: The command assumes that `train_evo2` is in your PATH. - output_dir = tmp_path / "test_output" - command = ( - f"torchrun --nproc_per_node {world_size} --nnodes 1 --no-python " - f"predict_evo2 --fasta {fasta_file_path} --ckpt-dir {checkpoint_1b_8k_bf16_path} " - f"--output-dir {output_dir} --model-size 1b " - f"--micro-batch-size 3 --write-interval {wi} " - f"--pipeline-model-parallel-size {pp} --num-nodes 1 --devices {world_size}" - ) - - # Run the command in a subshell, using the temporary directory as the current working directory. - open_port = find_free_network_port() - env["MASTER_PORT"] = str(open_port) - result = subprocess.run( - command, - shell=True, # Use the shell to interpret wildcards (e.g. SDH*) - cwd=tmp_path, # Run in the temporary directory - capture_output=True, # Capture stdout and stderr for debugging - env=env, # Pass in the env where we override the master port. - text=True, # Decode output as text - ) - - # For debugging purposes, print the output if the test fails. - if result.returncode != 0: - sys.stderr.write("STDOUT:\n" + result.stdout + "\n") - sys.stderr.write("STDERR:\n" + result.stderr + "\n") - - # Assert that the command completed successfully. - assert result.returncode == 0, "train_evo2 command failed." - - # Assert that the output directory was created. - pred_files = glob.glob(os.path.join(output_dir, "predictions__rank_*.pt")) - if wi == "batch": - assert len(pred_files) == 2, f"Expected 2 prediction file (for this test), got {len(pred_files)}" - else: - assert len(pred_files) == ddp, f"Expected {ddp} prediction file (for this test), got {len(pred_files)}" - with open(output_dir / "seq_idx_map.json", "r") as f: - seq_idx_map = json.load( - f - ) # This gives us the mapping from the sequence names to the indices in the predictions. - preds = [torch.load(pf) for pf in pred_files] - preds = batch_collator( - [p for p in preds if p is not None], - batch_dim_key_defaults={"token_logits": 0}, - seq_dim_key_defaults={"token_logits": 1}, - ) - assert isinstance(preds, dict) - assert "token_logits" in preds - assert "pad_mask" in preds - assert "seq_idx" in preds - - assert len(preds["token_logits"]) == len(preds["pad_mask"]) == len(preds["seq_idx"]) == num_sequences - assert len(seq_idx_map) == num_sequences - for original_idx, pad_mask, token_logits in zip(preds["seq_idx"], preds["pad_mask"], preds["token_logits"]): - # seq_idx is not sorted necessarily, so use the saved "seq_idx" to determine the original order. - expected_len = target_sequence_lengths[original_idx] - assert pad_mask.sum() == expected_len - assert token_logits.shape == (max(target_sequence_lengths), 512) - - -@pytest.fixture(scope="module") -def baseline_predictions_7b_1m_results( - checkpoint_7b_1m_path: Path, - num_sequences: int = 5, - target_sequence_lengths: list[int] = [2048, 2048, 2048, 2048, 2048], -) -> dict[int, float]: - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = Path(tmp_dir) - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file( - fasta_file_path, - num_sequences, - sequence_lengths=target_sequence_lengths, - repeating_dna_pattern=ALU_SEQUENCE, - ) - output_dir = tmp_path / "test_output" - command = ( - f"torchrun --nproc_per_node 1 --nnodes 1 --no-python " - f"predict_evo2 --fasta {fasta_file_path} --ckpt-dir {checkpoint_7b_1m_path} " - f"--num-layers 4 --hybrid-override-pattern SDH* " # subset of layers for testing - # FIXME changing batch size from 3 to 1 required dropping rel=1e-6 to rel=1e-3 - # even when model parallelism is not used. This should be investigated. - f"--micro-batch-size 3 " - f"--output-dir {output_dir} --model-size 7b_arc_longcontext " - f"--num-nodes 1 --write-interval epoch " - "--output-log-prob-seqs --log-prob-collapse-option sum" - ) - # Create a mock data directory. - # a local copy of the environment - env = dict(**os.environ) - open_port = find_free_network_port() - env["MASTER_PORT"] = str(open_port) - result = subprocess.run( - command, - shell=True, # Use the shell to interpret wildcards (e.g. SDH*) - cwd=tmp_path, # Run in the temporary directory - capture_output=True, # Capture stdout and stderr for debugging - env=env, # Pass in the env where we override the master port. - text=True, # Decode output as text - ) - assert result.returncode == 0, "predict_evo2 command failed." - # Assert that the output directory was created. - pred_files = glob.glob(os.path.join(output_dir, "predictions__rank_*.pt")) - preds = [torch.load(pf) for pf in pred_files] - preds = batch_collator( - [p for p in preds if p is not None], - ) - yield dict(zip([i.item() for i in preds["seq_idx"]], [p.item() for p in preds["log_probs_seqs"]])) - - -@pytest.mark.parametrize( - "ddp,cp,pp,tp,fp8,wi", - [ - pytest.param(1, 1, 1, 1, False, "epoch", id="ddp=1,cp=1,pp=1,tp=1,fp8=False,wi=epoch"), - pytest.param(2, 1, 1, 1, False, "epoch", id="ddp=2,cp=1,pp=1,tp=1,fp8=False,wi=epoch"), - pytest.param( - 2, 1, 1, 1, False, "batch", id="ddp=2,cp=1,pp=1,tp=1,fp8=False,wi=batch" - ), # simulate a large prediction run with dp parallelism - pytest.param(1, 2, 1, 1, False, "epoch", id="ddp=1,cp=2,pp=1,tp=1,fp8=False,wi=epoch"), - pytest.param(1, 2, 1, 1, False, "batch", id="ddp=1,cp=2,pp=1,tp=1,fp8=False,wi=batch"), - pytest.param( - 1, - 1, - 2, - 1, - False, - "epoch", - id="ddp=1,cp=1,pp=2,tp=1,fp8=False,wi=epoch", - marks=pytest.mark.skip("Pipeline parallelism test currently hangs."), - ), - pytest.param( - 1, 1, 1, 2, True, "epoch", id="ddp=1,cp=1,pp=1,tp=2,fp8=True,wi=epoch" - ), # Cover case where FP8 was not supported with TP=2 - pytest.param(1, 1, 1, 2, False, "epoch", id="ddp=1,cp=1,pp=1,tp=2,fp8=False,wi=epoch"), - ], - ids=lambda x: f"ddp={x[0]},cp={x[1]},pp={x[2]},tp={x[3]},fp8={x[4]},wi={x[5]}", -) -def test_predict_evo2_equivalent_with_log_probs( - tmp_path, - ddp: int, - cp: int, - pp: int, - tp: int, - fp8: bool, - wi: str, - checkpoint_7b_1m_path: Path, - baseline_predictions_7b_1m_results: dict[int, float], - num_sequences: int = 5, - target_sequence_lengths: list[int] = [2048, 2048, 2048, 2048, 2048], -): - """ - This test runs the `predict_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - - For this test, we want coverage of CP, so we make sure sequence lengths are all the same and divisible by CP. - - The other thing this test does is check that the log probabilities are equivalent to the baseline predictions - without model parallelism. - """ - - world_size = ddp * cp * pp * tp - mp_size = cp * pp * tp - if world_size > torch.cuda.device_count(): - pytest.skip(f"World size {world_size} is less than the number of GPUs {torch.cuda.device_count()}") - is_fp8_supported, _, _ = check_fp8_support(torch.cuda.current_device()) - if not is_fp8_supported and fp8: - pytest.skip("FP8 is not supported on this GPU.") - - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file( - fasta_file_path, num_sequences, sequence_lengths=target_sequence_lengths, repeating_dna_pattern=ALU_SEQUENCE - ) - # Create a mock data directory. - # a local copy of the environment - env = dict(**os.environ) - if is_a6000_gpu(): - # Fix hanging issue on A6000 GPUs with multi-gpu tests - env["NCCL_P2P_DISABLE"] = "1" - - fp8_option = "--fp8" if fp8 else "" - # Build the command string. - # Note: The command assumes that `train_evo2` is in your PATH. - output_dir = tmp_path / "test_output" - command = ( - f"torchrun --nproc_per_node {world_size} --nnodes 1 --no-python " - f"predict_evo2 --fasta {fasta_file_path} --ckpt-dir {checkpoint_7b_1m_path} " - f"--micro-batch-size 3 --write-interval {wi} " - f"--num-layers 4 --hybrid-override-pattern SDH* " # subset of layers for testing - f"--output-dir {output_dir} --model-size 7b_arc_longcontext --tensor-parallel-size {tp} {fp8_option} " - f"--pipeline-model-parallel-size {pp} --context-parallel-size {cp} --num-nodes 1 --devices {world_size} " - "--output-log-prob-seqs --log-prob-collapse-option sum" - ) - - # Run the command in a subshell, using the temporary directory as the current working directory. - open_port = find_free_network_port() - env["MASTER_PORT"] = str(open_port) - result = subprocess.run( - command, - shell=True, # Use the shell to interpret wildcards (e.g. SDH*) - cwd=tmp_path, # Run in the temporary directory - capture_output=True, # Capture stdout and stderr for debugging - env=env, # Pass in the env where we override the master port. - text=True, # Decode output as text - ) - - # For debugging purposes, print the output if the test fails. - if result.returncode != 0: - sys.stderr.write("STDOUT:\n" + result.stdout + "\n") - sys.stderr.write("STDERR:\n" + result.stderr + "\n") - - # Assert that the command completed successfully. - assert result.returncode == 0, "train_evo2 command failed." - - # Assert that the output directory was created. - pred_files = glob.glob(os.path.join(output_dir, "predictions__rank_*.pt")) - if wi == "batch": - assert len(pred_files) == 2, f"Expected 2 prediction file (for this test), got {len(pred_files)}" - else: - assert len(pred_files) == ddp, f"Expected {ddp} prediction file (for this test), got {len(pred_files)}" - with open(output_dir / "seq_idx_map.json", "r") as f: - seq_idx_map = json.load( - f - ) # This gives us the mapping from the sequence names to the indices in the predictions. - preds = [torch.load(pf) for pf in pred_files] - preds = batch_collator( - [p for p in preds if p is not None], - ) - assert isinstance(preds, dict) - assert "log_probs_seqs" in preds - assert "seq_idx" in preds - assert len(preds["log_probs_seqs"]) == len(preds["seq_idx"]) == num_sequences - assert len(seq_idx_map) == num_sequences - for original_idx, log_probs in zip(preds["seq_idx"], preds["log_probs_seqs"]): - if mp_size > 1 and not fp8: - # FIXME changing batch size so it doesn't match also required dropping rel=1e-6 to rel=1e-3. - # This should be investigated. - rel = 1e-3 - elif fp8: - # NOTE: This is hand-tuned on a b300 to pass for now as of 9/10/2025. - rel = 1e-2 - else: - rel = 1e-6 - assert log_probs.item() == pytest.approx(baseline_predictions_7b_1m_results[original_idx.item()], rel=rel) - - -@pytest.mark.timeout(512) -@pytest.mark.slow -def test_different_results_with_without_peft(tmp_path): - try: - base_model_checkpoint_path = load("evo2/1b-8k:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - - num_steps = 2 - - result_dir = tmp_path / "lora_finetune" - - # Note: The command assumes that `train_evo2` is in your PATH. - command_finetune = small_training_finetune_cmd( - result_dir, - max_steps=num_steps, - val_check=num_steps, - prev_ckpt=base_model_checkpoint_path, - create_tflops_callback=False, - additional_args="--lora-finetune", - ) - stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune - assert "Loading adapters from" not in stdout_finetune - - # Check if checkpoints dir exists - checkpoints_dir = result_dir / "evo2" / "checkpoints" - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - # Create a sample FASTA file to run predictions - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file(fasta_file_path, 3, sequence_lengths=[32, 65, 129], repeating_dna_pattern=ALU_SEQUENCE) - - result_dir_original = tmp_path / "results_original" - cmd_predict = predict_cmd(base_model_checkpoint_path, result_dir_original, fasta_file_path) - stdout_predict: str = run_command_in_subprocess(command=cmd_predict, path=str(tmp_path)) - - # Assert that the output directory was created. - pred_files_original = glob.glob(str(result_dir_original / "predictions__rank_*.pt")) - assert len(pred_files_original) == 1, f"Expected 1 prediction file (for this test), got {len(pred_files_original)}" - - # Find the checkpoint dir generated by finetuning - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - result_dir_peft = tmp_path / "results_peft" - additional_args = f"--lora-checkpoint-path {matching_subfolders[0]}" - cmd_predict = predict_cmd(base_model_checkpoint_path, result_dir_peft, fasta_file_path, additional_args) - stdout_predict: str = run_command_in_subprocess(command=cmd_predict, path=str(tmp_path)) - assert "Loading adapters from" in stdout_predict - - pred_files_peft = glob.glob(str(result_dir_peft / "predictions__rank_*.pt")) - assert len(pred_files_peft) == 1, f"Expected 1 prediction file (for this test), got {len(pred_files_peft)}" - - results_original = torch.load(f"{result_dir_original}/predictions__rank_0__dp_rank_0.pt") - results_peft = torch.load(f"{result_dir_peft}/predictions__rank_0__dp_rank_0.pt") - - seq_idx_original = results_original["seq_idx"] - seq_idx_peft = results_peft["seq_idx"] - assert torch.equal(seq_idx_original, seq_idx_peft), f"Tensors differ: {seq_idx_original} vs {seq_idx_peft}" - - logits_original = results_original["token_logits"] - logits_peft = results_peft["token_logits"] - assert (logits_original != logits_peft).any() - assert logits_original.shape == logits_peft.shape, ( - f"Shapes don't match: {logits_original.shape} vs {logits_peft.shape}" - ) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_train.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_train.py deleted file mode 100644 index aee595edd9..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/run/test_train.py +++ /dev/null @@ -1,637 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import io -import os -import shlex -from contextlib import redirect_stderr, redirect_stdout -from typing import Tuple - -import pytest -import torch -from nemo import lightning as nl -from transformer_engine.pytorch.fp8 import check_fp8_support - -from bionemo.evo2.run.train import parse_args, train -from bionemo.testing.assert_optimizer_grads_match import assert_optimizer_states_match -from bionemo.testing.lightning import extract_global_steps_from_log -from bionemo.testing.megatron_parallel_state_utils import distributed_model_parallel_state -from bionemo.testing.subprocess_utils import run_command_in_subprocess - -from .common import small_training_cmd, small_training_finetune_cmd - - -fp8_available, reason_for_no_fp8 = check_fp8_support() - - -def run_train_with_std_redirect(args: argparse.Namespace) -> Tuple[str, nl.Trainer]: - """Run a function with output capture.""" - stdout_buf, stderr_buf = io.StringIO(), io.StringIO() - with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf): - with distributed_model_parallel_state(): - trainer: nl.Trainer = train(args) - - train_stdout = stdout_buf.getvalue() - train_stderr = stderr_buf.getvalue() - print("Captured STDOUT:\n", train_stdout) - print("Captured STDERR:\n", train_stderr) - return train_stdout, trainer - - -def distributed_training_cmd( - path, - max_steps, - val_check, - num_devices, - dp, - tp, - cp, - pp, - dataset_dir=None, - training_config=None, - additional_args: str = "", -): - """Create distributed training command with specified parallelism settings. - - Args: - path: Result directory path - max_steps: Maximum training steps - val_check: Validation check interval - num_devices: Total number of devices - dp: Data parallel size - tp: Tensor parallel size - cp: Context parallel size - pp: Pipeline parallel size - dataset_dir: Path to preprocessed dataset directory (if None, uses --mock-data) - training_config: Path to training data config YAML file (required if dataset_dir is provided) - additional_args: Additional command line arguments - """ - micro_batch_size = 1 if dp == 2 else 2 - - # Use real dataset if provided, otherwise fall back to mock data - if dataset_dir and training_config: - data_args = f"-d {training_config} --dataset-dir {dataset_dir}" - else: - data_args = "--mock-data" - - cmd = ( - f"train_evo2 {data_args} --result-dir {path} --devices {num_devices} " - f"--tensor-parallel-size {tp} --pipeline-model-parallel-size {pp} --context-parallel-size {cp} " - "--model-size 7b --num-layers 4 --hybrid-override-pattern SDH* --limit-val-batches 1 " - "--no-activation-checkpointing --add-bias-output --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} --limit-val-batches 1 " - f"--seq-length 64 --hidden-dropout 0.0 --attention-dropout 0.0 --seed 42 --workers 0 " - f"--micro-batch-size {micro_batch_size} --global-batch-size 2 " - f"--adam-beta1 0 --adam-beta2 0 {additional_args}" - ) - return cmd - - -def small_training_mamba_cmd(path, max_steps, val_check, devices: int = 1, additional_args: str = ""): - cmd = ( - f"train_evo2 --mock-data --result-dir {path} --devices {devices} " - "--model-size hybrid_mamba_8b --num-layers 2 --hybrid-override-pattern M- --limit-val-batches 1 " - "--no-activation-checkpointing --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} --limit-val-batches 1 " - f"--seq-length 8 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args}" - ) - return cmd - - -def small_training_mamba_finetune_cmd( - path, max_steps, val_check, prev_ckpt, devices: int = 1, additional_args: str = "" -): - cmd = ( - f"train_evo2 --mock-data --result-dir {path} --devices {devices} " - "--model-size hybrid_mamba_8b --num-layers 2 --hybrid-override-pattern M- --limit-val-batches 1 " - "--no-activation-checkpointing --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} --limit-val-batches 1 " - f"--seq-length 16 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args} --ckpt-dir {prev_ckpt}" - ) - return cmd - - -def small_training_llama_cmd(path, max_steps, val_check, devices: int = 1, additional_args: str = ""): - cmd = ( - f"train_evo2 --no-fp32-residual-connection --mock-data --result-dir {path} --devices {devices} " - "--model-size 8B --num-layers 2 --limit-val-batches 1 " - "--no-activation-checkpointing --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} --limit-val-batches 1 " - f"--seq-length 8 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args}" - ) - return cmd - - -def small_training_llama_finetune_cmd( - path, max_steps, val_check, prev_ckpt, devices: int = 1, additional_args: str = "" -): - cmd = ( - f"train_evo2 --no-fp32-residual-connection --mock-data --result-dir {path} --devices {devices} " - "--model-size 8B --num-layers 2 --limit-val-batches 1 " - "--no-activation-checkpointing --create-tensorboard-logger --create-tflops-callback " - f"--max-steps {max_steps} --warmup-steps 1 --val-check-interval {val_check} --limit-val-batches 1 " - f"--seq-length 16 --hidden-dropout 0.1 --attention-dropout 0.1 {additional_args} --ckpt-dir {prev_ckpt}" - ) - return cmd - - -@pytest.mark.timeout(512) # Optional: fail if the test takes too long. -@pytest.mark.slow -def test_train_evo2_finetune_runs(tmp_path): - """ - This test runs the `train_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - """ - num_steps = 2 - # Note: The command assumes that `train_evo2` is in your PATH. - command = small_training_cmd(tmp_path / "pretrain", max_steps=num_steps, val_check=num_steps) - stdout_pretrain: str = run_command_in_subprocess(command=command, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" not in stdout_pretrain - - log_dir = tmp_path / "pretrain" / "evo2" - checkpoints_dir = log_dir / "checkpoints" - tensorboard_dir = log_dir / "dev" - - # Check if logs dir exists - assert log_dir.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir}" - assert len(matching_subfolders) == 1, "Only one checkpoint subfolder should be found." - command_finetune = small_training_finetune_cmd( - tmp_path / "finetune", max_steps=num_steps, val_check=num_steps, prev_ckpt=matching_subfolders[0] - ) - stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune - - log_dir_ft = tmp_path / "finetune" / "evo2" - checkpoints_dir_ft = log_dir_ft / "checkpoints" - tensorboard_dir_ft = log_dir_ft / "dev" - - # Check if logs dir exists - assert log_dir_ft.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders_ft = [ - p for p in checkpoints_dir_ft.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders_ft, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir_ft}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir_ft.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir_ft.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir_ft}" - - assert len(matching_subfolders_ft) == 1, "Only one checkpoint subfolder should be found." - - -@pytest.mark.timeout(512) # Optional: fail if the test takes too long. -@pytest.mark.slow -def test_train_evo2_mamba_finetune_runs(tmp_path): - """ - This test runs the `train_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - """ - num_steps = 2 - # Note: The command assumes that `train_evo2` is in your PATH. - command = small_training_mamba_cmd(tmp_path / "pretrain", max_steps=num_steps, val_check=num_steps) - stdout_pretrain: str = run_command_in_subprocess(command=command, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" not in stdout_pretrain - - log_dir = tmp_path / "pretrain" / "evo2" - checkpoints_dir = log_dir / "checkpoints" - tensorboard_dir = log_dir / "dev" - - # Check if logs dir exists - assert log_dir.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir}" - - assert len(matching_subfolders) == 1, "Only one checkpoint subfolder should be found." - command_finetune = small_training_mamba_finetune_cmd( - tmp_path / "finetune", max_steps=num_steps, val_check=num_steps, prev_ckpt=matching_subfolders[0] - ) - stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune - - log_dir_ft = tmp_path / "finetune" / "evo2" - checkpoints_dir_ft = log_dir_ft / "checkpoints" - tensorboard_dir_ft = log_dir_ft / "dev" - - # Check if logs dir exists - assert log_dir_ft.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders_ft = [ - p for p in checkpoints_dir_ft.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders_ft, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir_ft}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir_ft.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir_ft.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir_ft}" - - assert len(matching_subfolders_ft) == 1, "Only one checkpoint subfolder should be found." - - -@pytest.mark.timeout(512) # Optional: fail if the test takes too long. -@pytest.mark.slow -def test_train_evo2_llama_finetune_runs(tmp_path): - """ - This test runs the `train_evo2` command with mock data in a temporary directory using Llama model. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - """ - num_steps = 2 - # Note: The command assumes that `train_evo2` is in your PATH. - command = small_training_llama_cmd(tmp_path / "pretrain", max_steps=num_steps, val_check=num_steps) - stdout_pretrain: str = run_command_in_subprocess(command=command, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" not in stdout_pretrain - - log_dir = tmp_path / "pretrain" / "evo2" - checkpoints_dir = log_dir / "checkpoints" - tensorboard_dir = log_dir / "dev" - - # Check if logs dir exists - assert log_dir.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir}" - - assert len(matching_subfolders) == 1, "Only one checkpoint subfolder should be found." - command_finetune = small_training_llama_finetune_cmd( - tmp_path / "finetune", max_steps=num_steps, val_check=num_steps, prev_ckpt=matching_subfolders[0] - ) - stdout_finetune: str = run_command_in_subprocess(command=command_finetune, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout_finetune - - log_dir_ft = tmp_path / "finetune" / "evo2" - checkpoints_dir_ft = log_dir_ft / "checkpoints" - tensorboard_dir_ft = log_dir_ft / "dev" - - # Check if logs dir exists - assert log_dir_ft.exists(), "Logs folder should exist." - # Check if checkpoints dir exists - assert checkpoints_dir_ft.exists(), "Checkpoints folder does not exist." - - expected_checkpoint_suffix = f"{num_steps}.0-last" - matching_subfolders_ft = [ - p for p in checkpoints_dir_ft.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders_ft, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir_ft}." - ) - - # Check if directory with tensorboard logs exists - assert tensorboard_dir_ft.exists(), "TensorBoard logs folder does not exist." - # Recursively search for files with tensorboard logger - event_files = list(tensorboard_dir_ft.rglob("events.out.tfevents*")) - assert event_files, f"No TensorBoard event files found under {tensorboard_dir_ft}" - - assert len(matching_subfolders_ft) == 1, "Only one checkpoint subfolder should be found." - - -@pytest.mark.timeout(256) # Optional: fail if the test takes too long. -@pytest.mark.slow -def test_train_evo2_stops(tmp_path): - """ - This test runs the `train_evo2` command with mock data in a temporary directory. - It uses the temporary directory provided by pytest as the working directory. - The command is run in a subshell, and we assert that it returns an exit code of 0. - """ - max_steps = 500000 - early_stop_steps = 4 - val_check = 2 - additional_args = f"--early-stop-on-step {early_stop_steps}" - # Expected location of logs and checkpoints - log_dir = tmp_path / "evo2" - checkpoints_dir = log_dir / "checkpoints" - - assert not log_dir.exists(), "Logs folder shouldn't exist yet." - - # Note: The command assumes that `train_evo2` is in your PATH. - command = small_training_cmd(tmp_path, max_steps=max_steps, val_check=val_check, additional_args=additional_args) - command_parts_no_program = shlex.split(command)[1:] - args = parse_args(args=command_parts_no_program) - train_stdout, trainer = run_train_with_std_redirect(args) - - assert f"Training epoch 0, iteration 0/{early_stop_steps - 1}" in train_stdout - # Extract and validate global steps - global_steps = extract_global_steps_from_log(train_stdout) - assert global_steps[0] == 0 - assert global_steps[-1] == (early_stop_steps - 1) - assert trainer.global_step == early_stop_steps - assert len(global_steps) == early_stop_steps - - expected_checkpoint_suffix = f"{early_stop_steps}.0-last" - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - - # Check if any subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_suffix}' found in {checkpoints_dir}." - ) - - assert "reduced_train_loss" in trainer.logged_metrics # validation logging on by default - assert "TFLOPS_per_GPU" in trainer.logged_metrics # ensuring that tflops logger can be added - assert "train_step_timing in s" in trainer.logged_metrics - - -@pytest.mark.parametrize( - "additional_args", - [ - pytest.param("", id="no_fp8"), - pytest.param( - "--fp8", - marks=[ - pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8), - ], - id="fp8", - ), - ], -) -@pytest.mark.timeout(512) # Optional: fail if the test takes too long. -@pytest.mark.slow -def test_train_evo2_stop_at_max_steps_and_continue(tmp_path, additional_args): - max_steps_first_run = 4 - max_steps_second_run = 6 - val_check_interval = 2 - # Expected location of logs and checkpoints - log_dir = tmp_path / "evo2" - checkpoints_dir = log_dir / "checkpoints" - - command_first_run = small_training_cmd( - tmp_path, max_steps_first_run, val_check_interval, additional_args=additional_args - ) - - # The first training command to finish at max_steps_first_run - stdout_first_run = run_command_in_subprocess(command=command_first_run, path=str(tmp_path)) - - assert f"Training epoch 0, iteration 0/{max_steps_first_run - 1}" in stdout_first_run - # Extract and validate global steps - global_steps_first_run = extract_global_steps_from_log(stdout_first_run) - - assert global_steps_first_run[0] == 0 - assert global_steps_first_run[-1] == max_steps_first_run - 1 - assert len(global_steps_first_run) == max_steps_first_run - - expected_checkpoint_first_run_suffix = f"{max_steps_first_run}.0-last" - # Check if checkpoints dir exists - assert checkpoints_dir.exists(), "Checkpoints folder does not exist." - # Check if any ckpt subfolder ends with the expected suffix - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_first_run_suffix in p.name) - ] - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_first_run_suffix}' found in {checkpoints_dir}." - ) - - # The second training command to continue from max_steps_first_run and finish at max_steps_second_run - command_second_run = small_training_cmd( - tmp_path, max_steps_second_run, val_check_interval, additional_args=additional_args - ) - stdout_second_run = run_command_in_subprocess(command=command_second_run, path=str(tmp_path)) - global_steps_second_run = extract_global_steps_from_log(stdout_second_run) - - assert global_steps_second_run[0] == max_steps_first_run - assert global_steps_second_run[-1] == max_steps_second_run - 1 - assert len(global_steps_second_run) == max_steps_second_run - max_steps_first_run - - expected_checkpoint_second_run_suffix = f"{max_steps_second_run}.0-last" - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_second_run_suffix in p.name) - ] - assert matching_subfolders, ( - f"No checkpoint subfolder ending with '{expected_checkpoint_second_run_suffix}' found in {checkpoints_dir}." - ) - - -@pytest.fixture(scope="session") -def dataset_config(request): - """Get dataset directory and training config from command line options or environment variables. - - Users can provide dataset paths via: - - Command line: pytest --dataset-dir=/path/to/data --training-config=/path/to/config.yaml - - Environment: DATASET_DIR=/path/to/data TRAINING_CONFIG=/path/to/config.yaml pytest - - If not provided, tests will fall back to --mock-data. - """ - # Try to get from pytest command line options first - dataset_dir = request.config.getoption("--dataset-dir", default=None) - training_config = request.config.getoption("--training-config", default=None) - - # Fall back to environment variables - if not dataset_dir: - dataset_dir = os.environ.get("DATASET_DIR") - if not training_config: - training_config = os.environ.get("TRAINING_CONFIG") - - return {"dataset_dir": dataset_dir, "training_config": training_config} - - -@pytest.fixture(scope="session") -def initial_checkpoint(): - """Load the initial checkpoint for distributed training tests.""" - from bionemo.core.data.load import load - - return load("evo2/7b-8k:1.0") - - -@pytest.fixture(scope="session") -def base_checkpoint(tmp_path_factory, initial_checkpoint, dataset_config): - """Create a base checkpoint by training one step with no parallelism. - - This fixture is session-scoped, so it creates the checkpoint once and reuses it - across all parametrized test cases, significantly improving test performance. - """ - num_steps = 1 - tmp_path = tmp_path_factory.mktemp("base_checkpoint_session") - base_path = tmp_path / "base_training" - - # Create command with the initial checkpoint and dataset (if provided) - cmd = distributed_training_cmd( - path=base_path, - max_steps=num_steps, - val_check=num_steps, - num_devices=1, - dp=1, - tp=1, - cp=1, - pp=1, - dataset_dir=dataset_config["dataset_dir"], - training_config=dataset_config["training_config"], - additional_args=f"--ckpt-dir {initial_checkpoint}", - ) - - # Run training - stdout = run_command_in_subprocess(command=cmd, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout - - # Find the resulting checkpoint - log_dir = base_path / "evo2" - checkpoints_dir = log_dir / "checkpoints" - # Lightning uses 0-indexed step counting, so after max_steps=1, we're at step 0 - expected_checkpoint_suffix = "step=0" - - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert len(matching_subfolders) == 1, "Expected exactly one checkpoint subfolder" - return matching_subfolders[0] - - -@pytest.mark.parametrize( - "dp,cp,tp,pp", - [ - pytest.param(2, 1, 1, 1, id="data_parallel"), - pytest.param(1, 2, 1, 1, id="context_parallel"), - pytest.param(1, 1, 2, 1, id="tensor_parallel"), - pytest.param(1, 1, 1, 2, id="pipeline_parallel"), - ], -) -@pytest.mark.timeout(900) -@pytest.mark.slow -@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Test requires at least 2 GPUs") -def test_distributed_training_gradient_equivalence( - tmp_path, initial_checkpoint, base_checkpoint, dataset_config, dp, cp, tp, pp -): - """Test that gradients are equivalent across different distributed training strategies.""" - # NOTE: Megatron Core is changing its distributed checkpoint format soon. This test needs to be updated after release 0.14. - num_steps = 1 - - # Calculate total devices needed - num_devices = dp * cp * tp * pp - assert num_devices == 2, ( - f"Test is designed for 2 GPUs but got {num_devices} for dp={dp}, cp={cp}, tp={tp}, pp={pp}" - ) - - # Create parallel training checkpoint - parallel_path = tmp_path / f"parallel_dp{dp}_cp{cp}_tp{tp}_pp{pp}" - - cmd = distributed_training_cmd( - path=parallel_path, - max_steps=num_steps, - val_check=num_steps, - num_devices=num_devices, - dp=dp, - tp=tp, - cp=cp, - pp=pp, - dataset_dir=dataset_config["dataset_dir"], - training_config=dataset_config["training_config"], - additional_args=f"--ckpt-dir {initial_checkpoint}", - ) - - # Run distributed training - stdout = run_command_in_subprocess(command=cmd, path=str(tmp_path)) - assert "Restoring model weights from RestoreConfig(path='" in stdout - - # Find the resulting checkpoint - log_dir = parallel_path / "evo2" - checkpoints_dir = log_dir / "checkpoints" - # Lightning uses 0-indexed step counting, so after max_steps=1, we're at step 0 - expected_checkpoint_suffix = "step=0" - - matching_subfolders = [ - p for p in checkpoints_dir.iterdir() if p.is_dir() and (expected_checkpoint_suffix in p.name) - ] - - assert len(matching_subfolders) == 1, "Expected exactly one checkpoint subfolder" - parallel_checkpoint = matching_subfolders[0] - - # Compare gradients/optimizer states between base and parallel distributed training - print(f"Base checkpoint: {base_checkpoint}") - print(f"Parallel checkpoint (dp={dp}, cp={cp}, tp={tp}, pp={pp}): {parallel_checkpoint}") - - # Ensure both checkpoints exist before comparison - assert base_checkpoint.exists(), "Base checkpoint should exist" - assert parallel_checkpoint.exists(), "Parallel checkpoint should exist" - - # Use the custom gradient comparison logic to verify optimizer states match - # This implements theorem 5.3 of https://www.arxiv.org/pdf/2506.09280 for gradient equivalence - checkpoint_dirs = [str(base_checkpoint / "weights"), str(parallel_checkpoint / "weights")] - assert_optimizer_states_match(checkpoint_dirs) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_evo2.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_evo2.py deleted file mode 100644 index 49d86c7f5f..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_evo2.py +++ /dev/null @@ -1,900 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import logging -import os -import time -from pathlib import Path -from typing import Any, Callable, Literal - -import numpy as np -import pandas as pd -import pytest -import torch -from megatron.core.inference.common_inference_params import CommonInferenceParams -from megatron.core.transformer.enums import AttnBackend -from megatron.core.transformer.module import Float16Module -from nemo.collections import llm -from nemo.collections.llm.gpt.model.hyena import HyenaInferenceContext -from nemo.collections.llm.inference import generate -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning.io.pl import MegatronCheckpointIO - -from bionemo.core.data.load import load -from bionemo.llm.utils.weight_utils import ( - MegatronModelType, - _key_in_filter, - _munge_key_megatron_to_nemo2, - _munge_sharded_tensor_key_megatron_to_nemo2, -) -from bionemo.testing.megatron_parallel_state_utils import distributed_model_parallel_state -from bionemo.testing.torch import check_fp8_support - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) # Capture all levels in the logger itself - - -def determine_memory_requirement_and_skip_if_not_met(ckpt_name: str, test_name: str | None = None) -> int: - """Determine the memory requirement for a given checkpoint and test_name. - - The memory requirement recorded is not discriminated for flash_decode True or False. The memory requirement - recorded depend on checkpoint name only through model size. - - Args: - ckpt_name: str - the name of the checkpoint to test - test_name: str | None - the name of the test that is to be run. - Returns: - The input sequence length cap, for the model sin the checkpoint, given certain memory requirements. - If the memory requirement is not met, the test is skipped. - """ - - # memory_needed_by_test: max reserved rounded up + 1, for stand-alone test - memory_needed_df = pd.DataFrame( - [ - { - "test_name": "test_forward", - "model_size": "1b", - "seq_len_cap": 6000, - "memory_needed_by_test": 18, - }, # checked both variants in isolation - { - "test_name": "test_forward", - "model_size": "7b", - "seq_len_cap": 4000, - "memory_needed_by_test": 33, - }, # checked both variants in isolation - { - "test_name": "test_forward_manual", - "model_size": "1b", - "seq_len_cap": 6000, - "memory_needed_by_test": 18, - }, # checked both variants in isolation - { - "test_name": "test_forward_manual", - "model_size": "7b", - "seq_len_cap": 4000, - "memory_needed_by_test": 21, - }, # checked both variants in isolation - { - "test_name": "test_batch_generate", - "model_size": "1b", - "seq_len_cap": -1, - "memory_needed_by_test": 16, - }, # checked both variants in isolation - { - "test_name": "test_batch_generate", - "model_size": "7b", - "seq_len_cap": -1, - "memory_needed_by_test": 43, - }, # checked both variants in isolation - { - "test_name": "test_batch_generate_coding_sequences", - "model_size": "1b", - "seq_len_cap": -1, - "memory_needed_by_test": 6, - }, # checked both variants in isolation - { - "test_name": "test_batch_generate_coding_sequences", - "model_size": "7b", - "seq_len_cap": -1, - "memory_needed_by_test": 21, - }, # checked both variants in isolation - { - "test_name": "test_generate_speed", - "model_size": "1b", - "seq_len_cap": -1, - "memory_needed_by_test": -1, - }, # skipped for now until Anton's changes - { - "test_name": "test_generate_speed", - "model_size": "7b", - "seq_len_cap": -1, - "memory_needed_by_test": -1, - }, # skipped for now until Anton's changes - ], - columns=["test_name", "model_size", "seq_len_cap", "memory_needed_by_test"], - ) - memory_needed_df_wi_index = memory_needed_df.set_index(["test_name", "model_size"]) - - if "1b" in ckpt_name: - model_size = "1b" - elif "7b" in ckpt_name: - model_size = "7b" - else: - raise ValueError(f"{ckpt_name=} is not supported for testing") - - seq_len_cap = memory_needed_df_wi_index.loc[(test_name, model_size), "seq_len_cap"] - memory_needed_by_test = memory_needed_df_wi_index.loc[(test_name, model_size), "memory_needed_by_test"] - - # skip_condition_flash = flash_decode is None or flash_decode - gb_available = torch.cuda.mem_get_info()[0] / 1024**3 - skip_condition = gb_available < memory_needed_by_test - if skip_condition: - pytest.skip( - ", ".join( - [ - f"Inference API requires at least {memory_needed_by_test}GB of available memory for {model_size} models", - f"{gb_available=}", - ] - ) - ) - return seq_len_cap - - -def load_weights_sharded_inplace_nemo2_to_mcore( - model: MegatronModelType, - distributed_checkpoint_dir: str | Path, - skip_keys_with_these_prefixes: set[str], - ckpt_format: Literal["zarr", "torch_dist"] = "torch_dist", -): - logger.info("Start setting up state dict") - sharded_state_dict = { - _munge_key_megatron_to_nemo2(k): _munge_sharded_tensor_key_megatron_to_nemo2(v) - for k, v in model.sharded_state_dict().items() - if not _key_in_filter( - k, skip_keys_with_these_prefixes - ) # and "_extra_state" not in k # extra state is needed for fp8 sharded states - } - # Load the checkpoint with strict=false to allow for missing keys (backward compatibility) - # Error: megatron.core.dist_checkpointing.core.CheckpointingException: - # Object shard ... module.decoder.final_norm._extra_state/shard_0_1.pt not found - MegatronCheckpointIO(save_ckpt_format=ckpt_format).load_checkpoint( - distributed_checkpoint_dir, sharded_state_dict=sharded_state_dict, strict=False - ) - - -@pytest.mark.parametrize("seq_len", [8_192, 16_384]) -def test_golden_values_top_k_logits_and_cosine_similarity(seq_len: int): - try: - evo2_1b_checkpoint_weights: Path = load("evo2/1b-8k:1.0") / "weights" - gold_standard_no_fp8 = load("evo2/1b-8k-nofp8-te-goldvalue-testdata-A6000:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - with distributed_model_parallel_state(), torch.no_grad(): - hyena_config = llm.Hyena1bConfig(use_te=True, seq_length=seq_len) - tokenizer = get_nmt_tokenizer( - "byte-level", - ) - raw_megatron_model = hyena_config.configure_model(tokenizer).eval().cuda() - device = raw_megatron_model.parameters().__next__().device - load_weights_sharded_inplace_nemo2_to_mcore(raw_megatron_model, evo2_1b_checkpoint_weights, {}, "torch_dist") - model = Float16Module(hyena_config, raw_megatron_model) - input_seq = "GAAATTAGCGCGTCCGGAATGATACGAGGGGAAACGAAATTTTGAATTAATGGAGAAAAAAGACGAGAAACCTTAAGCAAAAAAATTTTAGCTTCGAATATTTATTAATTTCTGAGATGTTGTTAAACGATTTTCGATTCCAAGTTGTGCGCACGAACGTTATTGCAAATAAATGCTGCTTATTCGGATGTTTCCACGATCTTTGTTGCAATGGTAGTCGAGTACCCGATAACCCAATTTCGTTACATCGGCCTATCTGTAGAATATCCAATCTATGGTTCATAAAAAATCTGATCGTTTGTTTTTAAGAAATTAAACGCGTTAAATTGAACGAATTTCGAATACCGGTCTTAGCGAAGGACCTCCCCTCTTGCTTGCGTATTGCCCCGCGAAATTTCTTTTCGGCGATGAACGATACAAAAAATTCTATCGAATGTTACTTCTATTCTCTGCCTCGTCTATGACTTGGAGATTGGTCTATGTCGTTCGTTTTCTCGCGAGTTTCCAATATGTCCGTAGTATGTGAACGCTGGTATTCGTGAAGATAAATTATTGTTTTTACAATTTCTTTCAAAAATATATAATTTTAATTTATATAAT" - input_ids = torch.tensor(tokenizer.text_to_ids(input_seq)).int().unsqueeze(0).to(device) - position_ids = torch.arange(len(input_seq)).unsqueeze(0).to(device) - attention_mask = None - outputs = model(input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask) - gold_standard_no_fp8_tensor = torch.load(gold_standard_no_fp8).to(device=outputs.device, dtype=outputs.dtype) - top_2_logits_golden = gold_standard_no_fp8_tensor.topk(dim=-1, sorted=True, largest=True, k=2) - ambiguous_positions = ( - top_2_logits_golden.values[..., 0] - top_2_logits_golden.values[..., 1] - ).abs() < 9.9e-3 # hand tunes for observed diffs from A100 and H100 - n_ambiguous = ambiguous_positions.sum() - - assert n_ambiguous <= 19 - - our_char_indices = outputs.softmax(dim=-1).argmax(dim=-1).flatten().detach().cpu().numpy() - not_amb_positions = ~ambiguous_positions.flatten().cpu().numpy() - # Generate our string, removing the ambiguous positions. - our_generation_str = "".join([chr(idx) for idx in our_char_indices[not_amb_positions].tolist()]) - # Do the same to the golden values - gold_std_char_indices = ( - gold_standard_no_fp8_tensor.softmax(dim=-1).argmax(dim=-1).flatten().detach().cpu().numpy() - ) - # Make the string - gold_std_str = "".join([chr(idx) for idx in gold_std_char_indices[not_amb_positions].tolist()]) - array_eq = np.array(list(our_generation_str)) == np.array(list(gold_std_str)) - # Ensure the two strings are approximately equal. - if array_eq.mean() < 0.95: - array_eq = np.array(list(our_generation_str)) == np.array(list(gold_std_str)) - mismatch_positions = np.arange(outputs.shape[1])[not_amb_positions][~array_eq] - err_str = f"Fraction of expected mismatch positions exceeds 5%: {(~array_eq).mean()}" - err_str += f"Mismatch positions: {mismatch_positions}" - err_str += f"Fraction of unexpected mismatch positions: {(~array_eq).mean()}" - top_two_logits_at_mismatch = top_2_logits_golden.values[0, mismatch_positions] - top_2_logits_pred = outputs.topk(dim=-1, sorted=True, largest=True, k=2) - top_two_pred_logits_at_mismatch = top_2_logits_pred.values[0, mismatch_positions] - err_str += f"Top two logits at mismatch positions: {top_two_logits_at_mismatch}" - err_str += f"Top two pred logits at mismatch positions: {top_two_pred_logits_at_mismatch}" - raise AssertionError(err_str) - - # Verify that the top-4 from the logit vectors are the same. - # A: 65 - # C: 67 - # G: 71 - # T: 84 - # Find the corresponding ATGC and compare the two vectors with those four values. - # Ensures that the top 4 ascii characters of the output are ACGT. - top_4_inds = outputs.topk(dim=-1, sorted=False, largest=True, k=4) - assert set(top_4_inds.indices.flatten().cpu().numpy().tolist()).issubset((65, 67, 71, 84)) - output_vector = outputs[0, -1, top_4_inds.indices] - - # Then its the top 4 indices of the gold standard tensor - top_4_inds_golden = gold_standard_no_fp8_tensor.topk(dim=-1, sorted=False, largest=True, k=4) - assert set(top_4_inds_golden.indices.flatten().cpu().numpy().tolist()).issubset((65, 67, 71, 84)) - gold_standard_no_fp8_vector = gold_standard_no_fp8_tensor[0, -1, top_4_inds_golden.indices] - - # Run cosine similarity between the two vectors. - logit_similarity = torch.nn.functional.cosine_similarity(output_vector, gold_standard_no_fp8_vector, dim=-1) - assert torch.mean(torch.abs(logit_similarity - torch.ones_like(logit_similarity))) < 0.03 - - -@pytest.mark.skip(reason="test fails on main, not due to #1058") -@pytest.mark.slow -def test_golden_values_top_k_logits_and_cosine_similarity_7b(seq_len: int = 8_192): - try: - evo2_7b_checkpoint_weights: Path = load("evo2/7b-8k:1.0") / "weights" - gold_standard_no_fp8 = load("evo2/7b-8k-nofp8-te-goldvalue-testdata:1.0") - except ValueError as e: - if e.args[0].endswith("does not have an NGC URL."): - raise ValueError( - "Please re-run test with `BIONEMO_DATA_SOURCE=pbss py.test ...`, " - "one or more files are missing from ngc." - ) - else: - raise e - with distributed_model_parallel_state(), torch.no_grad(): - hyena_config = llm.Hyena7bConfig(use_te=True, seq_length=seq_len) - tokenizer = get_nmt_tokenizer( - "byte-level", - ) - raw_megatron_model = hyena_config.configure_model(tokenizer).eval().cuda() - device = raw_megatron_model.parameters().__next__().device - load_weights_sharded_inplace_nemo2_to_mcore(raw_megatron_model, evo2_7b_checkpoint_weights, {}, "torch_dist") - model = Float16Module(hyena_config, raw_megatron_model) - input_seq = "GAAATTAGCGCGTCCGGAATGATACGAGGGGAAACGAAATTTTGAATTAATGGAGAAAAAAGACGAGAAACCTTAAGCAAAAAAATTTTAGCTTCGAATATTTATTAATTTCTGAGATGTTGTTAAACGATTTTCGATTCCAAGTTGTGCGCACGAACGTTATTGCAAATAAATGCTGCTTATTCGGATGTTTCCACGATCTTTGTTGCAATGGTAGTCGAGTACCCGATAACCCAATTTCGTTACATCGGCCTATCTGTAGAATATCCAATCTATGGTTCATAAAAAATCTGATCGTTTGTTTTTAAGAAATTAAACGCGTTAAATTGAACGAATTTCGAATACCGGTCTTAGCGAAGGACCTCCCCTCTTGCTTGCGTATTGCCCCGCGAAATTTCTTTTCGGCGATGAACGATACAAAAAATTCTATCGAATGTTACTTCTATTCTCTGCCTCGTCTATGACTTGGAGATTGGTCTATGTCGTTCGTTTTCTCGCGAGTTTCCAATATGTCCGTAGTATGTGAACGCTGGTATTCGTGAAGATAAATTATTGTTTTTACAATTTCTTTCAAAAATATATAATTTTAATTTATATAAT" - input_ids = torch.tensor(tokenizer.text_to_ids(input_seq)).int().unsqueeze(0).to(device) - position_ids = torch.arange(len(input_seq)).unsqueeze(0).to(device) - attention_mask = None - outputs = model(input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask) - gold_standard_no_fp8_tensor = torch.load(gold_standard_no_fp8).to(device=outputs.device, dtype=outputs.dtype) - is_fp8_supported, compute_capability, device_info = check_fp8_support(device.index) - - if is_fp8_supported and compute_capability == "9.0": - # Most rigurous assertion for output equivalence currently works on devices that are new enough to - # support FP8. - logger.info( - f"Device {device_info} ({compute_capability}) supports FP8 with 9.0 compute capability, the " - "same configuration as the gold standard was generated with. Running most rigurous assertion." - ) - torch.testing.assert_close(outputs, gold_standard_no_fp8_tensor) - else: - logger.info( - f"Device {device_info} ({compute_capability}) does not support FP8. Running less rigurous assertions." - ) - top_2_logits_golden = gold_standard_no_fp8_tensor.topk(dim=-1, sorted=True, largest=True, k=2) - ambiguous_positions = ( - top_2_logits_golden.values[..., 0] - top_2_logits_golden.values[..., 1] - ).abs() < 9.9e-3 # hand tunes for observed diffs from A100 and H100 with 7b model - n_ambiguous = ambiguous_positions.sum() - - assert n_ambiguous <= 19 - - our_char_indices = outputs.softmax(dim=-1).argmax(dim=-1).flatten().detach().cpu().numpy() - not_amb_positions = ~ambiguous_positions.flatten().cpu().numpy() - # Generate our string, removing the ambiguous positions. - our_generation_str = "".join([chr(idx) for idx in our_char_indices[not_amb_positions].tolist()]) - # Do the same to the golden values - gold_std_char_indices = ( - gold_standard_no_fp8_tensor.softmax(dim=-1).argmax(dim=-1).flatten().detach().cpu().numpy() - ) - # Make the string - gold_std_str = "".join([chr(idx) for idx in gold_std_char_indices[not_amb_positions].tolist()]) - - # Ensure the two strings are equal. - assert all(np.array(list(our_generation_str)) == np.array(list(gold_std_str))) - - # Verify that the top-4 from the logit vectors are the same. - # A: 65 - # C: 67 - # G: 71 - # T: 84 - # Find the corresponding ATGC and compare the two vectors with those four values. - # Ensures that the top 4 ascii characters of the output are ACGT. - top_4_inds = outputs.topk(dim=-1, sorted=False, largest=True, k=4) - assert set(top_4_inds.indices.flatten().cpu().numpy().tolist()).issubset((65, 67, 71, 84)) - output_vector = outputs[0, -1, top_4_inds.indices] - - # Then its the top 4 indices of the gold standard tensor - top_4_inds_golden = gold_standard_no_fp8_tensor.topk(dim=-1, sorted=False, largest=True, k=4) - assert set(top_4_inds_golden.indices.flatten().cpu().numpy().tolist()).issubset((65, 67, 71, 84)) - gold_standard_no_fp8_vector = gold_standard_no_fp8_tensor[0, -1, top_4_inds_golden.indices] - - # Run cosine similarity between the two vectors. - logit_similarity = torch.nn.functional.cosine_similarity(output_vector, gold_standard_no_fp8_vector, dim=-1) - assert torch.mean(torch.abs(logit_similarity - torch.ones_like(logit_similarity))) < 9.9e-3 - - -@pytest.fixture -def sequences(): - with (Path(__file__).parent / "data" / "prompts.csv").open(newline="") as f: - from csv import DictReader - - reader = DictReader(f) - return [row["Sequence"] for row in reader] - - -@pytest.fixture -def coding_sequences(): - with (Path(__file__).parent / "data" / "cds_prompts.csv").open(newline="") as f: - from csv import DictReader - - reader = DictReader(f) - return [row["Sequence"] for row in reader] - - -def get_trainer(pipeline_parallel=1): - import nemo.lightning as nl - - fp8 = True - full_fp8 = False - return nl.Trainer( - accelerator="gpu", - devices=pipeline_parallel, - strategy=nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=pipeline_parallel, - context_parallel_size=1, - pipeline_dtype=torch.bfloat16, - ckpt_load_optimizer=False, - ckpt_save_optimizer=False, - ckpt_async_save=False, - save_ckpt_format="torch_dist", - ckpt_load_strictness="log_all", - ), - log_every_n_steps=1, - limit_val_batches=10, - num_sanity_val_steps=0, - plugins=nl.MegatronMixedPrecision( - precision="bf16-mixed", - params_dtype=torch.bfloat16, - # Only use FP8 in this plugin when using full FP8 precision and FP8. - # Otherwise use vortex_style_fp8 in the model config. - fp8="hybrid" if fp8 and full_fp8 else None, - fp8_amax_history_len=16 if fp8 and full_fp8 else 1, - fp8_amax_compute_algo="max" if fp8 and full_fp8 else "most_recent", - ), - ) - - -# here: pass arg through to inference_batch_times_seqlen_threshold and inference_max_seq_length -def get_model_and_tokenizer_raw(ckpt_dir_or_name: Path | str, seq_len_max: int = 8192, **kwargs): - """ - Load a model and tokenizer from a checkpoint directory or name. If you supply a Path argument then we assume that - the path is already a checkpoint directory, otherwise we load the checkpoint from NGC or PBSS depending on - the environment variable BIONEMO_DATA_SOURCE. - """ - trainer = get_trainer() - from bionemo.core.data.load import load - - if isinstance(ckpt_dir_or_name, Path): - ckpt_dir: Path = ckpt_dir_or_name - else: - ckpt_dir: Path = load(ckpt_dir_or_name) - from nemo.collections.llm import inference - - inference_wrapped_model, mcore_tokenizer = inference.setup_model_and_tokenizer( - path=ckpt_dir, - trainer=trainer, - params_dtype=torch.bfloat16, - inference_batch_times_seqlen_threshold=seq_len_max, - inference_max_seq_length=seq_len_max, - recompute_granularity=None, - recompute_num_layers=None, - recompute_method=None, - **kwargs, - ) - return inference_wrapped_model, mcore_tokenizer - - -def get_model_and_tokenizer(ckpt_name, vortex_style_fp8=False, seq_len_max: int = 8192, **kwargs): - return get_model_and_tokenizer_raw(ckpt_name, vortex_style_fp8=vortex_style_fp8, seq_len_max=seq_len_max, **kwargs) - - -def get_model_and_tokenizer_ignore_vortex(ckpt_name, vortex_style_fp8=False, seq_len_max: int = 8192, **kwargs): - # Capture and remove the vortex_style_fp8 argument for mamba models. - return get_model_and_tokenizer_raw(ckpt_name, seq_len_max=seq_len_max, **kwargs) - - -def calc_matchrate(*, tokenizer, in_seq, logits): - softmax_logprobs = torch.log_softmax(logits, dim=-1) - softmax_logprobs = softmax_logprobs[:, :-1] - o = softmax_logprobs.argmax(dim=-1)[0] - if hasattr(tokenizer, "tokenize"): - i = torch.tensor(tokenizer.tokenize(in_seq[1:]), device=o.device) - else: - i = torch.tensor(tokenizer.text_to_ids(in_seq[1:]), device=o.device) - return (i == o).sum().item() / (i.size()[0] - 1) - - -def check_matchrate(*, ckpt_name, matchrate, assert_matchrate=True): - logger.info(f"{ckpt_name} {matchrate = }") - if "1b-" in ckpt_name: - if assert_matchrate: - assert matchrate > 0.70, (ckpt_name, matchrate) - else: - print(f"{ckpt_name} {matchrate = }") - elif "7b-" in ckpt_name: - if assert_matchrate: - assert matchrate > 0.79, (ckpt_name, matchrate) - else: - print(f"{ckpt_name} {matchrate = }") - else: - raise NotImplementedError - - -@pytest.mark.parametrize( - "ckpt_name,expected_matchpercents", - [ - ("evo2/1b-8k-bf16:1.0", [96.27, 67.93, 77.50, 80.30]), - ("evo2/1b-8k:1.0", [96.27, 67.93, 77.50, 80.30]), - ("evo2/7b-8k:1.0", [97.60, 89.63, 80.03, 84.57]), - ("evo2/7b-1m:1.0", [97.60, 89.63, 80.03, 84.57]), - ], -) -def test_forward(sequences: list[str], ckpt_name: str, expected_matchpercents: list[float]): - assert len(sequences) > 0 - seq_len_cap = determine_memory_requirement_and_skip_if_not_met( - ckpt_name, test_name=inspect.currentframe().f_code.co_name - ) - - is_fp8_supported, compute_capability, device_info = check_fp8_support(torch.cuda.current_device()) - skip = "evo2/1b-8k:" in ckpt_name and not is_fp8_supported - if skip: - # This checkpoint is sensitive to FP8, so we skip it if it is not supported on the current device. - pytest.skip(f"Skipping {ckpt_name} because it is not supported on {device_info} ({compute_capability})") - vortex_style_fp8 = is_fp8_supported and "bf16" not in ckpt_name - inference_wrapped_model, mcore_tokenizer = get_model_and_tokenizer( - ckpt_name, vortex_style_fp8=vortex_style_fp8, flash_decode=True, enable_flash_decode=True - ) - matchrates = [] - for seq in sequences: - seq = seq[:seq_len_cap] # TODO: artificial limit, megatron uses more memory. Vortex can process full sequences - with torch.no_grad(): - device = torch.cuda.current_device() - tokens = torch.tensor([mcore_tokenizer.tokenize(seq)], device=device) - forward_args = { - "tokens": tokens, - "position_ids": None, - "attention_mask": None, - } - - inference_wrapped_model.prep_model_for_inference(prompts_tokens=None) - # Ensure full-sequence logits are materialized for tests expecting [B, S, V] - inference_wrapped_model.inference_context.materialize_only_last_token_logits = False - logits = inference_wrapped_model.run_one_forward_step(forward_args) - inference_wrapped_model.inference_context.reset() - - from megatron.core.inference.communication_utils import broadcast_from_last_pipeline_stage - - batch_size, context_length, vocab_size = 1, len(seq), 512 - logits = broadcast_from_last_pipeline_stage( - [batch_size, context_length, vocab_size], - dtype=inference_wrapped_model.inference_wrapper_config.params_dtype, - tensor=logits, - ) - - matchrate = calc_matchrate(tokenizer=mcore_tokenizer, in_seq=seq, logits=logits) - matchrates.append(matchrate) - check_matchrate(ckpt_name=ckpt_name, matchrate=matchrate, assert_matchrate=False) - assert len(matchrates) == len(expected_matchpercents) - matchperc_print = [f"{m * 100.0:.1f}%" for m in matchrates] - matchperc_print_expected = [f"{ep:.1f}%" for ep in expected_matchpercents] - assert all(m * 100.0 >= 0.95 * ep for m, ep in zip(matchrates, expected_matchpercents)), ( - f"Expected at least 95% of {matchperc_print_expected=}, got {matchperc_print=}" - ) - - -@pytest.mark.parametrize( - "ckpt_name,expected_matchpercents,flash_decode", - [ - # Try flash decode with one and not the other to verify that both paths work. - ("evo2/1b-8k-bf16:1.0", [96.27, 67.93, 77.50, 80.30], True), - ("evo2/1b-8k:1.0", [96.27, 67.93, 77.50, 80.30], False), - ("evo2/7b-8k:1.0", [97.60, 89.63, 80.03, 84.57], False), - ("evo2/7b-1m:1.0", [97.60, 89.63, 80.03, 84.57], False), - ], -) -def test_forward_manual(sequences: list[str], ckpt_name: str, expected_matchpercents: list[float], flash_decode: bool): - assert len(sequences) > 0 - seq_len_cap = determine_memory_requirement_and_skip_if_not_met( - ckpt_name, test_name=inspect.currentframe().f_code.co_name - ) - - is_fp8_supported, compute_capability, device_info = check_fp8_support(torch.cuda.current_device()) - skip = "evo2/1b-8k:" in ckpt_name and not is_fp8_supported - - vortex_style_fp8 = is_fp8_supported and "bf16" not in ckpt_name - if skip: - # This checkpoint is sensitive to FP8, so we skip it if it is not supported on the current device. - pytest.skip(f"Skipping {ckpt_name} because it is not supported on {device_info} ({compute_capability})") - with distributed_model_parallel_state(), torch.no_grad(): - tokenizer = get_nmt_tokenizer( - "byte-level", - ) - flash_decode_kwargs: dict[str, Any] = {"flash_decode": flash_decode} - if flash_decode: - flash_decode_kwargs["attention_backend"] = AttnBackend.flash - if "1b-8k" in ckpt_name: - model_config = llm.Hyena1bConfig( - use_te=True, - seq_length=8192, - vortex_style_fp8=vortex_style_fp8, - **flash_decode_kwargs, - ) - elif "7b-8k" in ckpt_name: - model_config = llm.Hyena7bConfig( - use_te=True, - seq_length=8192, - vortex_style_fp8=vortex_style_fp8, - **flash_decode_kwargs, - ) - elif "7b-1m" in ckpt_name: - model_config = llm.Hyena7bARCLongContextConfig( - use_te=True, - seq_length=8192, - vortex_style_fp8=vortex_style_fp8, - **flash_decode_kwargs, - ) - else: - raise NotImplementedError - ckpt_weights: Path = load(ckpt_name) / "weights" - raw_megatron_model = model_config.configure_model(tokenizer).eval().cuda() - device = raw_megatron_model.parameters().__next__().device - load_weights_sharded_inplace_nemo2_to_mcore(raw_megatron_model, ckpt_weights, {}, "torch_dist") - model = Float16Module(model_config, raw_megatron_model) - if flash_decode: - inference_context = HyenaInferenceContext(max_batch_size=1, max_sequence_length=8192) - # Ensure full-sequence logits are materialized for tests expecting [B, S, V] - inference_context.materialize_only_last_token_logits = False - forward_kwargs = {"runtime_gather_output": True, "inference_context": inference_context} - else: - forward_kwargs = {} - matchrates = [] - for seq in sequences: - seq = seq[ - :seq_len_cap - ] # TODO: artificial limit, megatron uses more memory. Vortex can process full sequences - with torch.no_grad(): - device = torch.cuda.current_device() - # tokens = torch.tensor([tokenizer.tokenize(seq)], device=device) - input_ids = torch.tensor(tokenizer.text_to_ids(seq)).int().unsqueeze(0).to(device) - attention_mask = None - # when labels is None, the model returns logits - logits = model( - input_ids=input_ids, - position_ids=None, - attention_mask=attention_mask, - labels=None, - **forward_kwargs, - ) - if flash_decode: - forward_kwargs["inference_context"].reset() - matchrate = calc_matchrate(tokenizer=tokenizer, in_seq=seq, logits=logits) - matchrates.append(matchrate) - check_matchrate(ckpt_name=ckpt_name, matchrate=matchrate, assert_matchrate=False) - assert len(matchrates) == len(expected_matchpercents) - matchperc_print = [f"{m * 100.0:.1f}%" for m in matchrates] - matchperc_print_expected = [f"{ep:.1f}%" for ep in expected_matchpercents] - assert all(m * 100.0 >= 0.95 * ep for m, ep in zip(matchrates, expected_matchpercents)), ( - f"Expected at least 95% of {matchperc_print_expected=}, got {matchperc_print=}" - ) - - -def mid_point_split(*, seq, num_tokens: int | None = None, fraction: float = 0.5): - mid_point = int(fraction * len(seq)) - prompt = seq[:mid_point] - if num_tokens is not None: - target = seq[mid_point : mid_point + num_tokens] # Only compare to the section of sequence directly - else: - target = seq[mid_point:] - return prompt, target - - -def calculate_sequence_identity(seq1: str, seq2: str) -> float | None: - """Calculate sequence identity between two sequences through direct comparison.""" - if not seq1 or not seq2: - return None - - # Direct comparison of sequences - min_length = min(len(seq1), len(seq2)) - matches = sum(a == b for a, b in zip(seq1[:min_length], seq2[:min_length])) - - return (matches / min_length) * 100 - - -@pytest.mark.parametrize( - "ckpt_name,model_tokenizer_provider,expected_matchpercents", - [ - ("evo2/1b-8k-bf16:1.0", get_model_and_tokenizer, [96.8, 29.7, 76.6, 71.6]), - ("evo2/1b-8k:1.0", get_model_and_tokenizer, [96.8, 29.7, 76.6, 71.6]), - ("evo2_mamba/7b-8k:0.1", get_model_and_tokenizer_ignore_vortex, [99.2, 51.0, 73.0, 82.6]), - ("evo2/7b-8k:1.0", get_model_and_tokenizer, [97.60, 89.63, 80.03, 84.57]), - ("evo2/7b-1m:1.0", get_model_and_tokenizer, [97.60, 89.63, 80.03, 84.57]), - ], -) -def test_batch_generate( - sequences: list[str], ckpt_name: str, model_tokenizer_provider: Callable, expected_matchpercents: list[float] -): - assert len(sequences) > 0 - _ = determine_memory_requirement_and_skip_if_not_met(ckpt_name, test_name=inspect.currentframe().f_code.co_name) - - is_fp8_supported, compute_capability, device_info = check_fp8_support(torch.cuda.current_device()) - skip = "evo2/1b-8k:" in ckpt_name and not is_fp8_supported - if skip: - # This checkpoint is sensitive to FP8, so we skip it if it is not supported on the current device. - pytest.skip(f"Skipping {ckpt_name} because it is not supported on {device_info} ({compute_capability})") - if "evo2_mamba" in ckpt_name and os.environ.get("BIONEMO_DATA_SOURCE") != "pbss": - # TODO: add evo2_mamba/7b-8k to NGC and remove this skip - pytest.skip(f"Skipping {ckpt_name} because it is not on NGC yet. Run with `BIONEMO_DATA_SOURCE=pbss`.") - # only use vortex_style_fp8 for non-bf16 checkpoints with fp8 support - vortex_style_fp8 = is_fp8_supported and "bf16" not in ckpt_name - - num_tokens = 500 - seq_prompts = [mid_point_split(seq=seq, num_tokens=num_tokens) for seq in sequences] - seq_len_max = num_tokens + max([len(sq[0]) for sq in seq_prompts]) - inference_wrapped_model, mcore_tokenizer = model_tokenizer_provider( - ckpt_name, - vortex_style_fp8=vortex_style_fp8, - seq_len_max=seq_len_max, - ) - - results = generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=[sq[0] for sq in seq_prompts], - random_seed=42, - inference_params=CommonInferenceParams( - temperature=1.0, - top_k=1, - top_p=0.0, - return_log_probs=False, - num_tokens_to_generate=num_tokens, - ), - ) - - match_percents = [] - for i, (result, (prompt, target)) in enumerate(zip(results, seq_prompts)): - gen_seq = result.generated_text - logging.info(f"{ckpt_name} {torch.distributed.get_rank()=} {gen_seq=}") - logging.info(f"{ckpt_name} {torch.distributed.get_rank()=} {target=}") - match_percent = calculate_sequence_identity(target, gen_seq) - logging.info( - f"{ckpt_name} {torch.distributed.get_rank()=} {match_percent=} expected: {expected_matchpercents[i]}" - ) - match_percents.append(match_percent) - - assert len(match_percents) == len(expected_matchpercents) - matchperc_print = [f"{mp:.1f}%" for mp in match_percents] - matchperc_print_expected = [f"{ep:.1f}%" for ep in expected_matchpercents] - assert all(mp >= 0.90 * ep for mp, ep in zip(match_percents, expected_matchpercents)), ( - f"Expected at least 90% of {matchperc_print_expected=}, got {matchperc_print=}" - ) - - -@pytest.mark.parametrize( - "ckpt_name,model_tokenizer_provider,expected_matchpercents", - [ - ("evo2/1b-8k-bf16:1.0", get_model_and_tokenizer, [86.4, 78.8, 49.7]), - ("evo2/1b-8k:1.0", get_model_and_tokenizer, [86.4, 78.8, 49.7]), - ("evo2_mamba/7b-8k:0.1", get_model_and_tokenizer_ignore_vortex, [86.5, 88.4, 88.2]), - ("evo2/7b-8k:1.0", get_model_and_tokenizer, [88.8, 88.5, 82.2]), - ("evo2/7b-1m:1.0", get_model_and_tokenizer, [88.8, 88.5, 82.2]), - ], -) -def test_batch_generate_coding_sequences( - coding_sequences: list[str], - ckpt_name: str, - model_tokenizer_provider: Callable, - expected_matchpercents: list[float], -): - assert len(coding_sequences) > 0 - determine_memory_requirement_and_skip_if_not_met(ckpt_name, test_name=inspect.currentframe().f_code.co_name) - - is_fp8_supported, compute_capability, device_info = check_fp8_support(torch.cuda.current_device()) - skip = "evo2/1b-8k:" in ckpt_name and not is_fp8_supported - if skip: - # This checkpoint is sensitive to FP8, so we skip it if it is not supported on the current device. - pytest.skip(f"Skipping {ckpt_name} because it is not supported on {device_info} ({compute_capability})") - if "evo2_mamba" in ckpt_name and os.environ.get("BIONEMO_DATA_SOURCE") != "pbss": - # TODO: add evo2_mamba/7b-8k to NGC and remove this skip - pytest.skip(f"Skipping {ckpt_name} because it is not on NGC yet. Run with `BIONEMO_DATA_SOURCE=pbss`.") - # only use vortex_style_fp8 for non-bf16 checkpoints with fp8 support - vortex_style_fp8 = is_fp8_supported and "bf16" not in ckpt_name - - match_percents: list[float] = [] - cds_lengths: list[int | None] = [] - original_cds_lengths: list[int] = [len(seq) for seq in coding_sequences] - seq_prompts = [mid_point_split(seq=seq, num_tokens=None, fraction=0.3) for seq in coding_sequences] - num_tokens = max(len(sq[1]) for sq in seq_prompts) + 15 - - inference_wrapped_model, mcore_tokenizer = model_tokenizer_provider( - ckpt_name, vortex_style_fp8=vortex_style_fp8, enable_flash_decode=True, flash_decode=True - ) - - _ = generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=["AAACCC"], - random_seed=42, - inference_params=CommonInferenceParams( - temperature=1.0, - top_k=1, - top_p=0.0, - return_log_probs=False, - num_tokens_to_generate=1, - ), - ) - results = generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=[sq[0] for sq in seq_prompts], - random_seed=42, - inference_params=CommonInferenceParams( - temperature=1.0, - top_k=1, - top_p=0.0, - return_log_probs=False, - num_tokens_to_generate=num_tokens, - ), - ) - - for i, (result, (prompt, target)) in enumerate(zip(results, seq_prompts)): - gen_seq = result.generated_text - logging.info(f"{ckpt_name} {torch.distributed.get_rank()=} {gen_seq=}") - logging.info(f"{ckpt_name} {torch.distributed.get_rank()=} {target=}") - full_seq = prompt + gen_seq - stop_codons = {"TAA", "TAG", "TGA"} - assert full_seq[:3] == "ATG" # start codon - cds_length = None - for codon_start in range(0, len(full_seq), 3): - codon = full_seq[codon_start : codon_start + 3] - if codon in stop_codons: - cds_length = codon_start + 3 - break - match_percent = calculate_sequence_identity(target, gen_seq) - logging.info( - f"{ckpt_name} {torch.distributed.get_rank()=} {match_percent=} expected: {expected_matchpercents[i]}" - ) - match_percents.append(match_percent) - cds_lengths.append(cds_length) - # 99% of the time, you have a stop codon within the first 96 codons if everything were random. - - assert len(match_percents) == len(expected_matchpercents) - assert len(cds_lengths) == len(original_cds_lengths) - matchperc_print = [f"{mp:.1f}%" for mp in match_percents] - matchperc_print_expected = [f"{ep:.1f}%" for ep in expected_matchpercents] - # By chance you expect to have a stop codon within the first 96 codons if everything were random - # so verify that we are putting the first stop codon after this point, as well as it being at least 90% of the - # original sequence length. - assert all( - pcl is None or ((pcl - len(pmpt) > 96 * 3 or len(tgt) < 96 * 3) and pcl >= 0.9 * ocl) - for pcl, ocl, (pmpt, tgt) in zip(cds_lengths, original_cds_lengths, seq_prompts) - ), f"Expected at least 70% of {original_cds_lengths=}, got {cds_lengths=}" - assert all(mp >= 0.90 * ep for mp, ep in zip(match_percents, expected_matchpercents)), ( - f"Expected at least 90% of {matchperc_print_expected=}, got {matchperc_print=}" - ) - - -@pytest.mark.skip( - reason="skip the test for now, and decide what to do after getting Anton's changes sorted and merged." -) -@pytest.mark.slow -@pytest.mark.parametrize( - "ckpt_name,model_tokenizer_provider,expected_tokens_sec", - [ - ("evo2/1b-8k-bf16:1.0", get_model_and_tokenizer, 41.0), - ("evo2/1b-8k:1.0", get_model_and_tokenizer, 41.0), - ("evo2_mamba/7b-8k:0.1", get_model_and_tokenizer_ignore_vortex, 39.73), - ("evo2/7b-8k:1.0", get_model_and_tokenizer, 32.0), - ("evo2/7b-1m:1.0", get_model_and_tokenizer, 32.0), - ], -) -def test_generate_speed( - ckpt_name: str, - model_tokenizer_provider: Callable, - expected_tokens_sec: float, -): - is_fp8_supported, compute_capability, device_info = check_fp8_support(torch.cuda.current_device()) - determine_memory_requirement_and_skip_if_not_met(ckpt_name, test_name=inspect.currentframe().f_code.co_name) - - skip = "evo2/1b-8k:" in ckpt_name and not is_fp8_supported - if skip: - # This checkpoint is sensitive to FP8, so we skip it if it is not supported on the current device. - pytest.skip(f"Skipping {ckpt_name} because it is not supported on {device_info} ({compute_capability})") - if "evo2_mamba" in ckpt_name and os.environ.get("BIONEMO_DATA_SOURCE") != "pbss": - # TODO: add evo2_mamba/7b-8k to NGC and remove this skip - pytest.skip(f"Skipping {ckpt_name} because it is not on NGC yet. Run with `BIONEMO_DATA_SOURCE=pbss`.") - # only use vortex_style_fp8 for non-bf16 checkpoints with fp8 support - vortex_style_fp8 = is_fp8_supported and "bf16" not in ckpt_name - inference_wrapped_model, mcore_tokenizer = model_tokenizer_provider( - ckpt_name, - vortex_style_fp8=vortex_style_fp8, - fp32_residual_connection=False, - enable_flash_decode=True, - flash_decode=True, - ) - - # warm up the model with a single call before timing. This should take care of compilation etc. - _ = generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=["AAACCC"], - random_seed=42, - inference_params=CommonInferenceParams( - temperature=1.0, - top_k=1, - top_p=0.0, - return_log_probs=False, - num_tokens_to_generate=1, - ), - ) - t0 = time.perf_counter_ns() - results = generate( - model=inference_wrapped_model, - max_batch_size=1, # vortex only supports batch size 1 - tokenizer=mcore_tokenizer, - prompts=["A"], - random_seed=42, - inference_params=CommonInferenceParams( - temperature=1.0, - top_k=1, - top_p=0.0, - return_log_probs=False, - num_tokens_to_generate=300, - ), - ) - dt = (time.perf_counter_ns() - t0) / 1e9 # seconds - tokens_per_sec = (len(results[0].generated_text) + 1) / dt # +1 for the prompt - assert tokens_per_sec > expected_tokens_sec * 0.85, ( - f"Expected at least {expected_tokens_sec} tokens/sec, got {tokens_per_sec}" - ) diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_prompt.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_prompt.py deleted file mode 100644 index 0a91fee930..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_prompt.py +++ /dev/null @@ -1,140 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Arc Institute. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Michael Poli. All rights reserved. -# SPDX-FileCopyrightText: Copyright (c) 2024 Stanford University. All rights reserved -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass - -import pytest -from megatron.core.inference.inference_request import InferenceRequest - -from bionemo.core.data.load import load -from bionemo.evo2.run.infer import infer - - -RANDOM_SEED = 42 -MAX_NEW_TOKENS = 500 -TEMPERATURE = 1.0 -TOP_K = 0 -TOP_P = 0.0 - -# todo: figure out 1M checkpoints (or add to NGC) -CHECKPOINT_NAMES = [ - "evo2/1b-8k-bf16:1.0", - # "evo2/7b-8k:1.0", - # "evo2/7b-1m:1.0", -] - - -PROMPT_1 = "GAATAGGAACAGCTCCGGTCTACAGCTCCCAGCGTGAGCGACGCAGAAGACGGTGATTTCTGCATTTCCATCTGAGGTACCGGGTTCATCTCACTAGGGAGTGCCAGACAGTGGGCGCAGGCCAGTGTGTGTGCGCACCGTGCGCGAGCCGAAGCAGGG" - -PROMPT_2 = "GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTCGTCTGGGGGGTATGCACGCGATAGCATTGCGAGACGCTGGAGCCGGAGCACCCTATGTCGCAGTATCTGTCTTTGATTCCTGCCTCATCCTATTATTT" - - -@dataclass -class InferCofig: - """Configuration for model inference parameters.""" - - temperature: float = TEMPERATURE - top_k: int = TOP_K - top_p: float = TOP_P - tensor_parallel_size: int = 1 - pipeline_model_parallel_size: int = 1 - context_parallel_size: int = 1 - max_new_tokens: int = MAX_NEW_TOKENS - ckpt_format: str = "torch_dist" - seed: int = RANDOM_SEED - flash_decode: bool = False - - -_checkpoint_cache = {} - - -@pytest.fixture(scope="session") -def load_checkpoint(): - """Factory function that returns a checkpoint loader with caching.""" - - def _load_checkpoint(ckpt_name: str) -> str: - if ckpt_name not in _checkpoint_cache: - _checkpoint_cache[ckpt_name] = load(ckpt_name) - return _checkpoint_cache[ckpt_name] - - return _load_checkpoint - - -def percent_equal_tokens(response1: list[InferenceRequest], response2: list[InferenceRequest]) -> float: - """Percent of tokens that are equal between two responses.""" - num_equal = [i == j for i, j in zip(response1[0].generated_tokens, response2[0].generated_tokens)] - return sum(num_equal) / len(num_equal) - - -# just a DRY wrapper for the infer function -def run_inference(prompt: str, checkpoint_path: str, config: InferCofig) -> list[InferenceRequest]: - """Run model inference with given parameters. - - Args: - prompt: Input prompt for the model - checkpoint_path: Path to model checkpoint - config: Inference configuration parameters - - Returns: - Model response - """ - return infer( - prompt=prompt, - ckpt_dir=checkpoint_path, - temperature=config.temperature, - top_k=config.top_k, - top_p=config.top_p, - max_new_tokens=config.max_new_tokens, - tensor_parallel_size=config.tensor_parallel_size, - pipeline_model_parallel_size=config.pipeline_model_parallel_size, - context_parallel_size=config.context_parallel_size, - output_file=None, - ckpt_format=config.ckpt_format, - seed=config.seed, - flash_decode=config.flash_decode, - ) - - -@pytest.mark.parametrize("ckpt_name", CHECKPOINT_NAMES) -def test_identical_prompts_should_be_identical(load_checkpoint, ckpt_name): - """Test that identical prompts produce identical sequences for temperature 1.0.""" - checkpoint_path = load_checkpoint(ckpt_name) - - # with clean_parallel_state_context(): - response_prompt1 = run_inference(PROMPT_1, checkpoint_path, InferCofig()) - response_prompt2 = run_inference(PROMPT_1, checkpoint_path, InferCofig()) - - sequence_similarity = percent_equal_tokens(response_prompt1, response_prompt2) - print(f"sequence similarity {ckpt_name} identical prompts: {sequence_similarity}") - assert sequence_similarity == 1.0 - - -@pytest.mark.parametrize("ckpt_name", CHECKPOINT_NAMES) -def test_different_prompts_too_similar(load_checkpoint, ckpt_name): - """Test that different prompts for the same sequence are too similar. - That is, different prompts should produce more varied sequences. - """ - checkpoint_path = load_checkpoint(ckpt_name) - - similarity_threshold = 0.9 - - # with clean_parallel_state_context(): - response_prompt1 = run_inference(PROMPT_1, checkpoint_path, InferCofig()) - response_prompt2 = run_inference(PROMPT_2, checkpoint_path, InferCofig()) - sequence_similarity = percent_equal_tokens(response_prompt1, response_prompt2) - assert sequence_similarity <= similarity_threshold diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_stop_and_go.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_stop_and_go.py deleted file mode 100644 index 842f78255f..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/test_stop_and_go.py +++ /dev/null @@ -1,402 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Literal - -import lightning.pytorch as pl -import pytest -import torch -from megatron.core.distributed import DistributedDataParallelConfig -from megatron.core.optimizer import OptimizerConfig -from nemo import lightning as nl -from nemo.collections.llm import HyenaModel -from nemo.collections.llm.gpt.data import MockDataModule -from nemo.collections.llm.gpt.model.hyena import HyenaNVTestConfig -from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer -from nemo.lightning.pytorch.optim import CosineAnnealingScheduler, MegatronOptimizerModule -from nemo.lightning.pytorch.strategies import MegatronStrategy -from typing_extensions import override - -from bionemo.evo2.models.mamba import HybridMambaConfig8BEvo2Loss, MambaModel -from bionemo.testing import testing_callbacks -from bionemo.testing.harnesses import stop_and_go -from bionemo.testing.harnesses.mode import Mode - - -class TestEvo2StopAndGo(stop_and_go.StopAndGoHarness): - """Most of these parameters are copied from test_evo2.py which runs train.py.""" - - num_steps: int = 3 - val_check_interval: int = 1 - limit_val_batches: int = 1 - lr: float = 3e-4 - wd: float = 0.01 - clip_grad: float = 1.0 - micro_batch_size: int = 1 - global_batch_size: int = 1 - num_layers: int = 4 - precision: Literal["16-mixed", "bf16-mixed", "32"] = "bf16-mixed" - workers: int = 8 - seq_length: int = 8 - hybrid_override_pattern: str = "SDH*" - use_megatron_comm_overlap_llama3_8k: bool = False - hidden_dropout: float = 0.1 - attention_dropout: float = 0.1 - no_renormalize_loss: bool = False - sequence_parallel: bool = False - cross_entropy_loss_fusion: bool = False - no_fp32_residual_connection: bool = False - add_bias_output: bool = True - - @classmethod - def setup_trainer( - cls, - mode: Mode, - ) -> nl.Trainer: - """Setup trainer by passing stop, resume, or continuous callbacks according to mode.""" - ddp = DistributedDataParallelConfig( - check_for_nan_in_grad=True, - overlap_grad_reduce=False, - overlap_param_gather=False, - grad_reduce_in_fp32=False, - align_param_gather=False, - average_in_collective=True, - ) - strategy = MegatronStrategy( - ddp=ddp, - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - context_parallel_size=1, - sequence_parallel=cls.sequence_parallel, - pipeline_dtype=torch.bfloat16, - ckpt_async_save=False, - ckpt_load_optimizer=True, - ckpt_save_optimizer=True, - save_ckpt_format="torch_dist", - ckpt_load_strictness="log_all", - ) - - trainer = nl.Trainer( - devices=1, - max_steps=cls.num_steps, - num_nodes=1, - accelerator="gpu", - strategy=strategy, - limit_val_batches=cls.limit_val_batches, - num_sanity_val_steps=0, - val_check_interval=cls.val_check_interval, - log_every_n_steps=cls.val_check_interval, - enable_checkpointing=True, - use_distributed_sampler=False, - callbacks=list(cls.callbacks[mode].values()), - plugins=nl.MegatronMixedPrecision( - precision=cls.precision, params_dtype=torch.bfloat16, grad_reduce_in_fp32=False, fp8_wgrad=False - ), - ) - return trainer - - @override - @classmethod - def setup_class(cls): - super().setup_class() - - # setup data - cls.tokenizer = get_nmt_tokenizer("byte-level") - # run stop and go - cls.run_stop_and_go() - - @classmethod - def setup_model(cls, mode: Mode) -> tuple[pl.LightningModule, pl.LightningDataModule, nl.MegatronOptimizerModule]: - # build data module - data = MockDataModule( - seq_length=cls.seq_length, - micro_batch_size=cls.micro_batch_size, - global_batch_size=cls.global_batch_size, - num_workers=cls.workers, - tokenizer=cls.tokenizer, - ) - - data.init_global_step = 0 - # config - config = HyenaNVTestConfig( - **{ - "tp_comm_overlap": cls.use_megatron_comm_overlap_llama3_8k, - "seq_length": cls.seq_length, - "use_te": True, - "params_dtype": torch.bfloat16, - "bf16": True, - "recompute_granularity": None, - "recompute_method": None, - "recompute_num_layers": None, - "num_layers": cls.num_layers, - "hidden_size": 1920, - "hybrid_override_pattern": cls.hybrid_override_pattern, - "num_attention_heads": 15, - "num_query_groups": 15, - "ffn_hidden_size": 5120, - "hidden_dropout": cls.hidden_dropout, - "num_groups_hyena": 1920, - "num_groups_hyena_medium": 128, - "num_groups_hyena_short": 128, - "attention_dropout": cls.attention_dropout, - "to_upper": "weighted" if cls.no_renormalize_loss else "normalized_weighted", - "distribute_saved_activations": False if cls.sequence_parallel else True, - "cross_entropy_loss_fusion": cls.cross_entropy_loss_fusion, - "fp32_residual_connection": not cls.no_fp32_residual_connection, - "add_bias_output": cls.add_bias_output, - } - ) - - optimizer_config = OptimizerConfig( - optimizer="adam", - lr=cls.lr, - adam_beta1=0.9, - adam_beta2=0.95, - weight_decay=cls.wd, - clip_grad=cls.clip_grad, - params_dtype=torch.float32, - use_distributed_optimizer=True, - bf16=True, - ) - # build optimizer - optimizer = MegatronOptimizerModule( - config=optimizer_config, - lr_scheduler=CosineAnnealingScheduler(warmup_steps=1, max_steps=cls.num_steps, min_lr=3e-5), - no_weight_decay_cond=config.hyena_no_weight_decay_cond_fn, - ) - - # # Build model - module = HyenaModel(config, tokenizer=data.tokenizer) - optimizer.connect(module) - return module, data, optimizer - - @pytest.mark.parametrize( - "callback_type", - [ - testing_callbacks.LearningRateCallback, - testing_callbacks.GlobalStepStateCallback, - testing_callbacks.ConsumedSamplesCallback, - testing_callbacks.OptimizerStateCallback, - testing_callbacks.TrainInputCallback, - testing_callbacks.TrainOutputCallback, - testing_callbacks.TrainLossCallback, - testing_callbacks.ValidInputCallback, - testing_callbacks.ValidOutputCallback, - testing_callbacks.ValidLossCallback, - ], - ) - def test_stop_and_go_consistency(self, callback_type): - if callback_type in [ - testing_callbacks.ValidLossCallback, - testing_callbacks.ValidOutputCallback, - testing_callbacks.TrainInputCallback, - testing_callbacks.TrainOutputCallback, - testing_callbacks.TrainLossCallback, - testing_callbacks.OptimizerStateCallback, - ]: - pytest.xfail(reason="Tensors not close") - super().test_stop_and_go_consistency(callback_type) - - @pytest.mark.skip(reason="TODO: assert train_consumed_go > 0 fails.") - def test_train_val_init_consumed_samples(self): - pass - - -class TestEvo2MambaStopAndGo(stop_and_go.StopAndGoHarness): - """Most of these parameters are copied from test_evo2.py which runs train.py.""" - - num_steps: int = 3 - val_check_interval: int = 1 - limit_val_batches: int = 1 - lr: float = 3e-4 - wd: float = 0.01 - clip_grad: float = 1.0 - micro_batch_size: int = 1 - global_batch_size: int = 1 - precision: Literal["16-mixed", "bf16-mixed", "32"] = "bf16-mixed" - workers: int = 8 - seq_length: int = 8 - hybrid_override_pattern: str = "M-M-M-M*-" - use_megatron_comm_overlap_llama3_8k: bool = False - hidden_dropout: float = 0.1 - attention_dropout: float = 0.1 - no_renormalize_loss: bool = False - sequence_parallel: bool = False - cross_entropy_loss_fusion: bool = False - no_fp32_residual_connection: bool = False - use_targeted_variance_loss: bool = True - embedding_init_method_std: float = 1.0 - num_layers: int = 9 - hidden_size: int = 128 - ffn_hidden_size: int = 512 - mamba_num_groups: int = 2 - mamba_head_dim: int = 32 - num_query_groups: int = 2 - num_attention_heads: int = 2 - mamba_state_dim: int = 32 - - @classmethod - def setup_trainer( - cls, - mode: Mode, - ) -> nl.Trainer: - """Setup trainer by passing stop, resume, or continuous callbacks according to mode.""" - ddp = DistributedDataParallelConfig( - check_for_nan_in_grad=True, - overlap_grad_reduce=False, - overlap_param_gather=False, - grad_reduce_in_fp32=False, - align_param_gather=False, - average_in_collective=True, - ) - strategy = MegatronStrategy( - ddp=ddp, - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - context_parallel_size=1, - sequence_parallel=cls.sequence_parallel, - pipeline_dtype=torch.bfloat16, - ckpt_async_save=False, - ckpt_load_optimizer=True, - ckpt_save_optimizer=True, - save_ckpt_format="torch_dist", - ckpt_load_strictness="log_all", - ) - - trainer = nl.Trainer( - devices=1, - max_steps=cls.num_steps, - num_nodes=1, - accelerator="gpu", - strategy=strategy, - limit_val_batches=cls.limit_val_batches, - num_sanity_val_steps=0, - val_check_interval=cls.val_check_interval, - log_every_n_steps=cls.val_check_interval, - enable_checkpointing=True, - use_distributed_sampler=False, - callbacks=list(cls.callbacks[mode].values()), - plugins=nl.MegatronMixedPrecision( - precision=cls.precision, params_dtype=torch.bfloat16, grad_reduce_in_fp32=False, fp8_wgrad=False - ), - ) - return trainer - - @override - @classmethod - def setup_class(cls): - super().setup_class() - - # setup data - cls.tokenizer = get_nmt_tokenizer("byte-level") - # run stop and go - cls.run_stop_and_go() - - @classmethod - def setup_model(cls, mode: Mode) -> tuple[pl.LightningModule, pl.LightningDataModule, nl.MegatronOptimizerModule]: - # build data module - data = MockDataModule( - seq_length=cls.seq_length, - micro_batch_size=cls.micro_batch_size, - global_batch_size=cls.global_batch_size, - num_workers=cls.workers, - tokenizer=cls.tokenizer, - ) - - data.init_global_step = 0 - # config - config = HybridMambaConfig8BEvo2Loss( - **{ - "tp_comm_overlap": cls.use_megatron_comm_overlap_llama3_8k, - "seq_length": cls.seq_length, - "params_dtype": torch.bfloat16, - "bf16": True, - "recompute_granularity": None, - "recompute_method": None, - "recompute_num_layers": None, - "hidden_size": cls.hidden_size, - "ffn_hidden_size": cls.ffn_hidden_size, - "num_layers": cls.num_layers, - "hybrid_override_pattern": cls.hybrid_override_pattern, - "hidden_dropout": cls.hidden_dropout, - "attention_dropout": cls.attention_dropout, - "to_upper": "weighted" if cls.no_renormalize_loss else "normalized_weighted", - "distribute_saved_activations": False if cls.sequence_parallel else True, - "cross_entropy_loss_fusion": cls.cross_entropy_loss_fusion, - "fp32_residual_connection": not cls.no_fp32_residual_connection, - "use_targeted_variance_loss": cls.use_targeted_variance_loss, - "embedding_init_method_std": cls.embedding_init_method_std, - "mamba_num_groups": cls.mamba_num_groups, - "mamba_head_dim": cls.mamba_head_dim, - "num_query_groups": cls.num_query_groups, - "num_attention_heads": cls.num_attention_heads, - "mamba_state_dim": cls.mamba_state_dim, - } - ) - - optimizer_config = OptimizerConfig( - optimizer="adam", - lr=cls.lr, - adam_beta1=0.9, - adam_beta2=0.95, - weight_decay=cls.wd, - clip_grad=cls.clip_grad, - params_dtype=torch.float32, - use_distributed_optimizer=True, - bf16=True, - ) - # build optimizer - optimizer = MegatronOptimizerModule( - config=optimizer_config, - lr_scheduler=CosineAnnealingScheduler(warmup_steps=1, max_steps=cls.num_steps, min_lr=3e-5), - no_weight_decay_cond=config.hyena_no_weight_decay_cond_fn, - ) - - # # Build model - module = MambaModel(config, tokenizer=data.tokenizer) - optimizer.connect(module) - return module, data, optimizer - - @pytest.mark.parametrize( - "callback_type", - [ - testing_callbacks.LearningRateCallback, - testing_callbacks.GlobalStepStateCallback, - testing_callbacks.ConsumedSamplesCallback, - testing_callbacks.OptimizerStateCallback, - testing_callbacks.TrainInputCallback, - testing_callbacks.TrainOutputCallback, - testing_callbacks.TrainLossCallback, - testing_callbacks.ValidInputCallback, - testing_callbacks.ValidOutputCallback, - testing_callbacks.ValidLossCallback, - ], - ) - def test_stop_and_go_consistency(self, callback_type): - if callback_type in [ - testing_callbacks.ValidLossCallback, - testing_callbacks.ValidOutputCallback, - testing_callbacks.TrainInputCallback, - testing_callbacks.TrainOutputCallback, - testing_callbacks.TrainLossCallback, - testing_callbacks.OptimizerStateCallback, - ]: - pytest.xfail(reason="Tensors not close") - super().test_stop_and_go_consistency(callback_type) - - @pytest.mark.skip(reason="TODO: assert train_consumed_go > 0 fails.") - def test_train_val_init_consumed_samples(self): - pass diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/__init__.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/__init__.py deleted file mode 100644 index 4c0c148742..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/__init__.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/__init__.py deleted file mode 100644 index 4c0c148742..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/test_eden_llama_roundtrip.py b/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/test_eden_llama_roundtrip.py deleted file mode 100644 index 47102bf5a3..0000000000 --- a/sub-packages/bionemo-evo2/tests/bionemo/evo2/utils/checkpoint/test_eden_llama_roundtrip.py +++ /dev/null @@ -1,104 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os -from pathlib import Path - -import pytest -import torch -from nemo.collections.llm.gpt.model.llama import HFLlamaExporter - -from bionemo.core.data.load import load -from bionemo.evo2.models.llama import HFEdenLlamaImporter -from bionemo.llm.lightning import batch_collator -from bionemo.testing.subprocess_utils import run_command_in_subprocess - - -REPO_PATH = Path(__file__).parent.parent.parent.parent.parent.parent.parent.parent - - -@pytest.fixture(scope="module") -def eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814(): - """Test data for Evo2 llama inference. - - Returns: - tree - . - ├── per_layer_activations - │ └── activations_rank000_dl00_batch000000.pt - ├── predictions__rank_0__dp_rank_0.pt - ├── ribosomal_rrna_highly_conserved_PMC4140814.fasta - └── seq_idx_map.json - - 1 directory, 4 files - """ - return load("evo2_llama/eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814:1.0") - - -@pytest.fixture(scope="module") -def llama_7b_8k_og2(): - return load("evo2_llama/7B-8k-og2:1.0") - - -def predict_metagenome( - model_checkpoint_path: Path, metagenome_fasta_path: Path, output_path: Path -) -> tuple[dict[str, torch.Tensor], dict[str, int]]: - cmd = f"""predict_evo2 \ - --eden-tokenizer \ - --model-size 7B \ - --fasta {metagenome_fasta_path} \ - --ckpt-dir {model_checkpoint_path} \ - --output-log-prob-seqs \ - --log-prob-collapse-option per_token \ - --output-dir {output_path}""" - run_command_in_subprocess(cmd, os.getcwd()) - with open(output_path / "seq_idx_map.json", "r") as jsonf: - fasta_to_index = json.load(jsonf) - preds_list = [torch.load(f) for f in output_path.glob("*.pt")] - all_pt_data = batch_collator([item for item in preds_list if item is not None]) - return all_pt_data, fasta_to_index # type: ignore - - -@pytest.mark.skipif(os.environ.get("BIONEMO_DATA_SOURCE") != "pbss", reason="Test data is not available on NGC") -@pytest.mark.slow -def test_eden_llama_roundtrip( - tmp_path, llama_7b_8k_og2: Path, eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814: Path -): - """Test that converting NeMo -> HF -> NeMo produces the same model.""" - fasta_path = ( - eden_llama_og2_step_182313_on_evo2_rrna_highly_conserved_PMC4140814 - / "ribosomal_rrna_highly_conserved_PMC4140814.fasta" - ) - assert llama_7b_8k_og2.exists() and fasta_path.exists() - - exporter = HFLlamaExporter(llama_7b_8k_og2) - hf_path = tmp_path / "hf_checkpoint" - exporter.apply(hf_path) - importer = HFEdenLlamaImporter(hf_path) - importer.apply(tmp_path / "nemo_checkpoint") - original_predictions, original_fasta_to_index = predict_metagenome( - llama_7b_8k_og2, fasta_path, tmp_path / "original_predictions" - ) - new_predictions, new_fasta_to_index = predict_metagenome( - tmp_path / "nemo_checkpoint", fasta_path, tmp_path / "new_predictions" - ) - assert original_fasta_to_index == new_fasta_to_index, "Fasta to index mapping is not the same, need better logic." - for key in ["seq_idx", "log_probs_seqs", "loss_mask"]: - torch.testing.assert_close( - original_predictions[key], - new_predictions[key], - msg=lambda diff: f"Results for {key} are not the same:\n{diff}", - ) diff --git a/sub-packages/bionemo-example_model/DEPRECATED b/sub-packages/bionemo-example_model/DEPRECATED deleted file mode 100644 index 7f8c0fb01a..0000000000 --- a/sub-packages/bionemo-example_model/DEPRECATED +++ /dev/null @@ -1,7 +0,0 @@ -This sub-package (sub-packages/bionemo-example_model) is deprecated. - -This package provided an example NeMo/Megatron model for documentation -and tutorials. It is no longer maintained as the framework has moved to -self-contained recipes in bionemo-recipes/. - -This package will be removed in a future release. diff --git a/sub-packages/bionemo-example_model/LICENSE b/sub-packages/bionemo-example_model/LICENSE deleted file mode 120000 index 61bc2cda7e..0000000000 --- a/sub-packages/bionemo-example_model/LICENSE +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE/license.txt \ No newline at end of file diff --git a/sub-packages/bionemo-example_model/README.md b/sub-packages/bionemo-example_model/README.md deleted file mode 100644 index 1d59049022..0000000000 --- a/sub-packages/bionemo-example_model/README.md +++ /dev/null @@ -1,126 +0,0 @@ -# bionemo-example_model - -> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to -> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. - -This is a minimalist package containing an example model that makes use of bionemo2 and nemo conventions. It contains the necessary models, dataloaders, datasets, and custom loss functions. The referenced classes and functions are in `bionemo.example_model.lightning.lightning_basic`. - -This tutorial demonstrates the creation of a simple MNIST model. This should be run in a BioNeMo container. The BioNeMo Framework container can run in a brev.dev launchable: [![ Click here to deploy.](https://uohmivykqgnnbiouffke.supabase.co/storage/v1/object/public/landingpage/brevdeploynavy.svg)](https://console.brev.dev/launchable/deploy?launchableID=env-2pPDA4sJyTuFf3KsCv5KWRbuVlU). It takes about 10 minutes to deploy this notebook as a Launchable. As of this writing, we are working on a free tier so a credit card may be required. You can reach out to your NVIDIA rep for credit. Notebooks and a shell interface can be launched by clicking `Open Notebook`. (Note: This links to the nightly release and may be out of sync with these docs.) - -For this tutorial, we will reuse elements from the BioNeMo example_model package. - -`Megatron`/`NeMo` modules and datasets are special derivatives of PyTorch modules and datasets that extend and accelerate the distributed training and inference capabilities of PyTorch. - -Some distinctions of Megatron/NeMo are: - -- `torch.nn.Module`/`LightningModule` changes into `MegatronModule`. -- Loss functions should extend the `MegatronLossReduction` module and implement a `reduce` method for aggregating loss across multiple micro-batches. -- Megatron configuration classes (for example `megatron.core.transformer.TransformerConfig`) are extended with a `configure_model` method that defines how model weights are initialized and loaded in a way that is compliant with training via NeMo2. -- Various modifications and extensions to common PyTorch classes, such as adding a `MegatronDataSampler` (and re-sampler such as `PRNGResampleDataset` or `MultiEpochDatasetResampler`) to your `LightningDataModule`. - -# Loss Functions - -First, we define a simple loss function in `bionemo.example_model.lightning.lightning_basic`. These should extend the `MegatronLossReduction` class. The output of forward and backward passes happen in parallel. There should be a forward function that calculates the loss defined. The reduce function is required. - -Loss functions used here are `MSELossReduction` and `ClassifierLossReduction`. These functions return a Tensor, which contain the losses for the microbatches, and a `SameSizeLossDict` containing the average loss. This is a Typed Dictionary that is the return type for a loss that is computed for the entire batch, where all microbatches are the same size. - -# Datasets and Datamodules - -Datasets used for model training must be compatible with Megatron datasets. To enable this, the output of a given index and epoch must be deterministic. However, we may wish to have a different ordering in every epoch. To enable this, the items in the dataset should be accessible by both the epoch and the index. This can be done by accessing elements of the dataset with `EpochIndex` from `bionemo.core.data.multi_epoch_dataset`. A simple way of doing this is to wrap a dataset with `IdentityMultiEpochDatasetWrapper` imported from `bionemo.core.data.multi_epoch_dataset`. In this example, in in `bionemo.example_model.lightning.lightning_basic`, we use a custom dataset `MNISTCustomDataset` that wraps the `__getitem__` method of the MNIST dataset such that it returns a dict instead of a Tuple or tensor. The `MNISTCustomDataset` returns elements of type `MnistItem`, which is a `TypedDict`. - -In the data module/data loader class, it is necessary to have a data_sampler attribute to shuffle the data and that allows the sampler to be used with Megatron. This is a nemo2 peculiarity. A `nemo.lightning.pytorch.plugins.MegatronDataSampler` is the best choice. It sets up the capability to utilize micro-batching and gradient accumulation. It is also the place where the global batch size is constructed. - -Also the sampler will not shuffle your data. So you need to wrap your dataset in a dataset shuffler that maps sequential IDs to random IDs in your dataset. This can be done with `MultiEpochDatasetResampler` from `bionemo.core.data.multi_epoch_dataset`. - -This is implemented in the `MNISTDataModule`. In the setup method of the dataloader, the train, test and validation sets are `MNISTCustomDataset` are wrapped in the `IdentityMultiEpochDatasetWrapper`. These are then wrapped in the `MultiEpochDatasetResampler`. More information about `MegatronCompatability` and how to set up more complicated datasets can be found in [`docs.user-guide.background.megatron_datasets.md`](https://docs.nvidia.com/bionemo-framework/latest/user-guide/background/megatron_datasets/). - -We also define a `train_dataloader`, `val_dataloader`, and `predict_dataloader` methods that return the corresponding dataloaders. - -# Models - -Models need to be Megatron modules. At the most basic level this just means: - -1. They extend `MegatronModule` from megatron.core.transformer.module. -2. They need a config argument of type `megatron.core.ModelParallelConfig`. An easy way of implementing this is to inherit from `bionemo.llm.model.config.MegatronBioNeMoTrainableModelConfig`. This is a class for BioNeMo that supports usage with Megatron models, as NeMo2 requires. This class also inherits `ModelParallelConfig`. -3. They need a self.`model_type:megatron.core.transformer.enums.ModelType` enum defined (`ModelType.encoder_or_decoder` is a good option.) -4. `def set_input_tensor(self, input_tensor)` needs to be present. This is used in model parallelism. This function can be a stub/placeholder function. - -The following models are implemented in `bionemo.example_model.lightning.lightning_basic`. - -`ExampleModelTrunk` is a base model. This returns a tensor. `ExampleModel` is a model that extends the base model with a few linear layers and it is used for pretraining. This returns the output of the base model and of the full model. - -`ExampleFineTuneModel` extends the `ExampleModelTrunk` by adding a classification layer. This returns a tensor of logits over the 10 potential digits. - -# Model Configs - -The model config class is used to instantiate the model. These configs must have: - -1. A `configure_model` method which allows the Megatron strategy to lazily initialize the model after the parallel computing environment has been setup. These also handle loading starting weights for fine-tuning cases. Additionally these configs tell the trainer which loss you want to use with a matched model. -2. A `get_loss_reduction_class` method that defines the loss function. - -The following configs are implemented in `bionemo.example_model.lightning.lightning_basic`. - -Here, a base generic config `ExampleGenericConfig` is defined. `PretrainConfig` extends this class. This defines the model class and the loss class in: - -``` -class PretrainConfig(ExampleGenericConfig["PretrainModel", "MSELossReduction"], iom.IOMixinWithGettersSetters): - - model_cls: Type[PretrainModel] = PretrainModel - loss_cls: Type[MSELossReduction] = MSELossReduction - -``` - -Similarly, `ExampleFineTuneConfig` extends `ExampleGenericConfig` for finetuning. - -# Training Module - -It is helpful to have a training module that inherits from `lightning.pytorch.LightningModule` which organizes the model architecture, training, validation, and testing logic while abstracting away boilerplate code, enabling easier and more scalable training. This wrapper can be used for all model and loss combinations specified in the config. -In `bionemo.example_model.lightning.lightning_basic`, we define `BionemoLightningModule`. - -In this example, `training_step`, `validation_step`, and `predict_step` define the training, validation, and prediction loops are independent of the forward method. In nemo: - -1. NeMo's Strategy overrides the `train_step`, `validation_step` and `prediction_step` methods. -2. The strategies' training step will call the forward method of the model. -3. That forward method then calls the wrapped forward step of `MegatronParallel` which wraps the forward method of the model. -4. That wrapped forward step is then executed inside the `MegatronCore` scheduler, which calls the `_forward_step` method from the `MegatronParallel` class. -5. Which then calls the `training_step`, `validation_step` and `prediction_step` function here. - -Additionally, during these steps, we log the validation, testing, and training loss. This is done similarly to https://lightning.ai/docs/torchmetrics/stable/pages/lightning.html. These logs can then be exported to wandb, or other metric viewers. For more complicated tracking, it may be necessary to use pytorch callbacks: https://lightning.ai/docs/pytorch/stable/extensions/callbacks.html. - -Further `loss_reduction_class()`, `training_loss_reduction()`, `validation_loss_reduction(),` and` test_loss_reduction()` are defined based on what's in the config. Additionally, `configure_model()` is defined based on the config. - -# Training the models - -In `bionemo.example_model.lightning.lightning_basic` a checkpoint_callback variable is defined. This enables .nemo file-like checkpointing. - -The remaining functions are defined in the training scripts: `pretrain_mnist.py`, `finetune_mnist.py`, and `predict_mnist.py`. - -We specify a training strategy of type `nemo.lightning.MegatronStrategy`. This strategy implements model parallelism using NVIDIA's Megatron-LM framework. It supports various forms of parallelism including tensor model parallelism, pipeline model parallelism, sequence parallelism, and expert parallelism for efficient training of large language models. - -We specify a trainer of type `nemo.lightning.Trainer`, which is an extension of the pytorch lightning trainer. This is where the devices, validation intervals, maximal steps, maximal number of epochs, and how frequently to log are specified. - -We specify a nemo-logger. We can set TensorBoard and WandB logging, along with extra loggers. Here, we specify a `CSVLogger` from lightning.pytorch.loggers. - -We can now proceed to training. The first pre-training scripts is `bionemo/example_model/training_scripts/pretrain_mnist.py` - -Then, we train the model with the `BionemoLightningModule`, `MNISTDataModule`, trainer and nemo_logger. - -This script will print out the location of the final model: \ - -Then we can run a finetuning-script: - -``` -python src/bionemo/example_model/training_scripts/finetune_mnist.py ---pretrain_ckpt_dirpath -``` - -A nuance here is that in the config file, we specify the initial checkpoint path, along with which keys to skip. In the previous model checkpoint, we did not have a head labelled "digit_classifier", so we specify it as a head to be skipped. -This script will print the location of the finetuned directory: \. - -Finally, we can run a classification task with - -``` - -python src/bionemo/example_model/training_scripts/predict_mnist.py --finetune_dir . -``` - -The results can be viewed with TensorBoardLogger if that is configured, or as a CSV file created by the `CSVLogger`. diff --git a/sub-packages/bionemo-example_model/VERSION b/sub-packages/bionemo-example_model/VERSION deleted file mode 120000 index 558194c5a5..0000000000 --- a/sub-packages/bionemo-example_model/VERSION +++ /dev/null @@ -1 +0,0 @@ -../../VERSION \ No newline at end of file diff --git a/sub-packages/bionemo-example_model/pyproject.toml b/sub-packages/bionemo-example_model/pyproject.toml deleted file mode 100644 index bc1a030ef3..0000000000 --- a/sub-packages/bionemo-example_model/pyproject.toml +++ /dev/null @@ -1,40 +0,0 @@ -[build-system] -requires = ["setuptools>=64", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -# UV doesn't seem to pick up on changes to requirements.txt files as a signal that it needs to re-lock a project's -# dependencies. We should probably just move to listing requirements in these pyproject.toml files directly, and also -# now include bionemo-* sub-packages explicitly. -name = "bionemo-example_model" -readme = "README.md" -description = "[DEPRECATED] BioNeMo example_model: Example model for documentation and tutorials. No longer maintained." -authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] -requires-python = ">=3.10" -classifiers = ["Private :: Do Not Upload", "Programming Language :: Python :: 3.10"] -license = { file = "LICENSE" } -dynamic = ["version"] -dependencies = [ - 'bionemo-core', - 'bionemo-llm', - 'megatron-core', - 'nemo_toolkit', - 'torchvision >= 0.15.1', -] - -[project.optional-dependencies] -test = [ - "bionemo-testing" -] - -[tool.setuptools.packages.find] -where = ["src"] -include = ["bionemo.*"] -namespaces = true -exclude = ["test*."] - -[tool.setuptools.dynamic] -version = { file = "VERSION" } - -[tool.uv] -cache-keys = [{ git = true }] diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py deleted file mode 100644 index 8bdbf443c0..0000000000 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings - -warnings.warn( - "bionemo.example_model (sub-packages/bionemo-example_model) is deprecated and will be removed in a future release. " - "This package is no longer maintained.", - DeprecationWarning, - stacklevel=2, -) diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/lightning_basic.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/lightning_basic.py deleted file mode 100644 index 856678238f..0000000000 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/lightning/lightning_basic.py +++ /dev/null @@ -1,665 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This is intended to be a minimal self-container NeMo2 example.""" - -import os -from dataclasses import dataclass, field -from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypedDict, TypeVar - -import torch -from megatron.core import ModelParallelConfig -from megatron.core.optimizer.optimizer_config import OptimizerConfig -from megatron.core.transformer.enums import ModelType -from megatron.core.transformer.module import MegatronModule -from nemo.lightning import io -from nemo.lightning.megatron_parallel import MegatronLossReduction -from nemo.lightning.pytorch import callbacks as nl_callbacks -from nemo.lightning.pytorch.optim import MegatronOptimizerModule -from nemo.lightning.pytorch.plugins import MegatronDataSampler -from torch import Tensor, nn -from torch.utils.data import DataLoader -from torchvision import transforms -from torchvision.datasets import MNIST - -import lightning.pytorch as pl -from bionemo.core import BIONEMO_CACHE_DIR -from bionemo.core.data.multi_epoch_dataset import IdentityMultiEpochDatasetWrapper, MultiEpochDatasetResampler -from bionemo.llm.api import MegatronLossType -from bionemo.llm.lightning import LightningPassthroughPredictionMixin -from bionemo.llm.model.config import OVERRIDE_BIONEMO_CONFIG_DEFAULTS, MegatronBioNeMoTrainableModelConfig -from bionemo.llm.utils import iomixin_utils as iom - - -__all__: Sequence[str] = ( - "BionemoLightningModule", - "ExampleFineTuneOutput", - "ExampleModel", - "ExampleModelOutput", - "MNISTCustomDataset", - "MNISTDataModule", - "MSELossReduction", - "MnistItem", - "PretrainConfig", - "SameSizeLossDict", - "checkpoint_callback", - "data_module", -) - -############################################################################################# -# Losses: here we define some loss functions. The output of forward happens in parallel -# and that is where backward happens. def reduce is only used for collecting forward output -# for inference, as well as for logging. - - -class SameSizeLossDict(TypedDict): - """This is the return type for a loss that is computed for the entire batch, where all microbatches are the same size.""" - - avg: Tensor - - -class MnistItem(TypedDict): - """Training input for the MNIST dataset.""" - - data: Tensor - label: Tensor - idx: int - - -class ExampleModelOutput(TypedDict): - """Output for the example model implementation.""" - - x_hat: Tensor - z: Tensor - - -class ExampleFineTuneOutput(ExampleModelOutput): - """Output for the fine-tuned example model implementation.""" - - digit_logits: Tensor - - -class MSELossReduction(MegatronLossReduction): - """A class used for calculating the loss, and for logging the reduced loss across micro batches.""" - - def forward(self, batch: MnistItem, forward_out: Dict[str, Tensor]) -> Tuple[Tensor, SameSizeLossDict]: - """Calculates the loss within a micro-batch. A micro-batch is a batch of data on a single GPU. - - Args: - batch: A batch of data that gets passed to the original forward inside LitAutoEncoder. - forward_out: the output of the forward method inside LitAutoEncoder. - - Returns: - A tuple containing [, ReductionT] where the loss tensor will be used for - backpropagation and the ReductionT will be passed to the reduce method - (which currently only works for logging.). - """ - x = batch["data"] - x_hat = forward_out["x_hat"] - xview = x.view(x.size(0), -1).to(x_hat.dtype) - loss = nn.functional.mse_loss(x_hat, xview) - - return loss, {"avg": loss} - - def reduce(self, losses_reduced_per_micro_batch: Sequence[SameSizeLossDict]) -> Tensor: - """Works across micro-batches. (data on single gpu). - - Note: This currently only works for logging and this loss will not be used for backpropagation. - - Args: - losses_reduced_per_micro_batch: a list of the outputs of forward - - Returns: - A tensor that is the mean of the losses. (used for logging). - """ - mse_losses = torch.stack([loss["avg"] for loss in losses_reduced_per_micro_batch]) - return mse_losses.mean() - - -class MSEPlusClassifierLossReduction(MegatronLossReduction): - """A class used for calculating the loss, and for logging the reduced loss across micro batches.""" - - def forward(self, batch: MnistItem, forward_out: ExampleFineTuneOutput) -> Tuple[Tensor, SameSizeLossDict]: - """Calculates the loss within a micro-batch. A micro-batch is a batch of data on a single GPU. - - Args: - batch: A batch of data that gets passed to the original forward inside LitAutoEncoder. - forward_out: the output of the forward method inside LitAutoEncoder. - - Returns: - A tuple containing [, ReductionT] where the loss tensor will be used for - backpropagation and the ReductionT will be passed to the reduce method - (which currently only works for logging.). - """ - x = batch["data"] - digits = batch["label"] - x_hat = forward_out["x_hat"] - digit_logits = forward_out["digit_logits"] - xview = x.view(x.size(0), -1).to(x_hat.dtype) - mse_loss = nn.functional.mse_loss(x_hat, xview) - classifier_loss = nn.functional.cross_entropy(digit_logits, digits) - loss = classifier_loss + mse_loss - return loss, {"avg": loss} - - def reduce(self, losses_reduced_per_micro_batch: Sequence[SameSizeLossDict]) -> Tensor: - """Works across micro-batches. (data on single gpu). - - Note: This currently only works for logging and this loss will not be used for backpropagation. - - Args: - losses_reduced_per_micro_batch: a list of the outputs of forward - - Returns: - A tensor that is the mean of the losses. (used for logging). - """ - mse_losses = torch.stack([loss["avg"] for loss in losses_reduced_per_micro_batch]) - return mse_losses.mean() - - -class ClassifierLossReduction(MegatronLossReduction): - """A class used for calculating the loss, and for logging the reduced loss across micro batches.""" - - def forward(self, batch: MnistItem, forward_out: Tensor) -> Tuple[Tensor, SameSizeLossDict]: - """Calculates the loss within a micro-batch. A micro-batch is a batch of data on a single GPU. - - Args: - batch: A batch of data that gets passed to the original forward inside LitAutoEncoder. - forward_out: the output of the forward method inside LitAutoEncoder. - - Returns: - A tuple containing [, ReductionT] where the loss tensor will be used for - backpropagation and the ReductionT will be passed to the reduce method - (which currently only works for logging.). - """ - digits = batch["label"] - digit_logits = forward_out - loss = nn.functional.cross_entropy(digit_logits, digits) - return loss, {"avg": loss} - - def reduce(self, losses_reduced_per_micro_batch: Sequence[SameSizeLossDict]) -> Tensor: - """Works across micro-batches. (data on single gpu). - - Note: This currently only works for logging and this loss will not be used for backpropagation. - - Args: - losses_reduced_per_micro_batch: a list of the outputs of forward - - Returns: - A tensor that is the mean of the losses. (used for logging). - """ - mse_losses = torch.stack([loss["avg"] for loss in losses_reduced_per_micro_batch]) - return mse_losses.mean() - - -####################################################################################### -# Data methods. The dataset has no changes vs a vanilla pytorch dataset. The data module -# has a data_sampler in it which is a nemo2 peculiarity. Also the sampler will not -# shuffle your data! So you need to wrap your dataset in a dataset shuffler that maps -# sequential ids to random ids in your dataset. - - -class MNISTCustomDataset(MNIST): - """A Wrapper for the MNIST Dataset.""" - - def __getitem__(self, idx: int) -> MnistItem: - """Wraps the getitem method of the MNIST dataset such that we return a Dict. - - This is instead of a Tuple or tensor. - - Args: - idx: The index we want to grab, an int. - - Returns: - A dict containing the data ("x"), label ("y"), and index ("idx"). - """ - data, label = super().__getitem__(idx) - - return { - "data": data, - "label": label, - "idx": idx, - } - - -####################################################################################### -# Data module needs a data_sampler for handling the mcore strategy nemo2 runner. -class MNISTDataModule(pl.LightningDataModule): - """A Megatron Compatible Data Module for MNIST. - - Attributes: - data_dir: data directory - micro_batch_size: batch_size - global_batch_size: global batch size - max_len: maximal sequence length for megatron sampler - rampup_batch_size: ramp up batch size - num_workers: number of workers - data_sampler: data_sampler set to be a megatron one - """ - - def __init__( - self, - data_dir: str | os.PathLike = str(BIONEMO_CACHE_DIR), - batch_size: int = 32, - num_workers: int = 0, - global_batch_size: int | None = None, - output_log: bool = True, - ) -> None: - """Initialize class. - - Args: - data_dir: data directory - batch_size: batch_size - global_batch_size: global batch size - num_workers: number of workers - output_log: whether to output logs - - """ - super().__init__() - self.data_dir = data_dir - self.micro_batch_size = batch_size - self.global_batch_size = global_batch_size or batch_size - self.max_len = 1048 - # We need to define a "seq_length" for OneLogger, but we just set it to max_len - self.seq_length = self.max_len - self.rampup_batch_size = None - self.num_workers = num_workers - # Note that this sampler is sequential, meaning it does not do any shuffling. Let's wrap our data in a shuffler. - # Wraps the datasampler with the MegatronDataSampler. The MegatronDataSampler is a wrapper that allows the sampler - # to be used with megatron. It sets up the capability to utilize micro-batching and gradient accumulation. It is also - # the place where the global batch size is constructed. - self.data_sampler = MegatronDataSampler( - seq_len=self.max_len, - micro_batch_size=self.micro_batch_size, - global_batch_size=self.global_batch_size, - rampup_batch_size=self.rampup_batch_size, - output_log=output_log, - ) - - def setup(self, stage: str) -> None: - """Sets up the datasets. - - Args: - stage: can be one of train / test / predict. - """ - self.mnist_test = MultiEpochDatasetResampler( - IdentityMultiEpochDatasetWrapper( - MNISTCustomDataset(self.data_dir, download=True, transform=transforms.ToTensor(), train=False) - ), - seed=43, - shuffle=False, - ) - mnist_full = MNISTCustomDataset(self.data_dir, download=True, transform=transforms.ToTensor(), train=True) - mnist_train, mnist_val = torch.utils.data.random_split( - mnist_full, [55000, 5000], generator=torch.Generator().manual_seed(42) - ) - self.mnist_train = MultiEpochDatasetResampler( - IdentityMultiEpochDatasetWrapper(mnist_train), seed=44, shuffle=True - ) - - self.mnist_val = MultiEpochDatasetResampler( - IdentityMultiEpochDatasetWrapper(mnist_val), - seed=45, - shuffle=False, - ) - - def train_dataloader(self) -> DataLoader: - """Returns the training dataloader.""" - return DataLoader(self.mnist_train, batch_size=self.micro_batch_size, num_workers=self.num_workers) - - def val_dataloader(self) -> DataLoader: - """Returns the validation dataloader.""" - return DataLoader(self.mnist_val, batch_size=self.micro_batch_size, num_workers=self.num_workers) - - def predict_dataloader(self) -> DataLoader: - """Returns the prediction dataloader.""" - return DataLoader(self.mnist_test, batch_size=self.micro_batch_size, num_workers=self.num_workers) - - -######################################################### -# Models: These need to be megatron modules. At the most basic level this just means: -# 1. they need a config argument of type ModelParallelConfig -# 2. they need a self.model_type:ModelType enum defined (ModelType.encoder_or_decoder is probably usually fine) -# 3. def set_input_tensor(self, input_tensor) needs to be present. This is used in model parallelism - - -class ExampleModelTrunk(MegatronModule): - def __init__(self, config: ModelParallelConfig) -> None: - """Constructor of the model. - - Args: - config: The config object is responsible for telling the strategy what model to create. - """ - super().__init__(config) - # FIXME add an assertion that the user is not trying to do tensor parallelism since this doesn't use - # parallelizable megatron linear layers. - self.model_type: ModelType = ModelType.encoder_or_decoder - self.vp_stage = None # Add vp_stage attribute for compatibility with virtual pipeline - self.linear1 = nn.Linear(28 * 28, 64) - self.relu = nn.ReLU() - self.linear2 = nn.Linear(64, 3) - - def forward(self, x: Tensor) -> Tensor: - # we could return a dictionary of strings to tensors here, but let's demonstrate this is not necessary - x = x.view(x.size(0), -1) - z = self.linear1(x) - z = self.relu(z) - z = self.linear2(z) - return z - - def set_input_tensor(self, input_tensor: Optional[Tensor]) -> None: - """This _would_ be needed for model parallel and other kinds of more complicated forward passes in megatron.""" - pass - - -class ExampleModel(ExampleModelTrunk): - """An example model.""" - - def __init__(self, config: ModelParallelConfig) -> None: - """Constructor of the model. - - Args: - config: The config object is responsible for telling the strategy what model to create. - """ - super().__init__(config) - self.linear3 = nn.Linear(3, 64) - self.relu2 = nn.ReLU() - self.linear4 = nn.Linear(64, 28 * 28) - - def forward(self, x: Tensor) -> ExampleModelOutput: - """Forward pass of the model. - - Args: - x: The input data. - - Returns: - x_hat: The result of the last linear layer of the network. - """ - z: Tensor = super().forward(x) - x_hat = self.linear3(z) - x_hat = self.relu2(x_hat) - x_hat = self.linear4(x_hat) - return {"x_hat": x_hat, "z": z} - - -class ExampleFineTuneBothModel(ExampleModel): - """Example of taking the example model and adding an output task.""" - - def __init__(self, config: ModelParallelConfig): - super().__init__(config) - # 10 output digits, and use the latent output layer (z) for making predictions - self.digit_classifier = nn.Linear(self.linear2.out_features, 10) - - def forward(self, x: Tensor) -> ExampleFineTuneOutput: - parent_out: ExampleModelOutput = super().forward(x) - digit_logits = self.digit_classifier(parent_out["z"]) - return { - "x_hat": parent_out["x_hat"], - "z": parent_out["z"], - "digit_logits": digit_logits, - } - - -class ExampleFineTuneModel(ExampleModelTrunk): - """Example of taking the example model and replacing output task.""" - - def __init__(self, config: ModelParallelConfig): - super().__init__(config) - # 10 output digits, and use the latent output layer (z) for making predictions - self.digit_classifier = nn.Linear(self.linear2.out_features, 10) - - def forward(self, x: Tensor) -> Tensor: - z: Tensor = super().forward(x) - digit_logits = self.digit_classifier(z) # to demonstrate flexibility, in this case we return a tensor - return digit_logits - - -################################################################################################################# -# Model+Loss Configs: these have a configure_model function which allows the megatron strategy to lazily initialize -# the model after the parallel computing environment has been setup. These also handle loading starting weights -# for fine-tuning cases. Additionally these configs tell the trainer which loss you want to use with a matched -# model. - - -# typevar for capturing subclasses of ExampleModelTrunk. Useful for Generic type hints as below. -ExampleModelT = TypeVar("ExampleModelT", bound=ExampleModelTrunk) - - -@dataclass -class ExampleGenericConfig( - Generic[ExampleModelT, MegatronLossType], MegatronBioNeMoTrainableModelConfig[ExampleModelT, MegatronLossType] -): - """ExampleGenericConfig is a dataclass that is used to configure the model. - - Timers from ModelParallelConfig are required for megatron forward compatibility. - """ - - loss_cls: Type[MegatronLossType] = MSELossReduction # type: ignore # this will get overriden by children - hidden_size: int = 64 # Needs to be set to avoid zero division error in megatron :( - num_attention_heads: int = 1 # Needs to be set to avoid zero division error in megatron :( - num_layers: int = 1 # Needs to be set to avoid zero division error in megatron :( - # IMPORTANT: Since we're adding/overriding the loss_cls, and that's not how we generally track this, we need to - # add this into the list of config settings that we do not draw from the loaded checkpoint when restoring. - override_parent_fields: List[str] = field(default_factory=lambda: OVERRIDE_BIONEMO_CONFIG_DEFAULTS + ["loss_cls"]) - - def configure_model(self) -> ExampleModelT: - """Uses model_cls and loss_cls to configure the model. - - Note: Must pass self into Model since model requires having a config object. - - Returns: - The model object. - """ - # 1. first load any settings that may exist in the checkpoint related to the model. - if self.initial_ckpt_path: - self.load_settings_from_checkpoint(self.initial_ckpt_path) - # 2. then initialize the model - model = self.model_cls(self) - # 3. Load weights from the checkpoint into the model - if self.initial_ckpt_path: - self.update_model_from_checkpoint(model, self.initial_ckpt_path) - return model - - def get_loss_reduction_class(self) -> Type[MegatronLossType]: - """Use loss_cls to configure the loss, since we do not change the settings of the loss based on the config.""" - return self.loss_cls - - -# The configs below simply define which model class to pair with which loss, since the abstractions around getting the -# model and loss are handled in the ExampleGenericConfig class. -@dataclass -class PretrainConfig(ExampleGenericConfig["ExampleModel", "MSELossReduction"], iom.IOMixinWithGettersSetters): - """PretrainConfig is a dataclass that is used to configure the model. - - Timers from ModelParallelConfig are required for megatron forward compatibility. - """ - - model_cls: Type[ExampleModel] = ExampleModel - loss_cls: Type[MSELossReduction] = MSELossReduction - - -@dataclass -class ExampleFineTuneBothConfig( - ExampleGenericConfig["ExampleFineTuneBothModel", "MSEPlusClassifierLossReduction"], iom.IOMixinWithGettersSetters -): - """ExampleConfig is a dataclass that is used to configure the model. - - Timers from ModelParallelConfig are required for megatron forward compatibility. - """ - - model_cls: Type[ExampleFineTuneBothModel] = ExampleFineTuneBothModel - loss_cls: Type[MSEPlusClassifierLossReduction] = MSEPlusClassifierLossReduction - - -@dataclass -class ExampleFineTuneConfig( - ExampleGenericConfig["ExampleFineTuneConfig", "ClassifierLossReduction"], iom.IOMixinWithGettersSetters -): - """ExampleConfig is a dataclass that is used to configure the model. - - Timers from ModelParallelConfig are required for megatron forward compatibility. - """ - - model_cls: Type[ExampleFineTuneModel] = ExampleFineTuneModel - loss_cls: Type[ClassifierLossReduction] = ClassifierLossReduction - - -################################################################################ -# General training wrapper that can be re-used for all model/loss combos -# just specify different configs. - - -class BionemoLightningModule(pl.LightningModule, io.IOMixin, LightningPassthroughPredictionMixin): - """A very basic lightning module for testing the megatron strategy and the megatron-nemo2-bionemo contract.""" - - def __init__(self, config: MegatronBioNeMoTrainableModelConfig): - """Initializes the model. - - Args: - config: a Config object necessary to construct the actual nn.Module (the thing that has the parameters). - """ - super().__init__() - self.config = config - self.vp_stage = None # Add vp_stage attribute for compatibility with virtual pipeline - self.optim = MegatronOptimizerModule( - config=OptimizerConfig( - lr=1e-4, - optimizer="adam", - use_distributed_optimizer=True, - bf16=config.bf16, - fp16=config.fp16, - params_dtype=config.params_dtype, - ), - ) - # Bind the configure_optimizers method to the model - self.optim.connect(self) - - def forward(self, batch: Dict, batch_idx: int) -> Any: - """This forward will be called by the megatron scheduler and it will be wrapped. - - !!! note - - The `training_step` defines the training loop and is independent of the `forward` method here. - - Args: - batch: A dictionary of data. - batch_idx: The index of the batch. - - Returns: - The output of the model. - """ - x = batch["data"] - return self.module(x) - - def training_step(self, batch, batch_idx: Optional[int] = None): - """The training step is where the loss is calculated and the backpropagation is done. - - Background: - - NeMo's Strategy overrides this method. - - The strategies' training step will call the forward method of the model. - - That forward method then calls the wrapped forward step of MegatronParallel which wraps the forward method of the model. - - That wrapped forward step is then executed inside the Mcore scheduler, which calls the `_forward_step` method from the - MegatronParallel class. - - Which then calls the training_step function here. - - In this particular use case, we simply call the forward method of this class, the lightning module. - - Args: - batch: A dictionary of data. requires `batch_idx` as default None. - batch_idx: The index of the batch. - """ - # Forward pass - predictions = self(batch, batch_idx) - - # Calculate loss using the training loss reduction function - loss_reduction = self.training_loss_reduction() - loss_reduction.setup(batch) - loss = loss_reduction(predictions) - - # Log the training loss - self.log("train_loss", loss[1]["avg"], on_step=True, on_epoch=True, prog_bar=True, logger=True) - - return predictions - - def validation_step(self, batch, batch_idx: Optional[int] = None): - """Alias for forward step at validation.""" - predictions = self(batch, batch_idx) - - # Calculate loss using the validation loss reduction function - loss_reduction = self.validation_loss_reduction() - loss_reduction.setup(batch) - loss = loss_reduction(predictions) - # Log the validation loss - self.log( - "val_loss", - loss[1]["avg"], - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return predictions - - def predict_step(self, batch, batch_idx: Optional[int] = None): - """Alias for forward step at prediction.""" - return self(batch, batch_idx) - - def training_loss_reduction(self) -> MegatronLossReduction: - """This is the function that takes batch['loss_mask'] and the logits output by the model and reduces the loss. - - Returns: - A MegatronLossReduction - """ - return self.loss_reduction_class()() - - def validation_loss_reduction(self) -> MegatronLossReduction: - """This is the function that takes batch['loss_mask'] and the logits output by the model and reduces the loss. - - Returns: - A MegatronLossReduction - """ - return self.loss_reduction_class()() - - def test_loss_reduction(self) -> MegatronLossReduction: - """This is the function that takes batch['loss_mask'] and the logits output by the model and reduces the loss. - - Returns: - A MegatronLossReduction - """ - return self.loss_reduction_class()() - - def configure_model(self) -> None: - """This configures the model. It is called lazily by the megatron strategy.""" - self.module = self.config.configure_model() - # Ensure vp_stage attribute is set for compatibility with virtual pipeline - if not hasattr(self.module, "vp_stage"): - self.module.vp_stage = None - - def loss_reduction_class(self) -> Type[MegatronLossReduction]: - """Get the loss reduction class the user has specified in their config.""" - return self.config.get_loss_reduction_class() - - -"""Training Elements""" -checkpoint_callback = nl_callbacks.ModelCheckpoint( - save_last=True, - save_on_train_epoch_end=True, - monitor="val_loss", - always_save_context=True, # Enables the .nemo file-like checkpointing where all IOMixins are under SerDe -) - -# Set up the data module -data_module = MNISTDataModule(data_dir=str(BIONEMO_CACHE_DIR), batch_size=128) -# metric_tracker = MetricTracker(metrics_to_track_val=["loss"], metrics_to_track_train=["loss"]) diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/finetune_mnist.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/finetune_mnist.py deleted file mode 100644 index b0e081fd34..0000000000 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/finetune_mnist.py +++ /dev/null @@ -1,108 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -from pathlib import Path - -from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger -from nemo import lightning as nl -from nemo.collections import llm -from nemo.lightning import NeMoLogger, resume -from nemo.lightning.pytorch import callbacks as nl_callbacks - -from bionemo.example_model.lightning.lightning_basic import ( - BionemoLightningModule, - ExampleFineTuneConfig, - data_module, -) - - -def run_finetune(checkpoint_dir: str, name: str, directory_name: str): - """Run the finetuning step. - - Args: - checkpoint_dir: The directory with the previous model - name: The experiment name. - directory_name: The directory to write the output - Returns: - str: the path of the trained model. - """ - save_dir = Path(directory_name) / "classifier" - checkpoint_callback = nl_callbacks.ModelCheckpoint( - save_last=True, - save_on_train_epoch_end=True, - monitor="val_loss", - always_save_context=True, # Enables the .nemo file-like checkpointing where all IOMixins are under SerDe - ) - - nemo_logger2 = NeMoLogger( - log_dir=str(save_dir), - name=name, - tensorboard=TensorBoardLogger(save_dir=save_dir, name=name), - ckpt=checkpoint_callback, - extra_loggers=[CSVLogger(save_dir / "logs", name=name)], - ) - - lightning_module2 = BionemoLightningModule( - config=ExampleFineTuneConfig( - initial_ckpt_path=checkpoint_dir, - initial_ckpt_skip_keys_with_these_prefixes={"digit_classifier"}, - ) - ) - - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - ddp="megatron", - find_unused_parameters=True, - always_save_context=True, - ) - - trainer = nl.Trainer( - accelerator="gpu", - devices=1, - strategy=strategy, - limit_val_batches=5, - val_check_interval=5, - max_steps=100, - max_epochs=10, - num_nodes=1, - log_every_n_steps=5, - plugins=nl.MegatronMixedPrecision(precision="bf16-mixed"), - ) - llm.train( - model=lightning_module2, - data=data_module, - trainer=trainer, - log=nemo_logger2, - resume=resume.AutoResume( - resume_if_exists=True, - resume_ignore_no_checkpoint=True, - ), - ) - finetune_dir = Path(checkpoint_callback.last_model_path.replace(".ckpt", "")) - return finetune_dir - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--pretrain_ckpt_dirpath", type=str, help="The checkpoint directory after pre-training") - args = parser.parse_args() - - name = "example" - directory_name = "sample_models" - finetune_dir = run_finetune(args.pretrain_ckpt_dirpath, name, directory_name) - print(finetune_dir) diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/predict_mnist.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/predict_mnist.py deleted file mode 100644 index e47647ebbe..0000000000 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/predict_mnist.py +++ /dev/null @@ -1,68 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse - -from nemo import lightning as nl - -from bionemo.core import BIONEMO_CACHE_DIR -from bionemo.example_model.lightning.lightning_basic import ( - BionemoLightningModule, - ExampleFineTuneConfig, - MNISTDataModule, -) - - -def run_predict(finetune_dir: str, test_length: int): - """Run the prediction step. - - Args: - finetune_dir: The directory with the previous step - test_length: The length of the test step. - - Returns: - tensor: the outputs of the model. - """ - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - ddp="megatron", - find_unused_parameters=True, - always_save_context=True, - ) - - test_run_trainer = nl.Trainer( - accelerator="gpu", - devices=1, - strategy=strategy, - num_nodes=1, - plugins=nl.MegatronMixedPrecision(precision="bf16-mixed"), - ) - - lightning_module3 = BionemoLightningModule(config=ExampleFineTuneConfig(initial_ckpt_path=finetune_dir)) - new_data_module = MNISTDataModule(data_dir=str(BIONEMO_CACHE_DIR), batch_size=test_length, output_log=False) - - results = test_run_trainer.predict(lightning_module3, datamodule=new_data_module) - return results - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--finetune_dir", type=str, help="The directory with the fine-tuned model. ") - args = parser.parse_args() - test_length = 10_000 - results = run_predict(args.finetune_dir, test_length) - print(results) diff --git a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/pretrain_mnist.py b/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/pretrain_mnist.py deleted file mode 100644 index 8cb277ff9e..0000000000 --- a/sub-packages/bionemo-example_model/src/bionemo/example_model/training_scripts/pretrain_mnist.py +++ /dev/null @@ -1,94 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path - -from lightning.pytorch.loggers import CSVLogger, TensorBoardLogger -from nemo import lightning as nl -from nemo.collections import llm -from nemo.lightning import NeMoLogger, resume - -from bionemo.example_model.lightning.lightning_basic import ( - BionemoLightningModule, - PretrainConfig, - checkpoint_callback, - data_module, -) - - -def run_pretrain(name: str, directory_name: str): - """Run the pretraining step. - - Args: - name: The experiment name. - directory_name: The directory to write the output - Returns: - str: the path of the trained model. - """ - # Setup the logger train the model - save_dir = Path(directory_name) / "pretrain" - - nemo_logger = NeMoLogger( - log_dir=str(save_dir), - name=name, - tensorboard=TensorBoardLogger(save_dir=save_dir, name=name), - ckpt=checkpoint_callback, - extra_loggers=[CSVLogger(save_dir / "logs", name=name)], - ) - - # Set up the training module - lightning_module = BionemoLightningModule(config=PretrainConfig()) - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - ddp="megatron", - find_unused_parameters=True, - always_save_context=True, - ) - - trainer = nl.Trainer( - accelerator="gpu", - devices=1, - strategy=strategy, - limit_val_batches=5, - val_check_interval=5, - max_steps=100, - max_epochs=10, - num_nodes=1, - log_every_n_steps=5, - plugins=nl.MegatronMixedPrecision(precision="bf16-mixed"), - ) - - # This trains the model - llm.train( - model=lightning_module, - data=data_module, - trainer=trainer, - log=nemo_logger, - resume=resume.AutoResume( - resume_if_exists=True, # Looks for the -last checkpoint to continue training. - resume_ignore_no_checkpoint=True, # When false this will throw an error with no existing checkpoint. - ), - ) - return Path(checkpoint_callback.last_model_path.replace(".ckpt", "")) - - -if __name__ == "__main__": - directory_name = "sample_models" - name = "example" - pretrain_ckpt_dirpath = run_pretrain(name, directory_name) - - print(pretrain_ckpt_dirpath) diff --git a/sub-packages/bionemo-example_model/tests/bionemo/example_model/lightning/test_lightning_basic.py b/sub-packages/bionemo-example_model/tests/bionemo/example_model/lightning/test_lightning_basic.py deleted file mode 100644 index d505cf6247..0000000000 --- a/sub-packages/bionemo-example_model/tests/bionemo/example_model/lightning/test_lightning_basic.py +++ /dev/null @@ -1,187 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path -from typing import Any, Dict, Set, Tuple, Type - -import pytest -import torch -from _pytest.compat import LEGACY_PATH -from lightning.pytorch.loggers import TensorBoardLogger -from nemo import lightning as nl -from nemo.collections import llm -from nemo.lightning import NeMoLogger, io, resume -from nemo.lightning.pytorch import callbacks as nl_callbacks - -from bionemo.core import BIONEMO_CACHE_DIR -from bionemo.core.utils.dtypes import PrecisionTypes, get_autocast_dtype -from bionemo.example_model.lightning import lightning_basic as lb -from bionemo.llm.model.config import MegatronBioNeMoTrainableModelConfig -from bionemo.testing import megatron_parallel_state_utils -from bionemo.testing.callbacks import MetricTracker - - -def _train_model_get_ckpt( - name: str, - root_dir: Path, - model_cfg_cls: Type[MegatronBioNeMoTrainableModelConfig], - ckpt_path: Path | None, - skip_weight_prefixes: Set[str], - precision: PrecisionTypes, -) -> Tuple[Path, MetricTracker]: - if precision not in {"32", 32}: - extra_args: Dict[str, Any] = { - "plugins": nl.MegatronMixedPrecision(precision=precision), - } - else: - extra_args = {} - - checkpoint_callback = nl_callbacks.ModelCheckpoint( - save_last=True, - save_on_train_epoch_end=True, - monitor="val_loss", - always_save_context=True, # Enables the .nemo file-like checkpointing where all IOMixins are under SerDe - # async_save=False, # Tries to save asynchronously, previously led to race conditions. - filename="{epoch}-{step}-{val_loss:.2f}", - ) - save_dir = root_dir / name - tb_logger = TensorBoardLogger(save_dir=save_dir, name=name) - # Setup the logger and train the model - nemo_logger = NeMoLogger( - log_dir=str(root_dir), - name=name, - tensorboard=tb_logger, - ckpt=checkpoint_callback, - ) - # Needed so that the trainer can find an output directory for the profiler - # nemo_logger.save_dir = tmpdir - # ckpt_path needs to be a string for SerDe - ckpt_path_optstr: str | None = str(ckpt_path) if ckpt_path is not None else None - config = model_cfg_cls( - initial_ckpt_path=ckpt_path_optstr, - initial_ckpt_skip_keys_with_these_prefixes=sorted(skip_weight_prefixes), - # NOTE: the optimizer needs fp16 and bf16 bools set to match the model. For now get them from the config and - # set them here. - fp16=get_autocast_dtype(precision) == torch.float16, - bf16=get_autocast_dtype(precision) == torch.bfloat16, - autocast_dtype=get_autocast_dtype(precision), - params_dtype=get_autocast_dtype(precision), - pipeline_dtype=get_autocast_dtype(precision), - enable_autocast=precision not in {32, "32"}, - ) - - lightning_module = lb.BionemoLightningModule( - config=config, - ) - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - ddp="megatron", - find_unused_parameters=True, - always_save_context=True, - ) - metric_tracker = MetricTracker(metrics_to_track_val=["loss"], metrics_to_track_train=["loss"]) - trainer = nl.Trainer( - accelerator="gpu", - devices=1, - strategy=strategy, - limit_val_batches=5, - val_check_interval=5, - max_steps=100, - num_nodes=1, - log_every_n_steps=5, - callbacks=[metric_tracker], - **extra_args, - ) - data_module = lb.MNISTDataModule(data_dir=str(BIONEMO_CACHE_DIR), batch_size=64) # Re-use the same data directory - llm.train( - model=lightning_module, - data=data_module, - trainer=trainer, - log=nemo_logger, - resume=resume.AutoResume( - resume_if_exists=True, # Looks for the -last checkpoint to continue training. - resume_ignore_no_checkpoint=True, # When false this will throw an error with no existing checkpoint. - ), - ) - ckpt_dirpath = Path(checkpoint_callback.last_model_path.replace(".ckpt", "")) - return ckpt_dirpath, metric_tracker - - -@pytest.mark.needs_gpu -@pytest.mark.parametrize("precision", [32, "bf16-mixed"]) -def test_train_mnist_litautoencoder_with_megatron_strategy_single_gpu(tmpdir: LEGACY_PATH, precision: PrecisionTypes): - with megatron_parallel_state_utils.distributed_model_parallel_state(): - ckpt_path, initial_metrics = _train_model_get_ckpt( - name="test_experiment", - root_dir=tmpdir / "pretrain", - model_cfg_cls=lb.PretrainConfig, - ckpt_path=None, - skip_weight_prefixes=set(), - precision=precision, - ) - weights_ckpt = ckpt_path / "weights" - assert weights_ckpt.exists() - assert weights_ckpt.is_dir() - assert io.is_distributed_ckpt(weights_ckpt) - assert initial_metrics.collection_train["loss"][0] > initial_metrics.collection_train["loss"][-1] - with megatron_parallel_state_utils.distributed_model_parallel_state(): - simple_ft_checkpoint, simple_ft_metrics = _train_model_get_ckpt( - name="simple_finetune_experiment", - root_dir=tmpdir / "simple_finetune", # new checkpoint will land in a subdir of this - model_cfg_cls=lb.PretrainConfig, # same config as before since we are just continuing training - ckpt_path=ckpt_path, # specify the initial checkpoint path now - skip_weight_prefixes=set(), # no new weights in this model need skipping - precision=precision, - ) - weights_ckpt = simple_ft_checkpoint / "weights" - assert weights_ckpt.exists() - assert weights_ckpt.is_dir() - assert io.is_distributed_ckpt(weights_ckpt) - assert initial_metrics.collection_train["loss"][-1] > simple_ft_metrics.collection_train["loss"][0] - with megatron_parallel_state_utils.distributed_model_parallel_state(): - add_head_checkpoint, add_head_ft_metrics = _train_model_get_ckpt( - name="add_head_finetune_experiment", - root_dir=tmpdir / "add_head_finetune", - model_cfg_cls=lb.ExampleFineTuneBothConfig, # config that returns a model/loss with a new task head - ckpt_path=simple_ft_checkpoint, # cumulatively modify a checkpoint with subsequent experiments, (optional) - skip_weight_prefixes={"digit_classifier"}, # The new head weights are not in the ckpt so need skipping. - precision=precision, - ) - weights_ckpt = add_head_checkpoint / "weights" - assert weights_ckpt.exists() - assert weights_ckpt.is_dir() - assert io.is_distributed_ckpt(weights_ckpt) - assert add_head_ft_metrics.collection_train["loss"][0] > add_head_ft_metrics.collection_train["loss"][-1] - # We're adding a new loss, so the loss should be worse initially at least. - assert add_head_ft_metrics.collection_train["loss"][0] > simple_ft_metrics.collection_train["loss"][-1] - - with megatron_parallel_state_utils.distributed_model_parallel_state(): - drop_head_checkpoint, drop_head_ft_metrics = _train_model_get_ckpt( - name="drop_head_finetune_experiment", - root_dir=tmpdir / "drop_head_finetune", - model_cfg_cls=lb.ExampleFineTuneConfig, # config that drops the decoder and head -> only cls now - ckpt_path=add_head_checkpoint, # cumulatively build on the config that had this cls head (optional) - skip_weight_prefixes=set(), # no new parameters vs prior cfg, will continue training cls head by itself - precision=precision, - ) - weights_ckpt = drop_head_checkpoint / "weights" - assert weights_ckpt.exists() - assert weights_ckpt.is_dir() - assert io.is_distributed_ckpt(weights_ckpt) - # We're dropping a loss, so initially we should be better than before - assert drop_head_ft_metrics.collection_train["loss"][0] > drop_head_ft_metrics.collection_train["loss"][-1] - assert add_head_ft_metrics.collection_train["loss"][-1] > drop_head_ft_metrics.collection_train["loss"][0] diff --git a/sub-packages/bionemo-llm/DEPRECATED b/sub-packages/bionemo-llm/DEPRECATED deleted file mode 100644 index 4dcaeae022..0000000000 --- a/sub-packages/bionemo-llm/DEPRECATED +++ /dev/null @@ -1,7 +0,0 @@ -This sub-package (sub-packages/bionemo-llm) is deprecated. - -This package provided NeMo/Megatron-based LLM components for BioNeMo. -It is no longer maintained as the framework has moved to self-contained -recipes in bionemo-recipes/. - -This package will be removed in a future release. diff --git a/sub-packages/bionemo-llm/LICENSE b/sub-packages/bionemo-llm/LICENSE deleted file mode 120000 index 61bc2cda7e..0000000000 --- a/sub-packages/bionemo-llm/LICENSE +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE/license.txt \ No newline at end of file diff --git a/sub-packages/bionemo-llm/README.md b/sub-packages/bionemo-llm/README.md deleted file mode 100644 index 5729ccac18..0000000000 --- a/sub-packages/bionemo-llm/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# bionemo-llm - -> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to -> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. - -The Bionemo Large Language Model (LLM) submodule contains common code used in submodules that train LLMs on biological -datasets. This includes data masking and collate functions, the -bio-BERT common architecture code, loss functions, and other NeMo / Megatron-LM compatibility functions. Sub-packages -should only depend on `bionemo-llm` if they need access to NeMo and Megatron-LM. diff --git a/sub-packages/bionemo-llm/VERSION b/sub-packages/bionemo-llm/VERSION deleted file mode 100644 index 59aa62c1fa..0000000000 --- a/sub-packages/bionemo-llm/VERSION +++ /dev/null @@ -1 +0,0 @@ -2.4.5 diff --git a/sub-packages/bionemo-llm/pyproject.toml b/sub-packages/bionemo-llm/pyproject.toml deleted file mode 100644 index b947ce3b4f..0000000000 --- a/sub-packages/bionemo-llm/pyproject.toml +++ /dev/null @@ -1,44 +0,0 @@ -[build-system] -requires = ["setuptools>=64", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -name = "bionemo-llm" -readme = "README.md" -description = "[DEPRECATED] BioNeMo Large Language Model Components using NeMo and Megatron. No longer maintained." -authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] -requires-python = ">=3.10" -license = { file = "LICENSE" } -dynamic = ["version"] -dependencies = [ - # bionemo sub-packages - 'bionemo-core', - # external - 'lightning>=2.2.1', - 'megatron-core', - 'nemo_toolkit[nlp,eval]>=2.2.1', - 'nemo-run', - 'hatchling', -] - -[project.optional-dependencies] -test = [ - 'bionemo-testing' -] -te = [ - # TE & Apex need to be installed after PyTorch, NVCC, and CUDA. - # TODO(@pstjohn, @cspades): Figure out how to do this without post-installation. - 'transformer_engine[pytorch]' -] - -[tool.setuptools.packages.find] -where = ["src"] -include = ["bionemo.*"] -namespaces = true -exclude = ["test*."] - -[tool.setuptools.dynamic] -version = { file = "VERSION" } - -[tool.uv] -cache-keys = [{ git = true }] diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py deleted file mode 100644 index 625a5ad983..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings - -warnings.warn( - "bionemo.llm (sub-packages/bionemo-llm) is deprecated and will be removed in a future release. " - "This package is no longer maintained.", - DeprecationWarning, - stacklevel=2, -) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/api.py b/sub-packages/bionemo-llm/src/bionemo/llm/api.py deleted file mode 100644 index 53cb77010b..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/api.py +++ /dev/null @@ -1,51 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from abc import ABC, abstractmethod -from typing import Generic, Sequence, TypeVar - -from megatron.core.transformer.module import MegatronModule -from nemo.lightning.megatron_parallel import DataT, MegatronLossReduction - - -__all__: Sequence[str] = ( - "BionemoMegatronModel", - "MegatronLossReduction", # re-export Megatron's loss definition as it's a core part of the bionemo-llm API - "MegatronLossType", - "MegatronModelType", -) - - -class BionemoMegatronModel(MegatronModule, Generic[DataT], ABC): - """Models that use Megatron must be a MegatronModule type. - - The only major difference is the explicit `forward` pass method signature that makes this class compatible - with bionemo-core's `Model` structural type. - """ - - @abstractmethod - def forward(self, *args, **kwargs) -> DataT: # noqa: D102 - raise NotImplementedError() - - -# Typechecking: ensure that the bionemo megatron model abstraction is compliant with bionemo-core's Model -# _: type[Model] = BionemoMegatronModel - - -MegatronModelType = TypeVar("MegatronModelType", bound=MegatronModule) -# bound=BionemoMegatronModel) - -MegatronLossType = TypeVar("MegatronLossType", bound=MegatronLossReduction) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/collate.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/collate.py deleted file mode 100644 index 9f72d4d04e..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/collate.py +++ /dev/null @@ -1,120 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -from typing import Sequence, TypeVar - -import torch - -from bionemo.llm.data import types - - -logger = logging.getLogger(__name__) - -_T = TypeVar("_T", bound=dict[str, torch.Tensor]) -_warned_once: bool = False - - -MLM_LOSS_IGNORE_INDEX = -100 # This should match the masked value used in the MLM loss mask. - - -def padding_collate_fn( - batch: Sequence[_T], - padding_values: dict[str, int], - min_length: int | None = None, - max_length: int | None = None, -) -> _T: - """Collate function with padding. - - Args: - batch: List of samples, each of which is a dictionary of tensors. - padding_values: A dictionary of padding values for each tensor key. - min_length: Minimum length of the output batch; tensors will be padded to this length. If not - provided, no extra padding beyond the max_length will be added. - max_length: Maximum length of the sequence. If not provided, tensors will be padded to the - longest sequence in the batch. - - Returns: - A collated batch with the same dictionary input structure. - """ - global _warned_once - keys: set[str] | None = None - - if len(batch) == 0: # empty batches passed through in DDP inference - return {} - - for entry in batch: - # First check that we have sane batches where keys align with each other. - if keys is None: - keys = set(entry.keys()) - else: - if set(entry.keys()) != keys: - raise ValueError(f"All keys in inputs must match each other. Got: {[sorted(e.keys()) for e in batch]}") - if entry.keys() != padding_values.keys(): - if not _warned_once: - extra_keys = {k for k in entry.keys() if k not in padding_values} - missing_keys = {k for k in padding_values.keys() if k not in entry} - logger.warning( - f"Extra keys in batch that will not be padded: {extra_keys}. Missing keys in batch: {missing_keys}" - ) - _warned_once = True - - def _pad(tensors, padding_value): - if max_length is not None: - tensors = [t[:max_length] for t in tensors] - batched_tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True, padding_value=padding_value) - if min_length is None: - return batched_tensors - return torch.nn.functional.pad(batched_tensors, (0, min_length - batched_tensors.size(1)), value=padding_value) - - return { - k: _pad([s[k] for s in batch], padding_values[k]) - if k in padding_values - else torch.stack([s[k] for s in batch]) - for k in batch[0].keys() - } # type: ignore[return-value] - - -def bert_padding_collate_fn( - batch: Sequence[types.BertSample], - padding_value: int, - min_length: int | None = None, - max_length: int | None = None, -) -> types.BertSample: - """Padding collate function for BERT dataloaders. - - Args: - batch (list): List of samples. - padding_value (int, optional): The tokenizer's pad token ID. - min_length: Minimum length of the output batch; tensors will be padded to this length. If not - provided, no extra padding beyond the max_length will be added. - max_length: Maximum length of the sequence. If not provided, tensors will be padded to the - longest sequence in the batch. - """ - padding_values = { - "text": padding_value, - "types": 0, - "attention_mask": False, - "labels": MLM_LOSS_IGNORE_INDEX, # This should match the masked value used in the MLM loss mask. - "loss_mask": False, - "is_random": 0, - } - return padding_collate_fn( - batch=batch, # type: ignore[assignment] - padding_values=padding_values, - min_length=min_length, - max_length=max_length, - ) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/datamodule.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/datamodule.py deleted file mode 100644 index c9408846b5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/datamodule.py +++ /dev/null @@ -1,159 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import functools -from typing import Any, Dict, Literal - -import lightning.pytorch as pl -from megatron.core.num_microbatches_calculator import update_num_microbatches -from nemo.lightning.data import WrappedDataLoader -from nemo.lightning.pytorch.plugins import MegatronDataSampler -from torch.utils.data import DataLoader, Dataset - -from bionemo.llm.data import collate - - -class MegatronDataModule(pl.LightningDataModule): - """A mixin that adds a `state_dict` and `load_state_dict` method for datamodule training resumption in NeMo.""" - - def __init__(self, *args, **kwargs): - """Set init_global_step to 0 for datamodule resumption.""" - super().__init__(*args, **kwargs) - self.init_global_step = 0 - - def update_init_global_step(self): - """Please always call this when you get a new dataloader... if you forget, your resumption will not work.""" - self.init_global_step = self.trainer.global_step # Update the init_global_step whenever we re-init training - self.data_sampler.init_global_step = ( - self.init_global_step - ) # Update the init_global_step whenever we re-init training - - def state_dict(self) -> Dict[str, Any]: - """Called when saving a checkpoint, implement to generate and save datamodule state. - - Returns: - A dictionary containing datamodule state. - - """ - consumed_samples = self.data_sampler.compute_consumed_samples(self.trainer.global_step - self.init_global_step) - return {"consumed_samples": consumed_samples} - - def load_state_dict(self, state_dict: Dict[str, Any]) -> None: - """Called when loading a checkpoint, implement to reload datamodule state given datamodule stat. - - Args: - state_dict: the datamodule state returned by ``state_dict``. - - """ - consumed_samples = state_dict["consumed_samples"] - self.data_sampler.init_consumed_samples = consumed_samples - self.data_sampler.prev_consumed_samples = consumed_samples - - update_num_microbatches( - consumed_samples=consumed_samples, - consistency_check=False, - ) - self.data_sampler.if_first_step = 1 - - -class MockDataModule(MegatronDataModule): - """A simple data module that just wraps input datasets with dataloaders.""" - - def __init__( - self, - train_dataset: Dataset | None = None, - valid_dataset: Dataset | None = None, - test_dataset: Dataset | None = None, - predict_dataset: Dataset | None = None, - pad_token_id: int = 0, - min_seq_length: int | None = None, - max_seq_length: int = 512, - micro_batch_size: int = 16, - global_batch_size: int = 16, - num_workers: int = 4, - ) -> None: - """Initialize the MockDataModule.""" - super().__init__() - self.train_dataset = train_dataset - self.valid_dataset = valid_dataset - self.test_dataset = test_dataset - self.predict_dataset = predict_dataset - self.pad_token_id = pad_token_id - self.min_seq_length = min_seq_length - self.max_seq_length = max_seq_length - self.batch_size = micro_batch_size - self.num_workers = num_workers - self.data_sampler = MegatronDataSampler( - seq_len=max_seq_length, - micro_batch_size=micro_batch_size, - global_batch_size=global_batch_size, - dataloader_type="single", - output_log=False, - ) - - def setup(self, stage: str | None = None) -> None: # noqa: D102 - pass - - def _make_dataloader( - self, dataset: Dataset, mode: Literal["train", "validation", "test", "predict"] - ) -> WrappedDataLoader: - if mode not in ["predict", "test"]: - self.update_init_global_step() - - return WrappedDataLoader( - mode=mode, - dataset=dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=functools.partial( - collate.bert_padding_collate_fn, - padding_value=self.pad_token_id, - min_length=self.min_seq_length, - max_length=self.max_seq_length, - ), - ) - - def train_dataloader(self) -> DataLoader: # noqa: D102 - if self.train_dataset is None: - raise ValueError("No train_dataset was provided") - return self._make_dataloader( - self.train_dataset, - mode="train", - ) - - def val_dataloader(self) -> DataLoader: # noqa: D102 - if self.valid_dataset is None: - raise ValueError("No valid_dataset was provided") - return self._make_dataloader( - self.valid_dataset, - mode="validation", - ) - - def test_dataloader(self) -> DataLoader: # noqa: D102 - if self.test_dataset is None: - raise ValueError("No test_dataset was provided") - return self._make_dataloader( - self.test_dataset, - mode="test", - ) - - def predict_dataloader(self) -> DataLoader: # noqa: D102 - if self.predict_dataset is None: - raise ValueError("No predict_dataset was provided") - return self._make_dataloader( - self.predict_dataset, - mode="predict", - ) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/label2id_tokenizer.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/label2id_tokenizer.py deleted file mode 100644 index dece1a7d2d..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/label2id_tokenizer.py +++ /dev/null @@ -1,123 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Dict, Iterable, List, Sequence, Union - -from nemo.collections.common.tokenizers import TokenizerSpec - - -__all__: Sequence[str] = ("Label2IDTokenizer",) - - -class Label2IDTokenizer(TokenizerSpec): - """Initializes simple Char Tokenizer. - - Intended to be used for extracting class labels - for classification models such as secondary - structure prediction model, where each class is - encoded with a character (ex. "C", "H", "E") - - Examples: - >>> tokenizer = Label2IDTokenizer() - >>> seqs = ['CHE', 'CCC', 'EHH'] - >>> tokenizer = tokenizer.build_vocab(s) - - """ - - def __init__(self) -> None: # noqa: D107 - super().__init__() - self.vocab: Dict[str, int] = {} - self.decode_vocab: Dict[int, str] = {id_: token for token, id_ in self.vocab.items()} - - @property - def vocab_size(self) -> int: - """Return the size of the vocab being used.""" - return len(self.vocab) - - def text_to_tokens(self, text: str) -> List[str]: # noqa: D102 - return list(text) - - def tokens_to_text(self, tokens: List[str]) -> str: # noqa: D102 - return "".join(tokens) - - def tokens_to_ids(self, tokens: List[str]) -> List[int]: - """Convert tokens to indexes/ids. - - Args: - tokens: Containing tokens - Returns: - Containing ID's for each token - """ - ids = [] - for token in tokens: - id_ = self.vocab.get(token) - if id_ is None: - raise ValueError(f"Do not recognize token: {token}") - else: - ids.append(id_) - return ids - - def ids_to_tokens(self, ids: List[int]) -> List[str]: - """Convert Ids to tokens. - - Args: - ids: Containg ids for each token - Returns: - Containing tokens - """ - tokens = [] - for id_ in ids: - token = self.decode_vocab.get(id_) - if token is None: - raise ValueError(f"Do not recognize ID: {id_}") - tokens.append(token) - return tokens - - def text_to_ids(self, text: str) -> List[int]: - """Converts text to ids. - - Args: - text (str): String containing text to convert - Returns: - (List[int]): Id's corresponding to the tokenization - of the text - """ - tokens = self.text_to_tokens(text) - return self.tokens_to_ids(tokens) - - def ids_to_text(self, ids: List[int]) -> str: # noqa: D102 - tokens = self.ids_to_tokens(ids) - return self.tokens_to_text(tokens) - - def build_vocab(self, strings: Union[str, Iterable[str]]) -> "Label2IDTokenizer": - """Builds the vocabulary of the tokenizer from strings - Args: - strings: (Union[str, Iterable[str]]): Strings to - build the vocabulary with. If a string is supplied, - then the vocabulary is built from the single string. - Otherwise, the vocabulary is progressively built - from all the strings in `strings`. - """ # noqa: D205 - if isinstance(strings, str): - strings = [strings] - - for string in strings: - for token in string: - if token not in self.vocab: - self.vocab[token] = len(self.vocab) - self.decode_vocab[self.vocab[token]] = token - - return self diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/masking.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/masking.py deleted file mode 100644 index 4f85685364..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/masking.py +++ /dev/null @@ -1,167 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from dataclasses import dataclass - -import torch - -from bionemo.llm.data.types import Tokenizer - - -@dataclass(frozen=True) -class BertMaskConfig: - """Configuration for masking tokens in a BERT-style model. - - Attributes: - mask_prob: Probability of masking a token. - mask_token_prob: Probability of replacing a masked token with the mask token. - random_token_prob: Probability of replacing a masked token with a random token. - """ - - tokenizer: Tokenizer - random_tokens: range - mask_prob: float = 0.15 - mask_token_prob: float = 0.8 - random_token_prob: float = 0.1 - - def __post_init__(self) -> None: - """Check that the sum of `mask_token_prob` and `random_token_prob` is less than or equal to 1.0. - - Raises: - ValueError: If the sum of `mask_token_prob` and `random_token_prob` is greater than 1.0. - """ - if self.random_token_prob + self.mask_token_prob > 1.0: - raise ValueError("Sum of random_token_prob and mask_token_prob must be less than or equal to 1.0.") - - -def apply_bert_pretraining_mask( - tokenized_sequence: torch.Tensor, random_seed: int, mask_config: BertMaskConfig -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Applies the pretraining mask to a tokenized sequence. - - Args: - tokenized_sequence: Tokenized protein sequence. - random_seed: Random seed for reproducibility. - mask_config: Configuration for masking tokens in a BERT-style model. - - Returns: - masked_sequence: - The tokenized sequence with some tokens masked. - labels: - A tensor the same shape as `masked_sequence` containing labels for the masked tokens, with -1 for non-masked - tokens. - loss_mask: - A boolean tensor the same shape as `masked_sequence`, where 'True' indicates which tokens should be included - in the loss. - """ - if mask_config.tokenizer.mask_token_id is None: - raise ValueError("Tokenizer must have a mask token.") - - if mask_config.random_token_prob + mask_config.mask_token_prob > 1.0: - raise ValueError("Sum of random_token_prob and mask_token_prob must be less than or equal to 1.0.") - - # Set the seed so that __getitem__(idx) is always deterministic. - # This is required by Megatron-LM's parallel strategies. - generator = torch.Generator().manual_seed(random_seed) - - mask_stop_1 = mask_config.mask_prob * mask_config.mask_token_prob - mask_stop_2 = mask_config.mask_prob * (mask_config.mask_token_prob + mask_config.random_token_prob) - - random_draws = torch.rand(tokenized_sequence.shape, generator=generator) # Random draws for each token in [0, 1). - - # Overall mask for a token being masked in some capacity - either mask token, random token, or left as-is - # (identity). We don't want to mask special tokens. - loss_mask = ~torch.isin(tokenized_sequence, torch.tensor(mask_config.tokenizer.all_special_ids)) - loss_mask &= random_draws < mask_config.mask_prob - - # The first `mask_token_prob` fraction of the `mask_prob` tokens are replaced with the mask token. - mask_token_mask = (random_draws < mask_stop_1) & loss_mask - - # The next `random_token_prob` fraction of the `mask_prob` tokens are replaced with a random token. - random_token_mask = ((random_draws >= mask_stop_1) & (random_draws < mask_stop_2)) & loss_mask - - # The remaining tokens are implicitly left as-is, representing an identity mask. - - # Mask the tokens. - masked_sequence = tokenized_sequence.clone() - masked_sequence[mask_token_mask] = mask_config.tokenizer.mask_token_id - num_random_tokens: int = random_token_mask.sum().item() # type: ignore[assignment] - masked_sequence[random_token_mask] = torch.randint( - low=mask_config.random_tokens.start, - high=mask_config.random_tokens.stop, - size=(num_random_tokens,), - dtype=masked_sequence.dtype, - generator=generator, - ) - - # Create the labels for the masked tokens. - labels = tokenized_sequence.clone() - labels[~loss_mask] = -100 # Ignore loss for non-masked tokens. - - return masked_sequence, labels, loss_mask - - -def add_cls_and_eos_tokens( - sequence: torch.Tensor, - labels: torch.Tensor, - loss_mask: torch.Tensor, - cls_token: int | None = None, - eos_token: int | None = None, -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Prepends the CLS token and appends the EOS token to the masked sequence, updating the loss mask and labels. - - These labels should never be masked, so this is done after the masking step. - - Args: - sequence: The input (likely masked) sequence. - labels: The true values of the input sequence at the mask positions. - loss_mask: A boolean tensor indicating which tokens should be included in the loss. - cls_token: The token to use for the CLS token. If None, no CLS token is added. - eos_token: The token to use for the EOS token. If None, no EOS token is added. - - Returns: - The same input tensors with the CLS and EOS tokens added, and the labels and loss_mask updated accordingly. - """ - # Prepend the CLS token and append the EOS token, and update the loss mask and labels accordingly. - sequence = torch.cat( - [ - torch.tensor([cls_token], dtype=sequence.dtype) - if cls_token is not None - else torch.tensor([], dtype=sequence.dtype), - sequence, - torch.tensor([eos_token], dtype=sequence.dtype) - if eos_token is not None - else torch.tensor([], dtype=sequence.dtype), - ] - ) - - labels = torch.cat( - [ - torch.tensor([-1], dtype=labels.dtype) if cls_token is not None else torch.tensor([], dtype=labels.dtype), - labels, - torch.tensor([-1], dtype=labels.dtype) if eos_token is not None else torch.tensor([], dtype=labels.dtype), - ] - ) - - loss_mask = torch.cat( - [ - torch.tensor([False]) if cls_token is not None else torch.tensor([], dtype=loss_mask.dtype), - loss_mask, - torch.tensor([False]) if eos_token is not None else torch.tensor([], dtype=loss_mask.dtype), - ] - ) - - return sequence, labels, loss_mask diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/data/types.py b/sub-packages/bionemo-llm/src/bionemo/llm/data/types.py deleted file mode 100644 index 2cf909ecb4..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/data/types.py +++ /dev/null @@ -1,57 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Protocol, Sequence, TypedDict - -from torch import Tensor - - -__all__: Sequence[str] = ( - "BertSample", - "Tokenizer", -) - - -class BertSample(TypedDict): - """The type expected by NeMo/Megatron for a single dataset item. - - Attributes: - text: The tokenized, masked input text. - types: The token type ids, if applicable. - attention_mask: A mask over all valid tokens, excluding padding. - labels: The true values of the masked tokens at each position covered by loss_mask. - loss_mask: The mask over the text indicating which tokens are masked and should be predicted. - is_random: ?? - """ - - text: Tensor - types: Tensor - attention_mask: Tensor - labels: Tensor - loss_mask: Tensor - is_random: Tensor - - -class Tokenizer(Protocol): - """Required attributes for a tokenizers provided to apply_bert_pretraining_mask.""" - - @property - def mask_token_id(self) -> int | None: # noqa: D102 - ... - - @property - def all_special_ids(self) -> list[int]: # noqa: D102 - ... diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/lightning.py b/sub-packages/bionemo-llm/src/bionemo/llm/lightning.py deleted file mode 100644 index 327038abc6..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/lightning.py +++ /dev/null @@ -1,452 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Any, Callable, Generic, Iterable, Iterator, List, Literal, Optional, Sequence, Tuple, TypeVar, Union - -import lightning.pytorch as pl -import torch.distributed -from megatron.core import parallel_state -from megatron.core.optimizer.optimizer_config import OptimizerConfig -from nemo.lightning import io as nlio -from nemo.lightning.megatron_parallel import ( - DataT, - MegatronLossReduction, - ReductionT, -) -from nemo.lightning.pytorch.optim import MegatronOptimizerModule -from torch import Tensor - -from bionemo.core.model.config import BionemoTrainableModelConfig -from bionemo.llm.api import MegatronLossType, MegatronModelType - - -__all__: Sequence[str] = ( - "BionemoLightningModule", - "LightningPassthroughPredictionMixin", - "PassthroughLossReduction", - "batch_collator", - "default_megatron_optimizer", - "get_dtype_device", -) - - -T = TypeVar("T") -BatchT = TypeVar("BatchT") - - -def some_first(seq: Iterable[Optional[T]]) -> T: - """Returns the first non-None value from the sequence or fails""" # noqa: D415 - for s in seq: - if s is not None: - return s - raise ValueError("non-None value not found") - - -def get_dtype_device(torch_object) -> Tuple[torch.dtype, torch.device]: # noqa: D103 - match torch_object: - case []: - raise ValueError("Looking up dtype on an empty list") - case {**data} if not data: - raise ValueError("Looking up dtype on an empty dict") - case Tensor(dtype=dtype, device=device): - return dtype, device - case torch.nn.Module() as m: - try: - p = next(m.parameters()) - except StopIteration as e: - raise ValueError("Cannot get dtype on a torch module with no parameters.") from e - return p.dtype, p.device - case dict(keys=_, values=values): - val = some_first(values()) - return get_dtype_device(val) - case list() as l: - val = some_first(l) - return get_dtype_device(val) - case _: - raise TypeError("Got something we didnt expect") - - -# NOTE(SKH): These types are all wrong, but are close. The inner type must always be a Tensor, but the outer container should be generic. -def batch_collator( - batches: Optional[Union[Tuple[ReductionT], List[ReductionT]]], - batch_dim: int = 0, - seq_dim: int = 1, - batch_dim_key_defaults: dict[str, int] = {"token_logits": 1}, - seq_dim_key_defaults: dict[str, int] = {"token_logits": 0}, - preferred_gpu: int = 0, -) -> Optional[ReductionT]: - """Takes a sequence of batches and collates them into a single batch. - - This is distinct from the standard pytorch default_collator since it does - not add the batch dimension, it's assumed the batch - dimension is already present in the input, as would be the case when - parallelizing across minibatches. - - IMPORTANT: The underlying data primitive _must_ be a torch Tensor. The input to this function is a recurisve type, - there can be any amount of nesting between dictionaries, tuples, and lists, as long as the inner type is a n-d Tensor. - - Examples: - Outer container = Dict: - [{'a': Tensor([1]), 'b': Tensor([2])}, {'a': Tensor([2]), 'b': Tensor([3])}] -> {'a': Tensor([1, 2]), 'b': Tensor([2, 3])} - Outer container = List: - [[Tensor([1]), Tensor([2])], [Tensor([2]), Tensor([3])]] -> [Tensor([1, 2]), Tensor([2, 3])] - Outer container = Tuple: - ([Tensor([1]), Tensor([2])], [Tensor([2]), Tensor([3])]) -> (Tensor([1, 2]), Tensor([2, 3])) - - Args: - batches (Optional[Sequence[ReductionT]]): sequence of batches to collate into a single batch. - batch_dim: If you know that the batch dim for the batch you are concatenating is not the 0th dimension (for - example it is sequence first) then supply that dimension. - seq_dim: If you know that the sequence dim for the batch you are concatenating is not the 1st dimension (for - example it is sequence first) then supply that dimension. This is used for padding to the max length. - batch_dim_key_defaults (dictionary of keys to integers): If your batch is a dictionary and you know that some - keys have non-standard (0) batch dimensions, supply those here. By default "token_logits" has batch dim 1 - and otherwise all keys are assumed to have batch dim 0. - seq_dim_key_defaults (dictionary of keys to integers): If your batch is a dictionary and you know that some - keys have non-standard (1) sequence dimensions, supply those here. By default "token_logits" has seq dim 0 - and otherwise all keys are assumed to have seq dim 1. - preferred_gpu: If any of the tensors are on any GPU, all of them will be moved to this GPU. 0 by default. - - Returns: - A single batch of the same type as the elements of your input sequence. - """ - match batches: - # Handle base-cases for batch concatenation, either a list of None or a list of tensors - case [None, *_]: - return None - case [Tensor(), *_]: - # If any tensor is on a GPU, move all to preferred GPU - if any(t.is_cuda for t in batches): - device = torch.device(f"cuda:{preferred_gpu}") - batches = [t.to(device) for t in batches] - # First shortcut if all tensors are 1D (they have at least one batch dim, and it must be at 0) - if len(batches) > 0 and isinstance(batches[0], Tensor) and batches[0].ndim == 1: - return torch.cat(batches, dim=0) - # Find max sequence length across all tensors - max_seq_len = max(batch.size(seq_dim) for batch in batches) - # Pad each tensor to max length along seq_dim - padded_batches = [] - for batch in batches: - # Initialize padding tuple - needs 2 values per dim, starting from last dim - # e.g. for 3D tensor: [left_pad_dim2, right_pad_dim2, left_pad_dim1, right_pad_dim1, left_pad_dim0, right_pad_dim0] - pad_size = [0] * (2 * batch.ndim) - # Calculate padding needed at end of sequence dimension - pad_amount = max_seq_len - batch.size(seq_dim) - # Pad end of sequence dimension by putting padding amount in correct position - # For seq_dim=1 in 3D tensor: [0, 0, 0, pad_amount, 0, 0] - pad_size[2 * (batch.ndim - 1 - seq_dim) + 1] = pad_amount - padded_batch = torch.nn.functional.pad(batch, tuple(pad_size)) - padded_batches.append(padded_batch) - padded_batch = torch.cat(padded_batches, dim=batch_dim) - assert padded_batch.size(seq_dim) == max_seq_len - return padded_batch - # Next 3 calls are the recursive calls into the sub-structures of the batch. We handle dictionaries, tuples, and lists - case [dict(), *_]: - return { - key: batch_collator( - [batch[key] for batch in batches], - batch_dim=batch_dim_key_defaults.get(key, batch_dim), - seq_dim=seq_dim_key_defaults.get(key, seq_dim), - batch_dim_key_defaults=batch_dim_key_defaults, - seq_dim_key_defaults=seq_dim_key_defaults, - preferred_gpu=preferred_gpu, - ) - for key in batches[0] - } - case [tuple(), *_]: - return tuple( - batch_collator( - [batch[i] for batch in batches], - batch_dim=batch_dim, - seq_dim=seq_dim, - batch_dim_key_defaults=batch_dim_key_defaults, - seq_dim_key_defaults=seq_dim_key_defaults, - preferred_gpu=preferred_gpu, - ) - for i in range(len(batches[0])) - ) - case [list(), *_]: - return [ - batch_collator( - [batch[i] for batch in batches], - batch_dim=batch_dim, - seq_dim=seq_dim, - batch_dim_key_defaults=batch_dim_key_defaults, - seq_dim_key_defaults=seq_dim_key_defaults, - preferred_gpu=preferred_gpu, - ) - for i in range(len(batches[0])) - ] - # Final cases shouldn't happen, an empty sequence (no batches), or "other". - case []: - raise ValueError("Cannot process an empty sequence") - case _: - raise ValueError("Unsupported input structure in batch_collator") - - -# TODO(@jstjohn): Properly use the Generic for DataT and ReductionT usage. Define our own batch/output types. -# TODO(@skothenhill): Re-think the generics here- the way that `batch_collator` is expressed, `batches` should be a recursive generic type. -class PassthroughLossReduction(MegatronLossReduction, Generic[DataT]): - """A workaround for nemo/megatron to perform inference. - - Internally in NeMo2.0 the forward step is always expected to return a loss reduction class, and forward is - expected to return a loss. This class hijacks that mechanism to instead pass through the forward output unperturbed - as the loss (to enable inference in the predict step), and then the reduce method is used to collate the batch of - forward outputs into a single batch. This supports the model forward output being a tensor, dict, tuple, or list of - tensors. The inner type _must always be a Tensor_. - """ - - def forward(self, batch: DataT, forward_out: DataT) -> Tuple[Tensor, DataT]: - """Passes through the `forward_out` value as the 2nd tuple element. - - Args: - batch: The batch of data that was passed through the model to generate output. NOTE: this value is ignored. - forward_out: The output from your model's forward pass. - - Returns: - A tuple containing the loss tensor (dummy in this case) and the forward output (unmodified). - """ - return torch.zeros((1, 1)), forward_out - - def reduce(self, forward_out: List[DataT]) -> DataT: - """Collates list of model's outputs into a single output.""" - return batch_collator(forward_out) - - -class LightningPassthroughPredictionMixin: - """A mixin that allows your model to do inference on the predict step by hijacking nemo's loss reduction mechanism.""" - - def predict_loss_reduction(self) -> PassthroughLossReduction: - """For the predict step, pass through the forward pass output.""" - return PassthroughLossReduction() - - -ForwardStep = Callable[[MegatronModelType, DataT], DataT] -"""Megatron-compatible forward pass function. -""" - -DataStep = Callable[[Iterator[DataT]], DataT] -"""Batches together an iterator of individual examples. - -Necessary for compatability with Megatron. This function type is similiar to the collate function of PyTorch. - -A `DataStep` function takes an iterator over individual examples. Each example may be a tensor, sequence of tensors, -or a set of named tensors (provided as a `dict` mapping `str` names to each `Tensor`). Each iteration must -yield the same type. - -The output of this function will mirror the same structure of each yielded example. It will be a concatenation of all -of the examples in the iterator. -""" - - -class BionemoLightningModule( - Generic[MegatronModelType, MegatronLossType], - pl.LightningModule, - nlio.IOMixin, - nlio.ConnectorMixin, - LightningPassthroughPredictionMixin, -): - """Reusable PyTorch Lightning module for Megatron models that is compatible with NeMo's conventions.""" - - def __init__( - self, - config: BionemoTrainableModelConfig[MegatronModelType, MegatronLossType], - forward_step: ForwardStep, - data_step: DataStep, - optimizer: MegatronOptimizerModule, - model_transform: Optional[Callable[[MegatronModelType], MegatronModelType]] = None, - configure_init_model_parallel: bool = False, - **model_construct_args, - ) -> None: - """Constructor. - - Args: - config: Serializable configuration object that allows one to construct a new model instance and loss - function. Necessary for Megatron-based training as the model itself cannot be serialized and - distributed to nodes. Instead, we serialize the procedure for making the model and distribute that. - forward_step: Performs forward pass using the model and a batch of data. - data_step: Custom batch-creating function for the model. - optimizer: Megatron-compatible distributed optimizer instance. Defaults to using ADAM with a 1e-4 learning - rate. - model_construct_args: Optional. Any arguments necessary to construct the model in the `config`'s - `configure_model` method. - model_transform: Optional. The model transform function. - configure_init_model_parallel: Optional. Whether to initialize the model parallel at configuration time. - **model_construct_args: Optional. Arguments necessary for the supplied model configuration's - `configure_model` method, which will make an instance of the model. - """ - super().__init__() - self.config = config - self.module_construct_args: Optional[dict[str, Any]] = model_construct_args - # ***must** be set up in configure_model() -- megatron constraint - # also, must be called `module`: nemo expects the actual model to be stored this way - self.module: Optional[MegatronModelType] = None - self.loss_reduction_class: type[MegatronLossType] = config.get_loss_reduction_class() - self.optim = optimizer - self.optim.connect(self) # This will bind the `configure_optimizers` method - self._data_step = data_step - self._forward_step = forward_step - self.model_transform = model_transform - self.configure_init_model_parallel = configure_init_model_parallel - # configure metrics - self.train_metric = self.config.train_metric.get_instance() if self.config.train_metric else None - self.valid_metric = self.config.valid_metric.get_instance() if self.config.valid_metric else None - - def configure_model(self) -> None: - """Updates internal state: instantiates the model from the object's config, assigns to `model` attribute. - - NOTE: this method is idempotent; successive calls have no effect. The model is only initialized once. - - Raises: - ValueError iff the internal config's configure_model method returns None. - """ - if self.configure_init_model_parallel: - self.trainer.strategy._init_model_parallel = True - if self.module is None: - if self.module_construct_args is None: - module_construct_args = {} - elif "model_construct_args" in self.module_construct_args: - # Not sure why this is needed, but it seems "model_construct_args" ends up as a key inside this dict. - module_construct_args = self.module_construct_args["model_construct_args"] - else: - module_construct_args = self.module_construct_args - - model: MegatronModelType = self.config.configure_model(**module_construct_args) - self.module = model - if self.module is None: - raise ValueError("Invalid semantics: configure_model method **MUST** initialize the model.") - - def is_on_logging_device(self): - """Return True if last stage of pipeline parallel and first tensor parallel rank.""" - return parallel_state.is_pipeline_last_stage() and parallel_state.get_tensor_model_parallel_rank() == 0 - - def forward(self, *args, **kwargs) -> DataT: - """Call the forward method of the underlying model, and return whatever it outputs.""" - # safe to do because configure_model is idempotent - self.configure_model() - assert self.module is not None, "ERROR: configure_model() method has been incorrectly overridden!" - prediction = self.module(*args, **kwargs) # for now just pass through to the underlying model - return prediction - - def data_step(self, dataloader_iter: Iterator[DataT]) -> DataT: # noqa: D102 - return self._data_step(dataloader_iter) - - def forward_step(self, batch) -> Tensor: - """Megatron-required: the training forward step for the model, which is required to produce the loss. - - Normally, the forward pass of a model means its inference. Loss is computed using the predictions - from the forward pass against labels. Megatron unfortunately conflates these two different concepts - and instead has models "forward" method produce the loss. See the Megatron docs for details: - https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/pipeline_parallel/schedules.py#L170 - - To get actual predictions, use the :func:`forward` method instead. - """ - # safe to do because configure_model is idempotent - self.configure_model() - assert self.module is not None - return self._forward_step(self.module, batch) - - def update_metric( - self, batch, outputs, metric, task: Literal["pretraining", "classification", "regression"] - ) -> None: - """Update metric for logging.""" - match task: - case "pretraining": - logits = outputs["token_logits"].detach().transpose(0, 1) # [s, b, v] -> [b, s, v] - metric(logits, batch["labels"]) - case "classification": - classification_output = outputs["classification_output"] - num_classes = classification_output.shape[-1] - labels = batch["labels"] - if classification_output.ndim == 3: # token-level classification - classification_output = classification_output.reshape(-1, num_classes)[ - batch["loss_mask"].view(-1) - ] # shape [-1, num_classes] - assert classification_output.ndim == 2 - - labels = batch["labels"].reshape(-1)[batch["loss_mask"].view(-1)] - metric( - classification_output.reshape(-1, num_classes), - labels.reshape(-1), - ) - case "regression": - regression_output = outputs["regression_output"] - metric(regression_output, batch["labels"]) - case _: - raise NotImplementedError(f"unrecognized task {task}") - - def training_step(self, batch, batch_idx: Optional[int] = None) -> Tensor: - """In mcore the loss-function is part of the forward-pass when labels are provided.""" - outputs = self.forward_step(batch) - if self.train_metric is not None: - if self.is_on_logging_device(): - self.update_metric(batch, outputs, self.train_metric, self.config.train_metric.task) - - self.log( - self.config.train_metric.metric_name, - self.train_metric, - on_step=True, - on_epoch=False, - prog_bar=True, - ) - - return outputs - - def validation_step(self, batch, batch_idx: Optional[int] = None) -> Tensor: - """In mcore the loss-function is part of the forward-pass when labels are provided.""" - outputs = self.forward_step(batch) - if self.valid_metric is not None and self.is_on_logging_device(): - self.update_metric(batch, outputs, self.valid_metric, self.config.valid_metric.task) - - return outputs - - def predict_step(self, batch, batch_idx: Optional[int] = None) -> Tensor: - """Alias for forward_step.""" - if len(batch) == 0: - return - return self.forward_step(batch) - - def training_loss_reduction(self) -> MegatronLossType: - """This is the function that takes batch['loss_mask'] and the logits output by the model and reduces the loss.""" - return self.loss_reduction_class() - - def validation_loss_reduction(self) -> MegatronLossType: # noqa: D102 - return self.loss_reduction_class(validation_step=True) - - def test_loss_reduction(self) -> MegatronLossType: # noqa: D102 - return self.loss_reduction_class(validation_step=True) - - def on_validation_epoch_end(self): # noqa: D102 - if self.valid_metric is None: - return - - if self.trainer.sanity_checking: - self.valid_metric.reset() # clean up sanity runs - return - - self.log( - self.config.valid_metric.metric_name, - self.valid_metric, - on_step=False, - on_epoch=True, - prog_bar=True, - ) - - -def default_megatron_optimizer() -> MegatronOptimizerModule: - """Default distributed optimizer uses Adam with a 1e-4 learning rate.""" - return MegatronOptimizerModule( - config=OptimizerConfig(lr=1e-4, optimizer="adam", use_distributed_optimizer=True), - ) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/lightning.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/lightning.py deleted file mode 100644 index f9a1168c3e..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/lightning.py +++ /dev/null @@ -1,283 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Callable, Dict, Iterable, Optional, Protocol, Sequence, TypedDict, cast - -import lightning.pytorch as pl -import torch.distributed -from megatron.core import parallel_state -from megatron.core.packed_seq_params import PackedSeqParams -from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec -from nemo.lightning.megatron_parallel import DataT, MegatronLossReduction -from nemo.lightning.pytorch.optim import MegatronOptimizerModule -from torch import Tensor -from transformers.tokenization_utils_base import PreTrainedTokenizerBase - -from bionemo.llm.lightning import ( - BionemoLightningModule, - DataStep, - ForwardStep, - default_megatron_optimizer, -) -from bionemo.llm.model.biobert.model import BioBertConfig, MegatronBioBertModel - - -__all__: Sequence[str] = ( - "BertBatch", - "BertModel", - "SequenceBatch", - "bert_forward_step", - "biobert_data_step", - "biobert_lightning_module", - "get_batch_on_this_context_parallel_rank", - "get_packed_seq_params", -) - - -class BertModel(Protocol[DataT]): - """Interface for BERT-like models.""" - - def forward( - self, input_ids: Tensor, attention_mask: Tensor, packed_seq_params: Optional[PackedSeqParams] = None - ) -> DataT: - """Inference for BERT-like models. - - Inference for BERT-like models require their tokenized inputs by IDs, an attention mask over the input, - and the original sequence lengths if the sequences are packed into a dense batch. - """ - ... - - -class BertBatchCore(TypedDict): - """Input datatype for inference with BERT-like models.""" - - text: Tensor - attention_mask: Tensor - - -DataStepOutput = Dict[str, torch.Tensor | PackedSeqParams] -DataStepFunction = Callable[[Iterable], DataStepOutput] -ForwardStepFunction = Callable[[pl.LightningModule, DataStepOutput], DataT] - - -class BertBatch(BertBatchCore, total=False): - """Input datatype for inference with BERT-like models.""" - - cu_seqlens: Tensor - - -class SequenceBatchCore(TypedDict): - """Input datatype for inference with BERT-like models.""" - - cu_seqlens: Tensor - - -class SequenceBatch(SequenceBatchCore, total=False): - """Input datatype for inference with BERT-like models.""" - - cu_seqlens_argmin: Tensor - max_seqlen: Tensor - - -def biobert_data_step(dataloader_iter) -> Dict[str, Tensor]: - """Preprocesses a batch of data for the GeneFormer model, and ingest a single batch of data from the dataloader iterator. - only necessary batch keys are subsetted and passed to the model's forward pass, and the loss forward pass, depending on stage. - TODO document how parallel_state pipeline stages work. - - Args: - dataloader_iter: An iterator over the dataloader. - - Returns: - output: A dictionary of this batch limiting to relevant keys. - - """ # noqa: D205 - # Based on: https://github.com/NVIDIA/Megatron-LM/blob/main/pretrain_gpt.py#L87 - # https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py#L828-L842 - - batch = next(dataloader_iter) - - if isinstance(batch, tuple) and len(batch) == 3: - _batch: dict = batch[0] - else: - _batch = batch - - required_keys = set() - required_keys.add("attention_mask") - if parallel_state.is_pipeline_first_stage(): - required_keys.add("text") - if parallel_state.is_pipeline_last_stage(): - required_keys.update(("labels", "loss_mask", "types", "is_random")) - # if self.get_attention_mask_from_fusion: - # required_keys.remove('attention_mask') - - _batch = {key: val.cuda(non_blocking=True) if key in required_keys else None for key, val in _batch.items()} - # slice batch along sequence dimension for context parallelism - output = get_batch_on_this_context_parallel_rank(_batch) - - return output - - -def bert_forward_step(model: BertModel[DataT], batch: BertBatch) -> DataT: - """Performs the model's forward pass using the batch, for Megatron compatibility. - - This subsets the batch keys to the ones actually used by forward pass of the model, and then calls the model's - forward pass. if "cu_seqsens" are defined in the batch, then the packed sequence parameters are also passed to the - model for forward pass efficiency. - """ - if "cu_seqlens" in batch: - forward_results = model.forward( - input_ids=batch["text"], - attention_mask=batch["attention_mask"], - packed_seq_params=get_packed_seq_params(cast(SequenceBatch, batch)), - ) - else: - forward_results = model.forward(input_ids=batch["text"], attention_mask=batch["attention_mask"]) - # TODO support losses that also include the binary head, this means doing something more fancy than the one - # default GPT reduction function above MaskedTokenLossReduction() - return forward_results - - -def biobert_lightning_module( - config: BioBertConfig[MegatronBioBertModel, MegatronLossReduction], - optimizer: Optional[MegatronOptimizerModule] = None, - tokenizer: Optional[TokenizerSpec | PreTrainedTokenizerBase] = None, - data_step: DataStep = biobert_data_step, - forward_step: ForwardStep = bert_forward_step, - model_transform: Optional[Callable] = None, - **model_construct_args, -) -> BionemoLightningModule[MegatronBioBertModel, MegatronLossReduction]: - """A pytorch lightning module for BioBert-derived models. - - This module is designed to be used with the Megatron-LM strategy and nemo 2.0 conventions. - To change your loss, pass in a different config object that returns a different loss reduction class. - To change your model and what it outputs, pass in a different config object that returns a different model. - Do not modify this function unless you need to change higher level logic. You may need to modify the various step - and forward functions towards the bottom of this file to handle new/different keys in the batch. In the future some - of those functions may need to be refactored out into the config object or a different place so that they live - closer to the model definition. - """ - return BionemoLightningModule( - config=config, - optimizer=optimizer if optimizer is not None else default_megatron_optimizer(), - data_step=data_step, - forward_step=forward_step, - tokenizer=tokenizer, - model_transform=model_transform, - **model_construct_args, - ) - - -def get_batch_on_this_context_parallel_rank(batch: Dict[str, Tensor], in_place: bool = True) -> Dict[str, Tensor]: - """Ensures that the input batch is in the right format for context parallel rank. - - Modifies the batch data based on the context parallel rank, if the context parallel world size is greater than 1. - Otherwise, the batch is returned as-is. - - - Args: - batch: The input batch data. - in_place: If true, then the input is mutated. The returned dict is a reference to the input. - Otherwise, the input data is always shallow-copied and this copy is modified and returned. - - Returns: - dict: The modified batch data based on the context parallel rank. - """ - if not in_place: - batch: dict[str, Tensor] = dict(**batch) - - if cp_size := parallel_state.get_context_parallel_world_size() > 1: - num_valid_tokens_in_ub: Tensor | None = None - if "loss_mask" in batch and batch["loss_mask"] is not None: - num_valid_tokens_in_ub = batch["loss_mask"].sum() - - cp_rank = parallel_state.get_context_parallel_rank() - for key, val in batch.items(): - if val is not None: - seq_dim = 1 if key != "attention_mask" else 2 - _val = val.view( - *val.shape[0:seq_dim], - 2 * cp_size, - val.shape[seq_dim] // (2 * cp_size), - *val.shape[(seq_dim + 1) :], - ) - index = torch.tensor([cp_rank, (2 * cp_size - cp_rank - 1)], device="cpu", pin_memory=True).cuda( - non_blocking=True - ) - _val = _val.index_select(seq_dim, index) - _val = _val.view(*val.shape[0:seq_dim], -1, *_val.shape[(seq_dim + 2) :]) - batch[key] = _val - batch["num_valid_tokens_in_ub"] = num_valid_tokens_in_ub # type: ignore - - return batch - - -def get_packed_seq_params(batch: SequenceBatch) -> PackedSeqParams: - """Get the packed sequence parameters for the given batch. - - This function should only be called if `cu_seqlens` is defined in the batch. - - Args: - batch: The input batch to pack. - - Returns: - PackedSeqParams: The packed sequence parameters containing the following attributes: - - cu_seqlens_q (Tensor): The sequence lengths for query. - - cu_seqlens_kv (Tensor): The sequence lengths for key and value. - - max_seqlen_q (Tensor, optional): The maximum sequence length for query. - - max_seqlen_kv (Tensor, optional): The maximum sequence length for key and value. - - qkv_format (str): The format of query, key, and value tensors. - - """ - cu_seqlens = batch["cu_seqlens"].squeeze() # remove batch size dimension (mbs=1) - # remove -1 "paddings" added in collate_fn - if cu_seqlens_argmin := batch.get("cu_seqlens_argmin", None) is not None: - # pre-compute cu_seqlens_argmin in dataset class for perf - cu_seqlens = cu_seqlens[: cu_seqlens_argmin.item()] - else: - cu_seqlens = cu_seqlens[: torch.argmin(cu_seqlens)] - - # pre-compute max_seqlens in dataset class for perf - max_seqlen = batch["max_seqlen"].squeeze() if "max_seqlen" in batch else None - - # these args are passed eventually into TEDotProductAttention.forward() - return PackedSeqParams( - cu_seqlens_q=cu_seqlens, - cu_seqlens_kv=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_kv=max_seqlen, - qkv_format="thd", - ) - - -class BioBertLightningModule(BionemoLightningModule): - def __init__( - self, - *args, - data_step_function: DataStepFunction = biobert_data_step, - forward_step_function: ForwardStepFunction = bert_forward_step, - **kwargs, - ): - """DEPRECATED! Please use BionemoLightningModule. This is here so we can load older checkpoints. - This maps the old name `forward_step_function` to the new name `forward_step` and `data_step_function` to - `data_step`. - - Args: - *args: all args are passed through to BionemoLightningModule - data_step_function (DataStepFunction, optional): The data step function. Defaults to biobert_data_step. - forward_step_function (ForwardStepFunction, optional): The forward step function. Defaults to bert_forward_step. - **kwargs: all other kwargs are passed through to BionemoLightningModule. - """ # noqa: D205 - super().__init__(*args, forward_step=forward_step_function, data_step=data_step_function, **kwargs) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/model.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/model.py deleted file mode 100644 index c6687bffdb..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/model.py +++ /dev/null @@ -1,622 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import tarfile -from copy import deepcopy -from dataclasses import dataclass, field -from typing import ( - Any, - Callable, - List, - Literal, - Optional, - Sequence, - Type, - TypedDict, - TypeVar, -) - -import torch -from megatron.core import parallel_state, tensor_parallel -from megatron.core.models.bert.bert_lm_head import BertLMHead -from megatron.core.models.bert.pooler import Pooler -from megatron.core.models.common.embeddings.language_model_embedding import LanguageModelEmbedding -from megatron.core.models.common.embeddings.rotary_pos_embedding import RotaryEmbedding -from megatron.core.models.common.language_module.language_module import LanguageModule -from megatron.core.transformer.enums import ModelType -from megatron.core.transformer.spec_utils import ModuleSpec -from megatron.core.transformer.transformer_block import TransformerBlock -from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.transformer.utils import get_linear_layer -from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer -from nemo.lightning import get_vocab_size -from torch import Tensor -from torch.optim import Optimizer - -from bionemo.llm.api import MegatronLossType -from bionemo.llm.model.biobert.transformer_specs import BiobertSpecOption, get_biobert_spec -from bionemo.llm.model.config import ( - OVERRIDE_BIONEMO_CONFIG_DEFAULTS, - MegatronBioNeMoTrainableModelConfig, - TorchmetricsConfig, -) -from bionemo.llm.model.loss import BERTMLMLossWithReduction -from bionemo.llm.utils.weight_utils import nemo1_to_nemo2_biobert_key_mapping - - -__all__: Sequence[str] = ( - "OVERRIDE_BIOBERT_CONFIG_DEFAULTS", - "BioBertConfig", - "BioBertOutput", - "BioBertOutputCore", - "MegatronBioBertModel", - "MegatronBioBertModelType", - "PositionEmbeddingKinds", -) - -# Configure the logger -logging.basicConfig( - level=logging.INFO, # Set the minimum logging level - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", # Log format - datefmt="%Y-%m-%d %H:%M:%S", # Date format -) - -logger = logging.getLogger(__file__) - -# Add some fields specific to the BIOBERT config that we want to override by default -# TODO automatically determine which fields a user is trying to override in the future. -_OVERRIDE_BIOBERT_CONFIG_DEFAULTS: List[str] = OVERRIDE_BIONEMO_CONFIG_DEFAULTS + [ - "return_only_hidden_states", - "include_embeddings", - "include_input_ids", - "include_hiddens", - # Precision override for starting from a checkpoint and casting to a different precision - "params_dtype", - "pipeline_dtype", - "autocast_dtype", - # Model parallelism settings! Important to override these if the user requests different settings from how - # a model was trained (common). See https://github.com/NVIDIA/bionemo-framework/issues/275 - "tensor_model_parallel_size", - "pipeline_model_parallel_size", - "virtual_pipeline_model_parallel_size", - "sequence_parallel", - "context_parallel_size", - "expert_model_parallel_size", - "apply_rope_fusion", - "bias_dropout_fusion", - "bias_activation_fusion", - "attention_softmax_in_fp32", - "get_attention_mask_from_fusion", - "activation_func", # FIXME hack: update the ESM2 checkpoint with the updated activation function and don't override - "moe_extended_tp", - "skip_logits", -] - -# A copy that we do not use internally. Useful for external users who want to -# start with these defaults and add some new keys that they want to not override. -OVERRIDE_BIOBERT_CONFIG_DEFAULTS = deepcopy(_OVERRIDE_BIOBERT_CONFIG_DEFAULTS) - - -class BioBertOutputCore(TypedDict): - """Keys always present in the bionemo bert model inference output.""" - - token_logits: Tensor - binary_logits: Optional[Tensor] - - -class BioBertOutput(BioBertOutputCore, total=False): - """The megatron bionemo bert model inference type.""" - - hidden_states: Tensor - - -PositionEmbeddingKinds = Literal["learned_absolute", "rope"] -"""Kinds of supported positional embeddings.""" - - -# TODO make this a base class without the language head and pooler -class MegatronBioBertModel(LanguageModule): - """Transformer language model. - - Args: - config: transformer config - num_tokentypes: Set to 2 when args.bert_binary_head is True, and 0 otherwise. Defaults to 0. - transformer_layer_spec: Specifies module to use for transformer layers - vocab_size: vocabulary size - max_sequence_length: maximum size of sequence. This is used for positional embedding - pre_process: Include embedding layer (used with pipeline parallelism) - post_process: Include an output layer (used with pipeline parallelism) - parallel_output: Do not gather the outputs, keep them split across tensor parallel ranks - share_embeddings_and_output_weights: When True, input embeddings and output logit weights are shared. - Defaults to False. - position_embedding_type: Position embedding type. Options ["learned_absolute", "rope"]. - Defaults is 'learned_absolute'. - rotary_percent: Percent of rotary dimension to use for rotary position embeddings. - Defaults to 1.0 (100%). Ignored unless position_embedding_type is 'rope'. - """ - - def __init__( # noqa: D107 - self, - config: TransformerConfig, - num_tokentypes: int, - transformer_layer_spec: ModuleSpec, - vocab_size: int, - max_sequence_length: int, - tokenizer: Optional[AutoTokenizer] = None, - pre_process: bool = True, - post_process: bool = True, - fp16_lm_cross_entropy: bool = False, - parallel_output: bool = True, - share_embeddings_and_output_weights: bool = False, - position_embedding_type: PositionEmbeddingKinds = "learned_absolute", - rotary_percent: float = 1.0, - seq_len_interpolation_factor: Optional[float] = None, - add_binary_head: bool = False, - return_embeddings: bool = False, - include_embeddings: bool = False, - use_full_attention_mask: bool = False, - include_hiddens: bool = False, - include_input_ids: bool = False, - skip_logits: bool = False, # Useful for inference time. - ): - # TODO (@jstjohn) come up with a cleaner way for this model to return a set of things the user wants. - # hidden states, embeddings, logits, etc. The defaults should work for training but we need to make it - # customizable and easy to tell how to make it work well for inference as well as trouble shooting. - # Also make sure that everything returned that the user wants gets transposed to the b,s,h format. - super(MegatronBioBertModel, self).__init__(config=config) - self.post_process = post_process - self.add_binary_head = add_binary_head - self.skip_logits = skip_logits - if return_embeddings: - assert self.post_process, "only return embeddings on the last pipeline stage" - # `b` = batch, `s` = sequence. - # The old flash attention mechanism apparently wants you to use a b x 1 x s x s attention mask while - # the new one wants a b x 1 x 1 x s attention mask. This is a hack to allow us to switch between the two. - self.use_full_attention_mask = use_full_attention_mask - self.config: TransformerConfig = config - self.transformer_layer_spec: ModuleSpec = transformer_layer_spec - self.vocab_size = vocab_size - self.max_sequence_length = max_sequence_length - self.tokenizer = tokenizer - self.pre_process = pre_process - self.post_process = post_process - self.fp16_lm_cross_entropy = fp16_lm_cross_entropy - self.parallel_output = parallel_output - self.share_embeddings_and_output_weights = share_embeddings_and_output_weights - self.position_embedding_type = position_embedding_type - self.add_binary_head = add_binary_head - self.return_embeddings = return_embeddings - self.include_embeddings = include_embeddings - self.include_hiddens = include_hiddens - self.include_input_ids = include_input_ids - self.skip_logits = skip_logits - - # megatron core pipelining currently depends on model type - self.model_type = ModelType.encoder_or_decoder - # Embeddings. - if self.pre_process: - self.register_buffer( - "bert_position_id_tensor", - torch.arange(max_sequence_length, dtype=torch.long, requires_grad=False).unsqueeze(0), - persistent=False, - ) - self.embedding = LanguageModelEmbedding( - config=self.config, - vocab_size=self.vocab_size, - max_sequence_length=self.max_sequence_length, - position_embedding_type=position_embedding_type, - num_tokentypes=num_tokentypes, - ) - - if self.position_embedding_type == "rope": - self.rotary_pos_emb = RotaryEmbedding( - kv_channels=self.config.kv_channels, - rotary_percent=rotary_percent, - rotary_interleaved=self.config.rotary_interleaved, - # bug in megatron: they list the type as `float` but they default to `None` so it should be `Optional[float]` - seq_len_interpolation_factor=seq_len_interpolation_factor, # type: ignore - ) - - # Transformer. - self.encoder = TransformerBlock( - config=self.config, - spec=self.transformer_layer_spec, - pre_process=self.pre_process, - post_process=self.post_process, # NOTE: in bionemo1 this is hard-coded to True - ) - - # Output - if post_process: - # TODO: Make sure you are passing in the mpu_vocab_size properly - if self.config.defer_embedding_wgrad_compute: - # The embedding activation buffer preserves a reference to the input activations - # of the final embedding projection layer GEMM. It will hold the activations for - # all the micro-batches of a global batch for the last pipeline stage. Once we are - # done with all the back props for all the microbatches for the last pipeline stage, - # it will be in the pipeline flush stage. During this pipeline flush we use the - # input activations stored in embedding activation buffer and gradient outputs - # stored in gradient buffer to calculate the weight gradients for the embedding - # final linear layer. - self.embedding_activation_buffer = [] - self.grad_output_buffer = [] - else: - self.embedding_activation_buffer = None - self.grad_output_buffer = None - - self.lm_head = BertLMHead( - config.hidden_size, - config, - ) - - self.output_layer = tensor_parallel.ColumnParallelLinear( - config.hidden_size, - self.vocab_size, - config=config, - init_method=config.init_method, - is_expert=False, - bias=True, - skip_bias_add=False, - gather_output=not self.parallel_output, - skip_weight_param_allocation=pre_process and share_embeddings_and_output_weights, - embedding_activation_buffer=self.embedding_activation_buffer, - grad_output_buffer=self.grad_output_buffer, - ) - - self.binary_head = None - if self.add_binary_head: - # TODO: Shoudl switch this to TE ? - self.binary_head = get_linear_layer( - config.hidden_size, 2, config.init_method, config.perform_initialization - ) - self.pooler = Pooler(config.hidden_size, config.init_method, config, config.sequence_parallel) - - if self.pre_process or self.post_process: - self.setup_embeddings_and_output_layer() - - def bert_extended_attention_mask(self, attention_mask: Tensor) -> Tensor: - """Creates the extended attention mask - - Converts the attention mask of dimension [batch size, 1, seq len] to [batch size, 1, seq len, seq len] and makes it binary - - Args: - attention_mask (Tensor): The input attention mask - - Returns: - Tensor: The extended binary attention mask - """ # noqa: D415 - # We create a 3D attention mask from a 2D tensor mask. - # [b, 1, s] - attention_mask_b1s = attention_mask.unsqueeze(1) - - if self.use_full_attention_mask: - # [b, s, 1] - attention_mask_bs1 = attention_mask.unsqueeze(2) - # [b, s, s] - attention_mask_bss = attention_mask_b1s * attention_mask_bs1 - # [b, 1, s, s] - extended_attention_mask = attention_mask_bss.unsqueeze(1) - else: - # Tensor Engine requires a 1x1xS attention mask which it internally - # converts into a 1xSxS mask. - # [b, 1, 1, s] - extended_attention_mask = attention_mask_b1s.unsqueeze(1) - - # Convert attention mask to binary, and flip the values from 0 to 1 and vice versa so that - # extended_attention_mask._mask_fill(-1000) that megatron does internally result in - # masking out pad positions. - extended_attention_mask = extended_attention_mask < 0.5 - - return extended_attention_mask - - def bert_position_ids(self, token_ids): # noqa: D102 - # Create position ids - seq_length = token_ids.size(1) - if seq_length != self.max_sequence_length: - return self.bert_position_id_tensor[:, :seq_length] - return self.bert_position_id_tensor # No need to subset so skip the slice op - - def embedding_forward( - self, - input_ids: Tensor, - position_ids: Tensor, - tokentype_ids: Optional[Tensor] = None, - attention_mask: Optional[Tensor] = None, - ) -> Tensor: - """Produce embeddings.""" - return self.embedding(input_ids=input_ids, position_ids=position_ids, tokentype_ids=tokentype_ids) - - def set_input_tensor(self, input_tensor: Tensor | list[Tensor]) -> None: - """Sets input tensor to the model. - - See megatron.model.transformer.set_input_tensor() - - Args: - input_tensor: Sets the input tensor for the model. - - Raises: - ValueError: Iff the input tensor is a list that doesn't have exactly 1 tensor. - """ - # This is usually handled in schedules.py but some inference code still gives us non-lists or None. - if isinstance(input_tensor, list): - if len(input_tensor) != 1: - raise ValueError(f"input_tensor should only be length 1 for gpt/bert, not length: {len(input_tensor)}") - single_input_tensor: Tensor = input_tensor[0] - else: - single_input_tensor = input_tensor - self.encoder.set_input_tensor(single_input_tensor) - - def forward( - self, - input_ids: Tensor, - attention_mask: Tensor, - tokentype_ids: Optional[Tensor] = None, - lm_labels: Optional[Tensor] = None, - inference_params: Any | None = None, - runtime_gather_output: Optional[bool] = None, - ) -> BioBertOutput | Tensor: - """Forward function of BERT model - - Forward function of the BERT Model This function passes the input tensors - through the embedding layer, and then the encoder and finally into the post - processing layer (optional). - - It either returns the Loss values if labels are given or the final hidden units. - """ # noqa: D415 - # TODO! If we upgrade to TE 1.7 why does bit flipping back to 1 help the loss in TE 1.7? It claimed that they now follow standards, did - # nemo/megatron flip again internally to be compatible wtih TE somewhere? - # change the following line to ~self.bert... and see if it helps if we upgrade to TE 1.7 and NeMo/Megatron have not compensated. - extended_attention_mask = self.bert_extended_attention_mask(attention_mask) - - if parallel_state.is_pipeline_first_stage(): - using_input_ids: Optional[Tensor] = input_ids - using_position_ids: Optional[Tensor] = self.bert_position_ids(input_ids) - else: - using_input_ids = None - using_position_ids = None - - # Encoder embedding. - if self.pre_process: - encoder_input: Optional[Tensor] = self.embedding_forward( - input_ids=using_input_ids, - position_ids=using_position_ids, - tokentype_ids=tokentype_ids, - attention_mask=attention_mask, - ) - else: - # intermediate stage of pipeline - # encoder will get hidden_states from encoder.input_tensor - encoder_input = None - - # Rotary positional embeddings (Why not move this into BERT/GPTEmberdding ?) - rotary_pos_emb = None - if self.position_embedding_type == "rope": - rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len( - inference_params, - self.encoder, - encoder_input, - self.config, - packed_seq_params=None, # TODO @sichu: upstream to Megatron-LM - ) - rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len) - - # Run encoder. - hidden_states = self.encoder( - hidden_states=encoder_input, - attention_mask=extended_attention_mask, - inference_params=inference_params, - rotary_pos_emb=rotary_pos_emb, - ) - - if not self.post_process: - return hidden_states - - if self.add_binary_head: - pooled_output = self.pooler(hidden_states, 0) - - if self.return_embeddings or self.include_embeddings: - embeddings = torch.transpose(hidden_states, 0, 1) - masks = torch.sum(attention_mask, dim=1) - # Collect masked embeddings. - output_embeddings = torch.zeros( - size=(embeddings.shape[0], embeddings.shape[2]), - dtype=embeddings.dtype, - device=torch.cuda.current_device(), - ) - for i, (embedding, mask) in enumerate(zip(embeddings, masks)): - output_embeddings[i, :] = torch.mean(embedding[1 : mask - 1], dim=0) - - if self.return_embeddings: - return output_embeddings - - # logits and loss - output_weight = None - if self.share_embeddings_and_output_weights: - output_weight = self.shared_embedding_or_output_weight() - - hidden_states_after_lm_head = self.lm_head(hidden_states=hidden_states) - if not self.skip_logits: - # TODO add , runtime_gather_output=runtime_gather_output once supported in ColumnParallelLinear - logits, _ = self.output_layer(hidden_states_after_lm_head, weight=output_weight) - else: - logits = None - - binary_logits = None - if self.binary_head is not None: - binary_logits = self.binary_head(pooled_output) - - output = {"token_logits": logits, "binary_logits": binary_logits} - if self.include_hiddens: - output["hidden_states"] = hidden_states.transpose(0, 1).contiguous() # [s b h] => [b s h] - if self.include_input_ids: - output["input_ids"] = input_ids - if self.include_embeddings: - output["embeddings"] = output_embeddings - return output - - -# Typevar that works for all children of MegatronBioBertModel -MegatronBioBertModelType = TypeVar("MegatronBioBertModelType", bound=MegatronBioBertModel) -"""A megatron model that is or extends the MegatronBioBertModel.""" - - -@dataclass -class BioBertConfig( - MegatronBioNeMoTrainableModelConfig[MegatronBioBertModelType, MegatronLossType], -): - """Config class for BioBert model, responsible for the partial configuration of Transformer models. - - NOTE: do not use this config directly, define a child config that overrides items from this parent config - - `configure_model()` is ultimately called by the LightningModule using PTL lightning module hooks. - """ - - # From megatron.core.models.gpt.bert_model.GPTModel - kv_channels: int | None = None - fp16_lm_cross_entropy: bool = False - apply_rope_fusion: bool = True - parallel_output: bool = True - bias_dropout_fusion: bool = True - bias_activation_fusion: bool = True - masked_softmax_fusion: bool = True - persist_layer_norm: bool = True - get_attention_mask_from_fusion: bool = True - share_embeddings_and_output_weights: bool = False # try True - make_vocab_size_divisible_by: int = 128 - position_embedding_type: PositionEmbeddingKinds = "learned_absolute" - rotary_base: int = 10000 - rotary_percent: float = 1.0 - seq_len_interpolation_factor: Optional[float] = None - seq_length: int = 1024 - hidden_size: int = 512 - num_attention_heads: int = 8 - num_layers: int = 6 - init_method_std: float = 0.02 - biobert_spec_option: BiobertSpecOption = BiobertSpecOption.bert_layer_with_transformer_engine_spec - - optimizer_fn: Optional[Callable[["MegatronBioBertModel"], Optimizer]] = None - # TODO (@skothenhill,@georgea) update to use the nemo2 checkpoint mixins - # support HF (requires weight interleaving on qkv layer) and nemo1 checkpoints ideally. - # TODO (@skothenhill,@jstjohn) come up with a nice way of doing fine-tuning checkpoint loading, - # where some acceptible layers (eg lm_head) may or may not be absent from the model, and others - # (like a new head) may be new and missing from the initial checkpoint. - nemo1_ckpt_path: Optional[str] = None - - initial_ckpt_path: Optional[str] = None - # TODO(@jstjohn, @skothenhill) Was this supposed to be only on the child? - initial_ckpt_skip_keys_with_these_prefixes: List[str] = field(default_factory=list) - # Used if initializing from a checkpoint, set this to any fields you want to override rather than re-set. - # by default all fields will be overridden. - override_parent_fields: List[str] = field(default_factory=lambda: _OVERRIDE_BIOBERT_CONFIG_DEFAULTS) - return_embeddings: bool = False - include_embeddings: bool = False - return_only_hidden_states: bool = False - include_hiddens: bool = False # Include hidden layers in the output of the model - include_input_ids: bool = False - skip_logits: bool = False # useful for inference - core_attention_override: Type[torch.nn.Module] | None = None - - # loss reduction class - loss_reduction_class: Type[MegatronLossType] = BERTMLMLossWithReduction - - # metric logging - train_metric: Optional[TorchmetricsConfig] = None - valid_metric: Optional[TorchmetricsConfig] = None - - def configure_model(self, tokenizer: AutoTokenizer) -> MegatronBioBertModelType: # noqa: D102 - vp_size = self.virtual_pipeline_model_parallel_size - if vp_size: - p_size = self.pipeline_model_parallel_size - assert (self.num_layers // p_size) % vp_size == 0, ( - "Make sure the number of model chunks is the same across all pipeline stages." - ) - - # The local specs all require the standard full attention mask. - use_full_attention_mask: bool = "transformer_engine" not in self.biobert_spec_option - do_next_sentence = False - if self.model_cls is None: - raise ValueError( - f"You must supply `model_cls` to the {type(self)} for module to initialization in `configure_model`." - ) - - if self.initial_ckpt_path: - self.load_settings_from_checkpoint(self.initial_ckpt_path) - model = self.model_cls( - self, - transformer_layer_spec=get_biobert_spec( - self.biobert_spec_option, - qk_layernorm=self.qk_layernorm, - core_attention=self.core_attention_override, - ), - num_tokentypes=2 if do_next_sentence else 0, - vocab_size=get_vocab_size(self, tokenizer.vocab_size, self.make_vocab_size_divisible_by), - max_sequence_length=self.seq_length, - tokenizer=tokenizer, - fp16_lm_cross_entropy=self.fp16_lm_cross_entropy, - parallel_output=self.parallel_output, - share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, - position_embedding_type=self.position_embedding_type, - rotary_percent=self.rotary_percent, - seq_len_interpolation_factor=self.seq_len_interpolation_factor, - return_embeddings=self.return_embeddings, - include_embeddings=self.include_embeddings, - pre_process=parallel_state.is_pipeline_first_stage(), - post_process=parallel_state.is_pipeline_last_stage(), # set to False for inference - add_binary_head=do_next_sentence, - use_full_attention_mask=use_full_attention_mask, - include_hiddens=self.include_hiddens, - skip_logits=self.skip_logits, - include_input_ids=self.include_input_ids, - ) - # TODO (@skothenhill) this is a hack to load the old checkpoint. - # This should be removed once we have a proper checkpoint conversion - # see NeMo/nemo/collections/llm/gpt/model/mixtral.py for how we should do it. - # We should eventually have an adapter for nemo1 checkpoints, HF checkpoints (at least for ESM2 @georgea) - # and an adapter may also be the right way to handle expected missing/extra keys when importing - # a checkpoint for fine-tuning (eg ignore misisng lm_head, if not there in model, etc). - if self.nemo1_ckpt_path is not None: - assert self.initial_ckpt_path is None, "Mutually exclusive checkpoint path used twice" - te_mapping = "transformer_engine" in self.biobert_spec_option.value - with tarfile.open(self.nemo1_ckpt_path, "r") as old_ckpt: - ckpt_file = old_ckpt.extractfile("./model_weights.ckpt") - if ckpt_file is None: - raise ValueError(f"Failure to read checkpoint file: {old_ckpt}/model_weights/ckpt") - old_weights = torch.load(ckpt_file) - new_state_dict_from_old = {} - for k, v in old_weights.items(): - new_key = nemo1_to_nemo2_biobert_key_mapping(k, new_model_prefix="", te_mapping=te_mapping) - new_state_dict_from_old[new_key] = v - # TE adds non-null ._extra_state objects to layers, which store some kind of buffer bits - # so we need to allow those to pass through if we're loading from bionemo1 which did not - # use TE. - model.load_state_dict(new_state_dict_from_old, strict=not te_mapping) - if self.initial_ckpt_path is not None: - assert self.nemo1_ckpt_path is None, "Mutually exclusive checkpoint path used twice" - self.update_model_from_checkpoint(model, self.initial_ckpt_path) - - # TODO (@jstjohn) come up with a cleaner way in the biobert module to return hidden states. - # maybe a suite of options like hugging face has so a user can ask for several or only one thing. - if self.return_only_hidden_states: - # this applies the final layernorm in the encoder to the hidden states which was - # the default in nemo1. - model.post_process = False - model.encoder.post_process = True - model.encoder.post_layer_norm = True - return model - - def get_loss_reduction_class(self) -> Type[MegatronLossType]: # noqa: D102 - # You could optionally return a different loss reduction class here based on the config settings. - return self.loss_reduction_class diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/testing_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/testing_utils.py deleted file mode 100644 index 40fd162a38..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/testing_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import lightning.pytorch as pl -import torch.nn.functional as F - - -def compute_biobert_loss_singlegpu(trainer: pl.Trainer, pl_module: pl.LightningModule): - """Computes the loss for BioBert models on a single GPU. - - This will not function in multi-gpu settings nor with models that do not conform to BioBert. - - Args: - trainer (pl.Trainer): The Lightning Trainer object. - pl_module (pl.LightningModule): The LightningModule being trained. - - Returns: - float: The mean loss. - - See Also: - - :class: BioBertModel - """ - model = pl_module - dl = trainer.datamodule.val_dataloader() - - n, loss = -1, 0.0 - model.eval() - # batch = next(iter(dl)) - batch = model.data_step(iter(dl)) - result = model( - input_ids=batch["text"].cuda(), # 'tokens' also a valid input for MockGPTDataModule - attention_mask=batch["attention_mask"].cuda(), - ) - loss_mask = batch["loss_mask"].cuda() - # Not guaranteed i guess? - logits = result["token_logits"] - target = batch["labels"].cuda() - loss += F.cross_entropy(logits[loss_mask].float(), target[loss_mask], reduction="sum") - n += loss_mask.sum() - - mean_loss: float = (loss / n).detach().cpu().numpy().item() - model.train() - return mean_loss diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/transformer_specs.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/transformer_specs.py deleted file mode 100644 index 63e2854e69..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/biobert/transformer_specs.py +++ /dev/null @@ -1,257 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from enum import Enum -from typing import Optional, Sequence, Type - -from megatron.core.extensions.transformer_engine import ( - TEDotProductAttention, - TELayerNormColumnParallelLinear, - TERowParallelLinear, -) -from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add -from megatron.core.fusions.fused_layer_norm import FusedLayerNorm -from megatron.core.models.bert import bert_layer_specs -from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear -from megatron.core.transformer import spec_utils -from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules -from megatron.core.transformer.dot_product_attention import DotProductAttention -from megatron.core.transformer.enums import AttnMaskType -from megatron.core.transformer.identity_op import IdentityOp -from megatron.core.transformer.mlp import MLP, MLPSubmodules -from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules -from torch.nn import Module - -from bionemo.llm.model.layers import ESM2QueryScaling, TELayerNorm - - -__all__: Sequence[str] = ( - "BiobertSpecOption", - "get_biobert_spec", -) - - -class BiobertSpecOption(str, Enum): - """Options for the BiobertSpec. The spec defines the architecture of the transformer (BERT) block in the biobert model. - This is a `str, Enum` type so that argparse can use the string names as choices. - """ # noqa: D205 - - bert_layer_local_spec = "bert_layer_local_spec" - bert_layer_local_spec_with_qk_ln = "bert_layer_local_spec_with_qk_ln" - bert_layer_with_transformer_engine_spec = "bert_layer_with_transformer_engine_spec" - bert_layer_with_transformer_engine_and_qk_ln_spec = "bert_layer_with_transformer_engine_and_qk_ln_spec" - # ESM2 spec - esm2_bert_layer_local_spec = "esm2_bert_layer_local_spec" - esm2_bert_layer_with_transformer_engine_spec = "esm2_bert_layer_with_transformer_engine_spec" - amplify_with_transformer_engine_spec = "amplify_with_transformer_engine_spec" - - -def get_biobert_spec( # noqa: D417 - biobert_spec_option: BiobertSpecOption, - qk_layernorm: bool = False, - core_attention: Optional[Type[Module]] = None, -) -> spec_utils.ModuleSpec: - """Get the spec for the Biobert model. - - Args: - model_type (ModelType): The model type. - spec_option (BiobertSpecOption): The spec option. - - Returns: - TransformerConfig: The Biobert spec. - """ - # - # BEGIN define several specs that are a function of `qk_layernorm` - # - - match biobert_spec_option: - case BiobertSpecOption.bert_layer_local_spec: - return bert_layer_specs.bert_layer_local_spec - - case BiobertSpecOption.bert_layer_local_spec_with_qk_ln: - # Use this spec for an implementation using only modules in megatron core - - if core_attention is None: - core_attention = DotProductAttention - - bert_layer_local_spec_with_qk_ln = spec_utils.ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - input_layernorm=FusedLayerNorm, - self_attention=spec_utils.ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.padding}, - submodules=SelfAttentionSubmodules( - linear_qkv=ColumnParallelLinear, - core_attention=core_attention, - linear_proj=RowParallelLinear, - q_layernorm=FusedLayerNorm if qk_layernorm else IdentityOp, - k_layernorm=FusedLayerNorm if qk_layernorm else IdentityOp, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=spec_utils.ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, - linear_fc2=RowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - sharded_state_dict_keys_map={ - "input_layernorm.": "self_attention.linear_qkv.layer_norm_", - "pre_mlp_layernorm.": "mlp.linear_fc1.layer_norm_", - }, - ), - ) - return bert_layer_local_spec_with_qk_ln - - case BiobertSpecOption.bert_layer_with_transformer_engine_spec: - return bert_layer_specs.bert_layer_with_transformer_engine_spec - - case BiobertSpecOption.bert_layer_with_transformer_engine_and_qk_ln_spec: - if core_attention is None: - core_attention = TEDotProductAttention - - bert_layer_with_transformer_engine_and_qk_ln_spec = spec_utils.ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=spec_utils.ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.padding}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=core_attention, - linear_proj=TERowParallelLinear, - q_layernorm=TELayerNorm if qk_layernorm else IdentityOp, - k_layernorm=TELayerNorm if qk_layernorm else IdentityOp, - ), - ), - self_attn_bda=get_bias_dropout_add, - mlp=spec_utils.ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=TELayerNormColumnParallelLinear, - linear_fc2=TERowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - ), - ) - return bert_layer_with_transformer_engine_and_qk_ln_spec - - case BiobertSpecOption.esm2_bert_layer_local_spec: - if core_attention is None: - raise ValueError(f"Must supply core_attention with {BiobertSpecOption.esm2_bert_layer_local_spec} !") - - esm2_bert_layer_local_spec = spec_utils.ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - input_layernorm=FusedLayerNorm, - self_attention=spec_utils.ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.padding}, - submodules=SelfAttentionSubmodules( - linear_qkv=ColumnParallelLinear, - core_attention=core_attention, - linear_proj=RowParallelLinear, - q_layernorm=ESM2QueryScaling, - k_layernorm=IdentityOp, - ), - ), - self_attn_bda=get_bias_dropout_add, - pre_mlp_layernorm=FusedLayerNorm, - mlp=spec_utils.ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=ColumnParallelLinear, - linear_fc2=RowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - sharded_state_dict_keys_map={ - "input_layernorm.": "self_attention.linear_qkv.layer_norm_", - "pre_mlp_layernorm.": "mlp.linear_fc1.layer_norm_", - }, - ), - ) - return esm2_bert_layer_local_spec - - case BiobertSpecOption.esm2_bert_layer_with_transformer_engine_spec: - if core_attention is None: - core_attention = TEDotProductAttention - - esm2_bert_layer_local_spec = spec_utils.ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=spec_utils.ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.padding}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=core_attention, - linear_proj=TERowParallelLinear, - q_layernorm=ESM2QueryScaling, - k_layernorm=IdentityOp, - ), - ), - self_attn_bda=get_bias_dropout_add, - mlp=spec_utils.ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=TELayerNormColumnParallelLinear, - linear_fc2=TERowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - ), - ) - return esm2_bert_layer_local_spec - - case BiobertSpecOption.amplify_with_transformer_engine_spec: - if core_attention is None: - core_attention = TEDotProductAttention - - amplify_with_transformer_engine_spec = spec_utils.ModuleSpec( - module=TransformerLayer, - submodules=TransformerLayerSubmodules( - self_attention=spec_utils.ModuleSpec( - module=SelfAttention, - params={"attn_mask_type": AttnMaskType.padding}, - submodules=SelfAttentionSubmodules( - linear_qkv=TELayerNormColumnParallelLinear, - core_attention=core_attention, - linear_proj=TERowParallelLinear, - q_layernorm=IdentityOp, - k_layernorm=IdentityOp, - ), - ), - self_attn_bda=get_bias_dropout_add, - mlp=spec_utils.ModuleSpec( - module=MLP, - submodules=MLPSubmodules( - linear_fc1=TELayerNormColumnParallelLinear, - linear_fc2=TERowParallelLinear, - ), - ), - mlp_bda=get_bias_dropout_add, - ), - ) - return amplify_with_transformer_engine_spec - - case _: - raise NotImplementedError(f"Spec option {biobert_spec_option} not implemented") diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/config.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/config.py deleted file mode 100644 index f4421b5d35..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/config.py +++ /dev/null @@ -1,186 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import importlib -import logging -from copy import deepcopy -from dataclasses import dataclass, field, fields -from pathlib import Path -from typing import Any, Generic, List, Literal, Optional, Protocol, Sequence, Type - -import torchmetrics -from megatron.core.transformer import TransformerConfig -from nemo.lightning import io - -from bionemo.core.model.config import BionemoModelConfig, BionemoTrainableModelConfig -from bionemo.llm.api import MegatronLossType, MegatronModelType -from bionemo.llm.utils import iomixin_utils as iom -from bionemo.llm.utils.weight_utils import load_weights_sharded_inplace_nemo2_to_mcore - - -__all__: Sequence[str] = ("MegatronBioNeMoModelConfig", "TorchmetricsConfig") - -# Configure the logger -logging.basicConfig( - level=logging.INFO, # Set the minimum logging level - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", # Log format - datefmt="%Y-%m-%d %H:%M:%S", # Date format -) - -logger = logging.getLogger(__file__) - -_OVERRIDE_BIONEMO_CONFIG_DEFAULTS: List[str] = [ - "initial_ckpt_skip_keys_with_these_prefixes", - "override_parent_fields", - "initial_ckpt_path_ignore_weights", - "initial_ckpt_path", - "model_cls", - "bf16", - "fp16", - "train_metric", - "valid_metric", -] - -OVERRIDE_BIONEMO_CONFIG_DEFAULTS = deepcopy(_OVERRIDE_BIONEMO_CONFIG_DEFAULTS) # copy for export - - -class MegatronBioNeMoModelConfig(BionemoModelConfig[MegatronModelType], TransformerConfig, iom.WillHaveGetSetHparam): - """A ModelConfig class for bionemo that supports usage with Megatron models, for example as NeMo2 requires.""" - - model_cls: Type[MegatronModelType] - - -@dataclass -class MegatronBioNeMoTrainableModelConfig( - MegatronBioNeMoModelConfig[MegatronModelType], - BionemoTrainableModelConfig[MegatronModelType, MegatronLossType], - Generic[MegatronModelType, MegatronLossType], -): - """A TrainableModelConfig class for bionemo that supports usage with Megatron models, for example as NeMo2 requires.""" - - initial_ckpt_path: str | None = None - initial_ckpt_skip_keys_with_these_prefixes: List[str] = field(default_factory=list) - override_parent_fields: List[str] = field(default_factory=lambda: _OVERRIDE_BIONEMO_CONFIG_DEFAULTS) - - def load_settings_from_checkpoint(self, initial_ckpt_path: str) -> None: - """Load settings into self from the checkpoint saved in self. - - Any setting in self.override_parent_fields is not overriden. Note that this function will also update the hyper - parameters in this config, as well as the associated attributes in self in case they were modified post-init. - - Args: - initial_ckpt_path: The path to the checkpoint to load, note that everything is loaded from this checkpoint - other than the settings in self.override_parent_fields. - - Returns: - None, the settings are loaded into self in place, and the hyper-parameters that will later be saved into - a checkpoint are updated. - """ - logger.warning(f"Loading {self.initial_ckpt_path}") - # 1. get the config from the trainer io context by querying the `model.config` subpath of the trainer. - initial_config: MegatronBioNeMoTrainableModelConfig = io.load_context( - path=Path(initial_ckpt_path) / "context", subpath="model.config" - ) # type: ignore - initial_fields = {f.name for f in fields(initial_config)} - my_fields = [f.name for f in fields(self)] - skip_fields = set(self.override_parent_fields) - override_fields = [f for f in my_fields if f in initial_fields and f not in skip_fields] - override_mutate_possibly_extra_mutated_fiddle(self, initial_config, override_fields) - - def update_model_from_checkpoint(self, model: MegatronModelType, initial_ckpt_path: str) -> None: - """Utility function to standardize how to load a megatron model from a checkpoint ignoring user-specified keys. - - Update the model with the weights from the provided checkpoint path, skipping the keys with the prefixes in - self.initial_ckpt_skip_keys_with_these_prefixes. - - Args: - model: The Megatron model to update. - initial_ckpt_path: The path to the megatron checkpoint to load. - - Returns: - None, the model is updated in place, supporting megatron model parallelism abstractions, and ignoring - any extra keys that are provided in self.initial_ckpt_skip_keys_with_these_prefixes. - """ - load_weights_sharded_inplace_nemo2_to_mcore( - model=model, # type: ignore - distributed_checkpoint_dir=initial_ckpt_path, - skip_keys_with_these_prefixes=set(self.initial_ckpt_skip_keys_with_these_prefixes), - ) - - -class IOMixinProto(Protocol): - """A Protocol for the get/set hparam functions of the IOMixin class from NeMo.""" - - def set_hparam(self, attribute: str, value: Any, also_change_value: bool = True) -> None: - """Set the value of an attribute in the config attached to the class by the IOMixin.""" - ... - - def get_hparam(self, attribute: str) -> Any: - """Get the value of an attribute in the config attached to the class by the IOMixin.""" - ... - - -def override_mutate_possibly_extra_mutated_fiddle( - target_cfg: IOMixinProto, source_cfg: IOMixinProto, maybe_mutated_elements_to_clone: List[str] -) -> None: - """Override the values of the target config with the values of the source config for the given elements. - - This will modify the tracked init hyper-parameter values, as well as modifying the associated attributes in - self incase they were modified later by post_init code. - - Args: - target_cfg: The config to update. - source_cfg: The config to copy values from. - maybe_mutated_elements_to_clone: The list of elements to copy from the source config to the target config. - - Returns: - None, the target config is updated in place. - """ - for f in maybe_mutated_elements_to_clone: - # 1. Update the tracked config values. Note that the associated attribute in self may have been modified - # post-init, so we don't want to change the value in self here. We do that separately next. - target_cfg.set_hparam(f, source_cfg.get_hparam(f), also_change_value=False) - # 2. Update the lazily untracked values (if the same variable name is used post-init) - setattr(target_cfg, f, getattr(source_cfg, f)) - - -@dataclass -class TorchmetricsConfig: - """TorchmetricsConfig to instantiate torchmetrics.Metric class. - - Fiddle requires all objects in config serializable and torchmetric.Metric is not. Its instantiation must be deferred into BionemoLightningModule.__init__. - Only support torchmetrics currently, e.g. users can provide 'text.Perplexity' to 'class_path' to use 'torchmetrics.text.Perplexity'. - """ - - class_path: str - task: Literal["lm", "classification", "regression"] - metric_name: str - kwargs: Optional[dict[str, Any]] = None - - def __post_init__(self): - """__post_init__ in dataclass.""" - self.kwargs = {} if self.kwargs is None else self.kwargs - - def get_instance(self) -> torchmetrics.Metric: - """Dynamically imports and instantiates the metric class.""" - if "." in self.class_path: - module_path, class_name = self.class_path.rsplit(".", 1) - module = importlib.import_module(f"torchmetrics.{module_path}") - else: - class_name = self.class_path - module = importlib.import_module("torchmetrics") - - cls_ = getattr(module, class_name) - return cls_(**self.kwargs) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/layers.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/layers.py deleted file mode 100644 index 74d8eb1ec3..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/layers.py +++ /dev/null @@ -1,62 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math -from typing import Sequence - -import torch -import transformer_engine as te -from megatron.core.transformer.transformer_config import TransformerConfig -from megatron.core.utils import divide - - -__all__: Sequence[str] = ("ESM2QueryScaling", "TELayerNorm") - - -class TELayerNorm(te.pytorch.LayerNorm): # noqa: D101 - def __init__(self, config: TransformerConfig, *args, **kwargs) -> None: # noqa: D417 - """A wrapper around transformer engine layernorm that allows it to be initialized with a TransformerConfig. - This allows this method to be used in a megatron layerspec. - - Args: - config (TransformerConfig): The megatron config. This is used for extracing sequence_parallel and zero_centered_gamma. - The rest of the config is not used. - """ # noqa: D205 - # Eps tends to get passed through properly, as does hidden_size, but not other params from the config. - super().__init__( - *args, - zero_centered_gamma=config.layernorm_zero_centered_gamma, - sequence_parallel=config.sequence_parallel, - **kwargs, - ) - - -class ESM2QueryScaling(torch.nn.Module): # noqa: D101 - def __init__(self, config: TransformerConfig, *args, **kwargs) -> None: # noqa: D417 - """A custom layer that scales quary values. - - This layer should replace the q_layernorm=IdentityOp in ESM2 ModuleSpec to reproduce ESM2 - which apply 1/sqrt(hidden_size_per_attention_head) scaling prior to apply_rotary_pos_emb() - - Args: - config (TransformerConfig): The megatron config. This is used for computing projection_size - """ - super().__init__() - projection_size = config.kv_channels * config.num_attention_heads - self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads) - self.sqrt_val = math.sqrt(self.hidden_size_per_attention_head) - - @torch.compile - def forward(self, query, *args, **kwargs): # noqa: D102 - return query / self.sqrt_val diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/loss.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/loss.py deleted file mode 100644 index a13508acd9..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/loss.py +++ /dev/null @@ -1,169 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict, Sequence, Tuple, TypedDict - -import torch -from megatron.core import tensor_parallel -from megatron.core.fusions.fused_cross_entropy import fused_vocab_parallel_cross_entropy -from nemo.lightning.megatron_parallel import ( - MegatronLossReduction, - masked_token_loss, -) -from torch import Tensor - - -__all__: Sequence[str] = ( - "BERTMLMLossWithReduction", - "DataParallelGroupLossAndIO", - "PerTokenLossDict", - "SameSizeLossDict", -) - - -# TODO(@sichu) update typing -class PerTokenLossDict(TypedDict): - """Tensor dictionary for loss. - - This is the return type for a loss that is computed per token in the batch, supporting microbatches of varying sizes. - """ - - loss_sum_and_microbatch_size: Tensor - - -class SameSizeLossDict(TypedDict): - """Tensor dictionary for loss. - - This is the return type for a loss that is computed for the entire batch, where all microbatches are the same size. - """ - - avg: Tensor - - -class DataParallelGroupLossAndIO(TypedDict): - """Average losses across the data parallel group + the original batch and inference output.""" - - avg: Tensor - batch: dict[str, Tensor] - forward_out: dict[str, Tensor] - - -class BERTMLMLossWithReduction(MegatronLossReduction): # noqa: D101 - def __init__(self, validation_step: bool = False, val_drop_last: bool = True) -> None: # noqa: D107 - super().__init__() - self.validation_step = validation_step - self.val_drop_last = val_drop_last - - def forward( - self, batch: Dict[str, Tensor], forward_out: Dict[str, Tensor] - ) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]: - """Forward impl. - - https://github.com/NVIDIA/NeMo/blob/main/nemo/lightning/megatron_parallel.py#L1733 - - Note that Method signature is slightly different from NeMo as the NeMo signature is incorrect. - """ - # neva returns (logits, loss_mask) - if isinstance(forward_out, tuple): - # NOTE(SKH): this comes from NeMo- when does this occur? Directly related to the incorrect method signature. - forward_out, loss_mask = forward_out - batch["loss_mask"] = loss_mask - - if "labels" not in batch: - raise ValueError("Labels not provided in the batch. These are required for this loss computation.") - - # NOTE: token_logits is [sequence, batch] but labels and other fields, including the loss are [batch, sequence] - unreduced_token_loss = unreduced_token_loss_fn(forward_out["token_logits"], batch["labels"]) # [b s] - - loss_sum, num_valid_tokens = masked_token_loss(unreduced_token_loss, batch["loss_mask"]) - - if self.validation_step and not self.val_drop_last and loss_sum.isnan(): - assert num_valid_tokens == 0, "Got NaN loss with non-empty input" - if batch["loss_mask"].count_nonzero() != 0: - raise ValueError("Got NaN loss with non-empty input") - loss_sum = torch.zeros_like(num_valid_tokens) - - num_valid_tokens = num_valid_tokens.clone().detach().to(torch.int) - loss_sum_and_ub_size = torch.cat([loss_sum.clone().detach().view(1), num_valid_tokens.view(1)]) - # Set to 1 to avoid divide by zero in the megatron scheduler: - # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/pipeline_parallel/schedules.py#L303-L308 - if num_valid_tokens.item() == 0: - num_valid_tokens = torch.ones_like(num_valid_tokens) - - return loss_sum, num_valid_tokens, {"loss_sum_and_ub_size": loss_sum_and_ub_size} - - def reduce(self, losses_reduced_per_micro_batch) -> torch.Tensor: - """Loss reduction impl. - - Taken from: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py#L534-L552 . - """ - if losses_reduced_per_micro_batch: - if "avg" in losses_reduced_per_micro_batch[0]: - # legacy behavior, average over the number of microbatches - avg = [x["avg"] for x in losses_reduced_per_micro_batch] - loss = torch.cat(avg).mean() - return loss - - from megatron.core import parallel_state - - loss_sum_and_ub_size = [ - x["loss_sum_and_ub_size"] for x in losses_reduced_per_micro_batch if x["loss_sum_and_ub_size"][1] > 0 - ] - loss = ( - torch.vstack(loss_sum_and_ub_size).sum(dim=0) - if len(loss_sum_and_ub_size) > 0 - else torch.tensor([0.0, 0.0], device=torch.cuda.current_device()) - ) - torch.distributed.all_reduce( - loss, - group=parallel_state.get_data_parallel_group(with_context_parallel=True), - ) - # average over the total number of tokens across the global batch. - loss = loss[0] / loss[1] - - return loss - - return torch.tensor(0.0, device=torch.cuda.current_device()) - - -def unreduced_token_loss_fn(logits: Tensor, labels: Tensor, cross_entropy_loss_fusion: bool = False) -> Tensor: - """Computes the unreduced token loss given the logits and labels without regard to the loss mask. - - WARNING: This function does not apply a loss mask. Also, it does inplace operation on the inputs. - - Args: - logits (Tensor): The predicted logits of shape [sequence_length, batch_size, num_classes]. - labels (Tensor): The true labels of shape [batch_size, sequence_length]. - cross_entropy_loss_fusion (bool): If True, use the fused kernel version of vocab parallel cross entropy. This - should generally be preferred for speed as it packs more operations into a single kernel on the GPU. However - some users have observed reduced training stability when using this method. - - Returns: - Tensor: The unreduced token loss of shape [batch_size, sequence_length]. - """ - labels = labels.transpose(0, 1).contiguous() # [b, s] -> [s, b] - if cross_entropy_loss_fusion: - loss = fused_vocab_parallel_cross_entropy(logits, labels) - else: - loss = tensor_parallel.vocab_parallel_cross_entropy(logits, labels) - # [s b] => [b, s] - loss = loss.transpose(0, 1).contiguous() - return loss - - -def unreduced_sequence_loss_fn(self, logits: Tensor, labels: Tensor) -> Tensor: - # TODO (@jstjohn): implement this function to handle the next sequence prediction task - # TODO (@jstjohn): determine expected shapes of logits/labels in this case and add that to the docstring - raise NotImplementedError("Sequence loss not implemented yet.") diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/model/lr_scheduler.py b/sub-packages/bionemo-llm/src/bionemo/llm/model/lr_scheduler.py deleted file mode 100644 index 29e7081a98..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/model/lr_scheduler.py +++ /dev/null @@ -1,142 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import List, Optional, Sequence, TypedDict - -from nemo.lightning.pytorch.optim.lr_scheduler import LRSchedulerModule -from nemo.lightning.pytorch.optim.megatron import MegatronOptimizerModule -from torch.optim.lr_scheduler import _LRScheduler - -from bionemo.llm.model.biobert.model import MegatronBioBertModel - - -__all__: Sequence[str] = ( - "SchedulerOutput", - "WarmupAnnealDecayHold", - "WarmupAnnealDecayHoldScheduler", -) - - -class SchedulerOutput(TypedDict): - """Output of the scheduler method.""" - - optimizer: MegatronOptimizerModule - lr_scheduler: dict - monitor: str - - -class WarmupAnnealDecayHold(_LRScheduler): - """Warmup Anneal Decay Hold learning rate scheduler.""" - - def __init__( - self, - optimizer: MegatronOptimizerModule, - *, - warmup_steps: Optional[int] = None, - max_steps: Optional[int] = None, - max_lr: Optional[float] = None, - min_lr: float = 4e-5, - anneal_percentage: float = 0.10, - last_epoch: int = -1, - ) -> None: - """Initializes the WarmupAnnealDecayHold learning rate scheduler. - - Args: - optimizer: Optimizer to apply the learning rate scheduler. - warmup_steps (int): Number of steps for the linear warm-up. - max_steps (int): Total number of training steps. - max_lr (float): Peak learning rate to be achieved after warm-up. - min_lr (float): Minimum learning rate. - anneal_percentage (float): Percentage of the max_lr to hold after decay. - last_epoch (int): The index of the last epoch. - """ - self.warmup_steps = warmup_steps - self.max_steps = max_steps - self.max_lr = max_lr - self.min_lr = min_lr - self.anneal_percentage = anneal_percentage - self.last_epoch = last_epoch - - for group in optimizer.param_groups: - group.setdefault("initial_lr", max_lr) - - super(WarmupAnnealDecayHold, self).__init__(optimizer, last_epoch) - - def get_lr(self) -> List[float]: - """Get the learning rate at the current step.""" - step_num = self.last_epoch - if step_num < self.warmup_steps: - lr = self.min_lr + (self.max_lr - self.min_lr) * step_num / self.warmup_steps - else: - decay_steps = self.max_steps - self.warmup_steps - lr = self.max_lr * (1 - (step_num - self.warmup_steps) / decay_steps) - lr = max(lr, self.max_lr * self.anneal_percentage) - - return [lr for _ in self.optimizer.param_groups] - - -class WarmupAnnealDecayHoldScheduler(LRSchedulerModule): - """Warmup Policy Learning Rate Scheduler.""" - - def __init__( - self, - warmup_steps: int = 2000, - max_steps: int = 500_000, - max_lr: float = 4e-4, - min_lr: float = 4e-5, - anneal_percentage: float = 0.10, - interval: str = "step", - frequency: int = 1, - monitor: str = "val_loss", - ) -> None: - """Initializes the WarmupAnnealDecayHoldScheduler.""" - super().__init__() - self.warmup_steps = warmup_steps - self.max_steps = max_steps - self.max_lr = max_lr - self.min_lr = min_lr - self.anneal_percentage = anneal_percentage - self.interval = interval - self.frequency = frequency - self.monitor = monitor - - def scheduler(self, model: MegatronBioBertModel, optimizer: MegatronOptimizerModule) -> SchedulerOutput: - """Returns the scheduler output.""" - lr_scheduler = WarmupAnnealDecayHold( - optimizer, - warmup_steps=self.warmup_steps, - max_steps=self.max_steps, - max_lr=self.max_lr, - min_lr=self.min_lr, - anneal_percentage=self.anneal_percentage, - ) - return { - "optimizer": optimizer, - # REQUIRED: The scheduler instance - "lr_scheduler": { - "scheduler": lr_scheduler, - # `interval` is the unit of the scheduler's step size, could also be 'step'. - # 'epoch' updates the scheduler on epoch end whereas 'step' - # updates it after a optimizer update. - "interval": self.interval, - # How many epochs/steps should pass between calls to - # `scheduler.step()`. 1 corresponds to updating the learning - # rate after every epoch/step. - "frequency": self.frequency, - }, - # Metric to to monitor for schedulers like `ReduceLROnPlateau` - "monitor": self.monitor, - } diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/run/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/run/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/run/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/run/config_models.py b/sub-packages/bionemo-llm/src/bionemo/llm/run/config_models.py deleted file mode 100644 index c228d3c053..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/run/config_models.py +++ /dev/null @@ -1,444 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pathlib -from abc import ABC, abstractmethod -from dataclasses import field -from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar - -import lightning.pytorch as pl -import torch -from pydantic import BaseModel, field_serializer, field_validator, model_validator -from torch.nn import functional as F - -from bionemo.core.utils import dtypes -from bionemo.llm.model.biobert.model import BioBertConfig -from bionemo.llm.model.biobert.transformer_specs import BiobertSpecOption -from bionemo.llm.utils.logger_utils import WandbConfig - - -ModelConfigT = TypeVar("ModelConfigT", bound=BioBertConfig) -DataModuleT = TypeVar("DataModuleT", bound=pl.LightningDataModule) - -# Activation functions not available in torch.nn.functional require custom serialization/validation. Add them here with a lookup key. -CUSTOM_ACTIVATION_FNS: Dict[str, Callable[[torch.Tensor, Any], torch.Tensor]] = {} - -# DO NOT use keys that already exist in torch.nn.functional, as the torch.nn.functional functions are selected first. -for key in CUSTOM_ACTIVATION_FNS: - assert key not in dir(torch.nn.functional), f"Key {key} already exists in torch.nn.functional" - -# It does not matter if values are duplicated as the key=>value mapping still does the right thing. Repeat values should be considered aliases. -REVERSE_CUSTOM_ACTIVATION_FNS: Dict[Callable[[torch.Tensor, Any], torch.Tensor], str] = { - v: k for k, v in CUSTOM_ACTIVATION_FNS.items() -} - - -def deserialize_str_to_path(path: str) -> pathlib.Path: - """General purpose deserialize for string/path objects. Since YAML has no native representation for pathlib.Path, we serialize to strings. Import this method as a @field_validator.""" - return pathlib.Path(path) - - -def serialize_path_or_str(path: str | pathlib.Path) -> str: - """General purpose serialization for string/path objects. Since YAML has no native representation for pathlib.Path, we serialize to strings. Import this method as a @field_serializer.""" - if isinstance(path, pathlib.Path): - return str(path) - elif isinstance(path, str): - return path - else: - raise ValueError(f"Expected str or pathlib.Path, got {type(path)}") - - -class DataConfig(BaseModel, Generic[DataModuleT], ABC): - """Base class for all data configurations. - - This class is used to define the interface for all data configurations. It is used to define the data module that - will be used in the training loop. - """ - - micro_batch_size: int = 8 - result_dir: str | pathlib.Path = "./results" - num_dataset_workers: int = 0 - seq_length: int = 128 - - @field_serializer("result_dir") - def serialize_paths(self, value: pathlib.Path) -> str: # noqa: D102 - return serialize_path_or_str(value) - - @field_validator("result_dir") - def deserialize_paths(cls, value: str) -> pathlib.Path: # noqa: D102 - return deserialize_str_to_path(value) - - @abstractmethod - def construct_data_module(self, global_batch_size: int) -> DataModuleT: - """Construct the data module from the configuration. Cannot be defined generically.""" - ... - - def custom_model_validator(self, global_cfg: "MainConfig") -> "MainConfig": - """Use custom implementation of this method to define the things inside global_config. - - The following expression will always be true: - - global_cfg.data_config == self - """ - return global_cfg - - -class ExposedModelConfig(BaseModel, Generic[ModelConfigT], ABC): - """BioNeMo model configuration class, wraps TransformerConfig and friends. - - This class is used to define the interface for all model configurations. It is **Exposed** to guard against ill-typed - or poorly defined fields in the underlying configuration objects. `ModelConfigT` declares the associated type of the - underlying config (most commonly a BioBertGenericConfig, but could also be a TransformerConfig or something similar). - Children should try to expose the minimal set of fields necessary for the user to configure the model while keeping - the more esoteric configuration private to the underlying ModelConfigT. - """ - - # Restores weights from a pretrained checkpoint - initial_ckpt_path: Optional[str] = None - # Does not attempt to load keys with these prefixes (useful if you attached extra parameters and still want to load a set of weights) - initial_ckpt_skip_keys_with_these_prefixes: List[str] = field(default_factory=list) - - # Pydantic stuff to allow arbitrary types + validators + serializers - class Config: # noqa: D106 - arbitrary_types_allowed = True - - def model_class(self) -> Type[ModelConfigT]: - """Returns the underlying model class that this config wraps.""" - raise NotImplementedError - - def custom_model_validator(self, global_cfg: "MainConfig") -> "MainConfig": - """Use custom implementation of this method to define the things inside global_config. - - The following expression will always be true: - - global_cfg.bionemo_model_config == self - """ - return global_cfg - - def exposed_to_internal_bionemo_model_config(self) -> ModelConfigT: - """Converts the exposed dataclass to the underlying Transformer config. - - The underlying ModelConfigT may both be incomplete and unserializable. We use this transformation as a way to - hide fields that are either not serializable by Pydantic or that we do not want to expose. - """ - cls: Type[ModelConfigT] = self.model_class() - model_dict = {} - for attr in self.model_fields: - if attr not in model_dict and attr in cls.__dataclass_fields__: - model_dict[attr] = getattr(self, attr) - - # Now set fp16 and bf16 based on the precision for the underlying TransformerConfig=>ParallelConfig - # the only constraint is that both must not be true. - model_dict["bf16"] = self.pipeline_dtype == dtypes.precision_to_dtype["bf16-mixed"] - model_dict["fp16"] = self.pipeline_dtype == dtypes.precision_to_dtype["16-mixed"] - result = cls(**model_dict) - - return result - - # NOTE: See PrecisionTypes for a list of valid literals that may be deserialized. - params_dtype: torch.dtype - pipeline_dtype: torch.dtype - autocast_dtype: torch.dtype - - num_layers: int = 6 - hidden_size: int = 256 - ffn_hidden_size: int = 512 - num_attention_heads: int = 4 - seq_length: int = 512 - fp32_residual_connection: bool = False - hidden_dropout: float = 0.02 - init_method_std: float = 0.02 - kv_channels: Optional[int] = None - apply_query_key_layer_scaling: bool = False - make_vocab_size_divisible_by: int = 128 - masked_softmax_fusion: bool = True - fp16_lm_cross_entropy: bool = False - gradient_accumulation_fusion: bool = False - layernorm_zero_centered_gamma: bool = False - layernorm_epsilon: float = 1.0e-12 - activation_func: Callable[[torch.Tensor, Any], torch.Tensor] = F.gelu - qk_layernorm: bool = False - apply_residual_connection_post_layernorm: bool = False - bias_activation_fusion: bool = True - bias_dropout_fusion: bool = True - get_attention_mask_from_fusion: bool = False - attention_dropout: float = 0.1 - share_embeddings_and_output_weights: bool = True - enable_autocast: bool = False - nemo1_ckpt_path: Optional[str] = None - biobert_spec_option: BiobertSpecOption = BiobertSpecOption.bert_layer_with_transformer_engine_spec - - @field_serializer("biobert_spec_option") - def serialize_spec_option(self, value: BiobertSpecOption) -> str: # noqa: D102 - return value.value - - @field_validator("biobert_spec_option", mode="before") - def deserialize_spec_option(cls, value: str) -> BiobertSpecOption: # noqa: D102 - return BiobertSpecOption(value) - - @field_validator("activation_func", mode="before") - @classmethod - def validate_activation_func(cls, activation_func: str) -> Callable: - """Validates the activation function, assumes this function exists in torch.nn.functional. - - For custom activation functions, use the CUSTOM_ACTIVATION_FUNCTIONS dictionary in the module. This method - validates the provided activation function string and returns a callable function based on the validation - context using the provided validator in the base class. - - Args: - activation_func (str): The activation function to be validated. - context (ValidationInfo): The context for validation. - - Returns: - Callable: A callable function after validation. - - See Also: - CUSTOM_ACTIVATION_FNS - """ - func = getattr(torch.nn.functional, activation_func.lower(), None) - if func is None and activation_func in CUSTOM_ACTIVATION_FNS: - func = CUSTOM_ACTIVATION_FNS[activation_func] - return func - elif func is None: - raise ValueError( - f"activation_func must be a valid function in `torch.nn.functional`, got {activation_func=}" - ) - else: - return func - - @field_serializer("activation_func") - def serialize_activation_func(self, v: Callable[[torch.Tensor, Any], torch.Tensor]) -> str: - """Serializes a given activation function to its corresponding string representation. - - By default, all activation functions from `torch.nn.functional` are serialized to their name. User defined - activation functions should also be defined here with a custom mapping in CUSTOM_ACTIVATION_FNS defined at the - top of this file. This allows our Pydantic model to serialize and deserialize the activation function. - - Args: - v (Callable[[torch.Tensor, Any], torch.Tensor]): The activation function to serialize. - - Returns: - str: The name of the activation function if it is a standard PyTorch function, - or the corresponding serialization key if it is a custom activation function. - - Raises: - ValueError: If the activation function is not supported. - """ - func_name = v.__name__ - func = getattr(torch.nn.functional, func_name, None) - if func is not None: - return func_name - elif func in REVERSE_CUSTOM_ACTIVATION_FNS: - return REVERSE_CUSTOM_ACTIVATION_FNS[func] # Get the serialization key - else: - raise ValueError(f"Unsupported activation function: {v}") - - @field_validator("params_dtype", "pipeline_dtype", "autocast_dtype", mode="before") - @classmethod - def precision_validator(cls, v: dtypes.PrecisionTypes) -> torch.dtype: - """Validates the precision type and returns the corresponding torch dtype.""" - return dtypes.get_autocast_dtype(v) - - @field_serializer("params_dtype", "pipeline_dtype", "autocast_dtype") - def serialize_dtypes(self, v: torch.dtype) -> dtypes.PrecisionTypes: - """Serializes the torch dtype to the corresponding precision type.""" - return dtypes.dtype_to_precision[v] - - -class ParallelConfig(BaseModel): - """ParallelConfig is a configuration class for setting up parallelism in model training. - - Attributes: - tensor_model_parallel_size (int): The size of the tensor model parallelism. Default is 1. - pipeline_model_parallel_size (int): The size of the pipeline model parallelism. Default is 1. - accumulate_grad_batches (int): The number of batches to accumulate gradients over. Default is 1. - ddp (Literal["megatron"]): The distributed data parallel method to use. Default is "megatron". - remove_unused_parameters (bool): Whether to remove unused parameters. Default is True. - num_devices (int): The number of devices to use. Default is 1. - num_nodes (int): The number of nodes to use. Default is 1. - - Methods: - validate_devices(): Validates the number of devices based on the tensor and pipeline model parallel sizes. - """ - - tensor_model_parallel_size: int = 1 - pipeline_model_parallel_size: int = 1 - accumulate_grad_batches: int = 1 - ddp: Literal["megatron"] = "megatron" - remove_unused_parameters: bool = True - use_distributed_optimizer: bool = True - num_devices: int = 1 - num_nodes: int = 1 - - @model_validator(mode="after") - def validate_devices(self): - """Validates the number of devices based on the tensor and pipeline model parallel sizes.""" - if self.num_devices < self.tensor_model_parallel_size * self.pipeline_model_parallel_size: - raise ValueError("devices must be divisible by tensor_model_parallel_size * pipeline_model_parallel_size") - return self - - -class TrainingConfig(BaseModel): - """TrainingConfig is a configuration class for training models. - - Attributes: - max_steps (int): The maximum number of training steps. - limit_val_batches (int | float): The number of validation batches to use. Can be a fraction or a count. - val_check_interval (int): The interval (in steps) at which to check validation. - precision (Literal["32", "bf16-mixed", "16-mixed"], optional): The precision to use for training. Defaults to "bf16-mixed". - accelerator (str, optional): The type of accelerator to use for training. Defaults to "gpu". - gc_interval (int, optional): The interval of global steps at which to run synchronized garbage collection. Useful for synchronizing garbage collection when performing distributed training. Defaults to 0. - include_perplexity (bool, optional): Whether to include perplexity in the validation logs. Defaults to False. - enable_checkpointing (bool, optional): Whether to enable checkpointing and configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint. Corresponds to the same parameter name in pl.Trainer - """ - - max_steps: int - limit_val_batches: int | float # Because this can be a fraction or a count... - val_check_interval: int - precision: Literal["32", "bf16-mixed", "16-mixed"] = "bf16-mixed" - accelerator: str = "gpu" - # NOTE: VERY important for distributed training performance. - gc_interval: int = 0 - log_train_ppl: bool = False - log_val_ppl: bool = True - enable_checkpointing: bool = True - create_tflops_callback: bool = False - - -class OptimizerSchedulerConfig(BaseModel): - """Configuration for the optimizer and learning rate scheduler. - - Attributes: - lr (float): Learning rate for the optimizer. Default is 1e-4. - optimizer (str): Type of optimizer to use. Default is "adam". - interval (str): Interval for updating the learning rate scheduler. Default is "step". - monitor (str): Metric to monitor for learning rate adjustments. Default is "val_loss". - interval (str): Interval for updating the learning rate scheduler. Default is "step". - monitor (str): Metric to monitor for learning rate adjustments. Default is "val_loss". - warmup_steps (int): Number of warmup steps for use with the warmup annealing learning rate scheduler. Default is 0. - lr_scheduler (Literal['warmup_anneal', 'cosine']): Type of learning rate scheduler to use. Default is 'warmup_anneal'. NOTE this is likely to change. - max_steps (Optional[int]): max_steps used in optimizer. Default to None which uses max_steps from TrainingConfig. - """ - - lr: float = 1e-4 - sgd_momentum: float = 0.9 - adam_eps: float = 1e-8 - weight_decay: float = 0.01 - use_distributed_optimizer: bool = True - optimizer: str = "adam" - interval: str = "step" - monitor: str = "val_loss" - cosine_rampup_frac: float = 0.01 - cosine_hold_frac: float = 0.05 - warmup_steps: int = 0 - lr_scheduler: Literal["warmup_anneal", "cosine"] = "warmup_anneal" - max_steps: Optional[int] = None - - -class ExperimentConfig(BaseModel): - """Configuration class for setting up and managing experiment parameters. - - Attributes: - save_every_n_steps (int): Number of steps between saving checkpoints. - result_dir (str | pathlib.Path): Directory where results will be saved. - experiment_name (str): Name of the experiment. - restore_from_checkpoint_path (Optional[str]): Path to restore from a checkpoint. Note: This does not invoke the checkpoint callback as expected. - save_last_checkpoint (bool): Flag to save the last checkpoint. Default is True. - metric_to_monitor_for_checkpoints (str): Metric to monitor for saving top-k checkpoints. Default is "reduced_train_loss". - save_top_k (int): Number of top checkpoints to save based on the monitored metric. Default is 2. - create_tensorboard_logger (bool): Flag to create a TensorBoard logger. Default is False. - create_checkpoint_callback (bool): Flag to create a ModelCheckpoint callback - """ - - save_every_n_steps: int - result_dir: str | pathlib.Path - experiment_name: str - # NOTE: restore_from_checkpoint_path does not invoke the checkpoint callback in the way we'd like. Avoid using. - restore_from_checkpoint_path: Optional[str] - save_last_checkpoint: bool = True - metric_to_monitor_for_checkpoints: str = "reduced_train_loss" - save_top_k: int = 2 - create_tensorboard_logger: bool = False - create_checkpoint_callback: bool = True - - @field_serializer("result_dir") - def serialize_paths(self, value: pathlib.Path) -> str: # noqa: D102 - return serialize_path_or_str(value) - - @field_validator("result_dir") - def deserialize_paths(cls, value: str) -> pathlib.Path: # noqa: D102 - return deserialize_str_to_path(value) - - -# DataConfig -> some config that can make a data module (see ABC definition.) -DataConfigT = TypeVar("DataConfigT", bound=DataConfig) -# ExposedModelConfig -> some config that can make a non-exposed model config (see ABC definition.) -ExModelConfigT = TypeVar("ExModelConfigT", bound=ExposedModelConfig) - - -class MainConfig(BaseModel, Generic[ExModelConfigT, DataConfigT]): - """Main configuration class for BioNeMo. All serialized configs that are a valid MainConfig should be Runnable. - - This class is used to define the main configuration for BioNeMo. It defines the minimal pieces of configuration - to execution a training job with the NeMo2 training api. It accepts two generic type parameters which users - must define in their own environment for execution. - - Additionally, this class assumes that the configs for ExposedModelConfig and DataConfig may have custom validators - implemented that operate on the entire MainConfig. This prevents the need from type based conditionals inside this - class while still allowing for custom validation global logic to be implemented in the underlying classes. For example, - some models may want to restrict their Datamodules seq_length to a certain value. - - - Args: - data_config: Generic config type that contains instructions on instantiating the required DataModule. - parallel_config: The parallel configuration for the model. - training_config: The training configuration for the model. - bionemo_model_config: Generic ExposedModelConfig type. This class hides extra configuration parameters in the - underlying model configuration as well as providing - optim_config: The optimizer/scheduler configuration for the model. - experiment_config: The experiment configuration for the model. - wandb_config: Optional, the wandb configuration for the model. - """ - - data_config: DataConfigT - parallel_config: ParallelConfig - training_config: TrainingConfig - bionemo_model_config: ExModelConfigT - optim_config: OptimizerSchedulerConfig - experiment_config: ExperimentConfig - wandb_config: Optional[WandbConfig] = None - - @model_validator(mode="after") - def validate_master_config(self) -> "MainConfig": - """Validates the master configuration object.""" - self.bionemo_model_config.seq_length = self.data_config.seq_length - return self - - @model_validator(mode="after") - def run_bionemo_model_config_model_validators(self) -> "MainConfig": - """Runs the model validators on the bionemo_model_config.""" - return self.bionemo_model_config.custom_model_validator(self) - - @model_validator(mode="after") - def run_data_config_model_validators(self) -> "MainConfig": - """Runs the model validators on the data_config.""" - return self.data_config.custom_model_validator(self) - - @model_validator(mode="after") - def validate_checkpointing_setting(self) -> "MainConfig": - """Validates the master configuration object.""" - self.training_config.enable_checkpointing = self.experiment_config.create_checkpoint_callback - return self diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/train.py b/sub-packages/bionemo-llm/src/bionemo/llm/train.py deleted file mode 100644 index 84417f60b2..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/train.py +++ /dev/null @@ -1,292 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import math -import pathlib -from dataclasses import field -from types import SimpleNamespace -from typing import Optional - -from lightning.pytorch.callbacks import LearningRateMonitor, RichModelSummary -from megatron.core.distributed import DistributedDataParallelConfig -from megatron.core.optimizer import OptimizerConfig -from nemo import lightning as nl -from nemo.collections import llm -from nemo.lightning import resume -from nemo.lightning.pytorch import callbacks as nl_callbacks -from nemo.lightning.pytorch.callbacks.flops_callback import FLOPsMeasurementCallback -from nemo.lightning.pytorch.optim import MegatronOptimizerModule -from nemo.lightning.pytorch.optim.lr_scheduler import CosineAnnealingScheduler -from nemo.utils import logging -from nemo.utils.exp_manager import TimingCallback -from pydantic import BaseModel - -from bionemo.core.utils.dtypes import get_autocast_dtype -from bionemo.llm.lightning import BionemoLightningModule -from bionemo.llm.model.biobert.lightning import biobert_lightning_module -from bionemo.llm.model.lr_scheduler import WarmupAnnealDecayHoldScheduler -from bionemo.llm.run.config_models import ( - DataConfig, - DataModuleT, - ExperimentConfig, - ExposedModelConfig, - OptimizerSchedulerConfig, - ParallelConfig, - TrainingConfig, -) -from bionemo.llm.utils.datamodule_utils import infer_global_batch_size -from bionemo.llm.utils.logger_utils import WandbConfig, setup_nemo_lightning_logger - - -class NsysConfig(BaseModel): - """Configuration for nsys profiling.""" - - start_step: int = 0 - end_step: Optional[int] = None - ranks: list[int] = field(default_factory=lambda: [0]) - - -def nemo_logger_factory(experiment_config: ExperimentConfig, wandb_config: Optional[WandbConfig]) -> nl.NeMoLogger: - """Creates and returns a NeMoLogger instance configured based on the provided experiment and wandb configurations. - - Args: - experiment_config (ExperimentConfig): Configuration object containing experiment settings such as - result directory, experiment name, checkpoint settings, and logger preferences. - wandb_config (Optional[WandbConfig]): Optional configuration object for Weights and Biases logging. - - Returns: - nl.NeMoLogger: An instance of NeMoLogger configured with the specified settings. - """ - if experiment_config.create_checkpoint_callback: - checkpoint_callback = nl_callbacks.ModelCheckpoint( - save_last=experiment_config.save_last_checkpoint, - monitor=experiment_config.metric_to_monitor_for_checkpoints, - save_top_k=experiment_config.save_top_k, - every_n_train_steps=experiment_config.save_every_n_steps, - always_save_context=True, - filename="{epoch}-{val_loss:.2f}-{step}-{consumed_samples}", # Including step and consumed_samples in the checkpoint filename prevents duplicate filenames and bugs related to this. - ) - else: - checkpoint_callback = None - - nemo_logger = setup_nemo_lightning_logger( - root_dir=experiment_config.result_dir, - name=experiment_config.experiment_name, - initialize_tensorboard_logger=experiment_config.create_tensorboard_logger, - wandb_config=wandb_config, - ckpt_callback=checkpoint_callback, - ) - return nemo_logger - - -def setup_trainer( - parallel_config: ParallelConfig, - training_config: TrainingConfig, - callbacks=None, - nsys_config: NsysConfig | None = None, -) -> nl.Trainer: - """Set up the trainer for model training using the specified parallel and training configurations. - - Args: - parallel_config (ParallelConfig): Configuration for parallelism, including tensor and pipeline model parallel sizes, - number of devices, and number of nodes. - training_config (TrainingConfig): Configuration for training, including maximum steps, accelerator type, - validation batch limit, validation check interval, and precision. - callbacks (list, optional): List of callback functions to be used during training. Defaults to None, - in which case default callbacks (RichModelSummary and LearningRateMonitor) are used. - nsys_config (NsysConfig, optional): Configuration for nsys profiling. If None, is disabled. - - Returns: - nl.Trainer: Configured trainer object ready for model training. - """ - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=parallel_config.tensor_model_parallel_size, - pipeline_model_parallel_size=parallel_config.pipeline_model_parallel_size, - pipeline_dtype=get_autocast_dtype(training_config.precision), - ddp=DistributedDataParallelConfig( - check_for_nan_in_grad=True, - overlap_grad_reduce=True, - overlap_param_gather=False, # TODO waiting for NeMo fix - average_in_collective=True, - use_distributed_optimizer=parallel_config.use_distributed_optimizer, - ), - find_unused_parameters=True, - gradient_as_bucket_view=True, - ckpt_include_optimizer=True, - ckpt_async_save=True, - ckpt_parallel_load=True, - ) - if callbacks is None: - callbacks = [ - RichModelSummary(max_depth=4), - LearningRateMonitor(), - ] - - if training_config.gc_interval > 0: - callbacks.append( - nl_callbacks.GarbageCollectionCallback( - gc_interval_train=training_config.gc_interval, gc_interval_val=training_config.gc_interval - ) - ) - - if nsys_config: - if nsys_config.end_step is None: - nsys_config.end_step = training_config.max_steps - callbacks.append( - nl_callbacks.NsysCallback( - start_step=nsys_config.start_step, - end_step=nsys_config.end_step, - ranks=nsys_config.ranks, - gen_shape=True, - ) - ) - - trainer = nl.Trainer( - devices=parallel_config.num_devices, - max_steps=training_config.max_steps, - accelerator=training_config.accelerator, - strategy=strategy, - limit_val_batches=training_config.limit_val_batches, - val_check_interval=training_config.val_check_interval, - num_nodes=parallel_config.num_nodes, - callbacks=callbacks, - plugins=nl.MegatronMixedPrecision( - precision=training_config.precision, - params_dtype=get_autocast_dtype(training_config.precision), - pipeline_dtype=get_autocast_dtype(training_config.precision), - grad_reduce_in_fp32=False, - autocast_enabled=False, - ), - enable_checkpointing=training_config.enable_checkpointing, - ) - return trainer - - -def train( - bionemo_exposed_model_config: ExposedModelConfig, - data_config: DataConfig[DataModuleT], - parallel_config: ParallelConfig, - training_config: TrainingConfig, - optim_config: OptimizerSchedulerConfig, - experiment_config: ExperimentConfig, - wandb_config: Optional[WandbConfig], - nsys_config: Optional[NsysConfig] = None, - resume_if_exists: bool = True, -): - """Train a BioNemo model using the provided configurations. Uses the ExposedModelConfig and DataConfig as the primary variants for this method. - - Args: - bionemo_exposed_model_config (ExposedModelConfig): Configuration for the exposed BioNemo model. - data_config (DataConfig[DataModuleT]): Configuration for the data module. - parallel_config (ParallelConfig): Configuration for parallel training. - training_config (TrainingConfig): Configuration for training parameters. - optim_config (OptimizerSchedulerConfig): Configuration for the optimizer and scheduler. - experiment_config (ExperimentConfig): Configuration for the experiment. - wandb_config (Optional[WandbConfig]): Configuration for Weights and Biases logging.n - nsys_config (Optional[NsysConfig], optional): Configuration for nsys profiling. If None, is disabled. - resume_if_exists (bool, optional): Flag to resume training if a checkpoint exists. Defaults to True. - """ - bionemo_model_config = bionemo_exposed_model_config.exposed_to_internal_bionemo_model_config() - pathlib.Path(data_config.result_dir).mkdir(parents=True, exist_ok=True) - - if experiment_config.save_every_n_steps != training_config.val_check_interval: - logging.warning("Mutating training_config.save_every_n_steps to be equal to val_check_interval.") - experiment_config.save_every_n_steps = training_config.val_check_interval - - global_batch_size = infer_global_batch_size( - micro_batch_size=data_config.micro_batch_size, - num_nodes=parallel_config.num_nodes, - devices=parallel_config.num_devices, - accumulate_grad_batches=parallel_config.accumulate_grad_batches, - tensor_model_parallel_size=parallel_config.tensor_model_parallel_size, - pipeline_model_parallel_size=parallel_config.pipeline_model_parallel_size, - ) - - data: DataModuleT = data_config.construct_data_module(global_batch_size) - # TODO BioBertDataModule or BioBertTokenizer abstractions. We know all DataModuleT in this case has data.tokenizer, - # although this constraint is not documented. - - # TODO: need an abstraction for LrSchedulerConfig - if optim_config.lr_scheduler == "cosine": - lr_scheduler = CosineAnnealingScheduler( - max_steps=training_config.max_steps if optim_config.max_steps is None else optim_config.max_steps, - min_lr=optim_config.lr / 100, - warmup_steps=math.ceil(training_config.max_steps * optim_config.cosine_rampup_frac), - interval=optim_config.interval, - monitor=optim_config.monitor, - constant_steps=math.ceil(training_config.max_steps * optim_config.cosine_hold_frac), - ) - elif optim_config.lr_scheduler == "warmup_anneal": - lr_scheduler = WarmupAnnealDecayHoldScheduler( - warmup_steps=optim_config.warmup_steps, - max_steps=training_config.max_steps if optim_config.max_steps is None else optim_config.max_steps, - max_lr=optim_config.lr, - min_lr=optim_config.lr / 10.0, - anneal_percentage=0.10, - ) - else: - raise NotImplementedError(f"Scheduler {optim_config.lr_scheduler} not implemented.") - - optimizer = MegatronOptimizerModule( - config=OptimizerConfig( - lr=optim_config.lr, - weight_decay=optim_config.weight_decay, - sgd_momentum=optim_config.sgd_momentum, - adam_eps=optim_config.adam_eps, - optimizer=optim_config.optimizer, - use_distributed_optimizer=parallel_config.use_distributed_optimizer, - fp16=bionemo_model_config.fp16, - bf16=bionemo_model_config.bf16, - ), - lr_scheduler=lr_scheduler, - ) - - model: BionemoLightningModule = biobert_lightning_module( - config=bionemo_model_config, - tokenizer=data.tokenizer, - optimizer=optimizer, - ) - # NOTE (SKH): lifted default callbacks out of setup_trainer - callbacks = [ - RichModelSummary(max_depth=4), - LearningRateMonitor(), - TimingCallback(), # Required for certain plugins such as FLOPsMeasurement - ] - if training_config.create_tflops_callback: - dummy_data_module = SimpleNamespace() - dummy_data_module.global_batch_size = ( - global_batch_size # TODO(dorotat): remove this change after FLOPsMeasurementCallback is refactored - ) - dummy_data_module.tokenizer_vocab_size = data.vocab_size - flop_meas_callback = FLOPsMeasurementCallback( - bionemo_model_config, - dummy_data_module, - "bert", - ) - callbacks.append(flop_meas_callback) - trainer: nl.Trainer = setup_trainer(parallel_config, training_config, nsys_config=nsys_config, callbacks=callbacks) - nemo_logger: nl.NeMoLogger = nemo_logger_factory(experiment_config, wandb_config=wandb_config) - - llm.train( - model=model, - data=data, - trainer=trainer, - log=nemo_logger, - resume=resume.AutoResume( - resume_if_exists=resume_if_exists, - resume_ignore_no_checkpoint=True, - ), - ) diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/__init__.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/callbacks.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/callbacks.py deleted file mode 100644 index 00d735fdf2..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/callbacks.py +++ /dev/null @@ -1,225 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -from typing import Any, Literal, Sequence - - -try: # Python 3.12+ - from typing import override -except ImportError: # Python < 3.12 - from typing_extensions import override - -import lightning.pytorch as pl -import torch -from lightning.pytorch.callbacks import BasePredictionWriter -from megatron.core import parallel_state -from nemo.utils import logging as logger - -from bionemo.llm.lightning import batch_collator - - -IntervalT = Literal["epoch", "batch"] - - -class PredictionWriter(BasePredictionWriter, pl.Callback): - """A callback that writes predictions to disk at specified intervals during training. - - Logits, Embeddings, Hiddens, Input IDs, and Labels may all be saved to the disk depending on trainer configuration. - Batch Idxs are provided for each prediction in the same dictionary. These must be used to maintain order between - multi device predictions and single device predictions. - """ - - def __init__( - self, - output_dir: str | os.PathLike, - write_interval: IntervalT, - batch_dim_key_defaults: dict[str, int] | None = None, - seq_dim_key_defaults: dict[str, int] | None = None, - save_all_model_parallel_ranks: bool = False, - files_per_subdir: int | None = None, - ): - """Initializes the callback. - - Args: - output_dir: The directory where predictions will be written. - write_interval: The interval at which predictions will be written (batch, epoch). Epoch may not be used with - multi-device trainers. - batch_dim_key_defaults: The default batch dimension for each key, if different from the standard 0. - seq_dim_key_defaults: The default sequence dimension for each key, if different from the standard 1. - save_all_model_parallel_ranks: Whether to save predictions for all model parallel ranks. Generally these - will be redundant. - files_per_subdir: Number of files to write to each subdirectory. If provided, subdirectories with N files - each will be created. Ignored unless write_interval is 'batch'. - """ - super().__init__(write_interval) - self.write_interval = write_interval - self.output_dir = str(output_dir) - self.base_dir = self.output_dir # start out like this, but output_dir will be updated if files_per_subdir>0 - self.batch_dim_key_defaults = batch_dim_key_defaults - self.seq_dim_key_defaults = seq_dim_key_defaults - self.save_all_model_parallel_ranks = save_all_model_parallel_ranks - self.files_per_subdir = files_per_subdir - # Initialize to infinity if files_per_subdir is provided so that we create a new subdirectory before writing - # any files. - self.num_files_written = float("inf") if files_per_subdir else 0 - self.num_subdirs_written = 0 - - def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, *args, **kwargs) -> None: # noqa: D417 - """Invoked with Trainer.fit, validate, test, and predict are called. Will immediately fail when 'write_interval' is 'epoch' and 'trainer.num_devices' > 1. - - Args: - trainer: The Trainer instance. - pl_module: The LightningModule instance. - """ - if trainer.num_devices > 1 and self.write_interval == "epoch": - logger.warning( - "Multi-GPU predictions could result in shuffled inputs. Verify that the original indices are included " - "in the model's predictions as outputs are not ordered and batch indices do not track input order." - ) - - @staticmethod - def _assert_initialized(): - """Asserts that the environment is initialized.""" - if not ( - torch.distributed.is_available() and torch.distributed.is_initialized() and parallel_state.is_initialized() - ): - raise RuntimeError("This function is only defined within an initialized megatron parallel environment.") - - @property - def data_parallel_world_size(self) -> int: - """Returns the data parallel world size.""" - self._assert_initialized() - return torch.distributed.get_world_size(parallel_state.get_data_parallel_group(with_context_parallel=False)) - - @property - def data_parallel_rank(self) -> int: - """Returns the data parallel rank.""" - self._assert_initialized() - return torch.distributed.get_rank(parallel_state.get_data_parallel_group(with_context_parallel=False)) - - @property - def should_write_predictions(self) -> bool: - """Ensures that predictions are only written on TP/CP rank 0 and that it is the last stage of the pipeline.""" - self._assert_initialized() - if not parallel_state.is_pipeline_last_stage(): - return False - if self.save_all_model_parallel_ranks: - return True - # TODO: handle expert parallelism and other kinds of parallelism - return parallel_state.get_tensor_model_parallel_rank() == 0 and parallel_state.get_context_parallel_rank() == 0 - - @override - def write_on_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - prediction: Any, - batch_indices: Sequence[int] | None, - batch: Any, - batch_idx: int, - dataloader_idx: int, - ) -> None: - """Writes predictions to disk at the end of each batch. - - Predictions files follow the naming pattern, where rank is the active GPU in which the predictions were made. - predictions__rank_{rank}__batch_{batch_idx}.pt - - Args: - trainer: The Trainer instance. - pl_module: The LightningModule instance. - prediction: The prediction made by the model. - batch_indices: The indices of the batch. - batch: The batch data. - batch_idx: The index of the batch. - dataloader_idx: The index of the dataloader. - """ - # this will create N (num processes) files in `output_dir` each containing - # the predictions of it's respective rank - if self.should_write_predictions: - if ( - self.files_per_subdir is not None - and (self.num_files_written * self.data_parallel_world_size) >= self.files_per_subdir - ): - self.num_subdirs_written += 1 - self.output_dir = os.path.join(self.base_dir, f"subdir_{self.num_subdirs_written}") - os.makedirs(self.output_dir, exist_ok=True) - self.num_files_written = 0 - result_path = os.path.join( - self.output_dir, - f"predictions__rank_{trainer.global_rank}__dp_rank_{self.data_parallel_rank}__batch_{batch_idx}.pt", - ) - - # batch_indices is not captured due to a lightning bug when return_predictions = False - # we use input IDs in the prediction to map the result to input. - - # NOTE store the batch_idx so we do not need to rely on filenames for reconstruction of inputs. This is wrapped - # in a tensor and list container to ensure compatibility with batch_collator. - prediction["batch_idx"] = torch.tensor([batch_idx], dtype=torch.int64) - - torch.save(prediction, result_path) - logger.info(f"Inference predictions are stored in {result_path}\n{prediction.keys()}") - self.num_files_written += 1 - - @override - def write_on_epoch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - predictions: Any, - batch_indices: Sequence[int], - ) -> None: - """Writes predictions to disk at the end of each epoch. - - Writing all predictions on epoch end is memory intensive. It is recommended to use the batch writer instead for - large predictions. - - Multi-device predictions will likely yield predictions in an order that is inconsistent with single device predictions and the input data. - - Args: - trainer: The Trainer instance. - pl_module: The LightningModule instance. - predictions: The predictions made by the model. - batch_indices: The indices of the batch. - - Raises: - Multi-GPU predictions are output in an inconsistent order with multiple devices. - """ - # this will create N (num processes) files in `output_dir` each containing - # the predictions of it's respective rank - if self.should_write_predictions: - result_path = os.path.join( - self.output_dir, - f"predictions__rank_{trainer.global_rank}__dp_rank_{self.data_parallel_rank}.pt", - ) - - # collate multiple batches / ignore empty ones - collate_kwargs = {} - if self.batch_dim_key_defaults is not None: - collate_kwargs["batch_dim_key_defaults"] = self.batch_dim_key_defaults - if self.seq_dim_key_defaults is not None: - collate_kwargs["seq_dim_key_defaults"] = self.seq_dim_key_defaults - - prediction = batch_collator([item for item in predictions if item is not None], **collate_kwargs) - - # batch_indices is not captured due to a lightning bug when return_predictions = False - # we use input IDs in the prediction to map the result to input - if isinstance(prediction, dict): - keys = prediction.keys() - else: - keys = "tensor" - torch.save(prediction, result_path) - logger.info(f"Inference predictions are stored in {result_path}\n{keys}") diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/datamodule_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/datamodule_utils.py deleted file mode 100644 index 4e4cfdea81..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/datamodule_utils.py +++ /dev/null @@ -1,161 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Any, Dict, List, Union - - -def float_or_int_or_none(value: Union[str, float, int, None]) -> Union[float, int, None]: - """Converts a given value into a float, int, or None. - - Args: - value (Union[str, float, int, None]): A value that can be either a string, float, int, or None. - - Returns: - Union[float, int, None]: A float, int, or None based on the input value. - - If the input value is None or "None", it returns None. - If the input value is an int or float, it returns the same value. - If the input value is a string, it tries to convert it into an int if possible, otherwise into a float. - """ - if value is None or value == "None": - return - if isinstance(value, (int, float)): - return value - if value.isdigit(): - return int(value) - return float(value) - - -def parse_kwargs_to_arglist(kwargs: Dict[str, Any]) -> List[str]: - """Converts a dictionary of keyword arguments into a list of command-line arguments. - - Args: - kwargs (Dict[str, Any]): A dictionary where keys are argument names and values are argument values. - - Returns: - A list of strings, where each string is a command-line argument in the format '--argument-name value'. - """ - arglist = [] - for k, v in kwargs.items(): - arglist.extend([f"--{k.replace('_', '-')}", str(v)]) - return arglist - - -def infer_global_batch_size( - micro_batch_size: int, - num_nodes: int, - devices: int, - accumulate_grad_batches: int = 1, - tensor_model_parallel_size: int = 1, - pipeline_model_parallel_size: int = 1, - context_model_parallel_size: int = 1, -) -> int: - """Infers the global batch size based on the micro batch size, number of nodes, devices, accumulation of gradient batches, and model parallel sizes. - - Args: - micro_batch_size (int): The micro batch size. - num_nodes (int): The number of nodes. - devices (int): The number of devices. - accumulate_grad_batches (int): The accumulation of gradient batches. Defaults to 1. - tensor_model_parallel_size (int): The tensor model parallel size. Defaults to 1. - pipeline_model_parallel_size (int): The pipeline model parallel size. Defaults to 1. - context_model_parallel_size (int): The context model parallel size. Defaults to 1. - - Returns: - int: The global batch size. - """ - if not all( - isinstance(arg, int) - for arg in [ - micro_batch_size, - num_nodes, - devices, - accumulate_grad_batches, - tensor_model_parallel_size, - pipeline_model_parallel_size, - context_model_parallel_size, - ] - ): - raise ValueError( - f"All arguments must be of type int, got {type(micro_batch_size)}, {type(num_nodes)}, {type(devices)}, " - f"{type(accumulate_grad_batches)}, {type(tensor_model_parallel_size)}, {type(pipeline_model_parallel_size)}, and {type(context_model_parallel_size)}" - ) - if micro_batch_size <= 0: - raise ValueError(f"micro_batch_size must be greater than 0, got {micro_batch_size}") - if num_nodes <= 0: - raise ValueError(f"num_nodes must be greater than 0, got {num_nodes}") - if devices <= 0: - raise ValueError(f"devices must be greater than 0, got {devices}") - if accumulate_grad_batches <= 0: - raise ValueError(f"accumulate_grad_batches must be greater than 0, got {accumulate_grad_batches}") - if tensor_model_parallel_size <= 0: - raise ValueError(f"tensor_model_parallel_size must be greater than 0, got {tensor_model_parallel_size}") - if pipeline_model_parallel_size <= 0: - raise ValueError(f"pipeline_model_parallel_size must be greater than 0, got {pipeline_model_parallel_size}") - if context_model_parallel_size <= 0: - raise ValueError(f"context_model_parallel_size must be greater than 0, got {context_model_parallel_size}") - - world_size = num_nodes * devices - if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size * context_model_parallel_size) != 0: - raise ValueError( - f"world_size must be divisible by tensor_model_parallel_size * pipeline_model_parallel_size * context_model_parallel_size, " - f"got {world_size} and TP{tensor_model_parallel_size} * PP{pipeline_model_parallel_size} * CP{context_model_parallel_size}" - ) - - model_parallel_size = tensor_model_parallel_size * pipeline_model_parallel_size * context_model_parallel_size - data_parallel_size = world_size // model_parallel_size - global_batch_size = micro_batch_size * data_parallel_size * accumulate_grad_batches - return global_batch_size - - -def infer_num_samples( - limit_batches: Union[float, int, str, None], num_samples_in_dataset: int, global_batch_size: int, stage: str -): - """Infers the number of samples based on the limit_batches parameter, the length of the dataset, and the global batch size. - - Args: - limit_batches (Union[float, int, str, None]): The limit on the number of batches. Can be a float - between 0 and 1, an integer, a string, or None. If None, defaults to 1.0. - num_samples_in_dataset (int): The number of samples in the dataset. - global_batch_size (int): The global batch size. - stage (str): The stage of the training. - - Returns: - int: The number of samples from the limit. - - Raises: - ValueError: If the limited number of samples is less than the global batch size, or if the - limit_batches parameter is invalid. - - If limit_batches is a float between 0 and 1, the number of samples is inferred as a fraction of the number of samples - in the dataset. If limit_batches is an integer greater than or equal to 1, the number of limited samples is inferred - as the product of limit_batches and global batch size. If limit_batches is None, it defaults to 1.0, indicating that - all dataset samples should be used. - """ - limit_batches = 1.0 if limit_batches is None else limit_batches # validation data does not require upsampling - if 0 < limit_batches <= 1.0 and isinstance(limit_batches, float): - num_limited_samples = int(num_samples_in_dataset * limit_batches) - if num_limited_samples < global_batch_size: - raise ValueError( - "The limited number of %s samples %s is less than the global batch size %s" - % (stage, num_limited_samples, global_batch_size) - ) - elif limit_batches >= 1 and isinstance(limit_batches, int): - num_limited_samples = int(limit_batches * global_batch_size) - else: - raise ValueError("Invalid choice of limit_%s_batches size: %s" % (stage, limit_batches)) - - return num_limited_samples diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/iomixin_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/iomixin_utils.py deleted file mode 100644 index ed88ca272a..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/iomixin_utils.py +++ /dev/null @@ -1,134 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from abc import ABC, abstractmethod -from typing import Any, Dict, List - -from nemo.lightning import io - - -class WillHaveGetSetHparam(ABC): - """An ABC that states that a particular class _will_ have our mutatable IO Mixin variant added to it. - - This is a placeholder until a similar piece of functionality is added in NeMo. - - - Raises: - NotImplementedError: You must implement set_hparam, get_hparam, and get_hparams - """ - - @abstractmethod - def set_hparam(self, attribute: str, value: Any, also_change_value: bool = True) -> None: - """Mutates the saved hyper-parameter for the io mixed class. - - If you would like to only change the saved hyper-param - for example in the case of loading a dataclass where the same variables are mutated to other non-savable - entities by deterministic rules after init, then use `also_change_value=False` to only update the - hyper-parameter. - - Args: - attribute: The element name to modify within the saved init settings for self - value: New parameter for the saved init settings - also_change_value: If you also want to mutate the attribute of this same name in self to be the desired - value, set this to True, otherwise if the init arg and self arg are expected to be divergent, then - do not set this and modify the self attribute separately in the normal pythonic way. - - Returns: - None. - """ - raise NotImplementedError() - - @abstractmethod - def get_hparam(self, attribute: str) -> Any: - """Looks up the saved hyper-parameter for the io mixed class. - - Args: - attribute: The element name to look up within the saved init settings for self - Returns: - Value - Raises: - KeyError if the attribute does not exist in the saved init settings. - """ - raise NotImplementedError() - - @abstractmethod - def get_hparams(self) -> Dict[str, Any]: - """Returns the hyper-parameters of init in a dictionary format. - - Returns: - Dict[str, Any]: A dictionary of the init hyper-parameters on this object. - """ - raise NotImplementedError() - - -class IOMixinWithGettersSetters(WillHaveGetSetHparam, io.IOMixin): - """An implementation of WillHaveGetSetHparam which makes use of the io.IOMixin.__io__ added to your classes. - - This enables you to mutate the hyper-parameters of your classes which will later be saved in configs. - """ - - def set_hparam(self, attribute: str, value: Any, also_change_value: bool = True) -> None: - """Mutates the saved hyper-parameter for the io mixed class. - - If you would like to only change the saved hyper-param - for example in the case of loading a dataclass where the same variables are mutated to other non-savable - entities by deterministic rules after init, then use `also_change_value=False` to only update the - hyper-parameter. - - Args: - attribute: The element name to modify within the saved init settings for self - value: New parameter for the saved init settings - also_change_value: If you also want to mutate the attribute of this same name in self to be the desired - value, set this to True, otherwise if the init arg and self arg are expected to be divergent, then - do not set this and modify the self attribute separately in the normal pythonic way. - - Returns: - None. - """ - # Change the attribute of self and also change the io tracker so it gets updated in the config - if also_change_value: - setattr(self, attribute, value) - setattr(self.__io__, attribute, value) - - def get_hparam(self, attribute: str) -> Any: - """Looks up the saved hyper-parameter for the io mixed class. - - Args: - attribute: The element name to look up within the saved init settings for self - Returns: - Value - Raises: - KeyError if the attribute does not exist in the saved init settings. - """ - if attribute not in dir(self.__io__): - raise KeyError( - f"Attribute '{attribute}' not found in hyper-parameters. Options: {sorted(self.get_hparams().keys())}" - ) - return getattr(self.__io__, attribute) - - def get_non_default_hparams(self) -> List[str]: - """Returns a list of hyper-parameters that have been changed from their default values. - - Returns: - List[str]: A list of hyper-parameters that have been changed from their default values. - """ - return [k for k in self.__io__.__dict__["__argument_history__"].keys() if k != "__fn_or_cls__"] - - def get_hparams(self) -> Dict[str, Any]: - """Returns the hyper-parameters of init in a dictionary format. - - Returns: - Dict[str, Any]: A dictionary of the init hyper-parameters on this object. - """ - return {k: getattr(self.__io__, k) for k in self.get_non_default_hparams()} diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/logger_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/logger_utils.py deleted file mode 100644 index 29cd4d3126..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/logger_utils.py +++ /dev/null @@ -1,114 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import pathlib -from typing import Any, Dict, List, Optional, Sequence - -from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger -from nemo.lightning.nemo_logger import NeMoLogger -from nemo.lightning.pytorch import callbacks as nemo_callbacks -from nemo.utils import logging -from pydantic import BaseModel - - -__all__: Sequence[str] = ( - "WandbConfig", - "setup_nemo_lightning_logger", -) - - -class WandbConfig(BaseModel): - """Note: `name` controls the exp name is handled by the NeMoLogger so it is ommitted here. - `directory` is also omitted since it is set by the NeMoLogger. - - Args: - entity: The team posting this run (default: your username or your default team) - project: The name of the project to which this run will belong. - name: Display name for the run. By default it is set by NeMoLogger to experiment name - tags: Tags associated with this run. - group: A unique string shared by all runs in a given group - job_type: Type of run, which is useful when you're grouping runs together into larger experiments. - offline: Run offline (data can be streamed later to wandb servers). - id: Sets the version, mainly used to resume a previous run. - anonymous: Enables or explicitly disables anonymous logging. - """ # noqa: D205 - - entity: str | None # The team posting this run (default: your username or your default team) - project: str # The name of the project to which this run will belong. - name: str | None = None # Display name for the run. By default, it is set by NeMoLogger to experiment name - # save_dir: #Path where data is saved. "This is handled by NeMoLogger" - tags: List[str] | None # Tags associated with this run. - group: str | None # A unique string shared by all runs in a given group. - job_type: str | None = ( - None # Type of run, which is useful when you're grouping runs together into larger experiments. - ) - offline: bool # Run offline (data can be streamed later to wandb servers). - id: str | None # Sets the version, mainly used to resume a previous run. - anonymous: bool # Enables or explicitly disables anonymous logging. - log_model: bool # Save checkpoints in wandb dir to upload on W&B servers. - - -def setup_nemo_lightning_logger( - name: str | None = None, - root_dir: str | pathlib.Path = "./results", - initialize_tensorboard_logger: bool = False, - wandb_config: Optional[WandbConfig] = None, - ckpt_callback: Optional[nemo_callbacks.ModelCheckpoint] = None, - **kwargs: Dict[str, Any], -) -> NeMoLogger: - """Setup the logger for the experiment. - - Arguments: - name: The name of the experiment. Results go into `root_dir`/`name` - root_dir: The root directory to create the `name` directory in for saving run results. - initialize_tensorboard_logger: Whether to initialize the tensorboard logger. - wandb_config: The remaining configuration options for the wandb logger. - ckpt_callback: The checkpoint callback to use, must be a child of the pytorch lightning ModelCheckpoint callback. - NOTE the type annotation in the underlying NeMoCheckpoint constructor is incorrect. - **kwargs: The kwargs for the NeMoLogger. - - Returns: - NeMoLogger: NeMo logger instance. - """ - # The directory that the logger will save to - save_dir = pathlib.Path(root_dir) / name - save_dir.mkdir(parents=True, exist_ok=True) - - version = "dev" - if wandb_config is not None: - if wandb_config.name is None: - wandb_config.name = name - wandb_logger = WandbLogger(save_dir=save_dir, **wandb_config.model_dump()) - else: - wandb_logger = None - logging.warning("WandB is currently turned off.") - if initialize_tensorboard_logger: - tb_logger = TensorBoardLogger(save_dir=root_dir, name=name, version=version) - else: - tb_logger = None - logging.warning("User-set tensorboard is currently turned off. Internally one may still be set by NeMo2.") - - logger: NeMoLogger = NeMoLogger( - name=name, - log_dir=str(root_dir), - tensorboard=tb_logger, - wandb=wandb_logger, - ckpt=ckpt_callback, - version=version, - update_logger_directory=False, - **kwargs, - ) - # Needed so that the trainer can find an output directory for the profiler - logger.save_dir = save_dir - return logger diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/megatron_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/megatron_utils.py deleted file mode 100644 index c538f65295..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/megatron_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch.distributed -from megatron.core import parallel_state - - -def is_only_data_parallel() -> bool: - """Checks to see if you are in a distributed megatron environment with only data parallelism active. - - This is useful if you are working on a model, loss, etc and you know that you do not yet support megatron model - parallelism. You can test that the only kind of parallelism in use is data parallelism. - - Returns: - True if data parallel is the only parallel mode, False otherwise. - """ - if not (torch.distributed.is_available() and parallel_state.is_initialized()): - raise RuntimeError("This function is only defined within an initialized megatron parallel environment.") - # Idea: when world_size == data_parallel_world_size, then you know that you are fully DDP, which means you are not - # using model parallelism (meaning virtual GPUs composed of several underlying GPUs that you need to reduce over). - - world_size: int = torch.distributed.get_world_size() - dp_world_size: int = parallel_state.get_data_parallel_world_size() - return world_size == dp_world_size - - -def average_losses_across_data_parallel_group(losses, with_context_parallel: bool = False): - """Reduce a tensor of losses across all GPUs.""" - averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses]) - # Reduce across the DP (or optionally, the flattened DP + CP) group. - # Refer to the ring attention algorithm on why we always must reduce across the CP group. - torch.distributed.all_reduce( - averaged_losses, group=parallel_state.get_data_parallel_group(with_context_parallel=with_context_parallel) - ) - averaged_losses = averaged_losses / torch.distributed.get_world_size( - # Only average losses across the data parallel group, not the context parallel group! - group=parallel_state.get_data_parallel_group() - ) - return averaged_losses diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/remote.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/remote.py deleted file mode 100644 index 81b896eb52..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/remote.py +++ /dev/null @@ -1,157 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -from dataclasses import dataclass -from hashlib import md5 -from pathlib import Path -from typing import Optional, Sequence -from urllib import request - -import requests -from nemo.utils import logging - -from bionemo.core import BIONEMO_CACHE_DIR - - -__all__: Sequence[str] = ( - "FTPRemoteResource", - "RemoteResource", -) - - -@dataclass -class RemoteResource: - """Responsible for downloading remote files, along with optional processing of downloaded files for downstream usecases. - - Each object is invoked through either its constructor (setting up the destination and checksum), or through a pre-configured class method. - `download_resource()` contains the core functionality, which is to download the file at `url` to the fully qualified filename. Class methods - can be used to further configure this process. - - Receive: - a file, its checksum, a destination directory, and a root directory - - Our dataclass then provides some useful things: - - fully qualified destination folder (property) - - fully qualified destination file (property) - - check_exists() - - download_resource() - - Form the fully qualified destination folder. - Create a fully qualified path for the file - - (all lives in the download routine) - Check that the fq destination folder exists, otherwise create it - Download the file. - Checksum the download. - Done. - - Postprocessing should be their own method with their own configuration. - - Example usage: - >>> # The following will download and preprocess the prepackaged resources. - >>> GRCh38Ensembl99ResourcePreparer().prepare() - >>> Hg38chromResourcePreparer().prepare() - >>> GRCh38p13_ResourcePreparer().prepare() - - - Attributes: - dest_directory: The directory to place the desired file upon completing the download. Should have the form {dest_directory}/{dest_filename} - dest_filename: The desired name for the file upon completing the download. - checksum: checksum associated with the file located at url. If set to None, check_exists only checks for the existance of `{dest_directory}/{dest_filename}` - url: URL of the file to download - root_directory: the bottom-level directory, the fully qualified path is formed by joining root_directory, dest_directory, and dest_filename. - """ - - checksum: Optional[str] - dest_filename: str - dest_directory: str - root_directory: str | os.PathLike = BIONEMO_CACHE_DIR - url: Optional[str] = None - - @property - def fully_qualified_dest_folder(self): # noqa: D102 - return Path(self.root_directory) / self.dest_directory - - @property - def fully_qualified_dest_filename(self): - """Returns the fully qualified destination path of the file. - - Example: - /tmp/my_folder/file.tar.gz - """ - return os.path.join(self.fully_qualified_dest_folder, self.dest_filename) - - def exists_or_create_destination_directory(self, exist_ok=True): - """Checks that the `fully_qualified_destination_directory` exists, if it does not, the directory is created (or fails). - - exists_ok: Triest to create `fully_qualified_dest_folder` if it doesnt already exist. - """ - os.makedirs(self.fully_qualified_dest_folder, exist_ok=exist_ok) - - @staticmethod - def get_env_tmpdir(): - """Convenience method that exposes the environment TMPDIR variable.""" - return os.environ.get("TMPDIR", "/tmp") - - def download_resource(self, overwrite=False) -> str: - """Downloads the resource to its specified fully_qualified_dest name. - - Returns: the fully qualified destination filename. - """ - self.exists_or_create_destination_directory() - - if not self.check_exists() or overwrite: - logging.info(f"Downloading resource: {self.url}") - with requests.get(self.url, stream=True) as r, open(self.fully_qualified_dest_filename, "wb") as fd: - r.raise_for_status() - for bytes in r: - fd.write(bytes) - else: - logging.info(f"Resource already exists, skipping download: {self.url}") - - self.check_exists() - return self.fully_qualified_dest_filename - - def check_exists(self): - """Returns true if `fully_qualified_dest_filename` exists and the checksum matches `self.checksum`""" # noqa: D415 - if os.path.exists(self.fully_qualified_dest_filename): - with open(self.fully_qualified_dest_filename, "rb") as fd: - data = fd.read() - result = md5(data).hexdigest() - if self.checksum is None: - logging.info("No checksum provided, filename exists. Assuming it is complete.") - matches = True - else: - matches = result == self.checksum - return matches - - return False - - -class FTPRemoteResource(RemoteResource): # noqa: D101 - def download_resource(self, overwrite=False) -> str: - """Downloads the resource to its specified fully_qualified_dest name. - - Returns: the fully qualified destination filename. - """ - self.exists_or_create_destination_directory() - - if not self.check_exists() or overwrite: - request.urlretrieve(self.url, self.fully_qualified_dest_filename) - - self.check_exists() - return self.fully_qualified_dest_filename diff --git a/sub-packages/bionemo-llm/src/bionemo/llm/utils/weight_utils.py b/sub-packages/bionemo-llm/src/bionemo/llm/utils/weight_utils.py deleted file mode 100644 index a13fa85e67..0000000000 --- a/sub-packages/bionemo-llm/src/bionemo/llm/utils/weight_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path -from typing import Sequence, Set - -from megatron.core import dist_checkpointing -from megatron.core.dist_checkpointing.mapping import ShardedTensor - -from bionemo.llm.api import MegatronModelType - - -__all__: Sequence[str] = ( - "load_weights_sharded_inplace_nemo2_to_mcore", - "nemo1_to_nemo2_biobert_key_mapping", -) - - -def nemo1_to_nemo2_biobert_key_mapping( # noqa: D417 - old_key: str, - new_model_prefix: str = "module", - old_model_prefix: str = "model", - te_mapping: bool = False, -) -> str: - """This function is used to map the keys from the old nemo BERT models to the new BioBERT models - - Args: - old_key (str): old key we want to map to the expected new key name. - new_model_prefix (str, optional): The new key for the base weights. - If you point this at the core megatron model set it to "". - For the regular nemo2 lightning module following standards, set it to "module". - Defaults to "module". - old_model_prefix (str, optional): The previous saved weight prefix. Defaults to "model" which was the standard in nemo1. - - Returns: - str: New key name - """ # noqa: D415 - # add the . to the end of the input prefixes if they are not the empty string, - # unless the user has already done so. - if old_model_prefix != "": - old_model_prefix = f"{old_model_prefix.rstrip('.')}." - if new_model_prefix != "": - new_model_prefix = f"{new_model_prefix.rstrip('.')}." - - # This function is used to map the keys from the old nemo BERT models to the new BioBERT models - base_rename = old_key.replace(f"{old_model_prefix}language_model.", f"{new_model_prefix}") - base_rename = base_rename.replace(f"{old_model_prefix}", f"{new_model_prefix}") - if "dense_h_to_4h" in base_rename: - return base_rename.replace("dense_h_to_4h", "linear_fc1") - if "dense_4h_to_h" in base_rename: - return base_rename.replace("dense_4h_to_h", "linear_fc2") - if "query_key_value" in base_rename: - return base_rename.replace("query_key_value", "linear_qkv") - if "self_attention.dense" in base_rename: - # This is definitely the linear_proj and not the qkv. The linear_proj shapes are 256x256 - # which match dense but not query_key_value - # (Pdb) new_state_dict['encoder.layers.4.self_attention.linear_proj.weight'].shape - # torch.Size([256, 256]) - # (Pdb) new_state_dict['encoder.layers.4.self_attention.linear_qkv.weight'].shape - # torch.Size([768, 256]) - # (Pdb) new_state_dict['encoder.layers.4.self_attention.linear_qkv.bias'].shape - # torch.Size([768]) - return base_rename.replace("self_attention.dense", "self_attention.linear_proj") - if "lm_head.bias" in base_rename: - return base_rename.replace("lm_head.bias", "output_layer.bias") - if "lm_head.weight" in base_rename: - return base_rename.replace("lm_head.weight", "output_layer.weight") - if "lm_head.layernorm" in base_rename: - return base_rename.replace("lm_head.layernorm", "lm_head.layer_norm") - - if "post_attention_layernorm" in base_rename: - base_rename = base_rename.replace("post_attention_layernorm", "pre_mlp_layernorm") - - # Handle the transformer engine spec's differences in layer naming and where things like layernorm are stored. - # TE moves layernorm from an object that's part of the main attention layer to being an internal component of - # the linear layers, probably for efficiency/fusion of some sort. - if te_mapping: - if ".input_layernorm.weight" in base_rename: - return base_rename.replace(".input_layernorm.weight", ".self_attention.linear_qkv.layer_norm_weight") - if ".input_layernorm.bias" in base_rename: - return base_rename.replace(".input_layernorm.bias", ".self_attention.linear_qkv.layer_norm_bias") - if ".pre_mlp_layernorm.bias" in base_rename: - return base_rename.replace(".pre_mlp_layernorm.bias", ".mlp.linear_fc1.layer_norm_bias") - if ".pre_mlp_layernorm.weight" in base_rename: - return base_rename.replace(".pre_mlp_layernorm.weight", ".mlp.linear_fc1.layer_norm_weight") - return base_rename - - -############################################################################################# -# Core utility functions: Below are some utility functions that allow for loading a nemo2 -# trained model back into a newly initialized megatron core model. The key insight is that -# the nemo2 lightning module owns a single `self.module = config.configure_model(...)` -# object. This `config.configure_module(...)` object is the megatron model that we want -# to load weights into. So we need to adjust the checkpoint keys since they will all -# have the extra `module.` prefix on them, while the megatron model we just initialized -# will not. These functions should make a wide variety of fine-tuning strategies doable. - - -def _munge_key_megatron_to_nemo2(k: str) -> str: - return f"module.{k}" - - -def _munge_sharded_tensor_key_megatron_to_nemo2(v: ShardedTensor) -> ShardedTensor: - # This works with PP=1, how do we handle PP>1? - key = v.key - v.key = _munge_key_megatron_to_nemo2(key) - return v - - -def _key_in_filter(k: str, filter: Set[str]) -> bool: - for prefix in filter: - if k.startswith(prefix): - return True - return False - - -def load_weights_sharded_inplace_nemo2_to_mcore( - model: MegatronModelType, distributed_checkpoint_dir: str | Path, skip_keys_with_these_prefixes: Set[str] -) -> None: - """Given a megatron module, this function will determine which keys/subsets of weights to load given the - parallel/distributed state. This operates assuming a checkpoint was saved by a nemo2 trainer which places - the `module.` prefix on all key names, but we are then going to load directly in to the megatron module - without the `module.` prefix. Note that if there are any _extra_ keys that you do not want to search the - checkpoint for, for example if you add new layers/heads onto your module, you need to supply the prefix - path to those keys in your model and they will be ignored. This latter feature is key for flexible fine-tuning - strategies where you load weights partially from other models with partially overlapping structures. - - Args: - model: Megatron model that you want to load weights into. - distributed_checkpoint_dir: _description_ - skip_keys_with_these_prefixes: _description_ - """ # noqa: D205 - sharded_state_dict = { - _munge_key_megatron_to_nemo2(k): _munge_sharded_tensor_key_megatron_to_nemo2(v) - for k, v in model.sharded_state_dict().items() - if not _key_in_filter(k, skip_keys_with_these_prefixes) and "_extra_state" not in k - } - dist_checkpointing.load( - sharded_state_dict=sharded_state_dict, - checkpoint_dir=str(Path(distributed_checkpoint_dir) / "weights"), - strict=dist_checkpointing.serialization.StrictHandling.ASSUME_OK_UNEXPECTED, - ) diff --git a/sub-packages/bionemo-llm/tests/__init__.py b/sub-packages/bionemo-llm/tests/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/tests/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/__init__.py b/sub-packages/bionemo-llm/tests/bionemo/llm/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_collate.py b/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_collate.py deleted file mode 100644 index d06aff3696..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_collate.py +++ /dev/null @@ -1,209 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pytest -import torch - -from bionemo.llm.data.collate import bert_padding_collate_fn, padding_collate_fn - - -def test_padding_collate_fn(): - sample1 = { - "my_key": torch.tensor([1, 2, 3]), - } - sample2 = { - "my_key": torch.tensor([4, 5, 6, 7, 8]), - } - batch = [sample1, sample2] - collated_batch = padding_collate_fn(batch, padding_values={"my_key": -1}) - - assert torch.all(torch.eq(collated_batch["my_key"], torch.tensor([[1, 2, 3, -1, -1], [4, 5, 6, 7, 8]]))) - - -def test_padding_collate_with_missing_keys_raises(caplog): - sample1 = { - "my_key": torch.tensor([1, 2, 3]), - } - sample2 = { - "my_key": torch.tensor([4, 5, 6, 7, 8]), - "other_key": torch.tensor([1, 2, 3]), - } - batch = [sample1, sample2] - with pytest.raises(ValueError, match="All keys in inputs must match each other."): - padding_collate_fn(batch, padding_values={"my_key": -1}) - - -def test_padding_collate_with_mismatched_padding_values_warns(caplog): - sample1 = { - "my_key": torch.tensor([1, 2, 3]), - "other_key": torch.tensor([1, 2, 3, 4]), - } - sample2 = { - "my_key": torch.tensor([4, 5, 6, 7, 8]), - "other_key": torch.tensor([1, 2, 3]), - } - batch = [sample1, sample2] - - padding_collate_fn(batch, padding_values={"my_key": -1, "other_key": -1, "missing_key": 3}) - # Call 2x and check that we logged once - padding_collate_fn(batch, padding_values={"my_key": -1, "other_key": -1, "missing_key": 3}) - log_lines = caplog.text.strip("\n").split("\n") - assert len(log_lines) == 1, f"Expected one line, got: {log_lines}" - assert log_lines[0].endswith( - "Extra keys in batch that will not be padded: set(). Missing keys in batch: {'missing_key'}" - ) - assert log_lines[0].startswith("WARNING") - - -def test_bert_padding_collate_fn(): - # Create sample data - sample1 = { - "text": torch.tensor([1, 2, 3]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, False]), - "labels": torch.tensor([7, 8, 9]), - "loss_mask": torch.tensor([True, False, True]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - sample2 = { - "text": torch.tensor([10, 11, 12]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, False, True]), - "labels": torch.tensor([16, 17, 18]), - "loss_mask": torch.tensor([False, True, False]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - batch = [sample1, sample2] - - # Call the collate_fn - collated_batch = bert_padding_collate_fn(batch, padding_value=-1) - - # Assert the expected output - assert torch.all(torch.eq(collated_batch["text"], torch.tensor([[1, 2, 3], [10, 11, 12]]))) - assert torch.all(torch.eq(collated_batch["types"], torch.tensor([[0, 0, 0], [0, 0, 0]]))) - assert torch.all( - torch.eq(collated_batch["attention_mask"], torch.tensor([[True, True, False], [True, False, True]])) - ) - assert torch.all(torch.eq(collated_batch["labels"], torch.tensor([[7, 8, 9], [16, 17, 18]]))) - assert torch.all(torch.eq(collated_batch["loss_mask"], torch.tensor([[True, False, True], [False, True, False]]))) - assert torch.all(torch.eq(collated_batch["is_random"], torch.tensor([[0, 0, 0], [0, 0, 0]]))) - - -def test_bert_padding_collate_fn_with_padding(): - # Create sample data - sample1 = { - "text": torch.tensor([1, 2, 3]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, False]), - "labels": torch.tensor([7, 8, 9]), - "loss_mask": torch.tensor([True, False, True]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - sample2 = { - "text": torch.tensor([4, 5, 6, 7, 8]), - "types": torch.zeros((5,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, True, True, True]), - "labels": torch.tensor([-100, 5, -100, 7, 8]), - "loss_mask": torch.tensor([False, True, False, True, True]), - "is_random": torch.zeros((5,), dtype=torch.int64), - } - batch = [sample1, sample2] - - # Call the collate_fn - collated_batch = bert_padding_collate_fn(batch, padding_value=10) - - # Assert the expected output - assert torch.all(torch.eq(collated_batch["text"], torch.tensor([[1, 2, 3, 10, 10], [4, 5, 6, 7, 8]]))) - assert torch.all(torch.eq(collated_batch["types"], torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))) - assert torch.all( - torch.eq( - collated_batch["attention_mask"], - torch.tensor([[True, True, False, False, False], [True, True, True, True, True]]), - ) - ) - assert torch.all(torch.eq(collated_batch["labels"], torch.tensor([[7, 8, 9, -100, -100], [-100, 5, -100, 7, 8]]))) - assert torch.all( - torch.eq( - collated_batch["loss_mask"], - torch.tensor([[True, False, True, False, False], [False, True, False, True, True]]), - ) - ) - assert torch.all(torch.eq(collated_batch["is_random"], torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))) - - -def test_bert_padding_collate_fn_with_max_length_truncates(): - # Create sample data - sample1 = { - "text": torch.tensor([1, 2, 3]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, False]), - "labels": torch.tensor([7, 8, 9]), - "loss_mask": torch.tensor([True, False, True]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - sample2 = { - "text": torch.tensor([4, 5, 6, 7, 8]), - "types": torch.zeros((5,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, True, True, True]), - "labels": torch.tensor([-100, 5, -100, 7, 8]), - "loss_mask": torch.tensor([False, True, False, True, True]), - "is_random": torch.zeros((5,), dtype=torch.int64), - } - batch = [sample1, sample2] - - # Call the collate_fn - collated_batch = bert_padding_collate_fn(batch, padding_value=10, max_length=4) - - # Assert the expected output - assert torch.all(torch.eq(collated_batch["text"], torch.tensor([[1, 2, 3, 10], [4, 5, 6, 7]]))) - assert torch.all(torch.eq(collated_batch["types"], torch.tensor([[0, 0, 0, 0], [0, 0, 0, 0]]))) - assert torch.all( - torch.eq( - collated_batch["attention_mask"], torch.tensor([[True, True, False, False], [True, True, True, True]]) - ) - ) - assert torch.all(torch.eq(collated_batch["labels"], torch.tensor([[7, 8, 9, -100], [-100, 5, -100, 7]]))) - assert torch.all( - torch.eq(collated_batch["loss_mask"], torch.tensor([[True, False, True, False], [False, True, False, True]])) - ) - assert torch.all(torch.eq(collated_batch["is_random"], torch.tensor([[0, 0, 0, 0], [0, 0, 0, 0]]))) - - -def test_bert_padding_collate_fn_with_min_length_pads_extra(): - # Create sample data - sample1 = { - "text": torch.tensor([1, 2, 3]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, True, False]), - "labels": torch.tensor([7, 8, 9]), - "loss_mask": torch.tensor([True, False, True]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - sample2 = { - "text": torch.tensor([10, 11, 12]), - "types": torch.zeros((3,), dtype=torch.int64), - "attention_mask": torch.tensor([True, False, True]), - "labels": torch.tensor([16, 17, 18]), - "loss_mask": torch.tensor([False, True, False]), - "is_random": torch.zeros((3,), dtype=torch.int64), - } - batch = [sample1, sample2] - - # Call the collate_fn - collated_batch = bert_padding_collate_fn(batch, padding_value=-1, min_length=5) - assert torch.all(torch.eq(collated_batch["text"], torch.tensor([[1, 2, 3, -1, -1], [10, 11, 12, -1, -1]]))) - for val in collated_batch.values(): - assert val.size(1) == 5 diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_datamodule.py b/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_datamodule.py deleted file mode 100644 index 362471333a..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_datamodule.py +++ /dev/null @@ -1,204 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -import pytest -import torch -from torch.utils.data import DataLoader, Dataset - -from bionemo.llm.data.datamodule import MockDataModule - - -class SimpleTokenizedDataset(Dataset): - def __init__(self, sequences): - self.sequences = sequences - - def __len__(self): - return len(self.sequences) - - def __getitem__(self, idx): - return {"text": torch.tensor(self.sequences[idx])} - - -@pytest.fixture -def example_sequences(): - return [ - [1, 2, 3], # Short sequence - [4, 5, 6, 7, 8], # Medium sequence - [9, 10, 11, 12, 13, 14, 15], # Long sequence - ] - - -@pytest.fixture -def tokenized_dataset(example_sequences): - return SimpleTokenizedDataset(example_sequences) - - -def test_train_dataloader_creation(tokenized_dataset): - datamodule = MockDataModule( - train_dataset=tokenized_dataset, - micro_batch_size=2, - global_batch_size=2, - ) - - datamodule.trainer = mock.Mock() - datamodule.trainer.global_step = 0 - - train_dataloader = datamodule.train_dataloader() - - with ( - mock.patch("megatron.core.parallel_state.get_data_parallel_rank", return_value=0), - mock.patch("megatron.core.parallel_state.get_data_parallel_world_size", return_value=1), - ): - train_dataloader = datamodule.data_sampler.transform_dataloader(train_dataloader) - - assert isinstance(train_dataloader, DataLoader) - - # The training dataloader will drop the final batch with uneven sizes - batches = list(train_dataloader) - assert len(batches) == 1 - assert len(batches[0]["text"]) == 2 - - -def test_padding_and_truncation(example_sequences): - dataset = SimpleTokenizedDataset(example_sequences) - datamodule = MockDataModule( - train_dataset=dataset, - pad_token_id=0, - min_seq_length=4, - max_seq_length=6, - micro_batch_size=3, # Process all sequences in one batch to observe padding - ) - - datamodule.trainer = mock.Mock() - datamodule.trainer.global_step = 0 - - train_loader = datamodule.train_dataloader() - batch = next(iter(train_loader)) - - # Get the padded sequences tensor - padded_sequences = batch["text"] - - # Check tensor shape and properties - assert padded_sequences.shape[0] == 3 # Batch size - assert padded_sequences.shape[1] >= 4 # At least min_length - assert padded_sequences.shape[1] <= 6 # At most max_length - - -def test_validation_dataloader(tokenized_dataset): - datamodule = MockDataModule( - valid_dataset=tokenized_dataset, - micro_batch_size=2, - global_batch_size=2, - ) - - datamodule.trainer = mock.Mock() - datamodule.trainer.global_step = 0 - - val_loader = datamodule.val_dataloader() - - with ( - mock.patch("megatron.core.parallel_state.get_data_parallel_rank", return_value=0), - mock.patch("megatron.core.parallel_state.get_data_parallel_world_size", return_value=1), - ): - val_loader = datamodule.data_sampler.transform_dataloader(val_loader) - - assert isinstance(val_loader, DataLoader) - - # The validation dataloader will drop the final batch with uneven sizes - batches = list(val_loader) - assert len(batches) == 1 - assert len(batches[0]["text"]) == 2 - - -def test_test_dataloader(tokenized_dataset): - datamodule = MockDataModule( - test_dataset=tokenized_dataset, - micro_batch_size=2, - global_batch_size=2, - ) - test_loader = datamodule.test_dataloader() - - with ( - mock.patch("megatron.core.parallel_state.get_data_parallel_rank", return_value=0), - mock.patch("megatron.core.parallel_state.get_data_parallel_world_size", return_value=1), - ): - test_loader = datamodule.data_sampler.transform_dataloader(test_loader) - - assert isinstance(test_loader, DataLoader) - - # Validate that all samples are seen with uneven batch sizes - batches = list(test_loader) - assert len(batches) == 2 - assert len(batches[0]["text"]) == 2 - assert len(batches[1]["text"]) == 1 - - -def test_predict_dataloader(tokenized_dataset): - datamodule = MockDataModule( - predict_dataset=tokenized_dataset, - micro_batch_size=2, - global_batch_size=2, - ) - predict_loader = datamodule.predict_dataloader() - - with ( - mock.patch("megatron.core.parallel_state.get_data_parallel_rank", return_value=0), - mock.patch("megatron.core.parallel_state.get_data_parallel_world_size", return_value=1), - ): - predict_loader = datamodule.data_sampler.transform_dataloader(predict_loader) - - assert isinstance(predict_loader, DataLoader) - - # Validate that all samples are seen with uneven batch sizes - batches = list(predict_loader) - assert len(batches) == 2 - assert len(batches[0]["text"]) == 2 - assert len(batches[1]["text"]) == 1 - - -def test_missing_datasets(): - datamodule = MockDataModule() - - with pytest.raises(ValueError, match="No train_dataset was provided"): - datamodule.train_dataloader() - - with pytest.raises(ValueError, match="No valid_dataset was provided"): - datamodule.val_dataloader() - - with pytest.raises(ValueError, match="No test_dataset was provided"): - datamodule.test_dataloader() - - -def test_batch_collation(example_sequences): - dataset = SimpleTokenizedDataset(example_sequences) - datamodule = MockDataModule( - train_dataset=dataset, - micro_batch_size=3, # Process all sequences in one batch - ) - - datamodule.trainer = mock.Mock() - datamodule.trainer.global_step = 0 - - train_loader = datamodule.train_dataloader() - batch = next(iter(train_loader)) - - # Check that we get a proper batch tensor - assert isinstance(batch["text"], torch.Tensor) - # Check batch size - assert batch["text"].shape[0] == 3 - # Check that sequences are padded to the length of the longest sequence - assert batch["text"].shape[1] == 7 # Length of longest sequence diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_masking.py b/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_masking.py deleted file mode 100644 index 4dba10d691..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/data/test_masking.py +++ /dev/null @@ -1,271 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pytest -import torch - -from bionemo.llm.data.masking import BertMaskConfig, add_cls_and_eos_tokens, apply_bert_pretraining_mask - - -class TestTokenizer: - @property - def mask_token_id(self): - return 32 - - @property - def all_special_ids(self): - return [0, 32] - - -def test_bert_mask_config_raises_with_invalid_probabilities(): - with pytest.raises(ValueError): - BertMaskConfig(tokenizer=1, random_tokens=range(2, 4), mask_token_prob=0.9, random_token_prob=0.2) - - -def test_apply_bert_pretraining_mask(): - # fmt: off - tokenized_sequence = torch.tensor( - [20, 15, 11, 7, 10, 16, 9, 10, 4, 15, 8, 12, 7, 10, 12, 4, 9, - 10, 8, 15, 9, 14, 7, 8, 6, 5, 16, 4, 5, 9, 9, 4, 8, 7, - 8, 10, 16, 7, 12, 7, 16, 13, 12, 5, 19, 4, 10, 8, 4, 6, 19, - 17, 12, 7, 5, 11, 14, 10, 6, 19, 7, 4, 5, 6, 6]) - # fmt: on - - random_seed = 123 - - # Apply the function - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - random_seed, - mask_config=BertMaskConfig(tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - # Check the unmasked tokens are unchanged. - assert torch.allclose(masked_sequence[~loss_mask], tokenized_sequence[~loss_mask]) - - # Make sure the output labels are correct. - assert torch.allclose(labels[loss_mask], tokenized_sequence[loss_mask]) - - values, _ = torch.mode(masked_sequence[loss_mask]) - assert values.item() == 32 - - -def test_apply_bert_pretraining_mask_no_mask_token(): - # fmt: off - tokenized_sequence = torch.tensor( - [20, 15, 11, 7, 10, 16, 9, 10, 4, 15, 8, 12, 7, 10, 12, 4, 9, - 10, 8, 15, 9, 14, 7, 8, 6, 5, 16, 4, 5, 9, 9, 4, 8, 7, - 8, 10, 16, 7, 12, 7, 16, 13, 12, 5, 19, 4, 10, 8, 4, 6, 19, - 17, 12, 7, 5, 11, 14, 10, 6, 19, 7, 4, 5, 6, 6]) - # fmt: on - - random_seed = 123 - - # Apply the function - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - random_seed, - mask_config=BertMaskConfig(mask_token_prob=0.0, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - # Check the unmasked tokens are unchanged. - assert torch.allclose(masked_sequence[~loss_mask], tokenized_sequence[~loss_mask]) - - # Make sure the output labels are correct. - assert torch.allclose(labels[loss_mask], tokenized_sequence[loss_mask]) - - # Make sure no mask tokens are in the output sequence - assert torch.all(masked_sequence != 32) - - -def test_apply_bert_pretraining_mask_changing_mask_prob(): - # fmt: off - tokenized_sequence = torch.tensor( - [20, 15, 11, 7, 10, 16, 9, 10, 4, 15, 8, 12, 7, 10, 12, 4, 9, - 10, 8, 15, 9, 14, 7, 8, 6, 5, 16, 4, 5, 9, 9, 4, 8, 7, - 8, 10, 16, 7, 12, 7, 16, 13, 12, 5, 19, 4, 10, 8, 4, 6, 19, - 17, 12, 7, 5, 11, 14, 10, 6, 19, 7, 4, 5, 6, 6]) - # fmt: on - - random_seed = 123 - - # Apply the function - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - random_seed, - mask_config=BertMaskConfig(mask_prob=0.0, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - # All mask values should be False. - assert torch.all(~loss_mask) - - -def test_apply_bert_pretraining_mask_converges_to_correct_probability(): - sequence = torch.ones(100_000, dtype=torch.long) - random_seed = 123 - - masked_sequence, _, loss_mask = apply_bert_pretraining_mask( - sequence, - random_seed, - mask_config=BertMaskConfig( - tokenizer=TestTokenizer(), - random_tokens=range(3, 5), - mask_prob=0.5, - mask_token_prob=0.25, - random_token_prob=0.12, - ), - ) - - # Check that overall masking probability is correct. - assert pytest.approx(loss_mask.float().mean(), abs=0.01) == 0.5 - - # Check that the distribution of masked tokens is correct. - assert pytest.approx((masked_sequence == 32).float().mean(), abs=0.01) == 0.5 * 0.25 - - # Check that the distribution of random tokens is correct. - assert ( - pytest.approx(torch.logical_or(masked_sequence == 3, masked_sequence == 4).float().mean(), abs=0.01) - == 0.5 * 0.12 - ) - - # Check that the distribution of unmasked tokens is correct. - assert pytest.approx((masked_sequence[loss_mask] == 1).float().mean(), abs=0.01) == 1.0 - (0.25 + 0.12) - - -def test_apply_bert_pretraining_mask_is_reproducible_with_same_seed(): - torch.manual_seed(42) - tokenized_sequence = torch.randint(0, 100, (1000,)) - - # Apply the function - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - 123, - mask_config=BertMaskConfig(mask_prob=0.5, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - for _ in range(10): - new_seq, new_labels, new_mask = apply_bert_pretraining_mask( - tokenized_sequence, - 123, - mask_config=BertMaskConfig(mask_prob=0.5, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - assert torch.allclose(masked_sequence, new_seq) - assert torch.allclose(labels, new_labels) - assert torch.allclose(loss_mask, new_mask) - - -def test_apply_bert_pretraining_mask_changes_with_new_seed(): - torch.manual_seed(42) - tokenized_sequence = torch.randint(0, 100, (1000,)) - - # Apply the function - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - 123, - mask_config=BertMaskConfig(mask_prob=0.5, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - new_seq, new_labels, new_mask = apply_bert_pretraining_mask( - tokenized_sequence, - 321, - mask_config=BertMaskConfig(mask_prob=0.5, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - - assert not torch.allclose(masked_sequence, new_seq) - assert not torch.allclose(labels, new_labels) - assert not torch.allclose(loss_mask, new_mask) - - -def test_apply_bert_pretraining_mask_doesnt_mask_special_tokens(): - tokenized_sequence = torch.zeros(1000, dtype=torch.long) - masked_sequence, labels, loss_mask = apply_bert_pretraining_mask( - tokenized_sequence, - 123, - mask_config=BertMaskConfig(mask_prob=0.5, tokenizer=TestTokenizer(), random_tokens=range(4, 24)), - ) - assert torch.all(masked_sequence == 0) - assert torch.all(labels == -100) - assert torch.all(~loss_mask) - - -def test_add_cls_and_eos_tokens_both_tokens(): - sequence = torch.tensor([1, 2, 3]) - loss_mask = torch.tensor([False, True, False]) - labels = torch.tensor([-1, 2, -1]) - - augmented_sequence, augmented_labels, augmented_loss_mask = add_cls_and_eos_tokens( - sequence, labels, loss_mask, cls_token=0, eos_token=4 - ) - - assert len(augmented_sequence) == len(sequence) + 2 - assert augmented_sequence[0] == 0 - assert torch.allclose(augmented_sequence[1:-1], sequence) - assert augmented_sequence[-1] == 4 - - assert len(augmented_loss_mask) == len(loss_mask) + 2 - assert not augmented_loss_mask[0] - assert torch.allclose(augmented_loss_mask[1:-1], loss_mask) - assert not augmented_loss_mask[-1] - - assert len(augmented_labels) == len(labels) + 2 - assert augmented_labels[0] == -1 - assert torch.allclose(augmented_labels[1:-1], labels) - assert augmented_labels[-1] == -1 - - -def test_add_cls_and_eos_tokens_only_cls(): - sequence = torch.tensor([1, 2, 3]) - loss_mask = torch.tensor([False, True, False]) - labels = torch.tensor([-1, 2, -1]) - - augmented_sequence, augmented_labels, augmented_loss_mask = add_cls_and_eos_tokens( - sequence, labels, loss_mask, cls_token=0, eos_token=None - ) - - assert len(augmented_sequence) == len(sequence) + 1 - assert augmented_sequence[0] == 0 - assert torch.allclose(augmented_sequence[1:], sequence) - - assert len(augmented_loss_mask) == len(loss_mask) + 1 - assert not augmented_loss_mask[0] - assert torch.allclose(augmented_loss_mask[1:], loss_mask) - - assert len(augmented_labels) == len(labels) + 1 - assert augmented_labels[0] == -1 - assert torch.allclose(augmented_labels[1:], labels) - - -def test_add_cls_and_eos_tokens_only_bos(): - sequence = torch.tensor([1, 2, 3]) - loss_mask = torch.tensor([False, True, False]) - labels = torch.tensor([-1, 2, -1]) - - augmented_sequence, augmented_labels, augmented_loss_mask = add_cls_and_eos_tokens( - sequence, labels, loss_mask, cls_token=None, eos_token=4 - ) - - assert len(augmented_sequence) == len(sequence) + 1 - assert torch.allclose(augmented_sequence[:-1], sequence) - assert augmented_sequence[-1] == 4 - - assert len(augmented_loss_mask) == len(loss_mask) + 1 - assert torch.allclose(augmented_loss_mask[:-1], loss_mask) - assert not augmented_loss_mask[-1] - - assert len(augmented_labels) == len(labels) + 1 - assert torch.allclose(augmented_labels[:-1], labels) - assert augmented_labels[-1] == -1 diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/model/biobert/test_transformer_specs.py b/sub-packages/bionemo-llm/tests/bionemo/llm/model/biobert/test_transformer_specs.py deleted file mode 100644 index 2ed10184a7..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/model/biobert/test_transformer_specs.py +++ /dev/null @@ -1,84 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import List - -import pytest -from megatron.core.fusions.fused_layer_norm import FusedLayerNorm -from megatron.core.transformer.identity_op import IdentityOp - -from bionemo.llm.model.biobert import transformer_specs -from bionemo.llm.model.layers import TELayerNorm - - -def test_enum_str_choices(): - options: List[str] = [o.value for o in transformer_specs.BiobertSpecOption] - for o_str in options: - # Make sure argparse will be happy with the string equality - assert o_str == transformer_specs.BiobertSpecOption(o_str) - if o_str != "random_string": - # Show that some random string doesn't match - assert "random_string" != transformer_specs.BiobertSpecOption(o_str) - - -def test_enum_equality(): - assert ( - transformer_specs.BiobertSpecOption("bert_layer_local_spec") - == transformer_specs.BiobertSpecOption.bert_layer_local_spec - ) - - -def test_local_spec_sets_qk_ln(): - spec_with_qk = transformer_specs.get_biobert_spec( - transformer_specs.BiobertSpecOption.bert_layer_local_spec_with_qk_ln, qk_layernorm=True - ) - spec_no_qk = transformer_specs.get_biobert_spec( - transformer_specs.BiobertSpecOption.bert_layer_local_spec_with_qk_ln, qk_layernorm=False - ) - assert spec_with_qk.submodules.self_attention.submodules.q_layernorm == FusedLayerNorm - assert ( - spec_with_qk.submodules.self_attention.submodules.q_layernorm - == spec_with_qk.submodules.self_attention.submodules.k_layernorm - ) - assert spec_no_qk.submodules.self_attention.submodules.q_layernorm == IdentityOp - assert ( - spec_no_qk.submodules.self_attention.submodules.q_layernorm - == spec_no_qk.submodules.self_attention.submodules.k_layernorm - ) - - -def test_te_spec_sets_qk_ln(): - spec_with_qk = transformer_specs.get_biobert_spec( - transformer_specs.BiobertSpecOption.bert_layer_with_transformer_engine_and_qk_ln_spec, qk_layernorm=True - ) - spec_no_qk = transformer_specs.get_biobert_spec( - transformer_specs.BiobertSpecOption.bert_layer_with_transformer_engine_and_qk_ln_spec, qk_layernorm=False - ) - assert spec_with_qk.submodules.self_attention.submodules.q_layernorm == TELayerNorm - assert ( - spec_with_qk.submodules.self_attention.submodules.q_layernorm - == spec_with_qk.submodules.self_attention.submodules.k_layernorm - ) - assert spec_no_qk.submodules.self_attention.submodules.q_layernorm == IdentityOp - assert ( - spec_no_qk.submodules.self_attention.submodules.q_layernorm - == spec_no_qk.submodules.self_attention.submodules.k_layernorm - ) - - -def test_get_spec_bad_input(): - with pytest.raises(NotImplementedError): - transformer_specs.get_biobert_spec("bad_input") diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_loss.py b/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_loss.py deleted file mode 100644 index 8fc877b4b0..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_loss.py +++ /dev/null @@ -1,176 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -import torch -import torch.nn.functional as F -from megatron.core.models.common.language_module import language_module -from megatron.core.transformer import transformer_config -from nemo.lightning import megatron_parallel - -from bionemo.llm.model import loss as bionemo_loss -from bionemo.llm.model.loss import unreduced_token_loss_fn -from bionemo.testing import megatron_parallel_state_utils -from bionemo.testing.lightning import get_random_microbatch - - -def test_loss_equivalency_nemo_vs_pytorch(): - # Setup no grad and megatron distributed contexts for the test - with torch.no_grad(), megatron_parallel_state_utils.distributed_model_parallel_state(): - # Define the batch size, sequence length, and number of tokens - batch_size = 2 - sequence_length = 5 - num_tokens = 31 - - # Generate random logits (batch_size x sequence_length x num_tokens) with - # mean 0 and standard deviation 10 - logits = torch.randn(batch_size, sequence_length, num_tokens, dtype=torch.float32).cuda() * 10 - - # Generate target sequences (batch_size x sequence_length) with random integers - target = torch.randint(0, num_tokens, (batch_size, sequence_length), dtype=torch.long).cuda() - - # Generate a loss mask (batch_size x sequence_length) with random 0s and 1s - loss_mask = torch.randint(0, 2, (batch_size, sequence_length), dtype=bool).cuda() - - #################### - # Base case: Calculate the cross-entropy loss of masked tokens using the vanilla pytorch function. - expected_loss = F.cross_entropy(logits[loss_mask], target[loss_mask], reduction="mean") - - #################### - # Part 1) get the loss using NeMo/Megatron's default strategy of - # a. computing the first part of the loss inside of the forward pass of the model - # (through a call to `compute_language_model_loss`) - # b. passing this through the forward of MaskedTokenLossReduction, which is executed - # in parallel across GPUs and owns reducing within a parllel group. - # c. A final reduction across parallel groups through a call to `reduce` - dummy_model = language_module.LanguageModule( - config=transformer_config.TransformerConfig( - num_layers=1, - hidden_size=64, - ffn_hidden_size=128, - num_attention_heads=1, - kv_channels=None, - ) - ) - # Transpose the logits from (batch_size x sequence_length x num_tokens) to (sequence_length x batch_size x num_tokens) - # since this is what `compute_language_model_loss` expects. - unreduced_megatron_loss = dummy_model.compute_language_model_loss(target, logits.transpose(0, 1).contiguous()) - nemo_default_loss_fn = megatron_parallel.MaskedTokenLossReduction() - batch_megatron = { - "loss_mask": loss_mask, - } - forward_nemo_loss = nemo_default_loss_fn.forward( - batch=batch_megatron, - forward_out=unreduced_megatron_loss, # wants the loss directly - ) - final_nemo_loss = nemo_default_loss_fn.reduce([forward_nemo_loss[2]]) - - # First check, nemo+megatron loss - torch.testing.assert_close(expected_loss, final_nemo_loss) - - -def test_loss_equivalency_bionemo_vs_pytorch(): - # Setup no grad and megatron distributed contexts for the test - with torch.no_grad(), megatron_parallel_state_utils.distributed_model_parallel_state(): - # Define the batch size, sequence length, and number of tokens - batch_size = 2 - sequence_length = 5 - num_tokens = 31 - - # Generate random logits (batch_size x sequence_length x num_tokens) with - # mean 0 and standard deviation 10 - logits = torch.randn(batch_size, sequence_length, num_tokens, dtype=torch.float32).cuda() * 10 - - # Generate target sequences (batch_size x sequence_length) with random integers - target = torch.randint(0, num_tokens, (batch_size, sequence_length), dtype=torch.long).cuda() - - # Generate a loss mask (batch_size x sequence_length) with random 0s and 1s - loss_mask = torch.randint(0, 2, (batch_size, sequence_length), dtype=bool).cuda() - - #################### - # Base case: Calculate the cross-entropy loss of masked tokens using the vanilla pytorch function. - expected_loss = F.cross_entropy(logits[loss_mask], target[loss_mask], reduction="mean") - #################### - # Part 2) get the loss using BioNeMo's default strategy of - # a. passing model logits through the forward of MaskedTokenLossReduction, which is executed - # in parallel across GPUs and owns reducing within a parllel group. This combines parts a and b of the - # NeMo/Megatron strategy into a single step, and doesn't expect the model to compute loss in forward. - # b. A final reduction across parallel groups through a call to `reduce` - # Second, check bionemo loss where model outputs logits - bionemo_loss_fn = bionemo_loss.BERTMLMLossWithReduction() - bionemo_model_output = { - "token_logits": logits.transpose( - 0, 1 - ).contiguous(), # bionemo loss function also wants logits s,b like nemo. - } - bionemo_batch = { - "loss_mask": loss_mask, - "labels": target, - } - forward_bionemo_loss = bionemo_loss_fn.forward( - batch=bionemo_batch, - forward_out=bionemo_model_output, - ) - final_bionemo_loss = bionemo_loss_fn.reduce([forward_bionemo_loss[2]]) - torch.testing.assert_close(expected_loss, final_bionemo_loss) - - -def test_vocab_parallel_cross_entropy_golden_value(seed: int = 42): - """Test tensor_parallel.vocab_parallel_cross_entropy""" - with megatron_parallel_state_utils.distributed_model_parallel_state(seed=seed): - # setup test input - microbatch_size, max_sequence_length, vocab_size = 1, 1024, 2 - microbatch_outputs = [get_random_microbatch(microbatch_size, max_sequence_length, vocab_size, seed=seed)] - - # 1. torch.nn.functional - loss = torch.nn.functional.cross_entropy( - input=microbatch_outputs[0]["forward_out"]["token_logits"].transpose(0, 1).contiguous().reshape(1024, 2), - target=microbatch_outputs[0]["batch"]["labels"].flatten(), - reduction="none", - ignore_index=-100, - ) - - # 2. tensor_parallel.vocab_parallel_cross_entropy - unreduced_token_loss = unreduced_token_loss_fn( - logits=microbatch_outputs[0]["forward_out"]["token_logits"], - labels=microbatch_outputs[0]["batch"]["labels"], - ) - - torch.testing.assert_close( - unreduced_token_loss.flatten(), - loss, - ) - - -@pytest.mark.xfail(reason="tensor_parallel.vocab_parallel_cross_entropy modifies input token_logits") -def test_vocab_parallel_cross_entropy_inplace_operation(seed: int = 42): - """Test inplace operation on input in tensor_parallel.vocab_parallel_cross_entropy""" - with megatron_parallel_state_utils.distributed_model_parallel_state(seed=seed): - # setup test input - microbatch_size, max_sequence_length, vocab_size = 1, 1024, 2 - microbatch_outputs = [get_random_microbatch(microbatch_size, max_sequence_length, vocab_size, seed=seed)] - - token_logits_clone = microbatch_outputs[0]["forward_out"]["token_logits"].clone() - labels_clone = microbatch_outputs[0]["batch"]["labels"].clone() - - _ = unreduced_token_loss_fn( - logits=microbatch_outputs[0]["forward_out"]["token_logits"], - labels=microbatch_outputs[0]["batch"]["labels"], - ) - - torch.testing.assert_allclose(microbatch_outputs[0]["batch"]["labels"], labels_clone) # pass - torch.testing.assert_allclose( - microbatch_outputs[0]["forward_out"]["token_logits"], token_logits_clone - ) # xfail diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_lr_scheduler.py b/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_lr_scheduler.py deleted file mode 100644 index 1b5549db00..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/model/test_lr_scheduler.py +++ /dev/null @@ -1,78 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import torch - -from bionemo.llm.model.lr_scheduler import WarmupAnnealDecayHold, WarmupAnnealDecayHoldScheduler - - -def test_warmup_anneal_decay_hold_scheduler_exists(): - scheduler = WarmupAnnealDecayHoldScheduler(warmup_steps=2000, min_lr=4e-5, max_steps=500000, max_lr=4e-4) - assert scheduler is not None - assert scheduler.max_steps == 500000 - assert scheduler.warmup_steps == 2000 - assert scheduler.max_lr == 4e-4 - assert scheduler.min_lr == 4e-5 - - -def test_warmup_anneal_decay_hold_works(): - optim = torch.optim.Adam(torch.nn.Linear(10, 1).parameters(), lr=4e-4, weight_decay=0.01, betas=[0.9, 0.98]) - max_lr = 0.1 - min_lr = 0.01 - anneal_percentage = 0.50 - constant_value = anneal_percentage * max_lr - scheduler = WarmupAnnealDecayHold( - optimizer=optim, - warmup_steps=20, - min_lr=min_lr, - max_steps=100, - max_lr=max_lr, - anneal_percentage=anneal_percentage, - ) - - assert scheduler.get_lr()[0] == min_lr - # Check initial LR - for _ in range(20): - scheduler.step() - # Check warmup phase - assert scheduler.get_lr()[0] == max_lr - - # Check decay is lower than max - for _ in range(20): - scheduler.step() - - decay_lr = scheduler.get_lr()[0] - # Check decay is lower than last decay - assert decay_lr < max_lr - - # Keep decay stepping - for _ in range(20): - scheduler.step() - - decay_low = scheduler.get_lr()[0] - assert decay_low < decay_lr - assert decay_low == constant_value - - for _ in range(30): - scheduler.step() - - assert scheduler.get_lr()[0] == constant_value - - # Check hold phase. Run it much longer and confirm - for _ in range(300): - scheduler.step() - - assert scheduler.get_lr()[0] == constant_value diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/test_lightning.py b/sub-packages/bionemo-llm/tests/bionemo/llm/test_lightning.py deleted file mode 100644 index 4c21dc1bd6..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/test_lightning.py +++ /dev/null @@ -1,223 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import nemo.lightning as nl -import pytest -import torch -from torch import nn - -from bionemo.llm import lightning as bnptl -from bionemo.llm.lightning import batch_collator, get_dtype_device -from bionemo.testing import megatron_parallel_state_utils - - -def test_batch_collate_seqdim_and_singleton_with_padding(batch_size=2, num_batches=5): - raw_batches = [ - # Try making the data with an unusual dtype (uint8) to verify that it is left unchanged with padding. - {"idx": torch.tensor([i] * batch_size), "seq": torch.ones(batch_size, i + 1, dtype=torch.uint8)} - for i in range(num_batches) - ] - result = batch_collator(raw_batches) - assert isinstance(result, dict), "expect output container to be the same type as input (dict)" - torch.testing.assert_close(result["idx"], torch.tensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])) - # Make sure the padding is correct, and that the dtype is left as it was. - expected_result = torch.tensor( - [ - [1, 0, 0, 0, 0], - [1, 0, 0, 0, 0], - [1, 1, 0, 0, 0], - [1, 1, 0, 0, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1], - [1, 1, 1, 1, 1], - ], - dtype=torch.uint8, - ) - torch.testing.assert_close(result["seq"], expected_result) - - -def test_batch_collate_tuple(): - result = batch_collator(tuple((torch.tensor([i]), torch.tensor([i + 1])) for i in range(10))) - assert isinstance(result, tuple), "expect output container to be the same type as input (tuple)" - assert torch.equal(result[0], torch.tensor(list(range(10)))) - assert torch.equal(result[1], torch.tensor([i + 1 for i in range(10)])) - - -def test_batch_collate_dict(): - result = batch_collator( - [{"fixed key1": torch.tensor([i]), "fixed key2": torch.tensor([i + 1])} for i in range(10)] - ) - assert isinstance(result, dict), "expect output container to be the same type as input (dict)" - assert torch.equal(result["fixed key1"], torch.tensor(list(range(10)))) - assert torch.equal(result["fixed key2"], torch.tensor([i + 1 for i in range(10)])) - - -def test_batch_collate_list(): - result = batch_collator([[torch.tensor([i]), torch.tensor([i + 1])] for i in range(10)]) - assert isinstance(result, list), "expect output container to be the same type as input (list)" - assert torch.equal(result[0], torch.tensor(list(range(10)))) - assert torch.equal(result[1], torch.tensor([i + 1 for i in range(10)])) - - -@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires at least 2 GPUs") -def test_batch_collate_multi_gpu(): - # Create tensors on different GPUs - tensor1 = torch.tensor([1, 2, 3], device="cuda:0") - tensor2 = torch.tensor([4, 5, 6], device="cuda:1") - - result = batch_collator([tensor1, tensor2]) - - # Result should be on the first GPU and contain all values - expected = torch.tensor([1, 2, 3, 4, 5, 6], device="cuda:0") - assert result.device == torch.device("cuda:0") - assert torch.equal(result, expected) - - -def test_batch_collate_none(): - assert batch_collator([None, None]) is None - - -def test_batch_collator_tensor_fails(): - with pytest.raises(ValueError, match="Unsupported input structure in batch_collator"): - batch_collator(torch.tensor([[torch.tensor([i]), torch.tensor([i + 1])] for i in range(10)])) - - -def test_batch_collator_primitive_fails(): - with pytest.raises(ValueError, match="Unsupported input structure in batch_collator"): - batch_collator(4) - - -def test_batch_collator_emptylist_fails(): - with pytest.raises(ValueError, match="Cannot process an empty sequence"): - batch_collator([]) - - -def test_batch_collator_emptytuple_fails(): - with pytest.raises(ValueError, match="Cannot process an empty sequence"): - batch_collator(()) - - -def test_batch_collator_emptyset_fails(): - with pytest.raises(ValueError, match="Unsupported input structure in batch_collator"): - batch_collator(set()) - - -def test_batch_collator_emptydict_fails(): - with pytest.raises(ValueError, match="Unsupported input structure in batch_collator"): - batch_collator({}) - - -def test_tensor_dtype(): - tensor = torch.tensor(4.0, dtype=torch.float32) - dtype, _ = get_dtype_device(tensor) - assert dtype == torch.float32 - - -def test_module_dtype(): - module = MyModule(dtype=torch.float32) - dtype, _ = get_dtype_device(module) - assert dtype == torch.float32 - - -def test_nested_dtype(): - module = MyModule(dtype=torch.float32) - nested = NestedModule(module) - dtype, _ = get_dtype_device(nested) - assert dtype == torch.float32 - - -def test_dict_tensor_dtype(): - dtype, _ = get_dtype_device({"tensor": torch.tensor(5, dtype=torch.float32)}) - assert dtype == torch.float32 - - -# Handles the cases where we pass in a valid type, but it does not have an associated dtype -def test_empty_module(): - # Module with no underlying parameters - empty = MyModuleEmpty() - with pytest.raises(ValueError, match="Cannot get dtype on a torch module with no parameters."): - get_dtype_device(empty) - - -def test_none_fails(): - with pytest.raises(ValueError, match="non-None value not found"): - get_dtype_device([None, None]) - - -def test_empty_dict_fails(): - with pytest.raises(ValueError, match="Looking up dtype on an empty dict"): - get_dtype_device({}) - - -def test_empty_list_fails(): - with pytest.raises(ValueError, match="Looking up dtype on an empty list"): - get_dtype_device([]) - - -def test_garbage_fails(): - # String not a valid input type, should work for other garbage values too. - with pytest.raises(TypeError, match="Got something we didnt expect"): - get_dtype_device("flkasdflasd") - - -class MyModule(nn.Module): - def __init__(self, dtype=torch.float32): - super().__init__() - self.linears = nn.ModuleList([nn.Linear(10, 10, dtype=dtype) for i in range(10)]) - self.others = nn.ModuleList([nn.Linear(10, 10, dtype=dtype) for i in range(10)]) - - def forward(self, x): - # ModuleList can act as an iterable, or be indexed using ints - for i, linear in enumerate(self.linears): - x = linear(x) - return x - - -class MyModuleEmpty(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return x - - -class NestedModule(nn.Module): - def __init__(self, other): - super().__init__() - self.other = other - - def forward(self, x): - return self.other(x) - - -# TODO rewrite unittest and potentially LightningPassthroughPredictionMixin -@pytest.mark.xfail(reason="MegatronStrategy no longer has '_get_loss_reduction' attribute") -def test_mixin_strategy_contract_get_loss_reduction(): - with megatron_parallel_state_utils.distributed_model_parallel_state(): - strategy = nl.MegatronStrategy( - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - ddp="megatron", - find_unused_parameters=True, - always_save_context=False, - ) - strategy.connect(bnptl.LightningPassthroughPredictionMixin()) - mixin = bnptl.LightningPassthroughPredictionMixin() - strategy_reduction_function = strategy._get_loss_reduction("predict") - assert isinstance(strategy_reduction_function(mixin), bnptl.PassthroughLossReduction) diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/__init__.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_callbacks.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_callbacks.py deleted file mode 100644 index 0ba20fe815..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_callbacks.py +++ /dev/null @@ -1,115 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -from unittest.mock import MagicMock, patch - -import pytest -import pytorch_lightning as pl -import torch - -from bionemo.llm.lightning import batch_collator -from bionemo.llm.utils.callbacks import PredictionWriter -from bionemo.testing import megatron_parallel_state_utils - - -# Fixture for temporary directory -@pytest.fixture -def temp_dir(tmp_path): - return str(tmp_path) - - -@pytest.fixture -def mock_trainer(): - trainer = MagicMock(spec=pl.Trainer) - trainer.global_rank = 0 - return trainer - - -@pytest.fixture -def mock_module(): - return MagicMock(spec=pl.LightningModule) - - -@pytest.fixture -def sample_predictions(): - return [{"temp": torch.tensor([1, 2, 3])}, {"temp": torch.tensor([4, 5, 6])}, None] - - -@pytest.fixture -def collated_prediction(sample_predictions): - return batch_collator([item for item in sample_predictions if item is not None]) - - -@pytest.mark.parametrize("write_interval", ["batch", "epoch"]) -def test_prediction_writer_init(temp_dir, write_interval): - writer = PredictionWriter(output_dir=temp_dir, write_interval=write_interval) - assert writer.output_dir == temp_dir - if write_interval == "batch": - assert writer.interval.on_batch - if write_interval == "epoch": - assert writer.interval.on_epoch - - -@patch("torch.save") -def test_write_on_batch_end(mock_torch_save, temp_dir, mock_trainer, mock_module, collated_prediction): - writer = PredictionWriter(output_dir=temp_dir, write_interval="batch") - - batch_idx = 1 - with megatron_parallel_state_utils.distributed_model_parallel_state(): - writer.write_on_batch_end( - trainer=mock_trainer, - pl_module=mock_module, - prediction=collated_prediction, - batch_indices=[], - batch=None, - batch_idx=batch_idx, - dataloader_idx=0, - ) - - expected_path = os.path.join( - temp_dir, f"predictions__rank_{mock_trainer.global_rank}__dp_rank_0__batch_{batch_idx}.pt" - ) - mock_torch_save.assert_called_once_with(collated_prediction, expected_path) - - -@patch("torch.save") -def test_write_on_epoch_end( - mock_torch_save, temp_dir, mock_trainer, mock_module, sample_predictions, collated_prediction -): - writer = PredictionWriter(output_dir=temp_dir, write_interval="epoch") - - with megatron_parallel_state_utils.distributed_model_parallel_state(): - writer.write_on_epoch_end( - trainer=mock_trainer, - pl_module=mock_module, - predictions=sample_predictions, - batch_indices=[], - ) - - expected_path = os.path.join(temp_dir, f"predictions__rank_{mock_trainer.global_rank}__dp_rank_0.pt") - - mock_torch_save.assert_called_once() # Ensure it's called exactly once - - # Extract the actual call arguments - actual_args, actual_kwargs = mock_torch_save.call_args - prediction = actual_args[0] - assert actual_args[1] == expected_path, "Paths do not match" - - # Compare tensors manually - assert isinstance(prediction, dict) - for key in prediction: - assert torch.equal(prediction[key], collated_prediction[key]), "Tensors do not match" diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_datamodule_utils.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_datamodule_utils.py deleted file mode 100644 index c281fef09e..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_datamodule_utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from bionemo.llm.utils.datamodule_utils import float_or_int_or_none, infer_global_batch_size - - -def test_float_or_int_or_none_type_float(): - """Test that float_or_int_or_none returns a float when given a float on edge case 1.0""" - assert isinstance(float_or_int_or_none(1.0), float) - assert isinstance(float_or_int_or_none("1.0"), float) - - -def test_float_or_int_or_none_type_int(): - """Test that float_or_int_or_none returns an int when given an int on edge case 1""" - assert isinstance(float_or_int_or_none(1), int) - assert isinstance(float_or_int_or_none("1"), int) - - -def test_float_or_int_or_none_type_none(): - """Test that float_or_int_or_none returns None when given None""" - assert float_or_int_or_none(None) is None - assert float_or_int_or_none("None") is None - - -def test_infer_global_batch_size(): - """Test that infer_global_batch_size returns the correct global batch size""" - assert infer_global_batch_size(micro_batch_size=1, num_nodes=1, devices=1) == 1 # single node, single device - assert infer_global_batch_size(micro_batch_size=1, num_nodes=1, devices=8) == 8 # single node, multi device - assert ( - infer_global_batch_size( - micro_batch_size=1, - num_nodes=2, - devices=8, - ) - == 16 - ) # multi node, multi device - assert ( - infer_global_batch_size(micro_batch_size=1, num_nodes=2, devices=8, pipeline_model_parallel_size=2) == 8 - ) # multi node, multi device with pipeline parallel - assert ( - infer_global_batch_size( - micro_batch_size=1, num_nodes=2, devices=8, pipeline_model_parallel_size=2, tensor_model_parallel_size=2 - ) - == 4 - ) # multi node, multi device with pipeline and tensor parallel - assert ( - infer_global_batch_size( - micro_batch_size=1, - num_nodes=2, - devices=8, - pipeline_model_parallel_size=2, - tensor_model_parallel_size=2, - accumulate_grad_batches=2, - ) - == 8 - ) # multi node, multi device with pipeline and tensor parallel, and accumulate grad batches diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_iomixin_utils.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_iomixin_utils.py deleted file mode 100644 index b24e820b54..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_iomixin_utils.py +++ /dev/null @@ -1,106 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass, field - -import pytest -from nemo.lightning import io - -from bionemo.llm.utils import iomixin_utils as iom - - -@dataclass -class BaseDataClass(iom.WillHaveGetSetHparam): - a: int = field(default_factory=lambda: 0) - b: int = 3 - - def lazy_update(self): - self.set_hparam("b", self.b + 2) - # Will update the value of b set later on making use of a future subclass IOMixin - - -@dataclass -class OverrideModelDataClass1(BaseDataClass, iom.IOMixinWithGettersSetters): - a: int = field(default_factory=lambda: 4) - c: int = 3 - - -@dataclass -class OverrideModelDataClass2(BaseDataClass, iom.IOMixinWithGettersSetters): - a: int = field(default_factory=lambda: 5) # override default of a - # do not define/override b - c: int = 4 # new variable - - -class TestIOMixin: - """TestCase on IOMixin. - - Notes: - IOMixin only captures non-default __init__ arguments into self.__io__ to ensure no compatibility in loading older mcore config in newer versions. - """ - - def test_dataclasses_two_versions(self): - _ = OverrideModelDataClass1(b=2) - v1 = OverrideModelDataClass2(b=4) - v1.lazy_update() # the mutate method allows a variable that matches the init arg to be changed and tracked. - coppied_v1 = io.reinit(v1) # Simulate loading from a checkpoint - v2 = OverrideModelDataClass2(a=3, b=1, c=5) - coppied_v2 = io.reinit(v2) # Simulate loading from a checkpoint - assert v1.a != v2.a - assert v1.a == coppied_v1.a - assert v2.a == coppied_v2.a - assert v1.b != v2.b - assert v1.a == 5 - assert v1.b == 6 - assert v1.c == 4 - assert v2.a == 3 - assert v2.b == 1 - assert v2.c == 5 - assert v1.a == coppied_v1.a - assert v1.b == coppied_v1.b - assert v1.c == coppied_v1.c - assert v2.a == coppied_v2.a - assert v2.b == coppied_v2.b - assert v2.c == coppied_v2.c - - def test_dataclass_out_of_sync(self): - v1 = OverrideModelDataClass1() - v1.set_hparam("b", 7, also_change_value=False) - assert v1.b == 3, "Also change value False should not update the object in self." - v1_copy = io.reinit(v1) - assert v1_copy.b == 7, "V1 should re-initialize with the updated hyper-parameter." - - # Make sure looking up a non-existant hyper-parameter raises an error - with pytest.raises(KeyError): - v1.get_hparam("q") - - # Make sure we can get all hyper-parameters that are **non-default** non-defaultfactory objects - assert v1.get_hparams() == {"b": 7} - - # Make sure by default we can change both the hyper-parameter and the attribute. - v1_copy.set_hparam("b", 8) - assert v1_copy.b == 8 - assert v1_copy.get_hparam("b") == 8 - - def test_dataclass_hparam_modify_parent_default(self): - v1 = OverrideModelDataClass1() - v1.set_hparam("a", 7) - assert v1.a == 7 - # Make sure we can get all **non-default** **non-defaultfactory** hyper-parameters - assert v1.get_hparams() == {"a": 7} - - v1_copy = io.reinit(v1) - assert v1_copy.a == 7, "V1 should re-initialize with the updated hyper-parameter." - assert v1_copy.get_hparam("a") == 7 diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_logger_utils.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_logger_utils.py deleted file mode 100644 index 2471a89a2d..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_logger_utils.py +++ /dev/null @@ -1,147 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import pathlib - -import pytest -from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger -from nemo import lightning as nl -from nemo.lightning.nemo_logger import NeMoLogger - -from bionemo.llm.utils.logger_utils import WandbConfig, setup_nemo_lightning_logger - - -@pytest.fixture -def project_name() -> str: - return "test_project" - - -@pytest.fixture -def wandb_config(project_name): - return WandbConfig( - entity=None, - project=project_name, - tags=["tag1", "tag2"], - group="test_group", - job_type="test_job", - offline=True, # ensure no actual communication with wandb servers - id=None, - anonymous=False, - log_model=False, - ) - - -def test_construct_logger_no_loggers(): - logger = setup_nemo_lightning_logger("test") - assert logger.name == "test" - assert logger.wandb is None - assert logger.tensorboard is None - - -def test_setup_logger_all_loggers(tmp_path, wandb_config, project_name, caplog): - # Use a unique experiment name - exp_name = "unit-test-loggers" - root_dir = tmp_path # provided by pytest as a temporary directory - - with caplog.at_level(logging.WARNING): - logger = setup_nemo_lightning_logger( - name=exp_name, - root_dir=root_dir, - initialize_tensorboard_logger=True, - wandb_config=wandb_config, - ckpt_callback=None, - ) - - # Checks on the returned logger - assert isinstance(logger, NeMoLogger), "The returned logger should be a NeMoLogger instance." - assert logger.name == exp_name - - # Check that directories are set up correctly - expected_save_dir = root_dir / exp_name - assert logger.log_dir == str(root_dir), "NeMoLogger save_dir should match expected path." - # assert not expected_save_dir.exists(), "Expected experiment directory should not be created yet." - - # Check TensorBoard logger initialization - tb_logger = logger.tensorboard - assert isinstance(tb_logger, TensorBoardLogger), "TensorBoardLogger should be created." - tb_log_dir = pathlib.Path(tb_logger.log_dir) - # assert not tb_log_dir.is_dir(), "TensorBoard log directory should not exist yet." - assert tb_log_dir == (expected_save_dir / "dev"), "TensorBoardLogger name should match experiment name." - - # Check WandB logger initialization - wandb_logger = logger.wandb - assert isinstance(wandb_logger, WandbLogger), "WandBLogger should be created." - # Validate that wandb_logger uses correct save_dir and name - # WandbLogger's experiment is lazily created, so just check configured values - assert wandb_logger.name != exp_name, "WandBLogger name should not match experiment name." - assert wandb_logger.name == project_name, "WandBLogger name should match project name." - assert pathlib.Path(wandb_logger.save_dir) == expected_save_dir, "WandBLogger save_dir should match expected path." - # Since we provided wandb_config and tensorboard was enabled, we should NOT see - # the warnings about them being turned off. - assert "WandB is currently turned off." not in caplog.text - assert "User-set tensorboard is currently turned off." not in caplog.text - - -def test_nemo_logger_initialized(tmp_path, wandb_config, project_name, caplog): - # Use a unique experiment name - exp_name = "unit-test-loggers" - root_dir = tmp_path # provided by pytest as a temporary directory - trainer = nl.Trainer(devices=1, accelerator="gpu", num_nodes=1) - - logger = setup_nemo_lightning_logger( - name=exp_name, - root_dir=root_dir, - initialize_tensorboard_logger=True, - wandb_config=wandb_config, - ckpt_callback=None, - ) - - # as in https://github.com/NVIDIA/NeMo/blob/bb895bc4b28ba99d707cb907c4496297a2a7b533/nemo/collections/llm/api.py#L852C22-L856C6 - logger.setup(trainer=trainer) - - # Check that directories are set up correctly - expected_save_dir = root_dir / exp_name - assert expected_save_dir.exists(), "Expected experiment directory should not be created yet." - - # Check TensorBoard logger initialization - tb_logger = logger.tensorboard - tb_log_dir = pathlib.Path(tb_logger.log_dir) - # assert not tb_log_dir.is_dir(), "TensorBoard log directory should not exist yet." - assert tb_log_dir == (expected_save_dir / "dev") - - # Trigger lazy creation of experiment in loggers so loggers have their metadata available - # following trainer setup at the start of the training in - # https://github.com/Lightning-AI/pytorch-lightning/blob/de7c28ae865b5c9fd3ff21debebb994605f7f420/src/lightning/pytorch/trainer/trainer.py#L944 - # which executes - # https://github.com/Lightning-AI/pytorch-lightning/blob/caa9e1e59436913e365bf52eeb2b07e3bf67efac/src/lightning/pytorch/trainer/call.py#L94C1-L97C34 - for _logger in trainer.loggers: - if hasattr(_logger, "experiment"): - _ = _logger.experiment - - -def test_setup_logger_wandb_experiment(tmp_path, wandb_config, project_name, caplog): - exp_name = "unit-test-loggers" - root_dir = tmp_path # provided by pytest as a temporary directory - - logger = setup_nemo_lightning_logger( - name=exp_name, - root_dir=root_dir, - initialize_tensorboard_logger=True, - wandb_config=wandb_config, - ckpt_callback=None, - ) - wandb_logger = logger.wandb - _ = wandb_logger.experiment diff --git a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_megatron_utils.py b/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_megatron_utils.py deleted file mode 100644 index 3503084a76..0000000000 --- a/sub-packages/bionemo-llm/tests/bionemo/llm/utils/test_megatron_utils.py +++ /dev/null @@ -1,61 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import pytest - -from bionemo.llm.utils.megatron_utils import is_only_data_parallel -from bionemo.testing import megatron_parallel_state_utils as mpsu - - -def test_no_parallelism_raises(): - with pytest.raises(RuntimeError): - is_only_data_parallel() - - -def test_base_case(): - with mpsu.distributed_model_parallel_state(): - # our test instance with 1 GPU is trivially this case, also default initializations should be this case. - assert is_only_data_parallel() - - -def test_pp2(): - with mpsu.mock_distributed_parallel_state(world_size=8, pipeline_model_parallel_size=2): - assert not is_only_data_parallel() - - -def test_cp2(): - with mpsu.mock_distributed_parallel_state(world_size=8, context_parallel_size=2): - assert not is_only_data_parallel() - - -def test_tp2(): - with mpsu.mock_distributed_parallel_state(world_size=8, tensor_model_parallel_size=2): - assert not is_only_data_parallel() - - -def test_tp2pp2cp2(): - with mpsu.mock_distributed_parallel_state( - world_size=8, tensor_model_parallel_size=2, pipeline_model_parallel_size=2, context_parallel_size=2 - ): - assert not is_only_data_parallel() - - -def test_tp8(): - with mpsu.mock_distributed_parallel_state(world_size=8, tensor_model_parallel_size=8): - assert not is_only_data_parallel() - - -def test_dp_only(): - with mpsu.mock_distributed_parallel_state(world_size=8): - assert is_only_data_parallel() diff --git a/sub-packages/bionemo-testing/DEPRECATED b/sub-packages/bionemo-testing/DEPRECATED deleted file mode 100644 index d322a722dc..0000000000 --- a/sub-packages/bionemo-testing/DEPRECATED +++ /dev/null @@ -1,7 +0,0 @@ -This sub-package (sub-packages/bionemo-testing) is deprecated. - -This package provided test utilities for BioNeMo sub-packages that -depended on NeMo/Megatron. It is no longer maintained as the framework -has moved to self-contained recipes in bionemo-recipes/. - -This package will be removed in a future release. diff --git a/sub-packages/bionemo-testing/LICENSE b/sub-packages/bionemo-testing/LICENSE deleted file mode 120000 index 61bc2cda7e..0000000000 --- a/sub-packages/bionemo-testing/LICENSE +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE/license.txt \ No newline at end of file diff --git a/sub-packages/bionemo-testing/README.md b/sub-packages/bionemo-testing/README.md deleted file mode 100644 index 15d9bb77c9..0000000000 --- a/sub-packages/bionemo-testing/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# bionemo-testing - -> **DEPRECATED**: This package is deprecated and no longer maintained. The BioNeMo framework has moved to -> self-contained recipes in `bionemo-recipes/`. This package will be removed in a future release. - -A package of test-time requirements and utilities for bionemo sub-packages. In particular, the `bionemo-testing` package -handles downloading and caching data and other assets for running unit tests and example notebooks. For more information -on test data handling, see [BioNeMo test data management](https://github.com/NVIDIA/bionemo-framework/blob/main/sub-packages/bionemo-testing/src/bionemo/testing/data/README.md) diff --git a/sub-packages/bionemo-testing/VERSION b/sub-packages/bionemo-testing/VERSION deleted file mode 100644 index 005119baaa..0000000000 --- a/sub-packages/bionemo-testing/VERSION +++ /dev/null @@ -1 +0,0 @@ -2.4.1 diff --git a/sub-packages/bionemo-testing/pyproject.toml b/sub-packages/bionemo-testing/pyproject.toml deleted file mode 100644 index 2766758e30..0000000000 --- a/sub-packages/bionemo-testing/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["setuptools>=64", "wheel"] -build-backend = "setuptools.build_meta" - -[project] -name = "bionemo-testing" -readme = "README.md" -description = "[DEPRECATED] Utilities aiding test creation for BioNeMo sub-packages. No longer maintained." -authors = [{ name = "BioNeMo Team", email = "bionemofeedback@nvidia.com" }] -requires-python = ">=3.10" -license = { file = "LICENSE" } -dynamic = ["version"] -dependencies = [ - # bionemo sub-packages - 'bionemo-core', - 'bionemo-llm>=2.4.5', - # external - 'email-validator', - 'pytest', - 'overrides', -] - -[tool.setuptools.packages.find] -where = ["src"] -include = ["bionemo.*"] -namespaces = true -exclude = ["test*."] - -[tool.setuptools.dynamic] -version = { file = "VERSION" } - -[tool.uv] -cache-keys = [{ git = true }] diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py b/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py deleted file mode 100644 index ba74fdfbd0..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings - -warnings.warn( - "bionemo.testing (sub-packages/bionemo-testing) is deprecated and will be removed in a future release. " - "This package is no longer maintained.", - DeprecationWarning, - stacklevel=2, -) diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/assert_optimizer_grads_match.py b/sub-packages/bionemo-testing/src/bionemo/testing/assert_optimizer_grads_match.py deleted file mode 100644 index 29d6b39fb3..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/assert_optimizer_grads_match.py +++ /dev/null @@ -1,363 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -from argparse import ArgumentParser -from pathlib import Path -from typing import Dict, Iterable, Optional, Tuple, Union - -import torch -from torch.distributed.checkpoint.filesystem import FileSystemReader -from torch.distributed.checkpoint.state_dict_loader import load - - -TensorLike = Union[torch.Tensor, Iterable[torch.Tensor]] - - -def _as_iter(x: TensorLike): - return x if (isinstance(x, Iterable) and not isinstance(x, torch.Tensor)) else [x] - - -def _fro_norm(x: TensorLike) -> torch.Tensor: - """Frobenius norm; supports sharded tensors (sum of shard ||·||_F^2).""" - it = list(_as_iter(x)) # Convert to list to avoid iterator consumption issues - if not it: - return torch.tensor(0.0, device="cpu") - s = torch.tensor(0.0, device=it[0].device) - for t in it: - s = s + t.float().pow(2).sum() - return torch.sqrt(s) - - -def machine_epsilon_for_dtype(dtype: torch.dtype) -> float: - """Return machine epsilon for dtype. For FP8, use BF16 epsilon per paper.""" - # Standard types - if dtype in (torch.float32, torch.float16, torch.bfloat16): - return float(torch.finfo(dtype).eps) - # FP8 recipes: accum/store typically BF16/FP32; use BF16 epsilon - if hasattr(torch, "float8_e4m3fn") and dtype in ( - torch.float8_e4m3fn, - getattr(torch, "float8_e5m2fn", None), - ): - return float(torch.finfo(torch.bfloat16).eps) - # Fallback - return float(torch.finfo(torch.float32).eps) - - -def relative_grad_diff(g_hat: TensorLike, g_ref: TensorLike, eps_den: float = 1e-30) -> float: - """Relative difference ||g_hat - g_ref||_F / ||g_ref||_F. - - Accepts a single tensor or an iterable of shards for each argument. - """ - # Convert to lists to avoid iterator consumption issues - gh_list = list(_as_iter(g_hat)) - gr_list = list(_as_iter(g_ref)) - - if len(gh_list) != len(gr_list): - raise ValueError(f"Shard count mismatch: {len(gh_list)} vs {len(gr_list)}") - - if not gh_list: - return 0.0 - - num_sq = torch.tensor(0.0, device=gh_list[0].device) - for a, b in zip(gh_list, gr_list): - num_sq = num_sq + (a.float() - b.float()).pow(2).sum() - num = torch.sqrt(num_sq) - den = _fro_norm(g_ref) - return float(num / (den + eps_den)) - - -def expected_rel_bound( - l: int, - *, - L: int = 32, - C: float = 1.03, - dtype: Optional[torch.dtype] = torch.bfloat16, - k: float = 4.0, -) -> float: - """Bound ~ k * (C ** (L + 1 - l)) * eps_mch, with 1-based layer index l. - - - L is hard-coded default to 32 per your request. - - C is 'close to 1'; 1.01-1.05 are reasonable defaults. - - k absorbs the hidden constant in big-O; 2-8 are common choices. - - dtype controls eps_mch; for FP8 use BF16 epsilon (see https://www.arxiv.org/pdf/2506.09280 theorem 5.3). - """ - eps_mch = machine_epsilon_for_dtype(dtype or torch.bfloat16) - depth = L + 1 - l # 1-based depth from the top (as in the theorem) - depth = max(depth, 0) - return float(k * (C**depth) * eps_mch) - - -def check_gradient( - g_hat: TensorLike, - g_ref: TensorLike, - l: int, - *, - L: int = 32, - C: float = 1.03, - dtype: Optional[torch.dtype] = None, - k: float = 4.0, -) -> Tuple[float, float, bool]: - """Compute (rel_error, bound, ok) for layer l. - - - If dtype is None, infer from g_ref (or g_hat if needed). - # See https://www.arxiv.org/pdf/2506.09280 theorem 5.3 - """ - # Infer dtype if not provided - if dtype is None: - gr_list = list(_as_iter(g_ref)) - if gr_list: - dtype = gr_list[0].dtype - else: - dtype = torch.bfloat16 # fallback - rel = relative_grad_diff(g_hat, g_ref) - bnd = expected_rel_bound(l, L=L, C=C, dtype=dtype, k=k) - return rel, bnd, (rel <= bnd) - - -def _filter_optimizer_tensors(plain_tensors: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: - """Return only optimizer-related tensors from a flat checkpoint tensor dict.""" - return {k: v for k, v in plain_tensors.items() if k.startswith("optimizer.") and ".exp_avg." in k} - - -def assert_grads_close(left: torch.Tensor, right: torch.Tensor): - """Assert that two gradient tensors are close using theorem 5.3 of https://www.arxiv.org/pdf/2506.09280.""" - # Implement theorem 5.3 of https://www.arxiv.org/pdf/2506.09280 - - # This is the real test: - rel, bnd, ok = check_gradient( - left, right, l=0, dtype=torch.bfloat16 - ) # hard code to layer 0 since that's the most permissive - - # If the real test above fails, run an assert close for the useful diagnostics and raise either way. - if not ok: - rel_shuff, _, ok_shuff = check_gradient(left, torch.roll(right, shifts=-1, dims=-1), l=0, dtype=torch.bfloat16) - - try: - torch.testing.assert_close(left, right) - msg = ( - "AssertionError on relative norm magnitude " - f"(rel={rel}, bnd={bnd}, ok={ok}, rel_shuff={rel_shuff}, ok_shuff={ok_shuff}) " - "but torch.testing.assert_close(left, right) passes. \n" - f"Left: {left.shape}/{left.dtype} {left}\n" - f"Right: {right.shape}/{right.dtype} {right}" - ) - except AssertionError as e: - msg = ( - "AssertionError on relative norm magnitude " - f"(rel={rel}, bnd={bnd}, ok={ok}, rel_shuff={rel_shuff}, ok_shuff={ok_shuff}): {e}\n" - f"Left: {left.shape}/{left.dtype} {left}\n" - f"Right: {right.shape}/{right.dtype} {right}" - ) - raise AssertionError(msg) - - -def unshard_row_parallel_state(saved_state, out_features, in_features, tp): - """Unshard row-parallel state tensor from sharded format to full format. - - saved_state: [..., tp, out_features * (in_features // tp)] - """ - prefix = saved_state.shape[:-2] - per = in_features // tp - x = saved_state.view(*prefix, tp, out_features, per) # [..., tp, O, I_shard] - x = x.permute(*range(len(prefix)), -2, -3, -1) # [..., O, tp, I_shard] - x = x.reshape(*prefix, out_features, in_features) # [..., O, I] - return x - - -def _assert_optimizer_tensors_equal( - left: Dict[str, torch.Tensor], - right: Dict[str, torch.Tensor], - left_empty: Dict[str, torch.Tensor], - right_empty: Dict[str, torch.Tensor], - eps=1e-4, -): - left_keys = set(left.keys()) - right_keys = set(right.keys()) - - only_left = sorted(left_keys - right_keys) - only_right = sorted(right_keys - left_keys) - assert not only_left and not only_right, ( - f"Optimizer tensor keys mismatch.\nOnly in left: {only_left}\nOnly in right: {only_right}" - ) - some_non_zero = False - assertions = [] - for key in sorted(left_keys): - lt, rt = left[key], right[key] - # rt_colpar, rt_rowpar = None, None - if lt.shape != rt.shape: - # "Tensor shape mismatch for {key}: {lt.shape} vs {rt.shape}, trying simple reshape - original_key = key.replace("optimizer.state.exp_avg.", "") - # Unsharded shape - # {'decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 3072, 4096]), 'optimizer.state.exp_avg.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 1, 12582912]), 'optimizer.state.exp_avg_sq.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 1, 12582912]), 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 1, 12582912])} - # Sharded shape - # {'decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 3072, 4096]), 'optimizer.state.exp_avg.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 2, 6291456]), 'optimizer.state.exp_avg_sq.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 2, 6291456]), 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight': torch.Size([32, 1, 2, 6291456])} - left_shape = left_empty[original_key].shape - right_shape = right_empty[original_key].shape - skip_tp_check = False - - if left_shape != right_shape: - if "embedding.word_embeddings.weight" in key or ".output_layer.weight" in key: - # First handle different padding on the input/output dimensions. - lt = lt.reshape(left_shape) - rt = rt.reshape(right_shape) - min_dim = min(left_shape[0], right_shape[0]) - lt = lt[:min_dim, ...] - rt = rt[:min_dim, ...] - left_shape = lt.shape - right_shape = rt.shape - skip_tp_check = True - else: - raise AssertionError(f"Tensor shape mismatch for {key}: {left_shape} vs {right_shape}") - # problem: we do not know the TP axis for this tensor. We can guess though. - if len(left_shape) == 3 and not skip_tp_check: - # TP axis is 1 - lt = lt.reshape(1, left_shape[0], left_shape[1], left_shape[2]) - elif len(left_shape) == 2 and not skip_tp_check: - # TP axis is 2 - lt = lt.reshape(1, left_shape[0], left_shape[1]) - - if ( - key.endswith("mlp.linear_fc2.weight") - or key.endswith("self_attention.linear_proj.weight") - or key.endswith("mixer.dense.weight") - ) and not skip_tp_check: - # Handle row parallel linear layers. - # TODO come up with a better way to determine row parallel linear layers. - rt = unshard_row_parallel_state( - rt, out_features=left_shape[-2], in_features=left_shape[-1], tp=rt.shape[-2] - ) - else: - try: - rt = rt.reshape(lt.shape) - except Exception as e: - msg = f"Tensor shape mismatch for {key}: {lt.shape} vs {rt.shape}, simple reshape failed: {e}" - if "embedding.word_embeddings.weight" in key or ".output_layer.weight" in key: - print( - f"FIXME: Skipping {key} because it's a word embedding or output layer," - "and something about padding changes under TP." - ) - continue - raise AssertionError(msg) - - assert lt.shape == rt.shape and lt.dtype == rt.dtype, ( - f"Tensor meta mismatch for {key}: {lt.shape}/{lt.dtype} vs {rt.shape}/{rt.dtype}" - ) - # Reduce the rate of 0 vs near 0 rtol failures by adding a small epsilon - left_scale = torch.max(torch.abs(lt)) - right_scale = torch.max(torch.abs(rt)) - if left_scale <= eps and right_scale <= eps: - print( - f"WARNING: zero-ish scale tensors ({left_scale=} vs {right_scale=}) " - f"so they will trivially pass comparing {key=}" - ) - else: - some_non_zero = True - try: - assert_grads_close(lt, rt) - print(f"Optimizer tensors match for {key}") - except AssertionError as e: - assertions.append(AssertionError(f"AssertionError for {key}: {e}")) - assert not assertions, f"Assertion Errors found comparing keys: {assertions}" - assert some_non_zero, "No non-zero tensors found in this comparison" - - -def load_dist_checkpoint_pt( - ckpt_dir, - metadata_ckpt_dir=None, - pattern=r"optimizer", - device="cpu", - return_full_empty: bool = False, -): - """Return {full_key: tensor} for every tensor whose key matches *pattern*.""" - meta_ckpt_dir = Path(metadata_ckpt_dir or ckpt_dir) - meta_reader = FileSystemReader(str(meta_ckpt_dir)) - - # --- fast metadata pass (no tensor data yet) ----------------------------- - meta = meta_reader.read_metadata() # tiny JSON read - tmeta = meta.state_dict_metadata # key ➜ TensorMetadata - if return_full_empty: - wanted = [k for k in tmeta if hasattr(tmeta[k], "size")] - else: - wanted = [k for k in tmeta if re.search(pattern, k) and hasattr(tmeta[k], "size")] - if not wanted: - raise ValueError(f"No keys matching /{pattern}/ in {ckpt_dir}") - - # --- build "empty" placeholders ----------------------------------------- - placeholders = { - k: torch.empty(tuple(tmeta[k].size), dtype=tmeta[k].properties.dtype, device=device) for k in wanted - } - if return_full_empty: - return placeholders - # --- stream just those tensors (no process-group needed) ----------------- - data_reader = FileSystemReader(str(ckpt_dir)) - - load( - state_dict=placeholders, - storage_reader=data_reader, - no_dist=True, # switches off all collectives - ) - return placeholders # dict[str, Tensor] - - -def assert_optimizer_states_match(checkpoint_dirs): - """Compare optimizer state across provided torch_dist checkpoints. - - - Keys: ensure the set of optimizer tensor keys match across checkpoints - - Values: ensure corresponding tensors are equal (allclose) - - Structure (non-tensor common state): ensure common optimizer structures match - """ - assert len(checkpoint_dirs) > 1, "This test requires 2 or more checkpoints [ ...]." - - base_dir = checkpoint_dirs[0] - - # Compare optimizer tensors - base_plain = load_dist_checkpoint_pt(base_dir) - base_empty = load_dist_checkpoint_pt(base_dir, return_full_empty=True, device="meta") - base_opt_tensors = _filter_optimizer_tensors(base_plain) - assert base_opt_tensors, f"No optimizer tensors found in checkpoint: {base_dir}" - assertions = [] - for other_dir in checkpoint_dirs[1:]: - try: - other_plain = load_dist_checkpoint_pt(other_dir) - other_empty = load_dist_checkpoint_pt(other_dir, return_full_empty=True, device="meta") - other_opt_tensors = _filter_optimizer_tensors(other_plain) - assert other_opt_tensors, f"No optimizer tensors found in checkpoint: {other_dir}" - _assert_optimizer_tensors_equal(base_opt_tensors, other_opt_tensors, base_empty, other_empty) - print(f"Optimizer tensors match for {base_dir} and {other_dir}") - del other_plain - del other_opt_tensors - except AssertionError as e: - msg = f"AssertionError comparing {base_dir} to {other_dir}:\n{e}" - print(f"Optimizer tensors mismatch for {base_dir} and {other_dir}:\n{msg}") - assertions.append(AssertionError(msg)) - assert not assertions, f"AssertionErrors comparing {checkpoint_dirs}:\n{assertions}" - - -def main(): - """Main entry point for comparing optimizer states across multiple checkpoints.""" - parser = ArgumentParser( - description="Given checkpoints saved with adam b1,b2=0 trained for one step, " - "we can check that the gradients match under different training configurations. " - "Currently this test script has some hard-coded assumptions for GPT style models, " - "namely which layers are RowParallel and require different unsharding logic." - ) - parser.add_argument("checkpoints", nargs="+", type=Path, help="Path to the checkpoints to compare") - args = parser.parse_args() - assert_optimizer_states_match(args.checkpoints) - - -if __name__ == "__main__": - main() diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/callbacks.py b/sub-packages/bionemo-testing/src/bionemo/testing/callbacks.py deleted file mode 100644 index ed49eeb498..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/callbacks.py +++ /dev/null @@ -1,63 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from collections import defaultdict -from typing import Dict, List - -import torch -from lightning.pytorch import Callback - - -class MetricTracker(Callback): # noqa: D101 - def __init__(self, metrics_to_track_val: List[str], metrics_to_track_train: List[str]): # noqa: D107 - self.metrics_to_track_val = metrics_to_track_val - self.metrics_to_track_train = metrics_to_track_train - self._collection_val = defaultdict(list) - self._collection_train = defaultdict(list) - - def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0): # noqa: D102 - if isinstance(outputs, torch.Tensor): - self._collection_val["unnamed"].append(outputs) - else: - for metric in self.metrics_to_track_val: - self._collection_val[metric].append(outputs[metric]) - - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0): # noqa: D102 - if isinstance(outputs, torch.Tensor): - self._collection_train["unnamed"].append(outputs) - else: - for metric in self.metrics_to_track_train: - self._collection_train[metric].append(outputs[metric]) - - def on_validation_epoch_end(self, trainer, pl_module): # noqa: D102 - elogs = trainer.logged_metrics # access it here - self._collection_val["logged_metrics"].extend(elogs) - - def on_train_epoch_end(self, trainer, pl_module): # noqa: D102 - elogs = trainer.logged_metrics # access it here - self._collection_train["logged_metrics"].extend(elogs) - - @property - def collection_val(self) -> Dict[str, torch.Tensor | List[str]]: # noqa: D102 - res = {k: torch.tensor(v) for k, v in self._collection_val.items() if k != "logged_metrics"} - res["logged_metrics"] = self._collection_val["logged_metrics"] - return res - - @property - def collection_train(self) -> Dict[str, torch.Tensor | str]: # noqa: D102 - res = {k: torch.tensor(v) for k, v in self._collection_train.items() if k != "logged_metrics"} - res["logged_metrics"] = self._collection_train["logged_metrics"] - return res diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/data/__init__.py b/sub-packages/bionemo-testing/src/bionemo/testing/data/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/data/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/data/esm2.py b/sub-packages/bionemo-testing/src/bionemo/testing/data/esm2.py deleted file mode 100644 index 2f69e7c75f..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/data/esm2.py +++ /dev/null @@ -1,69 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import sqlite3 - -import pandas as pd - - -def create_mock_protein_dataset(tmp_path): - """Create a mock protein dataset.""" - db_file = tmp_path / "protein_dataset.db" - conn = sqlite3.connect(str(db_file)) - cursor = conn.cursor() - - cursor.execute( - """ - CREATE TABLE protein ( - id TEXT PRIMARY KEY, - sequence TEXT - ) - """ - ) - - proteins = [ - ("UniRef90_A", "ACDEFGHIKLMNPQRSTVWY"), - ("UniRef90_B", "DEFGHIKLMNPQRSTVWYAC"), - ("UniRef90_C", "MGHIKLMNPQRSTVWYACDE"), - ("UniRef50_A", "MKTVRQERLKSIVRI"), - ("UniRef50_B", "MRILERSKEPVSGAQLA"), - ] - cursor.executemany("INSERT INTO protein VALUES (?, ?)", proteins) - - conn.commit() - conn.close() - - return db_file - - -def create_mock_parquet_train_val_inputs(tmp_path): - """Create a mock protein train and val cluster parquet.""" - train_cluster_path = tmp_path / "train_clusters.parquet" - train_clusters = pd.DataFrame( - { - "ur90_id": [["UniRef90_A"], ["UniRef90_B", "UniRef90_C"]], - } - ) - train_clusters.to_parquet(train_cluster_path) - - valid_cluster_path = tmp_path / "valid_clusters.parquet" - valid_clusters = pd.DataFrame( - { - "ur50_id": ["UniRef50_A", "UniRef50_B", "UniRef90_A", "UniRef90_B"], - } - ) - valid_clusters.to_parquet(valid_cluster_path) - return train_cluster_path, valid_cluster_path diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/data/fasta.py b/sub-packages/bionemo-testing/src/bionemo/testing/data/fasta.py deleted file mode 100644 index 7285edb239..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/data/fasta.py +++ /dev/null @@ -1,63 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from pathlib import Path - - -ALU_SEQUENCE: str = ( - "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACGAGGTC" - "aggagatcgagaccatcctggctaacacggtgaaaccccgtctctactaaaaatacaaaaaattagccgggc" - "GTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATGGCGTGAACCCGGGAGGCG" - "GAGCTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA" -) - - -def create_fasta_file( - fasta_file_path: Path, - num_sequences: int, - sequence_length: int | None = None, - sequence_lengths: list[int] | None = None, - repeating_dna_pattern: str = ALU_SEQUENCE, - max_line_length: int = 80, -) -> Path: - """Creates a fasta file with the given number of sequences, sequence length, and repeating dna pattern. Each contig uses a shifted version of the repeating pattern.""" - assert sequence_length is not None or sequence_lengths is not None - with open(fasta_file_path, "w") as f: - if sequence_lengths is not None: - assert len(sequence_lengths) == num_sequences - else: - assert sequence_length is not None - sequence_lengths: list[int] = [sequence_length] * num_sequences - for i in range(num_sequences): - # get the repeating pattern shifted by i for this contig - repeat_pattern_for_contig = repeating_dna_pattern[i:] + repeating_dna_pattern[:i] - # repeat the pattern enough times to reach the desired sequence length - if sequence_lengths[i] <= len(repeat_pattern_for_contig): - contig_output = repeat_pattern_for_contig[: sequence_lengths[i]] - else: - # Calculate how many complete repeats we need - num_repeats = sequence_lengths[i] // len(repeat_pattern_for_contig) - remainder = sequence_lengths[i] % len(repeat_pattern_for_contig) - contig_output = repeat_pattern_for_contig * num_repeats + repeat_pattern_for_contig[:remainder] - # verify the length of the contig is as expected - assert len(contig_output) == sequence_lengths[i] - # Fold the contig output into lines of max_line_length - contig_output = "\n".join( - contig_output[i : i + max_line_length] for i in range(0, sequence_lengths[i], max_line_length) - ) - # write to the fasta file with the actual contig_output, not the repeating pattern - f.write(f">contig_{i}\n{contig_output}\n") - return fasta_file_path diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/data/load.py b/sub-packages/bionemo-testing/src/bionemo/testing/data/load.py deleted file mode 100644 index 5410485978..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/data/load.py +++ /dev/null @@ -1,31 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Sequence - -from bionemo.core.data.load import default_ngc_client, default_pbss_client, entrypoint, load - - -_ = entrypoint -# This needs to be around so that ruff doesn't automatically remove it as it's unused. -# We don't want to include it in __all__. -# But older installations __may__ be using the old CLI path (bionemo.core.data.load:entrypoint) -# so this is here for backwards compatability. - - -__all__: Sequence[str] = ( - "default_ngc_client", - "default_pbss_client", - "load", -) diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/data/resource.py b/sub-packages/bionemo-testing/src/bionemo/testing/data/resource.py deleted file mode 100644 index 677f6f49e0..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/data/resource.py +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Sequence - -from bionemo.core.data.resource import Resource, get_all_resources - - -__all__: Sequence[str] = ( - "Resource", - "get_all_resources", -) diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/__init__.py b/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/__init__.py deleted file mode 100644 index 25e6abfbc5..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/mode.py b/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/mode.py deleted file mode 100644 index 040e2e5391..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/mode.py +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from enum import Enum, auto - - -class Mode(Enum): - """Mode for stop-go testing.""" - - STOP = auto() - RESUME = auto() - CONTINUOUS = auto() diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/stop_and_go.py b/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/stop_and_go.py deleted file mode 100644 index 77df8162ea..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/harnesses/stop_and_go.py +++ /dev/null @@ -1,369 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pathlib -import tempfile -from abc import ABC, abstractmethod -from typing import Dict, Literal, Sequence, Type, TypeVar - -import lightning.pytorch as pl -import nemo.lightning as nl -import pytest -from nemo.collections import llm -from nemo.lightning import resume -from nemo.lightning.nemo_logger import NeMoLogger -from nemo.lightning.pytorch import callbacks as nl_callbacks -from nemo.lightning.pytorch.strategies import MegatronStrategy -from nemo.utils import logging - -from bionemo.testing import testing_callbacks -from bionemo.testing.harnesses.mode import Mode -from bionemo.testing.megatron_parallel_state_utils import distributed_model_parallel_state -from bionemo.testing.torch import recursive_assert_approx_equal - - -__all__: Sequence[str] = ("CallbackDict", "StopAndGoHarness", "get_callback") - - -Callback = TypeVar("Callback", bound=pl.Callback) -CallbackDict = Dict[Mode, Dict[Type[pl.Callback], pl.Callback]] - - -def get_callback(callbacks: CallbackDict, mode: Mode, callback_type: Type[Callback]) -> Callback: - """Returns the callback with the given name and mode. - - Convenience function to make type hinting easier. - - Args: - callbacks: The dictionary of callbacks. - mode: The mode indicating whether to stop or go. - callback_type: The type of the callback. - - Returns: - pl.Callback: The callback with the given name and mode. - """ - return callbacks[mode][callback_type] # type: ignore - - -class StopAndGoHarness(ABC): - """Abstract base class for testing consistency between interrupted and continuous training. - - Users should override cls.setup_model and update cls.setup_class to customize the downstream test cases. Metadata - are collected through callbacks and users can add new unit tests by comparing the metadata for the interrupted and - continuous cases. - - By default, learning rate, global step, optimizer state, consumed samples, input and output tensors, and loss are - compared. Users can add additional metrics by adding new callbacks to `cls.callbacks` and associated test functions. - - Stop and go tests act as follows: - - setup a clean model for a brief training run, set callbacks to track. - - interrupt training via the StopAndGoException in the callback Raise. - - train the model resumed from the checkpoint with the same set of callbacks. - - train the model continuously without interruption with a new set of the same callbacks. - - compare each pair of interrupted and continuous callbacks to check for equality. - - Considerations when implementing this class: - - The derived test name should start with `Test`, and test methods should start with `test_` to enable pytest - discovery. - - devices, pipeline_model_parallel, and tensor_model_parallel may impact the setup of DataModule. Certain - datasets expect a known global batch size, which depends on the number of devices and conditional tensor - model parallel/ pipeline model parallel settings. By default, we are testing only on single device without - parallelism. - - 'mode' is useful in some cases, but not in all cases. Implement conditions based on these when useful. As an - example, it may be useful to implement a test that stops and resumes. - - changing callbacks to test metadata integrity (core feature of stop-and-go tests). - - changing the model construction to use different hyperparameters. - - ... etc - Each of the above tests cases may be useful for automated testing of various expected behavior. - - stop(), resume(), continuous() or collectively run_stop_and_go() are provided methods which execute the actual - tests, leveraging the conditions in the various setup methods, respecting 'mode' where necessary. - - Attributes: - root_dir: The root directory. - val_check_interval: The validation check interval. Stored as an attribute to ensure consistency. - exp_name: The experiment name. - extra_metrics_dict: A dictionary of metrics and their corresponding functions. - - See Also: bionemo.testing.callbacks. - """ - - # class variables that need to be overridden - num_steps: int - val_check_interval: int - limit_val_batches: int - lr: float = 1e-4 - precision: Literal["16-mixed", "bf16-mixed", "32"] - output_tensor_atol: float = 1e-3 # Absolute tolerance for model precision between output tensors. - output_tensor_rtol: float = 1e-4 # Relative tolerance for model precision between output tensors. - - # class variables that will be setup in setUpClass - tempdir: tempfile.TemporaryDirectory - metadata_dir: pathlib.Path - exp_name: str - callbacks: CallbackDict - nemo_logger: NeMoLogger - - @classmethod - def setup_class(cls) -> None: - """Sets up the class by creating a temporary directory, metadata_dir, exp_name and callbacks.""" - cls.tempdir = tempfile.TemporaryDirectory() - cls.metadata_dir = pathlib.Path(cls.tempdir.name) / "metadata" - cls.exp_name = cls.__name__ - - cls.callbacks = cls.get_default_callbacks() - - cls.nemo_logger = NeMoLogger( - log_dir=cls.tempdir.name, - name=cls.exp_name, - use_datetime_version=False, - version=None, - tensorboard=None, - wandb=None, - ckpt=None, - ) - - @classmethod - def teardown_class(cls) -> None: - """Tears down the class by cleaning up the temporary directory.""" - cls.tempdir.cleanup() - - @classmethod - @abstractmethod - def setup_model(cls, mode: Mode) -> tuple[pl.LightningModule, pl.LightningDataModule, nl.MegatronOptimizerModule]: - """Constructs the model, data, and optimizer for the test harness. - - Optionally supports separate code paths for 'stop'/'resume'/'continuous', although implementors are encouraged - to use the same code path for both. - - Args: - mode: The mode indicating whether to stop or go. - - Returns: - tuple: A tuple containing the model, data, and optimizer. - """ - raise NotImplementedError() - - @classmethod - def setup_trainer( - cls, - mode: Mode, - ) -> nl.Trainer: - """Setup trainer by passing stop, resume, or continuous callbacks according to mode. - - Args: - mode (Mode): The mode indicating whether to stop, resume, or train continuously. - - Returns: - (nl.Trainer): NeMo Lightning trainer object. - """ - strategy = MegatronStrategy( - ddp="megatron", - find_unused_parameters=True, - ckpt_include_optimizer=True, - ckpt_async_save=False, - ) - - trainer = nl.Trainer( - devices=1, - max_steps=cls.num_steps, - accelerator="gpu", - strategy=strategy, - limit_val_batches=cls.limit_val_batches, - val_check_interval=cls.val_check_interval, - log_every_n_steps=cls.val_check_interval, - num_nodes=1, - callbacks=list(cls.callbacks[mode].values()), - plugins=nl.MegatronMixedPrecision(precision=cls.precision), - ) - return trainer - - @classmethod - def get_default_callbacks(cls) -> CallbackDict: - """Returns a list of callbacks based on the specified mode. Base implementation provides reasonable defaults. - - To extend this method, call the super and append to the callbacks, depending on which mode you are in: - - ```python - callbacks = super().get_callbacks() - callbacks[mode]["MyCustomCallback"] = MyCustomCallback() - return callbacks - ``` - - Returns: - A dictionary of callbacks based on the specified mode, each of which maps a callback name to a callback - object. - """ - callbacks: CallbackDict = {} - - def make_callbacks() -> Dict[Type[pl.Callback], pl.Callback]: - return { - testing_callbacks.LearningRateCallback: testing_callbacks.LearningRateCallback(), - testing_callbacks.GlobalStepStateCallback: testing_callbacks.GlobalStepStateCallback(), - testing_callbacks.ConsumedSamplesCallback: testing_callbacks.ConsumedSamplesCallback(), - testing_callbacks.OptimizerStateCallback: testing_callbacks.OptimizerStateCallback(), - testing_callbacks.TrainInputCallback: testing_callbacks.TrainInputCallback(), - testing_callbacks.TrainOutputCallback: testing_callbacks.TrainOutputCallback(), - testing_callbacks.TrainLossCallback: testing_callbacks.TrainLossCallback(), - testing_callbacks.ValidInputCallback: testing_callbacks.ValidInputCallback(), - testing_callbacks.ValidOutputCallback: testing_callbacks.ValidOutputCallback(), - testing_callbacks.ValidLossCallback: testing_callbacks.ValidLossCallback(), - } - - interrupted_callbacks = make_callbacks() - callbacks[Mode.CONTINUOUS] = make_callbacks() - - for mode in [Mode.STOP, Mode.RESUME]: - consumed_samples_cls = testing_callbacks.TrainValInitConsumedSamplesStopAndGoCallback - callbacks[mode] = { - consumed_samples_cls: consumed_samples_cls(mode=mode), - **interrupted_callbacks, - } - - callbacks[Mode.STOP].update( - { - testing_callbacks.StopAfterValidEpochEndCallback: testing_callbacks.StopAfterValidEpochEndCallback(), - nl_callbacks.ModelCheckpoint: nl_callbacks.ModelCheckpoint( - save_last=True, - monitor="val_loss", - save_top_k=2, - always_save_context=True, - filename="{epoch}-{step}-{val_loss:.2f}", - ), - } - ) - - return callbacks - - # stop() and resume() are provided methods and run the requisite methods with the appropriate mode. - @classmethod - def stop(cls) -> None: - """Runs pre-training and 'stops' after the first checkpoint is saved. - - This method sets up the model, data, and optimizer for the Mode.STOP mode. - It then sets up the trainer and strategy for the Mode.STOP mode with the given metrics. - The training process is executed using the `llm.train` function, passing the model, data, trainer, logger, optimizer, and resume options. - If a `testing_callbacks.StopAndGoException` is raised during training, it is caught and no action is taken. - - Raises: - testing_callbacks.StopAndGoException: If a stop and go exception occurs during training. - """ - logging.info("Running stop()...") - - model, data, opt = cls.setup_model(mode=Mode.STOP) - trainer = cls.setup_trainer(Mode.STOP) - with distributed_model_parallel_state(): - llm.train( - model=model, - data=data, - trainer=trainer, - log=cls.nemo_logger, - optim=opt, - resume=resume.AutoResume( - resume_if_exists=False, # Looks for the -last checkpoint to continue training. - resume_ignore_no_checkpoint=True, # When false this will throw an error with no existing checkpoint. - ), - ) - - @classmethod - def resume(cls) -> None: - """Resumes the model from the checkpoint saved at the end of `stop()` and verifies the metadata integrity.""" - logging.info("Running resume()...") - - model, data, opt = cls.setup_model(mode=Mode.RESUME) - trainer = cls.setup_trainer(Mode.RESUME) - with distributed_model_parallel_state(): - llm.train( - model=model, - data=data, - trainer=trainer, - log=cls.nemo_logger, - optim=opt, - resume=resume.AutoResume( - resume_if_exists=True, # Looks for the -last checkpoint to continue training. - resume_ignore_no_checkpoint=False, # When false this will throw an error with no existing checkpoint. - ), - ) - - @classmethod - def continuous(cls) -> None: - """Trains the model in one continuous path without stopping.""" - logging.info("Running continuous()...") - - model, data, opt = cls.setup_model(mode=Mode.CONTINUOUS) - trainer = cls.setup_trainer(Mode.CONTINUOUS) - with distributed_model_parallel_state(): - llm.train(model=model, data=data, trainer=trainer, log=cls.nemo_logger, optim=opt) - - @classmethod - def run_stop_and_go(cls): - """Executes training both continuously and with a checkpoint interruption.""" - # Interrupted model training - cls.stop() - cls.resume() - - # Cleanup and reinitialize the temporary directory so we don't conflict with a previous checkpoint. - cls.tempdir.cleanup() - cls.tempdir = tempfile.TemporaryDirectory() - - # Continuous model training. - cls.continuous() - - @pytest.mark.parametrize( - "callback_type", - [ - testing_callbacks.LearningRateCallback, - testing_callbacks.GlobalStepStateCallback, - testing_callbacks.ConsumedSamplesCallback, - testing_callbacks.OptimizerStateCallback, - testing_callbacks.TrainInputCallback, - testing_callbacks.TrainOutputCallback, - testing_callbacks.TrainLossCallback, - testing_callbacks.ValidInputCallback, - testing_callbacks.ValidOutputCallback, - testing_callbacks.ValidLossCallback, - ], - ) - def test_stop_and_go_consistency(self, callback_type): - """Tests the consistency of the callback data between the interrupted and continuous checks.""" - interrupted_callback = get_callback(self.callbacks, Mode.RESUME, callback_type) - continuous_callback = get_callback(self.callbacks, Mode.CONTINUOUS, callback_type) - assert interrupted_callback.data, f"No data found for {callback_type}" - - if callback_type in {testing_callbacks.TrainOutputCallback, testing_callbacks.ValidOutputCallback}: - atol, rtol = self.output_tensor_atol, self.output_tensor_rtol - else: - atol, rtol = 1e-4, 1e-4 - - recursive_assert_approx_equal( - interrupted_callback.data, - continuous_callback.data, - atol=atol, - rtol=rtol, - ) - - def test_train_val_init_consumed_samples(self): - """Tests the initial consumed samples in stop-and-go scenario.""" - train_consumed_stop, val_consumed_stop = get_callback( - self.callbacks, Mode.STOP, testing_callbacks.TrainValInitConsumedSamplesStopAndGoCallback - ).data - train_consumed_go, val_consumed_go = get_callback( - self.callbacks, Mode.RESUME, testing_callbacks.TrainValInitConsumedSamplesStopAndGoCallback - ).data - - assert val_consumed_stop == 0 - assert val_consumed_go == 0 - assert train_consumed_stop == 0 - assert train_consumed_go > 0 diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/lightning.py b/sub-packages/bionemo-testing/src/bionemo/testing/lightning.py deleted file mode 100644 index 084f1ef8a6..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/lightning.py +++ /dev/null @@ -1,66 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -from typing import Dict, List - -import torch - -from bionemo.llm.data.collate import MLM_LOSS_IGNORE_INDEX - - -def get_random_microbatch( - microbatch_size: int, - max_sequence_length: int, - vocab_size: int, - seed: int, - mask_index: int = MLM_LOSS_IGNORE_INDEX, -) -> Dict[str, Dict[str, torch.Tensor]]: - """Generate random microbatches for testing. - - Note that this follows the convention that token_logits are s,b, while other fields are b,s. - """ - generator = torch.Generator(device=torch.cuda.current_device()).manual_seed(seed) - labels = torch.randint( - low=0, - high=vocab_size, - size=(microbatch_size, max_sequence_length), - generator=generator, - device=torch.cuda.current_device(), - ) # [b s] - loss_mask = torch.randint( - low=1, - high=1 + 1, - size=(microbatch_size, max_sequence_length), - dtype=torch.long, - device=torch.cuda.current_device(), - generator=generator, - ) # [b s] - token_logits = torch.rand( - max_sequence_length, microbatch_size, vocab_size, device=torch.cuda.current_device(), generator=generator - ) # [s b v] - labels[loss_mask == 0] = mask_index # propagate masking to labels - microbatch_output = { - "batch": {"labels": labels, "loss_mask": loss_mask}, - "forward_out": {"token_logits": token_logits}, - } - return microbatch_output - - -def extract_global_steps_from_log(log_string: str) -> List[int]: - """Extract global steps from a Pytorch lightening log string.""" - pattern = r"\| global_step: (\d+) \|" - matches = re.findall(pattern, log_string) - return [int(step) for step in matches] diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/megatron_dataset_compatibility.py b/sub-packages/bionemo-testing/src/bionemo/testing/megatron_dataset_compatibility.py deleted file mode 100644 index 8d9a3e2e89..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/megatron_dataset_compatibility.py +++ /dev/null @@ -1,127 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Callable -from unittest.mock import patch - -import numpy as np -import pytest -import torch -import torch.utils.data - -from bionemo.core.data.multi_epoch_dataset import EpochIndex - - -Index = int | EpochIndex - -TensorLike = torch.Tensor | np.ndarray -TensorCollectionOrTensor = TensorLike | dict[str, TensorLike] - - -def assert_dict_tensors_approx_equal(actual: TensorCollectionOrTensor, expected: TensorCollectionOrTensor) -> None: - """Assert that two tensors are equal.""" - if isinstance(actual, dict) and isinstance(expected, dict): - a_keys, b_keys = actual.keys(), expected.keys() - assert a_keys == b_keys - for key in a_keys: - torch.testing.assert_close(actual=actual[key], expected=expected[key]) - else: - torch.testing.assert_close(actual=actual, expected=expected) - - -class DatasetLocallyNondeterministic(AssertionError): - """Datasets are not locally deterministic.""" - - -class DatasetDistributedNondeterministic(AssertionError): - """Datasets are not locally deterministic.""" - - -def assert_dataset_compatible_with_megatron( - dataset: torch.utils.data.Dataset[TensorCollectionOrTensor], - index: Index = 0, - assert_elements_equal: Callable[ - [TensorCollectionOrTensor, TensorCollectionOrTensor], None - ] = assert_dict_tensors_approx_equal, -): - """Make sure that a dataset passes some basic sanity checks for megatron determinism constraints. - - Constraints tested: - * dataset[i] returns the same element regardless of device - * dataset[i] doesn't make calls to known problematic randomization procedures (currently `torch.manual_seed`). - - As more constraints are discovered, they should be added to this test. - """ - # 1. Make sure the dataset is deterministic when you ask for the same elements. - n_elements = len(dataset) # type: ignore - assert n_elements > 0, "Need one element or more to test" - try: - assert_elements_equal(dataset[index], dataset[index]) - except AssertionError as e_0: - raise DatasetLocallyNondeterministic(e_0) - with ( - patch("torch.manual_seed") as mock_manual_seed, - patch("torch.cuda.manual_seed") as mock_cuda_manual_seed, - patch("torch.cuda.manual_seed_all") as mock_cuda_manual_seed_all, - ): - _ = dataset[index] - if mock_manual_seed.call_count > 0 or mock_cuda_manual_seed.call_count > 0 or mock_cuda_manual_seed_all.call_count: - raise DatasetDistributedNondeterministic( - "You cannot safely use torch.manual_seed in a cluster with model parallelism. Use torch.Generator directly." - " See https://github.com/NVIDIA/Megatron-LM/blob/dddecd19/megatron/core/tensor_parallel/random.py#L198-L199" - ) - - -def assert_dataset_elements_not_equal( - dataset: torch.utils.data.Dataset[TensorCollectionOrTensor], - index_a: Index = 0, - index_b: Index = 1, - assert_elements_equal: Callable[ - [TensorCollectionOrTensor, TensorCollectionOrTensor], None - ] = assert_dict_tensors_approx_equal, -): - """Test the case where two indices return different elements on datasets that employ randomness, like masking. - - NOTE: if you have a dataset without any kinds of randomness, just use the `assert_dataset_compatible_with_megatron` - test and skip this one. This test is for the case when you want to test that a dataset that applies a random - transform to your elements as a function of index actually does so with two different indices that map to the same - underlying object. This test also runs `assert_dataset_compatible_with_megatron` behind the scenes so if you - do this you do not need to also do the other. - - With epoch upsampling approaches, some underlying index, say index=0, will be called multiple times by some wrapping - dataset object. For example if you have a dataset of length 1, and you wrap it in an up-sampler that maps it to - length 2 by mapping index 0 to 0 and 1 to 0, then in that wrapper we apply randomness to the result and we expect - different masks to be used for each call, even though the underlying object is the same. Again this test only - applies to a dataset that employs randomness. Another approach some of our datasets take is to use a special index - that captures both the underlying index, and the epoch index. This tuple of indices is used internally to seed the - mask. If that kind of dataset is used, then index_a could be (epoch=0, idx=0) and index_b could be (epoch=1, idx=0), - for example. We expect those to return different random features. - - The idea for using this test effectively is to identify cases where you have two indices that return the same - underlying object, but where you expect different randomization to be applied to each by the dataset. - - Args: - dataset: dataset object with randomness (eg masking) to test. - index_a: index for some element. Defaults to 0. - index_b: index for a different element. Defaults to 1. - assert_elements_equal: Function to compare two returned batch elements. Defaults to - `assert_dict_tensors_approx_equal` which works for both tensors and dictionaries of tensors. - """ - # 0, first sanity check for determinism/compatibility on idx0 and idx1 - assert_dataset_compatible_with_megatron(dataset, index=index_a, assert_elements_equal=assert_elements_equal) - assert_dataset_compatible_with_megatron(dataset, index=index_b, assert_elements_equal=assert_elements_equal) - # 1, now check that index_a != index_b - with pytest.raises(AssertionError): - assert_elements_equal(dataset[index_a], dataset[index_b]) diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/megatron_parallel_state_utils.py b/sub-packages/bionemo-testing/src/bionemo/testing/megatron_parallel_state_utils.py deleted file mode 100644 index 85b5d079c0..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/megatron_parallel_state_utils.py +++ /dev/null @@ -1,371 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This package contains utilities for managing the state of distributed model parallelism in Megatron and Apex. - -In general you should just use the context manager `distributed_model_parallel_state` to manage the state of -your test. This context manager will handle the setup and teardown of the distributed model parallel state for you. - -Example usage: -```python - -from bionemo.testing import megatron_parallel_state_utils - -def my_test(): - with megatron_parallel_state_utils.distributed_model_parallel_state(): - # your test code that requires megatron/apex parallel state to be set up here -``` - -""" - -import gc -import os -import socket -from contextlib import contextmanager -from typing import Any, Optional, Sequence -from unittest import mock -from unittest.mock import MagicMock - -import megatron.core.num_microbatches_calculator -import torch -import torch.distributed -import torch.multiprocessing.spawn -from megatron.core import parallel_state -from megatron.core.tensor_parallel import random as tp_random -from pytest import MonkeyPatch -from torch.testing._internal.distributed.fake_pg import FakeStore - - -__all__: Sequence[str] = ( - "clean_up_distributed_and_parallel_states", - "distributed_model_parallel_state", - "mock_distributed_parallel_state", -) - -DEFAULT_MASTER_ADDR = "localhost" -DEFAULT_MASTER_PORT = "29500" -DEFAULT_NCCL_TIMEOUT = "30" # in second - - -def _reset_microbatch_calculator(): - """Resets _GLOBAL_NUM_MICROBATCHES_CALCULATOR in megatron which is used in NeMo to initilised model parallel in - nemo.collections.nlp.modules.common.megatron.megatron_init.initialize_model_parallel_for_nemo - """ # noqa: D205, D415 - megatron.core.num_microbatches_calculator._GLOBAL_NUM_MICROBATCHES_CALCULATOR = None - - -def clean_up_distributed_and_parallel_states(verify_distributed_state=False): - """Clean up parallel states, torch.distributed and torch cuda cache.""" - _reset_microbatch_calculator() - # Destroy Megatron distributed/parallel state environment. - parallel_state.destroy_model_parallel() - # Destroy the torch default / world process group. - if torch.distributed.is_initialized(): - torch.distributed.destroy_process_group() - # Clear torch.compile/dynamo cache - try: - if hasattr(torch, "_dynamo"): - torch._dynamo.reset() - if hasattr(torch, "compiler"): - torch.compiler.reset() - except Exception as e: - print(f"Failed to reset torch compile: {e}") - # Free unused CPU memory. - gc.collect() - # Free reserved / cached GPU memory allocated by Torch / CUDA. - torch.cuda.empty_cache() - if verify_distributed_state: - # Utilize to debug OOM or orphaned processes in GPU. - allocated_vram = torch.cuda.memory_allocated() / 1024**3 - reserved_vram = torch.cuda.memory_reserved() / 1024**3 - print( - "\n--------------------------------\n" - f"Memory Profile for Device: {torch.cuda.current_device()}\n" - f"Allocated: {allocated_vram} GB\n" - f"Reserved: {reserved_vram} GB\n" - f"GPU Processes:\n{torch.cuda.list_gpu_processes()}\n" - "--------------------------------\n" - ) - - -@contextmanager -def clean_parallel_state_context(): - """Puts you into a clean parallel state, and again tears it down at the end.""" - try: - clean_up_distributed_and_parallel_states() - yield - finally: - clean_up_distributed_and_parallel_states() - - -def find_free_network_port(address: str = "localhost") -> int: - """Finds a free port for the specified address. Defaults to localhost.""" - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.bind((address, 0)) - addr_port = s.getsockname() - s.close() - if addr_port is None: - # Could not find any free port. - return None, None - return addr_port - - -@contextmanager -def distributed_model_parallel_state( - seed: int = 42, - rank: int = 0, - world_size: int = 1, - backend: str = "nccl", - **initialize_model_parallel_kwargs, -): - """Context manager for torch distributed and parallel state testing. - - Args: - seed (int): random seed to be passed into tensor_parallel.random (https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/tensor_parallel/random.py). default to 42. - rank (int): global rank of the current cuda device. default to 0. - world_size (int): world size or number of devices. default to 1. - backend (str): backend to torch.distributed.init_process_group. default to 'nccl'. - **initialize_model_parallel_kwargs: kwargs to be passed into initialize_model_parallel (https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py). - """ - with MonkeyPatch.context() as context: - initial_states = None - try: - clean_up_distributed_and_parallel_states() - - # distributed and parallel state set up - if not os.environ.get("MASTER_ADDR", None): - context.setenv("MASTER_ADDR", DEFAULT_MASTER_ADDR) - if not os.environ.get("MASTER_PORT", None): - network_address, free_network_port = find_free_network_port(address=DEFAULT_MASTER_ADDR) - context.setenv( - "MASTER_PORT", free_network_port if free_network_port is not None else DEFAULT_MASTER_PORT - ) - if not os.environ.get("NCCL_TIMEOUT", None): - context.setenv("NCCL_TIMEOUT", DEFAULT_NCCL_TIMEOUT) - context.setenv("RANK", str(rank)) - - torch.distributed.init_process_group(backend=backend, world_size=world_size) - parallel_state.initialize_model_parallel(**initialize_model_parallel_kwargs) - - # tensor parallel random seed set up - # do not call torch.cuda.manual_seed after so! - if tp_random.get_cuda_rng_tracker().is_initialized(): - initial_states = tp_random.get_cuda_rng_tracker().get_states() - if seed is not None: - tp_random.model_parallel_cuda_manual_seed(seed) - - yield - finally: - # restore/unset tensor parallel random seed - if initial_states is not None: - tp_random.get_cuda_rng_tracker().set_states(initial_states) - else: - # Reset to the unset state - tp_random.get_cuda_rng_tracker().reset() - - clean_up_distributed_and_parallel_states() - - -@contextmanager -def mock_distributed_parallel_state( - world_size: int = 8, - rank: int = 0, - tensor_model_parallel_size: int = 1, - pipeline_model_parallel_size: int = 1, - virtual_pipeline_model_parallel_size: Optional[int] = None, - context_parallel_size: int = 1, - expert_model_parallel_size: int = 1, - seed: int | None = 42, -): - """A context manager that facilitates easy mocking of torch.distributed for an arbitrary GPU in a simulated cluster. - - Key functions that are mocked: - * `torch.distributed.new_group` when `backend="gloo"` which doesn't support a `backend="fake"` - * `torch.distributed.destroy_process_group` when `backend="gloo"` since new "gloo" groups are not actually made - * `torch._C._cuda_setDevice` which changes the current device behind the scenes. We assign devices round-robin - to support `world_size > torch.cuda.device_count()`. - - Outside of this mocking, a fake cluster is initialized using `backend="fake"` in `torch.distributed`. This sets up - enough global state and environment for megatron to think that it is initializing a larger cluster with some - settings where the current context has some user defined rank. You can then test the megatron state on a - hypothetical rank in some large world size. - - Args: - world_size: The world size (cluster size). Defaults to 8. - rank: the GPU number globally in the cluster. Defaults to 0. - tensor_model_parallel_size: tensor model parallel setting for megatron. Defaults to 1. - pipeline_model_parallel_size: pipeline model parallel setting for megatron. Defaults to 1. - virtual_pipeline_model_parallel_size: virtual pipeline model parallel size for megatron. Defaults to None. - context_parallel_size: context parallel size. Defaults to 1. - expert_model_parallel_size: expert model parallel size. Defaults to 1. - seed: seed for RNG state. Defaults to 42. - """ - # First set up mocks for torch.distributed state/info - ori_device_count = torch.cuda.device_count() - # Conditionally mock torch.distributed.new_group based on backend argument - ori_dist_new_group = torch.distributed.new_group - - def mock_new_group(*args, **kwargs): - if kwargs.get("backend") == "gloo": - # Return a specific mock if backend is 'gloo' - return MagicMock(name="gloo_group") - else: - # Return another mock or a different behavior for other backends - return ori_dist_new_group(*args, **kwargs) - - ori_destroy_pg = torch.distributed.destroy_process_group - - def mock_destroy_gloo_group(pg=None): - if isinstance(pg, MagicMock): - return None - ori_destroy_pg(pg) - - # The next mock is required to "set the device" to one that is greater than the number of actual GPUs - # the consequence of this mock is that the device is always dev 0 - ori_set_device = torch._C._cuda_setDevice - - def mock_set_device(device): - if ori_device_count > 0: - ori_set_device(device % ori_device_count) # wrap around the request - - with ( - mock.patch("torch.distributed.new_group", side_effect=mock_new_group), - mock.patch("torch.distributed.destroy_process_group", side_effect=mock_destroy_gloo_group), - mock.patch("torch._C._cuda_setDevice", side_effect=mock_set_device), - ): - # Next set up state etc - state_util = _MockMegatronParallelStateSingleton() # static singleton class - state_util.world_size = world_size - state_util.rank = rank - initial_states: Optional[Any] = None - try: - state_util.set_world_size(world_size=world_size, rank=rank) - state_util.initialize_model_parallel( - tensor_model_parallel_size=tensor_model_parallel_size, - pipeline_model_parallel_size=pipeline_model_parallel_size, - virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size, - context_parallel_size=context_parallel_size, - expert_model_parallel_size=expert_model_parallel_size, - ) - # Our goal is to set required state on entry, and then restore current state on exit for the RNGs. - # there are two possibilities that are handled below: - # 1. If the RNG state is not initialized, we need to set it up and then - # unset it on exit to restore the current state. We track that this is the case when `initial_states` is `None`. - # 2. If the RNG state is initialized, we need to track this state and reset it on exit to be what it was on entry. - # We track that this is the case when `initial_states` is not `None`. - if tp_random.get_cuda_rng_tracker().is_initialized(): - initial_states = tp_random.get_cuda_rng_tracker().get_states() - if seed is not None: - # Set the seed if provided, this case is valid whether or not the RNG had state previously. - # on exit the RNG state will be restored to what it was on entry. - tp_random.model_parallel_cuda_manual_seed(seed) - else: - # This is the case where the RNG state is not initialized and no seed was provided. - # We need to raise an error in this case, as we cannot restore the RNG state on exit and we need a seed - # to initialize the RNG state to. This only happens if the user overrides the default seed and sets it - # to None, and additionally if the RNG state was not initialized externally, as there is a default seed of 42. - if initial_states is None: - raise ValueError( - "You must provide a seed if the initial parallel state is unset. " - "Either provide a seed or leave the default seed (rather setting to None) " - "or initialize the RNG state externally." - ) - yield - finally: - if initial_states is not None: - tp_random.get_cuda_rng_tracker().set_states(initial_states) - else: - # Reset to the unset state - tp_random.get_cuda_rng_tracker().reset() - state_util.destroy_model_parallel() - - -class _MockMegatronParallelStateSingleton: - _instance = None - - def __init__( - self, - world_size=torch.cuda.device_count(), - rank=int(os.getenv("LOCAL_RANK", 0)), - inited=False, - store=FakeStore(), - ): - """A singleton to deal with global megatron state for simulating a fake cluster. - - Args: - world_size: the cluster size. Defaults to torch.cuda.device_count(). - rank: rank of this node. Defaults to int(os.getenv("LOCAL_RANK", 0)). - inited: if this global cluster has been initiated. Defaults to False. - store: the FakeStore for process groups. Defaults to FakeStore(). - """ - self.world_size = world_size - self.rank = rank - self.inited = inited - # Fake store idea: see https://github.com/pytorch/pytorch/blob/main/test/distributed/test_fake_pg.py - self.store = store - - def __new__(cls): - # Makes this a singleton - if cls._instance is None: - cls._instance = super().__new__(cls) - return cls._instance - - def initialize_distributed(self): - torch.cuda.set_device(self.rank % self.world_size) - # Fake store idea: see https://github.com/pytorch/pytorch/blob/main/test/distributed/test_fake_pg.py - torch.distributed.init_process_group( - backend="fake", - world_size=self.world_size, - rank=self.rank, - store=self.store, - ) - self.inited = True - - def set_world_size(self, world_size=None, rank=None): - self.world_size = torch.cuda.device_count() if world_size is None else world_size - if torch.distributed.is_initialized() and self.world_size != torch.distributed.get_world_size(): - torch.distributed.destroy_process_group() - - if rank is None: - self.rank = int(os.environ.get("LOCAL_RANK", 0)) - if self.rank >= self.world_size: - self.rank = -1 - else: - self.rank = rank - - def destroy_model_parallel(self): - if not self.inited: - return - # torch.distributed.barrier() - parallel_state.destroy_model_parallel() - self.inited = False - torch.distributed.destroy_process_group() - - def initialize_model_parallel( - self, - tensor_model_parallel_size=1, - pipeline_model_parallel_size=1, - virtual_pipeline_model_parallel_size=None, - **kwargs, - ): - parallel_state.destroy_model_parallel() - self.initialize_distributed() - parallel_state.initialize_model_parallel( - tensor_model_parallel_size, - pipeline_model_parallel_size, - virtual_pipeline_model_parallel_size, - **kwargs, - ) - self.inited = True diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/subprocess_utils.py b/sub-packages/bionemo-testing/src/bionemo/testing/subprocess_utils.py deleted file mode 100644 index 3fe582e1f9..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/subprocess_utils.py +++ /dev/null @@ -1,129 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess -import sys -import time - -from lightning.fabric.plugins.environments.lightning import find_free_network_port - - -def run_command_with_timeout(command, path, env, timeout=3600): - """Run command with timeout and incremental output processing to prevent hanging.""" - # Start process without capturing output in the main process - process = subprocess.Popen( - command, - shell=True, - cwd=path, - env=env, - text=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - bufsize=1, # Line buffered - ) - - stdout_data = [] - stderr_data = [] - start_time = time.time() - - try: - # Use select to handle output in a non-blocking way - import select - - # Get file descriptors for stdout and stderr - stdout_fd = process.stdout.fileno() - stderr_fd = process.stderr.fileno() - - # Set up select lists - read_fds = [stdout_fd, stderr_fd] - - # Process output incrementally - while read_fds and process.poll() is None: - # Check for timeout - if timeout and time.time() - start_time > timeout: - process.terminate() - time.sleep(0.5) - if process.poll() is None: - process.kill() - raise subprocess.TimeoutExpired(command, timeout) - - # Wait for output with a short timeout to allow checking process status - ready_fds, _, _ = select.select(read_fds, [], [], 1.0) - - for fd in ready_fds: - if fd == stdout_fd: - line = process.stdout.readline() - if not line: - read_fds.remove(stdout_fd) - continue - stdout_data.append(line) - # Optionally process/print output incrementally - # print(f"STDOUT: {line.strip()}") - - if fd == stderr_fd: - line = process.stderr.readline() - if not line: - read_fds.remove(stderr_fd) - continue - stderr_data.append(line) - # Optionally process/print error output incrementally - # print(f"STDERR: {line.strip()}") - - # Get any remaining output - remaining_stdout, remaining_stderr = process.communicate() - if remaining_stdout: - stdout_data.append(remaining_stdout) - if remaining_stderr: - stderr_data.append(remaining_stderr) - - # Create result object similar to subprocess.run - result = subprocess.CompletedProcess( - args=command, returncode=process.returncode, stdout="".join(stdout_data), stderr="".join(stderr_data) - ) - return result - - except Exception as e: - # Make sure we don't leave zombie processes - if process.poll() is None: - process.terminate() - time.sleep(0.5) - if process.poll() is None: - process.kill() - raise e - - -def run_command_in_subprocess(command: str, path: str, timeout: int = 3600) -> str: - """Run a command in a subprocess and return the output.""" - open_port = find_free_network_port() - # a local copy of the environment - env = dict(**os.environ) - env["MASTER_PORT"] = str(open_port) - - result = run_command_with_timeout( - command=command, - path=path, - env=env, - timeout=timeout, # Set an appropriate timeout in seconds - ) - - # For debugging purposes, print the output if the test fails. - if result.returncode != 0: - sys.stderr.write("STDOUT:\n" + result.stdout + "\n") - sys.stderr.write("STDERR:\n" + result.stderr + "\n") - - assert result.returncode == 0, f"Command failed: {command}" - - return result.stdout diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/tensorboard.py b/sub-packages/bionemo-testing/src/bionemo/testing/tensorboard.py deleted file mode 100644 index 84457fccfe..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/tensorboard.py +++ /dev/null @@ -1,60 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path -from typing import Optional - -from tensorboard.backend.event_processing.event_accumulator import EventAccumulator - - -def verify_tensorboard_logs(tb_log_dir: Path, expected_metrics: list[str], min_steps: int = 1) -> Optional[str]: - """Verify that TensorBoard logs exist and contain expected metrics. - - Args: - tb_log_dir: Path to the TensorBoard log directory - expected_metrics: List of metric names expected in the logs - min_steps: Minimum number of steps expected in the logs - - Returns: - None if verification succeeds, error message string if it fails - """ - # Find event files in the log directory - event_files = list(tb_log_dir.glob("events.out.tfevents.*")) - if len(event_files) == 0: - return f"No TensorBoard event files found in {tb_log_dir}" - - # Load the event file - event_acc = EventAccumulator(str(tb_log_dir)) - event_acc.Reload() - - # Get available scalar tags - scalar_tags = event_acc.Tags()["scalars"] - - # Check that expected metrics are present - for metric in expected_metrics: - # Check if metric exists in any form (might have prefixes like "train/" or suffixes) - metric_found = any(metric in tag for tag in scalar_tags) - if not metric_found: - return f"Expected metric '{metric}' not found in TensorBoard logs. Available tags: {scalar_tags}" - - # Verify we have logged data for at least min_steps - if scalar_tags: - # Get the first available metric to check step count - first_metric = scalar_tags[0] - events = event_acc.Scalars(first_metric) - if len(events) < min_steps: - return f"Expected at least {min_steps} steps logged, but found {len(events)}" - - return None diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/testing_callbacks.py b/sub-packages/bionemo-testing/src/bionemo/testing/testing_callbacks.py deleted file mode 100644 index a16b3ef6aa..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/testing_callbacks.py +++ /dev/null @@ -1,296 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -import signal -from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Union - -import numpy as np -import torch -from lightning.pytorch import Callback, LightningModule, Trainer -from nemo.lightning import io -from nemo.lightning.data import MegatronPretrainingSampler -from nemo.lightning.megatron_parallel import CallbackMethods, DataT, MegatronLossReduction, MegatronStep -from overrides import override - -from bionemo.testing.harnesses.mode import Mode -from bionemo.testing.torch import recursive_detach - - -class StopAfterValidEpochEndCallback(Callback, CallbackMethods): - """A callback that stops training after the validation epoch. - - Use this callback for pytest based Stop and go tests. - """ - - def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule): # noqa: D102 - if trainer.sanity_checking: - return - trainer.should_stop = True - - -class SignalAfterGivenStepCallback(Callback, CallbackMethods): - """A callback that emits a given signal to the current process at the defined step. - - Use this callback for pytest based Stop and go tests. - """ - - def __init__( - self, - stop_step: int, - signal_: signal.Signals = signal.SIGUSR2, - use_trainer_should_stop: bool = False, - stop_before_step: bool = False, - ): - """Initializes the callback with the given stop_step.""" - # Note that the stop step will be one less than the requested step if stop_before_step is True. - # this is because the first step is 0 so you get i+1 steps normally. - if stop_before_step: - self.stop_step = stop_step - 1 - else: - self.stop_step = stop_step - self.signal = signal_ - # If True, ask the trainer to stop by setting should_stop to True rather than emitting a kill signal. - self.use_trainer_should_stop = use_trainer_should_stop - - def on_megatron_step_start(self, step: MegatronStep) -> MegatronStep: - """Stop training if the global step is greater than or equal to the stop_step.""" - if step.trainer.global_step >= self.stop_step: - if self.use_trainer_should_stop: - # Ask the trainer to stop by setting should_stop to True rather than emitting a kill signal. - step.trainer.should_stop = True - else: - os.kill(os.getpid(), self.signal) - return step - - -class BaseInterruptedVsContinuousCallback(Callback, CallbackMethods, io.IOMixin): - """Base class for serializable stop-and-go callback to compare continuous to interrupted training. - - This class is used by extending a callback and collecting data into the `self.data` attribute. This data is then - compared between continuous and interrupted training. - - See nemo.lightning.megatron_parallel.CallbackMethods for the available callback methods. - """ - - def __init__(self): - """Initializes the callback.""" - self.data = [] - - def __deepcopy__(self, memo): - """Don't actually attempt to copy this data when this callback is being serialized.""" - ... - - -class LearningRateCallback(BaseInterruptedVsContinuousCallback): - """Stop-and-go callback for learning rate before pausing and after resuming training.""" - - def on_megatron_step_start(self, step: MegatronStep) -> MegatronStep: - """Get learning rate as metadata.""" - if step.trainer.training: - self.data.append(np.array(step.trainer.optimizers[0].param_groups[0]["lr"])) - return step - - -class GlobalStepStateCallback(BaseInterruptedVsContinuousCallback): - """Stop-and-go callback for global_step before pausing and after resuming training.""" - - def on_megatron_step_start(self, step: MegatronStep) -> MegatronStep: - """Get learning rate as metadata.""" - if step.trainer.training: - self.data.append(np.array(step.trainer.global_step)) - return step - - -class ConsumedSamplesCallback(BaseInterruptedVsContinuousCallback): - """Stop-and-go callback to check consumed samples before pausing and after resuming training.""" - - def on_megatron_step_start(self, step: MegatronStep) -> MegatronStep: - """Get consumed samples as metadata.""" - if step.trainer.training: - data_sampler = step.trainer.datamodule.data_sampler - consumed_samples = data_sampler.compute_consumed_samples( - step.trainer.global_step - step.trainer.datamodule.init_global_step - ) - self.data.append(np.array(consumed_samples)) - return step - - -class TrainInputCallback(BaseInterruptedVsContinuousCallback): - """Collect training input samples for comparison.""" - - def on_megatron_microbatch_end( - self, - step: MegatronStep, - batch: DataT, - forward_callback: "MegatronLossReduction", - output: Any, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.training: - self.data.append(recursive_detach(batch)) - - -class ValidInputCallback(BaseInterruptedVsContinuousCallback): - """Collect validation input samples for comparison.""" - - def on_megatron_microbatch_end( - self, - step: MegatronStep, - batch: DataT, - forward_callback: "MegatronLossReduction", - output: Any, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.validating: - self.data.append(recursive_detach(batch)) - - -class TrainOutputCallback(BaseInterruptedVsContinuousCallback): - """Collect training output samples for comparison.""" - - def on_megatron_microbatch_end( - self, - step: MegatronStep, - batch: DataT, - forward_callback: "MegatronLossReduction", - output: Any, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.training: - self.data.append(recursive_detach(output)) - - -class ValidOutputCallback(BaseInterruptedVsContinuousCallback): - """Collect validation output samples for comparison.""" - - def on_megatron_microbatch_end( - self, - step: MegatronStep, - batch: DataT, - forward_callback: "MegatronLossReduction", - output: Any, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.validating: - self.data.append(recursive_detach(output)) - - -class TrainLossCallback(BaseInterruptedVsContinuousCallback): - """Collect training loss samples for comparison.""" - - def on_megatron_step_end( - self, - step: MegatronStep, - microbatch_outputs: List[Any], - reduced: Optional[Union[torch.Tensor, Dict[str, torch.Tensor]]] = None, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.training: - self.data.append(recursive_detach(reduced)) - - -class ValidLossCallback(BaseInterruptedVsContinuousCallback): - """Collect training loss samples for comparison.""" - - def on_megatron_step_end( - self, - step: MegatronStep, - microbatch_outputs: List[Any], - reduced: Optional[Union[torch.Tensor, Dict[str, torch.Tensor]]] = None, - ) -> None: - """Get consumed samples as metadata.""" - if step.trainer.validating: - self.data.append(recursive_detach(reduced)) - - -class OptimizerStateCallback(BaseInterruptedVsContinuousCallback): - """Stop-and-go callback to check optimizer states before pausing and after resuming training.""" - - def on_megatron_step_start(self, step: MegatronStep) -> MegatronStep: - """Get optimizer states as metadata.""" - if step.trainer.training: - self.data.append( - recursive_detach( - [ - optimizer.mcore_optimizer.optimizer.state_dict()["state"] - for optimizer in step.trainer.optimizers - ] - ) - ) - return step - - -class AbstractStopAndGoCallback(ABC, BaseInterruptedVsContinuousCallback): - """Abstract base class for stop-and-go callback to compare metadata before pausing and after resuming training. - - This base class provides utility methods to help streamline stop and go comparison. - - Provided methods: - - __init__: initializes the callback with the given mode. - - get_metadata: abstract method that should be overridden to get metadata from the trainer and pl_module. - - Default behaviors: - - in stop mode, metadata is gotten and compared on_validation_epoch_end. - - in go mode, metadata is gotten and saved on_train_epoch_start. - - Override these behaviors if necessary. - """ - - def __init__(self, mode: Mode = Mode.STOP): - """Initialize StopAndGoCallback. - - Args: - mode (str, optional): Mode to run in. Must be either Mode.STOP or Mode.RESUME. Defaults to Mode.STOP. - - Notes: - User must override get_metadata to get metadata from the trainer and pl_module. - """ - if mode not in [Mode.STOP, Mode.RESUME]: - raise ValueError(f"mode must be 'stop' or 'go', got {mode}") - self.mode = mode - super().__init__() - - @abstractmethod - def get_metadata(self, trainer: Trainer, pl_module: LightningModule) -> Any: - """Get metadata from trainer and pl_module.""" - raise NotImplementedError - - def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule): # noqa: D102 - if self.mode == Mode.RESUME: - self.data = self.get_metadata(trainer, pl_module) - - def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule): # noqa: D102 - if not trainer.sanity_checking and self.mode == Mode.STOP: - self.data = self.get_metadata(trainer, pl_module) - - -class TrainValInitConsumedSamplesStopAndGoCallback(AbstractStopAndGoCallback): - """Stop-and-go callback to check consumed samples before pausing and after resuming training. - - This is currently the only callback that doesn't fit with the new pattern of directly comparing continuous and - interrupted training, since the dataloaders don't track their consumed_samples before and after checkpoint - resumption. - """ - - @override - def get_metadata(self, trainer: Trainer, pl_module: LightningModule) -> Any: - """Get consumed samples as metadata.""" - # return trainer.datamodule.state_dict()["consumed_samples"] # TODO why state_dict can be empty despite working lines below - train_data_sampler: MegatronPretrainingSampler = trainer.train_dataloader.batch_sampler - val_data_sampler: MegatronPretrainingSampler = trainer.val_dataloaders.batch_sampler - return train_data_sampler.consumed_samples, val_data_sampler.consumed_samples diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/torch.py b/sub-packages/bionemo-testing/src/bionemo/testing/torch.py deleted file mode 100644 index a091c5bf8e..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/torch.py +++ /dev/null @@ -1,79 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import numpy as np -import torch - - -def check_fp8_support(device_id: int = 0) -> tuple[bool, str, str]: - """Check if FP8 is supported on the current GPU. - - FP8 requires compute capability 8.9+ (Ada Lovelace/Hopper architecture or newer). - """ - if not torch.cuda.is_available(): - return False, "0.0", "CUDA not available" - device_props = torch.cuda.get_device_properties(device_id) - compute_capability = f"{device_props.major}.{device_props.minor}" - device_name = device_props.name - # FP8 is supported on compute capability 8.9+ (Ada Lovelace/Hopper architecture) - is_supported = (device_props.major > 8) or (device_props.major == 8 and device_props.minor >= 9) - return is_supported, compute_capability, f"Device: {device_name}, Compute Capability: {compute_capability}" - - -def recursive_detach(x): - """Detach all tensors in a nested structure.""" - if isinstance(x, torch.Tensor): - return x.detach().cpu() - elif isinstance(x, (list, tuple)): - return type(x)(recursive_detach(item) for item in x) - elif isinstance(x, dict): - return {key: recursive_detach(value) for key, value in x.items()} - else: - return x - - -def recursive_assert_approx_equal(x, y, atol=1e-4, rtol=1e-4): - """Assert that all tensors in a nested structure are approximately equal.""" - if isinstance(x, torch.Tensor): - torch.testing.assert_close(x, y, atol=atol, rtol=rtol) - elif isinstance(x, np.ndarray): - np.testing.assert_allclose(x, y, atol=atol, rtol=rtol) - elif isinstance(x, (list, tuple)): - assert len(x) == len(y), f"Length mismatch: {len(x)} vs {len(y)}" - for x_item, y_item in zip(x, y): - recursive_assert_approx_equal(x_item, y_item, atol=atol, rtol=rtol) - elif isinstance(x, dict): - assert x.keys() == y.keys() - for key in x: - recursive_assert_approx_equal(x[key], y[key], atol=atol, rtol=rtol) - else: - assert x == y - - -def get_device_and_memory_allocated() -> str: - """Get the current device index, name, and memory usage.""" - current_device_index = torch.cuda.current_device() - props = torch.cuda.get_device_properties(current_device_index) - message = f""" - current device index: {current_device_index} - current device uuid: {props.uuid} - current device name: {props.name} - memory, total on device: {torch.cuda.mem_get_info()[1] / 1024**3:.3f} GB - memory, available on device: {torch.cuda.mem_get_info()[0] / 1024**3:.3f} GB - memory allocated for tensors etc: {torch.cuda.memory_allocated() / 1024**3:.3f} GB - max memory reserved for tensors etc: {torch.cuda.max_memory_allocated() / 1024**3:.3f} GB - """ - return message diff --git a/sub-packages/bionemo-testing/src/bionemo/testing/utils.py b/sub-packages/bionemo-testing/src/bionemo/testing/utils.py deleted file mode 100644 index 50429e093f..0000000000 --- a/sub-packages/bionemo-testing/src/bionemo/testing/utils.py +++ /dev/null @@ -1,93 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Optional, Sequence - -import torch - - -__all__: Sequence[str] = ( - "assert_matrix_correlation_above_value", - "assert_matrix_mape_below_value", -) - - -def assert_matrix_mape_below_value( # noqa: D417 - actual: torch.Tensor, - expected: torch.Tensor, - mask: Optional[torch.Tensor] = None, - max_mape: float = 0.1, - eps: float = 1e-3, - msg: str = "", -) -> None: - """Assert that two tensors are close with a root mean squared error (RMSE) - relative to the scaled root mean square values for each matrix. This tells - you if the RMSE implies that the two matrices are more similar to eachother - as-is than would be the case if values were randomly permuted. - - Args: - actual: The actual tensor. - expected: The expected tensor. - mask: If there are only some values you want to compare, - apply this mask and RMSE will be computed on the unmasked items only. - min_relative_rmse: The relative tolerance parameter. - """ # noqa: D205 - if mask is None: - mask = torch.ones_like(actual) - else: - if len(mask.shape) < len(actual.shape): - mask = mask[..., None] - masked_actual = actual[mask.expand_as(actual).to(bool)] - masked_expected = expected[mask.expand_as(expected).to(bool)] - mape = ( - torch.mean( - torch.abs(masked_actual - masked_expected) - / torch.maximum(torch.abs(masked_expected), torch.zeros_like(masked_expected) + eps) - ) - * 100.0 - ) - if mape > max_mape: - raise AssertionError(f"MAPE below threshold: {mape} > {max_mape}. {msg}") - - -def assert_matrix_correlation_above_value( # noqa: D417 - actual: torch.Tensor, - expected: torch.Tensor, - mask: Optional[torch.Tensor] = None, - min_correlation: float = 0.95, - msg: str = "", -) -> None: - """Assert that two tensors are close with a root mean squared error (RMSE) - relative to the scaled root mean square values for each matrix. This tells - you if the RMSE implies that the two matrices are more similar to eachother - as-is than would be the case if values were randomly permuted. - - Args: - actual: The actual tensor. - expected: The expected tensor. - mask: If there are only some values you want to compare, - apply this mask and RMSE will be computed on the unmasked items only. - min_relative_rmse: The relative tolerance parameter. - """ # noqa: D205 - if mask is None: - mask = torch.ones_like(actual) - else: - if len(mask.shape) < len(actual.shape): - mask = mask[..., None] - masked_actual = actual[mask.expand_as(actual).to(bool)] - masked_expected = expected[mask.expand_as(expected).to(bool)] - corr = torch.corrcoef(torch.stack([masked_actual, masked_expected]))[0, 1] - if corr < min_correlation: - raise AssertionError(f"Correlation below threshold: {corr} < {min_correlation}. {msg}") diff --git a/sub-packages/bionemo-testing/tests/bionemo/testing/data/test_fasta.py b/sub-packages/bionemo-testing/tests/bionemo/testing/data/test_fasta.py deleted file mode 100644 index 96c9f32c1d..0000000000 --- a/sub-packages/bionemo-testing/tests/bionemo/testing/data/test_fasta.py +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from pathlib import Path - -import pytest - -from bionemo.noodles.nvfaidx import NvFaidx -from bionemo.testing.data.fasta import ALU_SEQUENCE, create_fasta_file - - -@pytest.mark.parametrize("target_sequence_length, num_sequences", [(123, 3), (1234, 2), (12345, 1)]) -def test_created_fasta_file_has_expected_length( - tmp_path: Path, - target_sequence_length: int, - num_sequences: int, -) -> None: - fasta_file_path = tmp_path / "test.fasta" - create_fasta_file(fasta_file_path, num_sequences, target_sequence_length, repeating_dna_pattern=ALU_SEQUENCE) - assert fasta_file_path.stat().st_size > 0 - idx = NvFaidx(fasta_file_path) - assert len(idx) == num_sequences - n_out = 0 - for i, (seq_name, sequence) in enumerate(sorted(idx.items())): - assert seq_name == f"contig_{i}" - assert len(sequence) == target_sequence_length - if i == 0: - assert ALU_SEQUENCE[:target_sequence_length] in sequence - n_out += 1 - assert n_out == num_sequences diff --git a/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_dataset_compatibility.py b/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_dataset_compatibility.py deleted file mode 100644 index 66b6759131..0000000000 --- a/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_dataset_compatibility.py +++ /dev/null @@ -1,88 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import random -from typing import Tuple - -import pytest -import torch -import torch.utils.data - -from bionemo.testing.megatron_dataset_compatibility import ( - DatasetDistributedNondeterministic, - DatasetLocallyNondeterministic, - assert_dataset_compatible_with_megatron, -) - - -class DistributedBadDataset(torch.utils.data.Dataset): - def __init__(self, seed: int = 3, len: int = 2, shape: Tuple[int, ...] = (3,)): - self.seed = seed - self.len = len - self.shape = shape - - def __len__(self): - return self.len - - def __getitem__(self, index: int) -> dict[str, torch.Tensor]: - torch.manual_seed(self.seed + index) # fails because torch.manual_seed is impacted by distributed parallel - return {"tensor": torch.rand(self.shape)} - - -class LocallyBadDataset(torch.utils.data.Dataset): - def __init__(self, len: int = 2, shape: Tuple[int, ...] = (3,)): - self.len = len - self.shape = shape - - def __len__(self): - return self.len - - def __getitem__(self, index: int) -> dict[str, torch.Tensor]: - # Generate a totally random seed, simulate not setting a seed - random_seed = random.randint(0, 2**32 - 1) - # Set the random seed for PyTorch - torch.manual_seed(random_seed + index) - return {"tensor": torch.rand(self.shape)} - - -class OKDataset(torch.utils.data.Dataset): - def __init__(self, seed: int = 3, len: int = 2, shape: Tuple[int, ...] = (3,)): - self.seed = seed - self.len = len - self.shape = shape - - def __len__(self): - return self.len - - def __getitem__(self, index: int) -> dict[str, torch.Tensor]: - generator = torch.Generator() - generator.manual_seed(self.seed + index) - return {"tensor": torch.rand(self.shape, generator=generator)} - - -def test_ok_dataset_passes(): - ok_ds = OKDataset() - assert_dataset_compatible_with_megatron(ok_ds) - - -def test_locally_bad_dataset_fails(): - locally_bad_ds = LocallyBadDataset() - with pytest.raises(DatasetLocallyNondeterministic): - assert_dataset_compatible_with_megatron(locally_bad_ds) - - -def test_distributed_bad_dataset_fails(): - distributed_bad_ds = DistributedBadDataset() - with pytest.raises(DatasetDistributedNondeterministic): - assert_dataset_compatible_with_megatron(distributed_bad_ds) diff --git a/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_parallel_state_utils.py b/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_parallel_state_utils.py deleted file mode 100644 index 95dbee08db..0000000000 --- a/sub-packages/bionemo-testing/tests/bionemo/testing/test_megatron_parallel_state_utils.py +++ /dev/null @@ -1,199 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -import torch -import torch.distributed as dist -from megatron.core import parallel_state -from nemo import lightning as nl - -from bionemo.testing import megatron_parallel_state_utils - - -MAX_WORLD_SIZE = 4 -AVAILABLE_WORLD_SIZE = torch.cuda.device_count() -WORLD_SIZES = range(min(MAX_WORLD_SIZE, torch.cuda.device_count())) - - -def _test_all_reduce_sum(rank: int, world_size: int): - """Private test function for torch.distributed mean reduce.""" - with megatron_parallel_state_utils.distributed_model_parallel_state(rank=rank, world_size=world_size): - tensor = torch.tensor([rank + 1]).cuda(rank) - dist.all_reduce(tensor) - assert tensor.item() == world_size * (world_size + 1) / 2 - - -@pytest.mark.parametrize("world_size", WORLD_SIZES) -def test_all_reduce_sum(world_size: int): - """Multiprocessing test of _test_all_reduce_sum.""" - torch.multiprocessing.spawn( - fn=_test_all_reduce_sum, - args=(world_size,), - nprocs=world_size, - ) - - -def _test_data_parallel_group(rank: int, world_size: int): - """Private test function for dp parallel state.""" - with megatron_parallel_state_utils.distributed_model_parallel_state(rank=rank, world_size=world_size): - assert parallel_state.get_data_parallel_rank() == rank - assert parallel_state.get_data_parallel_world_size() == world_size - assert parallel_state.get_data_parallel_src_rank() == 0 - - -@pytest.mark.parametrize("world_size", WORLD_SIZES) -def test_data_parallel_group(world_size: int): - """Multiprocessing test of _test_data_parallel_group.""" - torch.multiprocessing.spawn( - fn=_test_data_parallel_group, - args=(world_size,), - nprocs=world_size, - ) - - -def _test_tensor_model_parallel_group(rank: int, world_size: int): - """Private test function for tp parallel state.""" - with megatron_parallel_state_utils.distributed_model_parallel_state( - rank=rank, world_size=world_size, tensor_model_parallel_size=world_size - ): - assert parallel_state.get_tensor_model_parallel_rank() == rank - assert parallel_state.get_tensor_model_parallel_world_size() == world_size - assert parallel_state.get_tensor_model_parallel_src_rank() == 0 - - -@pytest.mark.parametrize("world_size", WORLD_SIZES) -def test_tensor_model_parallel_group(world_size: int): - """Multiprocessing test of _test_tensor_model_parallel_group.""" - torch.multiprocessing.spawn( - fn=_test_tensor_model_parallel_group, - args=(world_size,), - nprocs=world_size, - ) - - -def _test_pipeline_model_parallel_group(rank: int, world_size: int): - """Private test function for pp parallel state.""" - with megatron_parallel_state_utils.distributed_model_parallel_state( - rank=rank, world_size=world_size, pipeline_model_parallel_size=world_size - ): - assert parallel_state.get_pipeline_model_parallel_rank() == rank - assert parallel_state.get_pipeline_model_parallel_world_size() == world_size - if rank == 0: - assert parallel_state.is_pipeline_first_stage() - if rank == world_size: - assert parallel_state.is_pipeline_last_stage() - - -@pytest.mark.parametrize("world_size", WORLD_SIZES) -def test_pipeline_model_parallel_group(world_size: int): - """Multiprocessing test of _test_pipeline_model_parallel_group.""" - torch.multiprocessing.spawn( - fn=_test_pipeline_model_parallel_group, - args=(world_size,), - nprocs=world_size, - ) - - -def test_load_megatron_strategy(): - # This will clean up most of the megatron global state that can get created - with megatron_parallel_state_utils.distributed_model_parallel_state(): - strategy = nl.MegatronStrategy(tensor_model_parallel_size=1) - assert strategy.tensor_model_parallel_size == 1 - - -def test_construct_nemo_lightning_trainer(): - # This will clean up most of the megatron global state that can get created - with megatron_parallel_state_utils.distributed_model_parallel_state(): - trainer = nl.Trainer( - devices=1, - max_steps=5, - accelerator="gpu", - strategy=nl.MegatronStrategy(tensor_model_parallel_size=1), - ) - assert trainer.max_steps == 5 - - -def test_mock_rank0_first_pipeline(): - with megatron_parallel_state_utils.mock_distributed_parallel_state( - world_size=8, rank=0, pipeline_model_parallel_size=8 - ): - assert parallel_state.is_pipeline_first_stage() - assert not parallel_state.is_pipeline_last_stage() - - -def test_mock_rank4_mid_pipeline(): - with megatron_parallel_state_utils.mock_distributed_parallel_state( - world_size=8, rank=4, pipeline_model_parallel_size=8 - ): - assert not parallel_state.is_pipeline_first_stage() - assert not parallel_state.is_pipeline_last_stage() - - -def test_mock_rank7_last_pipeline(): - with megatron_parallel_state_utils.mock_distributed_parallel_state( - world_size=8, rank=7, pipeline_model_parallel_size=8 - ): - assert not parallel_state.is_pipeline_first_stage() - assert parallel_state.is_pipeline_last_stage() - - -def test_mock_get_pp_group(): - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, pipeline_model_parallel_size=2): - assert parallel_state.get_pipeline_model_parallel_group() is not None - - -def test_mock_get_tp_group(): - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, tensor_model_parallel_size=2): - assert parallel_state.get_tensor_model_parallel_group() is not None - - -def test_mock_get_cp_group(): - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, context_parallel_size=2): - assert parallel_state.get_context_parallel_group() is not None - - -def test_mock_all_reduce(): - # Adapted from https://github.com/pytorch/pytorch/blob/main/test/distributed/test_fake_pg.py - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, rank=1): - output = torch.ones(3, 3).cuda() * dist.get_rank() - dist.all_reduce(output) - assert tuple(output.shape) == (3, 3) - - -def test_mock_allgather(): - # Adapted from https://github.com/pytorch/pytorch/blob/main/test/distributed/test_fake_pg.py - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, rank=1): - input_tensor = torch.ones(3, 3) * dist.get_rank() - output_tensors = [torch.empty_like(input_tensor) for _ in range(2)] - dist.all_gather(output_tensors, input_tensor) - for _, out_tensor in enumerate(output_tensors): - assert tuple(out_tensor.shape) == (3, 3) - - -def test_mock_reduce_scatter(): - # Adapted from https://github.com/pytorch/pytorch/blob/main/test/distributed/test_fake_pg.py - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, rank=1): - to_reduce_scatter = [torch.ones(3, 3) * rank for rank in range(2)] - output_tensor = torch.empty(3, 3) - - dist.reduce_scatter(output_tensor, to_reduce_scatter) - assert tuple(output_tensor.shape) == (3, 3) - - -def test_mock_all_reduce_sum(): - with megatron_parallel_state_utils.mock_distributed_parallel_state(world_size=2, rank=1): - tensor = torch.tensor([dist.get_rank() + 1]) - dist.all_reduce(tensor) - assert tensor.item() == 2 # there is no actual communication in mock distributed diff --git a/sub-packages/bionemo-testing/tests/bionemo/testing/test_torch.py b/sub-packages/bionemo-testing/tests/bionemo/testing/test_torch.py deleted file mode 100644 index bcd96ce529..0000000000 --- a/sub-packages/bionemo-testing/tests/bionemo/testing/test_torch.py +++ /dev/null @@ -1,26 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-Apache2 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from bionemo.testing.torch import get_device_and_memory_allocated - - -def test_get_device_and_memory_allocated(): - message = get_device_and_memory_allocated() - assert message is not None - assert "memory, total on device" in message - assert "memory, available on device" in message - assert "memory allocated for tensors etc" in message - assert "max memory reserved for tensors etc" in message diff --git a/tach.toml b/tach.toml index 6a36ed09f9..dd6ddcb921 100644 --- a/tach.toml +++ b/tach.toml @@ -10,12 +10,8 @@ exclude = [ ] source_roots = [ "sub-packages/bionemo-core/src", - # "sub-packages/bionemo-evo2/src", # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ - # "sub-packages/bionemo-example_model/src", # DEPRECATED: NeMo/Megatron example model - # "sub-packages/bionemo-llm/src", # DEPRECATED: NeMo/Megatron LLM components "sub-packages/bionemo-scdl/src", "sub-packages/bionemo-size-aware-batching/src", - # "sub-packages/bionemo-testing/src", # DEPRECATED: NeMo/Megatron test utilities "sub-packages/bionemo-webdatamodule/src", ] @@ -23,27 +19,6 @@ source_roots = [ path = "bionemo.core" depends_on = [] -# [[modules]] # DEPRECATED: replaced by bionemo-recipes/recipes/evo2_megatron/ -# path = "bionemo.evo2" -# depends_on = [ -# "bionemo.noodles", -# "bionemo.core", -# "bionemo.llm", -# ] - -# [[modules]] # DEPRECATED: NeMo/Megatron example model -# path = "bionemo.example_model" -# depends_on = [ -# "bionemo.core", -# "bionemo.llm", -# ] - -# [[modules]] # DEPRECATED: NeMo/Megatron LLM components -# path = "bionemo.llm" -# depends_on = [ -# "bionemo.core", -# ] - [[modules]] path = "bionemo.noodles" depends_on = [ @@ -62,13 +37,6 @@ depends_on = [ "bionemo.core", ] -# [[modules]] # DEPRECATED: NeMo/Megatron test utilities -# path = "bionemo.testing" -# depends_on = [ -# "bionemo.core", -# "bionemo.llm", -# ] - [[modules]] path = "bionemo.webdatamodule" depends_on = [ From 73a15e67a54201741af4cae213f355b2f78cda80 Mon Sep 17 00:00:00 2001 From: Timur Rvachov Date: Wed, 4 Mar 2026 14:24:32 +0000 Subject: [PATCH 4/4] Update .secrets.baseline after sub-package removal The detect-secrets baseline needed a line number refresh after removing deprecated sub-packages shifted content in pyproject.toml. Co-Authored-By: Claude Opus 4.6 --- .secrets.baseline | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 9bf60fbce5..a9b7f171fd 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -148,9 +148,9 @@ "filename": "pyproject.toml", "hashed_secret": "79670e9c9d1c7ea5b81a96a2053d81437712c78e", "is_verified": false, - "line_number": 41 + "line_number": 37 } ] }, - "generated_at": "2025-12-29T20:49:21Z" + "generated_at": "2026-03-04T14:22:49Z" }