diff --git a/PyTorch/build-in/other/continual-learning/Dockerfile b/PyTorch/build-in/other/continual-learning/Dockerfile
new file mode 100644
index 000000000..8e364d1a1
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/Dockerfile
@@ -0,0 +1,9 @@
+ARG FROM_IMAGE_NAME=jfrog.tecorigin.net/tecotp-docker/release/ubuntu22.04/x86_64/pytorch:2.1.1-torch_sdaa2.1.1
+FROM ${FROM_IMAGE_NAME}
+
+WORKDIR /workspace/
+ADD requirements.txt /workspace/
+RUN rm -rf /bin/sh && ln -s /bin/bash /bin/sh
+RUN source activate torch_env && pip install --no-cache-dir -r requirements.txt
+ADD . /workspace/other/continual-learning
+WORKDIR /workspace/other/continual-learning
diff --git a/PyTorch/build-in/other/continual-learning/ICLRblogpost/README.md b/PyTorch/build-in/other/continual-learning/ICLRblogpost/README.md
new file mode 100644
index 000000000..096c649b8
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/ICLRblogpost/README.md
@@ -0,0 +1,54 @@
+# On the computation of the Fisher Information in continual learning (2025, ICLR Blogpost)
+
+The code in this repository is used for the experiments reported in the
+[ICLR 2025 blog post "On the computation of the Fisher Information in continual learning"](https://iclr-blogposts.github.io/2025/blog/fisher/)
+(or see the arXiv-preprint [here](https://arxiv.org/abs/2502.11756)).
+
+This blog post compares the performance of Elastic Weight Consolidation (EWC) with various different ways of computing the diagonal elements of the Fisher Information matrix.
+The following options are considered:
+- **EXACT**
+The elements of the Fisher Information are computed exactly. All training samples are used.
+To use this option: `./main.py --ewc --fisher-labels='all'`
+
+- **EXACT ($n$=500)**
+The elements of the Fisher Information are computed exactly. Only 500 training samples are used.
+To use this option: `./main.py --ewc --fisher-labels='all' --fisher-n=500`
+
+- **SAMPLE**
+The elements of the Fisher Information are estimated using a single Monte Carlo sample. All training samples are used.
+To use this option: `./main.py --ewc --fisher-labels='sample'`
+
+- **EMPIRICAL**
+The empirical Fisher Information is used. All training samples are used.
+To use this option: `./main.py --ewc --fisher-labels='true'`
+
+- **BATCHED ($b$=128)**
+The empirical Fisher Information is approximated using mini-batches (see blog post for details).
+To use this option: `./main.py --ewc --fisher-labels='true' --fisher-batch=128`
+
+
+To run the experiments from the blog post, the following lines of code can be used:
+
+```bash
+python3 ICLRblogpost/compare_FI.py --seed=1 --n-seeds=30 --experiment=splitMNIST --scenario=task
+python3 ICLRblogpost/compare_FI.py --seed=1 --n-seeds=30 --experiment=CIFAR10 --scenario=task --reducedResNet --iters=2000 --lr=0.001
+```
+
+
+### Citation
+If this is useful, please consider citing the blog post:
+```
+@inproceedings{vandeven2025fisher,
+ title={On the computation of the {F}isher {I}nformation in continual learning},
+ author={van de Ven, Gido M},
+ booktitle={ICLR Blogposts 2025},
+ year={2025},
+ date={April 28, 2025},
+ url={https://iclr-blogposts.github.io/2025/blog/fisher/}
+}
+```
+
+
+### Acknowledgments
+This project has been supported by a senior postdoctoral fellowship from the
+Research Foundation -- Flanders (FWO) under grant number 1266823N.
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/ICLRblogpost/compare_FI.py b/PyTorch/build-in/other/continual-learning/ICLRblogpost/compare_FI.py
new file mode 100644
index 000000000..ec82db246
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/ICLRblogpost/compare_FI.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python3
+import sys
+import os
+import numpy as np
+# -expand module search path to parent directory
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+# -custom-written code
+import main
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt as my_plt
+
+
+## Parameter-values to compare
+lamda_list = [1., 10., 100., 1000., 10000., 100000., 1000000., 10000000., 100000000., 1000000000., 10000000000.,
+ 100000000000., 1000000000000., 10000000000000.]
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'comparison': True, 'compare_hyper': True}
+ # Define input options
+ parser = options.define_args(filename="compare_hyperParams",
+                                 description='Compare performance EWC with different ways of computing FI matrix.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser.add_argument('--n-seeds', type=int, default=1, help='how often to repeat?')
+ # Add options specific for EWC
+ param_reg = parser.add_argument_group('Parameter Regularization')
+ param_reg.add_argument('--offline', action='store_true', help='use Offline EWC rather than Online EWC')
+ param_reg.add_argument("--fisher-n-all", type=float, default=500, metavar='N',
+ help="how many samples to approximate FI in 'ALL-n=X'")
+ # Parse, process (i.e., set defaults for unselected options) and check chosen options
+ args = parser.parse_args()
+ args.log_per_context = True
+ set_default_values(args, also_hyper_params=False) # -set defaults, some are based on chosen scenario / experiment
+ check_for_errors(args, **kwargs) # -check whether incompatible options are selected
+ return args
+
+
+## Function for running experiments and collecting results
+def get_result(args):
+ # -get param-stamp
+ param_stamp = get_param_stamp_from_args(args)
+ # -check whether already run, and if not do so
+ if os.path.isfile('{}/time-{}.txt'.format(args.r_dir, param_stamp)) and os.path.isfile('{}/acc-{}.txt'.format(args.r_dir, param_stamp)):
+ print(" already run: {}".format(param_stamp))
+ else:
+ args.train = True
+ print("\n ...running: {} ...".format(param_stamp))
+ main.run(args)
+ # -get average accuracy
+ fileName = '{}/acc-{}.txt'.format(args.r_dir, param_stamp)
+ file = open(fileName)
+ ave = float(file.readline())
+ file.close()
+ # -get training time
+ fileName = '{}/time-{}.txt'.format(args.r_dir, param_stamp)
+ file = open(fileName)
+ traintime = float(file.readline())
+ file.close()
+ # -return it
+ return (traintime, ave)
+
+
+def collect_all(method_dict, seed_list, args, name=None):
+ # -print name of method on screen
+ if name is not None:
+ print("\n------{}------".format(name))
+ # -run method for all random seeds
+ for seed in seed_list:
+ args.seed = seed
+ method_dict[seed] = get_result(args)
+ # -return updated dictionary with results
+ return method_dict
+
+
+if __name__ == '__main__':
+
+ ## Load input-arguments
+ args = handle_inputs()
+ args.time = True
+
+ # Create plots- and results-directories if needed
+ if not os.path.isdir(args.r_dir):
+ os.mkdir(args.r_dir)
+ if not os.path.isdir(args.p_dir):
+ os.mkdir(args.p_dir)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------------#
+ #----- RUN ALL MODELS -----#
+ #--------------------------#
+
+ seed_list = list(range(args.seed, args.seed+args.n_seeds))
+
+    ## Baseline
+ args.replay = "none"
+ BASE = {}
+ BASE = collect_all(BASE, seed_list, args, name="None")
+
+ # -set EWC-specific arguments
+ args.weight_penalty = True
+ args.importance_weighting = 'fisher'
+
+ ## EWC, "sample"
+ SAMPLE = {}
+ args.fisher_labels = "sample"
+ args.fisher_n = None
+ args.fisher_batch = 1
+ for ewc_lambda in lamda_list:
+ args.reg_strength=ewc_lambda
+ SAMPLE[ewc_lambda] = {}
+ SAMPLE[ewc_lambda] = collect_all(SAMPLE[ewc_lambda], seed_list, args,
+ name="EWC -- FI-labels='sample' (lambda={})".format(ewc_lambda))
+
+ ## EWC, "true"
+ TRUE = {}
+ args.fisher_labels = "true"
+ args.fisher_n = None
+ args.fisher_batch = 1
+ for ewc_lambda in lamda_list:
+ args.reg_strength=ewc_lambda
+ TRUE[ewc_lambda] = {}
+ TRUE[ewc_lambda] = collect_all(TRUE[ewc_lambda], seed_list, args,
+ name="EWC -- FI-labels='true' (lambda={})".format(ewc_lambda))
+
+ ## EWC, "true" - batch=128
+ TRUE128 = {}
+ args.fisher_labels = "true"
+ args.fisher_n = None
+ args.fisher_batch = 128
+ for ewc_lambda in lamda_list:
+ args.reg_strength=ewc_lambda
+ TRUE128[ewc_lambda] = {}
+ TRUE128[ewc_lambda] = collect_all(TRUE128[ewc_lambda], seed_list, args,
+ name="EWC -- FI-labels='true' - batch=128 (lambda={})".format(ewc_lambda))
+
+ ## EWC, "all" -- only [args.fisher_n_all] samples per task
+ ALL500 = {}
+ args.fisher_labels = "all"
+ args.fisher_n = args.fisher_n_all
+ args.fisher_batch = 1
+ for ewc_lambda in lamda_list:
+ args.reg_strength=ewc_lambda
+ ALL500[ewc_lambda] = {}
+ ALL500[ewc_lambda] = collect_all(ALL500[ewc_lambda], seed_list, args,
+ name="EWC -- FI-labels='all' - n={} (lambda={})".format(args.fisher_n_all, ewc_lambda))
+
+ ## EWC, "all"
+ ALL = {}
+ args.fisher_labels = "all"
+ args.fisher_n = None
+ args.fisher_batch = 1
+ for ewc_lambda in lamda_list:
+ args.reg_strength=ewc_lambda
+ ALL[ewc_lambda] = {}
+ ALL[ewc_lambda] = collect_all(ALL[ewc_lambda], seed_list, args,
+ name="EWC -- FI-labels='all' (lambda={})".format(ewc_lambda))
+
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-----------------------------------------#
+ #----- COLLECT DATA & PRINT ON SCREEN-----#
+ #-----------------------------------------#
+
+    # x-axis for plotting / reporting uses exactly the tested lambda values (no lambda=0 entry)
+ ext_lambda_list = lamda_list
+ print("\n")
+
+ base_entries = [BASE[seed][1] for seed in seed_list]
+ mean_base = np.mean(base_entries)
+ sem_base = (np.sqrt(np.var(base_entries)) / np.sqrt(args.n_seeds)) if args.n_seeds>1 else None
+
+ ###---EWC "all" ---###
+ mean_all = []
+ sem_all = []
+ for ewc_lambda in lamda_list:
+ new_entries = [ALL[ewc_lambda][seed][1] for seed in seed_list]
+ mean_all.append(np.mean(new_entries))
+ if args.n_seeds>1:
+ sem_all.append(np.sqrt(np.var(new_entries)) / np.sqrt((args.n_seeds)))
+ lambda_all = ext_lambda_list[np.argmax(mean_all)]
+ # -print on screen
+ print("\n\nEWC -- FI-labels='all'")
+ print(" param list (lambda): {}".format(ext_lambda_list))
+ print(" {}".format(mean_all))
+ print("---> lambda = {} -- {}".format(lambda_all, np.max(mean_all)))
+
+ ###---EWC "all, with n=500" ---###
+ mean_all500 = []
+ if args.n_seeds>1:
+ sem_all500 = []
+ for ewc_lambda in lamda_list:
+ new_entries = [ALL500[ewc_lambda][seed][1] for seed in seed_list]
+ mean_all500.append(np.mean(new_entries))
+ if args.n_seeds>1:
+ sem_all500.append(np.sqrt(np.var(new_entries)) / np.sqrt((args.n_seeds)))
+ lambda_all500 = ext_lambda_list[np.argmax(mean_all500)]
+ # -print on screen
+ print("\n\nEWC -- FI-labels='all' - n={}".format(args.fisher_n_all))
+ print(" param list (lambda): {}".format(ext_lambda_list))
+ print(" {}".format(mean_all500))
+ print("---> lambda = {} -- {}".format(ext_lambda_list[np.argmax(mean_all500)], np.max(mean_all500)))
+
+ ###---EWC "sample" ---###
+ mean_sample = []
+ if args.n_seeds>1:
+ sem_sample = []
+ for ewc_lambda in lamda_list:
+ new_entries = [SAMPLE[ewc_lambda][seed][1] for seed in seed_list]
+ mean_sample.append(np.mean(new_entries))
+ if args.n_seeds>1:
+ sem_sample.append(np.sqrt(np.var(new_entries)) / np.sqrt((args.n_seeds)))
+ lambda_sample = ext_lambda_list[np.argmax(mean_sample)]
+ # -print on screen
+ print("\n\nEWC -- FI-labels='sample'")
+ print(" param list (lambda): {}".format(ext_lambda_list))
+ print(" {}".format(mean_sample))
+ print("---> lambda = {} -- {}".format(ext_lambda_list[np.argmax(mean_sample)], np.max(mean_sample)))
+
+ ###---EWC "true" ---###
+ mean_true = []
+ if args.n_seeds>1:
+ sem_true = []
+ for ewc_lambda in lamda_list:
+ new_entries = [TRUE[ewc_lambda][seed][1] for seed in seed_list]
+ mean_true.append(np.mean(new_entries))
+ if args.n_seeds>1:
+ sem_true.append(np.sqrt(np.var(new_entries)) / np.sqrt((args.n_seeds)))
+ lambda_true = ext_lambda_list[np.argmax(mean_true)]
+ # -print on screen
+ print("\n\nEWC -- FI-labels='true'")
+ print(" param list (lambda): {}".format(ext_lambda_list))
+ print(" {}".format(mean_true))
+ print("---> lambda = {} -- {}".format(ext_lambda_list[np.argmax(mean_true)], np.max(mean_true)))
+
+ ###---EWC "true" - batch=128 ---###
+ mean_true128 = []
+ if args.n_seeds>1:
+ sem_true128 = []
+ for ewc_lambda in lamda_list:
+ new_entries = [TRUE128[ewc_lambda][seed][1] for seed in seed_list]
+ mean_true128.append(np.mean(new_entries))
+ if args.n_seeds>1:
+ sem_true128.append(np.sqrt(np.var(new_entries)) / np.sqrt((args.n_seeds)))
+ lambda_true128 = ext_lambda_list[np.argmax(mean_true128)]
+ # -print on screen
+ print("\n\nEWC -- FI-labels='true' - batch=128")
+ print(" param list (lambda): {}".format(ext_lambda_list))
+ print(" {}".format(mean_true128))
+ print("---> lambda = {} -- {}".format(ext_lambda_list[np.argmax(mean_true128)], np.max(mean_true128)))
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------#
+ #----- PLOTTING -----#
+ #--------------------#
+
+ # name for plot
+ plot_name = "hyperParams-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
+ scheme = "incremental {} learning".format(args.scenario)
+ title = "{} - {}".format(args.experiment, scheme)
+ ylabel = "Test accuracy (after all contexts)"
+
+ # open pdf
+ pp = my_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
+ figure_list = []
+
+
+ ########### ALL HYPERPARAM-VALUES ###########
+
+ # - select lines, names and colors
+ lines = [mean_all, mean_all500, mean_sample, mean_true, mean_true128]
+ errors = [sem_all, sem_all500, sem_sample, sem_true, sem_true128] if args.n_seeds>1 else None
+ names = ["All", "All - n={}".format(args.fisher_n_all), "Sample", "True", "True - batch=128"]
+ colors = ["black", "grey", "red", "orange", "blue"]
+ # - make plot (line plot - only average)
+ figure = my_plt.plot_lines(lines, x_axes=ext_lambda_list, ylabel=ylabel, line_names=names, list_with_errors=errors,
+                               title=title, x_log=True, xlabel="EWC: lambda (log-scale)",
+ with_dots=True, colors=colors, h_lines=[mean_base],
+ h_errors=[sem_base] if args.n_seeds>1 else None, h_labels=["None"])
+ figure_list.append(figure)
+
+
+ ########### ACCURACY (AND TRAIN-TIMES) OF BEST HYPERPARAMS ###########
+
+ # Collect the best accuracies (and training times)
+ ave_prec = {}
+ train_time = {}
+ for seed in seed_list:
+ ave_prec[seed] = [BASE[seed][1], ALL[lambda_all][seed][1], ALL500[lambda_all500][seed][1], SAMPLE[lambda_sample][seed][1],
+ TRUE[lambda_true][seed][1], TRUE128[lambda_true128][seed][1]]
+ train_time[seed] = [BASE[seed][0], ALL[lambda_all][seed][0], ALL500[lambda_all500][seed][0], SAMPLE[lambda_sample][seed][0],
+ TRUE[lambda_true][seed][0], TRUE128[lambda_true128][seed][0]]
+ names = ["None", "All", "All - n={}".format(args.fisher_n_all), "Sample", "True", "True - batch=128"]
+ colors = ["green", "black", "grey", "red", "orange", "blue"]
+ ids = [0, 1, 2, 3, 4, 5]
+
+    # Average accuracy
+ # -bar-plot
+ means = [np.mean([ave_prec[seed][id] for seed in seed_list]) for id in ids]
+ if len(seed_list)>1:
+ sems = [np.sqrt(np.var([ave_prec[seed][id] for seed in seed_list]))/np.sqrt(len(seed_list)) for id in ids]
+ cis = [1.96*np.sqrt(np.var([ave_prec[seed][id] for seed in seed_list]))/np.sqrt(len(seed_list)) for id in ids]
+ figure = my_plt.plot_bar(means, names=names, colors=colors, ylabel="average precision (after all tasks)",
+ title=title, yerr=cis if len(seed_list)>1 else None, ylim=(0,1))
+ figure_list.append(figure)
+ # -print results to screen
+ print("\n\n"+"#"*49+"\n AVERAGE TEST ACCURACY (in %)\n"+"-"*49)
+ for i,name in enumerate(names):
+ if len(seed_list) > 1:
+ print("{:21s} {:.2f} (+/- {:.2f}), n={}".format(name, 100*means[i], 100*sems[i], len(seed_list)))
+ else:
+ print("{:21s} {:.2f}".format(name, 100*means[i]))
+ if i==0:
+ print("-"*49)
+ print("#"*49)
+
+ # Training time
+ # -bar-plot
+ means = [np.mean([train_time[seed][id] for seed in seed_list]) for id in ids]
+ if len(seed_list) > 1:
+ sems = [np.sqrt(np.var([train_time[seed][id] for seed in seed_list])) / np.sqrt(len(seed_list)) for id in ids]
+ cis = [1.96 * np.sqrt(np.var([train_time[seed][id] for seed in seed_list])) / np.sqrt(len(seed_list)) for id in ids]
+ figure = my_plt.plot_bar(means, names=names, colors=colors, ylabel="Training Time (in Sec)",
+ title=title, yerr=cis if len(seed_list) > 1 else None)
+ figure_list.append(figure)
+ # -print results to screen
+ print("\n\n" + "#" * 49 + "\n TOTAL TRAINING TIME (in Sec)\n" + "-" * 49)
+ for i, name in enumerate(names):
+ if len(seed_list) > 1:
+ print("{:21s} {:4.0f} (+/- {:2.0f}), n={}".format(name, means[i], sems[i], len(seed_list)))
+ else:
+ print("{:21s} {:4.0f}".format(name, means[i]))
+ if i==0:
+ print("-"*49)
+ print("#" * 49)
+
+
+ # add figures to pdf
+ for figure in figure_list:
+ pp.savefig(figure)
+
+ # close the pdf
+ pp.close()
+
+ # Print name of generated plot on screen
+ print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
diff --git a/PyTorch/build-in/other/continual-learning/LICENSE b/PyTorch/build-in/other/continual-learning/LICENSE
new file mode 100644
index 000000000..8127a5016
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Gido van de Ven
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/PyTorch/build-in/other/continual-learning/NeurIPStutorial/README.md b/PyTorch/build-in/other/continual-learning/NeurIPStutorial/README.md
new file mode 100644
index 000000000..ca302c958
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/NeurIPStutorial/README.md
@@ -0,0 +1,59 @@
+# NeurIPS tutorial "Lifelong Learning Machines"
+
+The code in this repository is used as part of the
+[NeurIPS 2022 tutorial "Lifelong Learning Machines"](https://sites.google.com/view/neurips2022-llm-tutorial).
+
+In Part 2a of the tutorial, an overview is provided of currently used strategies for continual learning.
+In this overview, following
+[van de Ven et al. (2022, *Nature Machine Intelligence*)](https://www.nature.com/articles/s42256-022-00568-3),
+five computational strategies for continual learning are distinguished:
+
+
+
+To illustrate the relative strengths and weaknesses of these different computational strategies,
+an empirical comparison is performed in which for each strategy two representative example methods are included:
+- ***Parameter regularization***
+ - Elastic weight consolidation (**EWC**;
+ [Kirkpatrick et al, 2017 *PNAS*](https://www.pnas.org/doi/10.1073/pnas.1611835114))
+ - Synaptic Intelligence (**SI**;
+ [Zenke et al., 2017 *ICML*](http://proceedings.mlr.press/v70/zenke17a))
+- ***Functional regularization***
+ - Learning without forgetting (**LwF**;
+ [Li & Hoiem, 2017 *TPAMI*](https://ieeexplore.ieee.org/abstract/document/8107520))
+ - Functional Regularization of the Memorable Past (**FROMP**;
+ [Pan et al., 2020 *NeurIPS*](https://proceedings.neurips.cc/paper/2020/hash/2f3bbb9730639e9ea48f309d9a79ff01-Abstract.html))
+- ***Replay***
+ - Deep Generative Replay (**DGR**;
+ [Shin et al., 2017 *NeurIPS*](https://proceedings.neurips.cc/paper/2017/hash/0efbe98067c6c73dba1250d2beaa81f9-Abstract.html))
+ - Experience Replay (**ER**;
+ [Chaudhry et al., 2019 *arXiv*](https://arxiv.org/abs/1902.10486))
+- ***Context-specific components***
+ - Separate Networks (**SepN**)
+ - Context-dependent Gating (**XdG**;
+ [Masse et al., 2018 *PNAS*](https://www.pnas.org/doi/abs/10.1073/pnas.1803839115))
+- ***Template-based classification***
+ - Generative Classifier (**GenC**;
+ [van de Ven et al., 2021 *CVPR-W*](https://openaccess.thecvf.com/content/CVPR2021W/CLVision/html/van_de_Ven_Class-Incremental_Learning_With_Generative_Classifiers_CVPRW_2021_paper.html))
+ - Incremental Classifier and Representation Learning (**iCaRL**;
+ [Rebuffi et al., 2017 *CVPR*](https://openaccess.thecvf.com/content_cvpr_2017/html/Rebuffi_iCaRL_Incremental_Classifier_CVPR_2017_paper.html))
+
+The comparison is done on the Split MNIST benchmark,
+which is performed according to each of the three continual learning scenarios
+([van de Ven et al., 2022 *Nature Machine Intelligence*](https://www.nature.com/articles/s42256-022-00568-3)):
+
+
+
+Note that in the NeurIPS tutorial, the term "task" is used instead of "context"
+to refer to the incremental parts of the Split MNIST protocol.
+
+To run the experimental comparison from the tutorial yourself, the following lines of code can be used:
+
+```bash
+python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=task
+python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=domain
+python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=class
+```
+
+This should produce graphs similar to the ones below:
+
+
diff --git a/PyTorch/build-in/other/continual-learning/NeurIPStutorial/compare_for_tutorial.py b/PyTorch/build-in/other/continual-learning/NeurIPStutorial/compare_for_tutorial.py
new file mode 100644
index 000000000..012c1919d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/NeurIPStutorial/compare_for_tutorial.py
@@ -0,0 +1,334 @@
+#!/usr/bin/env python3
+import sys
+import os
+import numpy as np
+# -expand module search path to parent directory
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+# -custom-written code
+import main
+import utils
+from utils import checkattr
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'comparison': True, 'compare_all': True}
+ # Define input options
+ parser = options.define_args(filename="compare", description='Compare and plot performance of CL strategies.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser = options.add_cl_options(parser, **kwargs)
+ # Parse, process (i.e., set defaults for unselected options) and check chosen options
+ args = parser.parse_args()
+ args.log_per_context = True
+ set_default_values(args, also_hyper_params=True) # -set defaults, some are based on chosen scenario / experiment
+ check_for_errors(args, **kwargs) # -check whether incompatible options are selected
+ return args
+
+
+## Functions for running experiments and collecting results
+def get_results(args):
+ # -get param-stamp
+ param_stamp = get_param_stamp_from_args(args)
+ # -check whether the 'results-dict' is already available; if not, run the experiment
+ file_to_check = '{}/dict-{}--n{}{}.pkl'.format(
+ args.r_dir, param_stamp, "All" if args.acc_n is None else args.acc_n,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else ""
+ )
+ if os.path.isfile(file_to_check):
+ print(" already run: {}".format(param_stamp))
+ else:
+ args.train = True
+ args.results_dict = True
+ print(" ...running: {}".format(param_stamp))
+ main.run(args)
+ # -get average accuracy
+ fileName = '{}/acc-{}{}.txt'.format(args.r_dir, param_stamp,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else "")
+ file = open(fileName)
+ ave = float(file.readline())
+ file.close()
+ # -get results-dict
+ dict = utils.load_object(
+ "{}/dict-{}--n{}{}".format(args.r_dir, param_stamp, "All" if args.acc_n is None else args.acc_n,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else "")
+ )
+ # -print average accuracy on screen
+ print("--> average accuracy: {}".format(ave))
+ # -return average accuracy
+ return (dict, ave)
+
+def collect_all(method_dict, seed_list, args, name=None):
+ # -print name of method on screen
+ if name is not None:
+ print("\n------{}------".format(name))
+ # -run method for all random seeds
+ for seed in seed_list:
+ args.seed = seed
+ method_dict[seed] = get_results(args)
+ # -return updated dictionary with results
+ return method_dict
+
+
+
+if __name__ == '__main__':
+
+ ## Load input-arguments
+ args = handle_inputs()
+
+ # Create plots- and results-directories if needed
+ if not os.path.isdir(args.r_dir):
+ os.mkdir(args.r_dir)
+ if not os.path.isdir(args.p_dir):
+ os.mkdir(args.p_dir)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------------#
+ #----- RUN ALL MODELS -----#
+ #--------------------------#
+
+ seed_list = list(range(args.seed, args.seed+args.n_seeds))
+
+
+ ###----"BASELINES"----###
+
+ ## None
+ args.replay = "none"
+ NONE = {}
+ NONE = collect_all(NONE, seed_list, args, name="None")
+
+ ## JOINT training, again for each context (only using number of iterations from that context)
+ args.cummulative = True
+ args.reinit = True
+ JOINT = {}
+ JOINT = collect_all(JOINT, seed_list, args, name="Joint")
+ args.reinit = False
+ args.cummulative = False
+
+
+ ###----"CONTEXT-SPECIFIC"----####
+
+ if args.scenario=="task":
+ ## Separate network per context
+ fc_units_temp = args.fc_units
+ args.fc_units = args.fc_units_sep
+ args.separate_networks = True
+ SEP = {}
+ SEP = collect_all(SEP, seed_list, args, name="Separate Networks")
+ args.separate_networks = False
+ args.fc_units = fc_units_temp
+
+ ## XdG
+ always_xdg = checkattr(args, 'xdg')
+ args.xdg = True
+ XDG = {}
+ XDG = collect_all(XDG, seed_list, args, name="XdG")
+ args.xdg = always_xdg
+
+
+ ###----"PARAMETER REGULARIZATION"----####
+
+ ## EWC
+ args.weight_penalty = True
+ args.importance_weighting = 'fisher'
+ args.offline = True
+ args.reg_strength = args.ewc_lambda
+ EWC = {}
+ EWC = collect_all(EWC, seed_list, args, name="EWC")
+ args.weight_penalty = False
+ args.offline = False
+
+ ## SI
+ args.weight_penalty = True
+ args.importance_weighting = 'si'
+ args.reg_strength = args.si_c
+ SI = {}
+ SI = collect_all(SI, seed_list, args, name="SI")
+ args.weight_penalty = False
+
+
+ ###----"FUNCTIONAL REGULARIZATION"----####
+
+ ## LwF
+ args.replay = "current"
+ args.distill = True
+ LWF = {}
+ LWF = collect_all(LWF, seed_list, args, name="LwF")
+ args.replay = "none"
+ args.distill = False
+
+ ## FROMP
+ args.fromp = True
+ args.sample_selection = "fromp"
+ FROMP = {}
+ FROMP = collect_all(FROMP, seed_list, args, name="FROMP")
+ args.fromp = False
+
+
+ ###----"REPLAY"----###
+
+ ## DGR
+ args.replay = "generative"
+ args.distill = False
+ DGR = {}
+ DGR = collect_all(DGR, seed_list, args, name="Deep Generative Replay")
+
+ ## Experience Replay
+ args.replay = "buffer"
+ args.sample_selection = "random"
+ ER = {}
+ ER = collect_all(ER, seed_list, args, name="Experience Replay (budget = {})".format(args.budget))
+ args.replay = "none"
+
+
+ ###----"TEMPLATE-BASED CLASSIFICATION"----####
+
+ if args.scenario=="class":
+ ## iCaRL
+ args.bce = True
+ args.bce_distill = True
+ args.prototypes = True
+ args.add_buffer = True
+ args.sample_selection = "herding"
+ args.neg_samples = "all-so-far"
+ ICARL = {}
+ ICARL = collect_all(ICARL, seed_list, args, name="iCaRL (budget = {})".format(args.budget))
+ args.bce = False
+ args.bce_distill = False
+ args.prototypes = False
+ args.add_buffer = False
+
+ ## Generative Classifier
+ args.gen_classifier = True
+ classes_per_context = 2 if args.experiment=="splitMNIST" else 10
+ args.iters = int(args.iters / classes_per_context)
+ args.fc_units = args.fc_units_gc
+ args.fc_lay = args.fc_lay_gc
+ args.z_dim = args.z_dim_gc
+ args.hidden = True
+ args.lr = 0.001
+ GENCLASS = {}
+ GENCLASS = collect_all(GENCLASS, seed_list, args, name="Generative Classifier")
+
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #---------------------------------------------#
+ #----- COLLECT RESULTS: AVERAGE ACCURACY -----#
+ #---------------------------------------------#
+
+ ## For each seed, create list with average test accuracy
+ ave_acc = {}
+ for seed in seed_list:
+ ave_acc[seed] = [NONE[seed][1], JOINT[seed][1], EWC[seed][1], SI[seed][1], LWF[seed][1], FROMP[seed][1],
+ DGR[seed][1], ER[seed][1]]
+ if args.scenario=="task":
+ ave_acc[seed].append(XDG[seed][1])
+ ave_acc[seed].append(SEP[seed][1])
+ elif args.scenario=="class":
+ ave_acc[seed].append(ICARL[seed][1])
+ ave_acc[seed].append(GENCLASS[seed][1])
+
+ ## For each seed, create lists with test accuracy throughout training
+ prec = {}
+ for seed in seed_list:
+ # -for plot of average accuracy throughout training
+ key = "average"
+ prec[seed] = [NONE[seed][0][key], JOINT[seed][0][key], EWC[seed][0][key], SI[seed][0][key], LWF[seed][0][key],
+ FROMP[seed][0][key], DGR[seed][0][key], ER[seed][0][key]]
+ if args.scenario=="task":
+ prec[seed].append(XDG[seed][0][key])
+ prec[seed].append(SEP[seed][0][key])
+ elif args.scenario=="class":
+ prec[seed].append(ICARL[seed][0][key])
+ prec[seed].append(GENCLASS[seed][0][key])
+
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------------------------------------#
+ #----- REPORTING / PLOTTING: AVERAGE ACCURACY -----#
+ #--------------------------------------------------#
+
+ # name for plot
+ plot_name = "tutorialplots-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
+ scheme = "{}-incremental learning".format(args.scenario)
+ title = "{} - {}".format(args.experiment, scheme)
+
+ # select names / colors / ids
+ names = ["None", "Joint"]
+ colors = ["grey", "black"]
+ ids = [0, 1]
+ if args.scenario=="task":
+ names += ['Separate Networks', 'XdG']
+ colors += ['dodgerblue', 'deepskyblue']
+ ids += [9, 8]
+ names += ['EWC', 'SI', 'LwF', 'FROMP (b={})'.format(args.budget), 'DGR', "ER (b={})".format(args.budget)]
+ colors += ['darkgreen', 'yellowgreen', 'gold', 'goldenrod', 'indianred', 'red']
+ ids += [2, 3, 4, 5, 6, 7]
+ if args.scenario=="class":
+ names += ['Generative Classifier', "iCaRL (b={})".format(args.budget)]
+ colors += ['indigo', 'purple']
+ ids += [9, 8]
+
+ # open pdf
+ pp = visual_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
+ figure_list = []
+
+ # bar-plot
+ means = [np.mean([ave_acc[seed][id] for seed in seed_list]) for id in ids]
+ if len(seed_list)>1:
+ sems = [np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
+ cis = [1.96*np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
+ figure = visual_plt.plot_bar(means, names=names, colors=colors, ylabel="Average accuracy (after all contexts)",
+ title=title, yerr=cis if len(seed_list)>1 else None, ylim=(0,1))
+ figure_list.append(figure)
+
+ # print results to screen
+ print("\n\n"+"#"*60+"\nSUMMARY RESULTS: {}\n".format(title)+"#"*60)
+ for i,name in enumerate(names):
+ if len(seed_list) > 1:
+ print("{:27s} {:.2f} (+/- {:.2f}), n={}".format(name, 100*means[i], 100*sems[i], len(seed_list)))
+ else:
+ print("{:27s} {:.2f}".format(name, 100*means[i]))
+ if i==1:
+ print("="*60)
+ print("#"*60)
+
+ # line-plot
+ x_axes = NONE[args.seed][0]["x_context"]
+ ave_lines = []
+ sem_lines = []
+ for id in ids:
+ new_ave_line = []
+ new_sem_line = []
+ for line_id in range(len(prec[args.seed][id])):
+ all_entries = [prec[seed][id][line_id] for seed in seed_list]
+ new_ave_line.append(np.mean(all_entries))
+ if len(seed_list) > 1:
+ new_sem_line.append(1.96*np.sqrt(np.var(all_entries)/(len(all_entries)-1)))
+ ave_lines.append(new_ave_line)
+ sem_lines.append(new_sem_line)
+ figure = visual_plt.plot_lines(ave_lines, x_axes=x_axes, line_names=names, colors=colors, title=title,
+ xlabel="# of contexts", ylabel="Average accuracy (on contexts so far)",
+ list_with_errors=sem_lines if len(seed_list)>1 else None)
+ figure_list.append(figure)
+
+ # add all figures to pdf
+ for figure in figure_list:
+ pp.savefig(figure)
+
+ # close the pdf
+ pp.close()
+
+ # Print name of generated plot on screen
+ print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/README.md b/PyTorch/build-in/other/continual-learning/README.md
new file mode 100644
index 000000000..8fd1a3ca4
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/README.md
@@ -0,0 +1,100 @@
+# continual-learning
+
+## 1. 模型概述
+continual-learning是一个基于 PyTorch 的持续学习(Continual Learning)算法综合实现库,由 Gido van de Ven 等人开发,用于复现其发表在 Nature Machine Intelligence(2022)上的论文《Three types of incremental learning》中的实验。
+它系统性地实现了多种主流持续学习方法,包括 EWC、Synaptic Intelligence (SI)、LwF、iCaRL、Experience Replay (ER)、Deep Generative Replay (DGR) 等,并支持三种经典持续学习场景:任务增量(task)、领域增量(domain)和类别增量(class)。项目代码结构清晰,便于比较不同方法在 SplitMNIST、PermutedMNIST、CIFAR10/100 等基准数据集上的性能,是研究持续学习/灾难性遗忘问题的重要开源工具。
+
+
+- 参考实现:
+ ```
+ url=https://github.com/GMvandeVen/continual-learning
+ commit_id=e6d795aa81b9cef742b8de76cb71222d4d1ce00b
+ ```
+
+
+## 2. 快速开始
+使用本模型执行训练的主要流程如下:
+1. 基础环境安装:介绍训练前需要完成的基础环境检查和安装。
+2. 获取数据集:介绍如何获取训练所需的数据集。
+3. 构建Docker环境:介绍如何使用Dockerfile创建模型训练时所需的Docker环境。
+4. 启动训练:介绍如何运行训练。
+
+### 2.1 基础环境安装
+
+请参考[基础环境安装](../../../doc/Environment.md)章节,完成训练前的基础环境检查和安装。
+
+
+### 2.2 准备数据集
+
+- 本实验采用MNIST数据集进行训练,请解压后放在仓库的store文件夹中。
+
+
+### 2.3 构建Docker环境
+
+使用Dockerfile,创建运行模型训练所需的Docker环境。
+
+#### 2.3.1 执行以下命令,进入Dockerfile所在目录。
+
+ ```
+ cd <modelzoo-dir>/PyTorch/build-in/other/continual-learning
+ ```
+ 其中: `<modelzoo-dir>`是ModelZoo仓库的主目录。
+
+#### 2.3.2 执行以下命令,构建名为`sdaa_continual-learning`的镜像。
+
+ ```
+ DOCKER_BUILDKIT=0 COMPOSE_DOCKER_CLI_BUILD=0 docker build . -t sdaa_continual-learning
+ ```
+
+#### 2.3.3 执行以下命令,启动容器。
+
+ ```
+ docker run -itd --name sdaa_continual-learning -v <dataset_path>:/datasets --net=host --ipc=host --device /dev/tcaicard0 --device /dev/tcaicard1 --device /dev/tcaicard2 --device /dev/tcaicard3 --shm-size=128g sdaa_continual-learning /bin/bash
+ ```
+
+ 其中:`-v`参数用于将主机上的目录或文件挂载到容器内部,对于模型训练,您需要将主机上的数据集目录挂载到docker中的`/datasets/`目录。更多容器配置参数说明参考[文档](../../../doc/Docker.md)。
+
+
+#### 2.3.4 执行以下命令,进入容器。
+
+ ```
+ docker exec -it sdaa_continual-learning /bin/bash
+ ```
+
+#### 2.3.5 执行以下命令,启动虚拟环境。
+
+ ```
+ conda activate torch_env_py310
+ ```
+
+#### 2.3.6 执行以下命令,安装其他环境依赖包。
+
+ ```
+ pip install -r requirements.txt
+ ```
+
+
+### 2.4 启动训练
+
+#### 2.4.1 在Docker环境中,进入训练脚本所在目录。
+ ```
+ cd /workspace/other/continual-learning
+ ```
+
+#### 2.4.2 运行以下命令训练。
+
+ - 检查数据集路径,请参考2.2组织数据集。
+ - 启动训练:
+ ```
+ python ./main.py --experiment=splitMNIST --scenario=task --si
+ ```
+
+### 2.5 训练结果
+
+输出训练loss曲线及结果(代码参考[get_loss.py](./get_loss.py))
+
+MeanRelativeError: -0.121553406
+MeanAbsoluteError: 0.00038663193
+Rule,mean_relative_error -0.121553406
+pass mean_relative_error=-0.121553406 <= 0.05 or mean_absolute_error=0.00038663193 <= 0.0002
+
diff --git a/PyTorch/build-in/other/continual-learning/StabilityGap/README.md b/PyTorch/build-in/other/continual-learning/StabilityGap/README.md
new file mode 100644
index 000000000..fd96acf9f
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/StabilityGap/README.md
@@ -0,0 +1,14 @@
+# Stability Gap example
+
+The script `stability_gap_example.py` provides a simple example of the **stability gap** [(De Lange et al.; 2023, *ICLR*)](https://openreview.net/forum?id=Zy350cRstc6). This phenomenon of temporary forgetting can be consistently observed when using state-of-the-art continual learning methods (e.g., replay or regularization) to incrementally train a deep neural network on multiple tasks. Strikingly, as described by [Hess et al. (2023, *ContinualAI Unconference*)](https://proceedings.mlr.press/v249/hess24a.html), the stability gap occurs even with **incremental joint training** (i.e., when training on a new task, all previous tasks are fully retrained as well), which can be interpreted as "full replay" or "perfect regularization".
+
+The example in this script uses **Rotated MNIST** with three tasks (rotations: 0°, 80° and 160°) as the task sequence:
+
+
+
+This task sequence is performed according to the domain-incremental learning scenario ([van de Ven et al.; 2022, *Nat Mach Intell*](https://www.nature.com/articles/s42256-022-00568-3)).
+A fully-connected neural network (with two hidden layers of 400 ReLUs each) is trained on this task sequence using incremental joint training, while the model's performance on the first task is evaluated after each training iteration.
+
+Running this script should produce a plot similar to:
+
+
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/StabilityGap/stability_gap_example.py b/PyTorch/build-in/other/continual-learning/StabilityGap/stability_gap_example.py
new file mode 100644
index 000000000..3f9ca594c
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/StabilityGap/stability_gap_example.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+
+# Standard libraries
+import sys
+import os
+import numpy as np
+import tqdm
+# Pytorch
+import torch
+from torch.nn import functional as F
+from torchvision import datasets, transforms
+# For visualization
+from torchvision.utils import make_grid
+import matplotlib.pyplot as plt
+
+# Expand the module search path to parent directory
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+# Load custom-written code
+import utils
+from visual import visual_plt
+from eval.evaluate import test_acc
+from models.classifier import Classifier
+from data.manipulate import TransformedDataset
+
+
+################## INITIAL SET-UP ##################
+
# Specify directories, and if needed create them
p_dir = "./store/plots"  #--> where the generated pdf with plots will be stored
d_dir = "./store/data"   #--> where data will be stored
for directory in (p_dir, d_dir):
    if not os.path.isdir(directory):
        print("Creating directory: {}".format(directory))
    # -[exist_ok=True] makes creation idempotent and avoids the race between
    #  the isdir-check and the actual creation of the directory
    os.makedirs(directory, exist_ok=True)

# Open pdf for plotting
plot_name = "stability_gap_example"
full_plot_name = "{}/{}.pdf".format(p_dir, plot_name)
pp = visual_plt.open_pdf(full_plot_name)
figure_list = []  #--> all figures appended to this list are written to the pdf at the end
+
+
+
################## CREATE TASK SEQUENCE ##################

## Download the MNIST dataset
print("\n\n " +' LOAD DATA '.center(70, '*'))
MNIST_trainset = datasets.MNIST(root='data/', train=True, download=True,
                                transform=transforms.ToTensor())
MNIST_testset = datasets.MNIST(root='data/', train=False, download=True,
                               transform=transforms.ToTensor())
# Basic properties of the MNIST images: 28x28 pixels, 1 channel, 10 classes
config = {'size': 28, 'channels': 1, 'classes': 10}

# Set for each task the amount of rotation to use
rotations = [0, 80, 160]

# Specify for each task the transformed train- and testset
n_tasks = len(rotations)
train_datasets = []
test_datasets = []
for rotation in rotations:
    # -a RandomRotation with degenerate range (rotation, rotation) applies a *fixed* rotation
    train_datasets.append(TransformedDataset(
        MNIST_trainset, transform=transforms.RandomRotation(degrees=(rotation,rotation)),
    ))
    test_datasets.append(TransformedDataset(
        MNIST_testset, transform=transforms.RandomRotation(degrees=(rotation,rotation)),
    ))

# Visualize the different tasks
figure, axis = plt.subplots(1, n_tasks, figsize=(3*n_tasks, 4))
n_samples = 49
for task_id in range(len(train_datasets)):
    # Show [n_samples] examples from the training set for each task
    data_loader = torch.utils.data.DataLoader(train_datasets[task_id], batch_size=n_samples, shuffle=True)
    image_tensor, _ = next(iter(data_loader))
    image_grid = make_grid(image_tensor, nrow=int(np.sqrt(n_samples)), pad_value=1) # pad_value=0 would give black borders
    axis[task_id].imshow(np.transpose(image_grid.numpy(), (1,2,0)))
    axis[task_id].set_title("Task {}".format(task_id+1))
    axis[task_id].axis('off')
figure_list.append(figure)
+
+
+
################## SET UP THE MODEL ##################

print("\n\n " + ' DEFINE THE CLASSIFIER '.center(70, '*'))

# Specify the architectural layout of the network to use
fc_lay = 3 #--> number of fully-connected layers (i.e., two hidden layers plus the output layer)
fc_units = 400 #--> number of units in each hidden layer

# Define the model (a fully-connected classifier without batch-norm)
model = Classifier(image_size=config['size'], image_channels=config['channels'], classes=config['classes'],
                   fc_layers=fc_lay, fc_units=fc_units, fc_bn=False)

# Print some model info to screen
utils.print_model_info(model)
+
+
+
+################## TRAINING AND EVALUATION ##################
+
+print('\n\n' + ' TRAINING + CONTINUAL EVALUATION '.center(70, '*'))
+
+# Define a function to train a model, while also evaluating its performance after each iteration
def train_and_evaluate(model, trainset, iters, lr, batch_size, testset,
                       test_size=512, performance=None):
    '''Train [model] on [trainset], evaluating on [testset] after every iteration.

    Args:
        model: the classifier to train (modified in place)
        trainset: dataset to sample training mini-batches from
        iters (int): number of training iterations
        lr (float): learning rate for SGD
        batch_size (int): number of samples per training mini-batch
        testset: dataset on which accuracy is measured after each iteration
        test_size (int): number of test samples used for each evaluation
        performance (list, optional): list to which the per-iteration test
            accuracies (in %) are appended; when provided it is extended in
            place, so results accumulate over multiple calls

    Returns:
        list: the list with per-iteration test accuracies
    '''
    # NOTE: the default used to be a mutable `performance=[]`, which in Python is
    #       shared between calls; use None as sentinel to avoid that pitfall
    if performance is None:
        performance = []

    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    model.train()
    iters_left = 1  #--> forces creation of a fresh data-loader on the first iteration
    progress_bar = tqdm.tqdm(range(1, iters+1))

    for _ in range(1, iters+1):
        optimizer.zero_grad()

        # Collect data from [trainset] and compute the loss
        iters_left -= 1
        if iters_left==0:
            # -data-loader exhausted: re-shuffle and start a new epoch
            data_loader = iter(torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                                           shuffle=True, drop_last=True))
            iters_left = len(data_loader)
        x, y = next(data_loader)
        y_hat = model(x)
        loss = torch.nn.functional.cross_entropy(input=y_hat, target=y, reduction='mean')

        # Calculate test accuracy (in %); done *before* the gradient step, so the
        # first recorded entry reflects performance prior to any update this call
        accuracy = 100*test_acc(model, testset, test_size=test_size, verbose=False, batch_size=512)
        performance.append(accuracy)

        # Take gradient step
        loss.backward()
        optimizer.step()
        progress_bar.set_description(
            ' | training loss: {loss:.3} | test accuracy: {prec:.3}% |'
            .format(loss=loss.item(), prec=accuracy)
        )
        progress_bar.update(1)
    progress_bar.close()
    return performance
+
# Specify the training parameters
iters = 500 #--> for how many iterations to train (per task)?
lr = 0.1 #--> learning rate
batch_size = 128 #--> size of mini-batches (per task seen so far, see below)
test_size = 2000 #--> number of test samples to evaluate on after each iteration

# Define a list to keep track of the performance on task 1 after each iteration
performance_task1 = []

# Iterate through the contexts
for task_id in range(n_tasks):
    current_task = task_id+1

    # Concatenate the training data of all tasks so far ("incremental joint training")
    joint_dataset = torch.utils.data.ConcatDataset(train_datasets[:current_task])

    # Determine the batch size to use
    # -scaled with the number of tasks, so each task contributes ~[batch_size] samples per mini-batch
    batch_size_to_use = current_task*batch_size

    # Train, while continually evaluating performance on the *first* task
    print('Training after arrival of Task {}:'.format(current_task))
    train_and_evaluate(model, trainset=joint_dataset, iters=iters, lr=lr,
                       batch_size=batch_size_to_use, testset=test_datasets[0],
                       test_size=test_size, performance=performance_task1)
+
+
+
################## PLOTTING ##################

## Plot per-iteration performance curve
# -the x-axis covers all training iterations over all tasks; vertical lines mark the task switches
figure = visual_plt.plot_lines(
    [performance_task1], x_axes=list(range(n_tasks*iters)),
    line_names=['Incremental Joint'],
    title="Performance on Task 1 throughout 'Incremental Joint Training'",
    ylabel="Test Accuracy (%) on Task 1",
    xlabel="Total number of training iterations", figsize=(10,5),
    v_line=[iters*(i+1) for i in range(n_tasks-1)], v_label='Task switch', ylim=(70,100),
)
figure_list.append(figure)

## Finalize the pdf with the plots
# -add figures to pdf
for figure in figure_list:
    pp.savefig(figure)
# -close pdf
pp.close()
# -print name of generated plot on screen
print("\nGenerated plot: {}\n".format(full_plot_name))
+
diff --git a/PyTorch/build-in/other/continual-learning/all_results.sh b/PyTorch/build-in/other/continual-learning/all_results.sh
new file mode 100644
index 000000000..be542195e
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/all_results.sh
@@ -0,0 +1,92 @@
#!/usr/bin/env bash
# Commands used to produce the results reported in the publications accompanying this repository.
# NOTE: the sections below are independent of each other; run only the commands
#       for the comparison you want to reproduce.



########### ICLR 2025 Blogpost ###########

python3 ICLRblogpost/compare_FI.py --seed=1 --n-seeds=30 --experiment=splitMNIST --scenario=task
python3 ICLRblogpost/compare_FI.py --seed=1 --n-seeds=30 --experiment=CIFAR10 --scenario=task --reducedResNet --iters=2000 --lr=0.001




########### NeurIPS 2022 Tutorial ###########

python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=task
python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=domain
python NeurIPStutorial/compare_for_tutorial.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=class




########### Three Types of Incremental Learning (2022, Nat Mach Intell) ###########

## MNIST

# -hyperparameter gridsearches (run these before the comparisons below)
./compare_hyperParams.py --seed=1 --experiment=splitMNIST --scenario=task
./compare_hyperParams.py --seed=1 --experiment=splitMNIST --scenario=domain
./compare_hyperParams.py --seed=1 --experiment=splitMNIST --scenario=class

./compare.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=task
./compare.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=domain
./compare.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=class

./compare_replay.py --seed=2 --n-seeds=5 --experiment=splitMNIST --scenario=task --tau-per-budget
./compare_replay.py --seed=2 --n-seeds=5 --experiment=splitMNIST --scenario=domain --tau-per-budget
./compare_replay.py --seed=2 --n-seeds=5 --experiment=splitMNIST --scenario=class --tau-per-budget


## Pre-training on CIFAR-10
# -one pre-trained encoder per seed; these checkpoints are reused by the CIFAR-100 runs below

./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=1
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=2
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=3
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=4
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=5
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=6
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=7
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=8
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=9
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=10
./main_pretrain.py --experiment=CIFAR10 --epochs=100 --augment --convE-stag=e100 --seed-to-stag --seed=11


## CIFAR-100

./compare_hyperParams.py --seed=1 --experiment=CIFAR100 --scenario=task --pre-convE --freeze-convE --seed-to-ltag --no-fromp
./compare_hyperParams.py --seed=1 --experiment=CIFAR100 --scenario=domain --pre-convE --freeze-convE --seed-to-ltag --no-fromp
./compare_hyperParams.py --seed=1 --experiment=CIFAR100 --scenario=class --pre-convE --freeze-convE --seed-to-ltag --no-fromp

./compare.py --seed=2 --n-seeds=10 --experiment=CIFAR100 --scenario=task --pre-convE --freeze-convE --no-fromp --seed-to-ltag
./compare.py --seed=2 --n-seeds=10 --experiment=CIFAR100 --scenario=domain --pre-convE --freeze-convE --no-fromp --seed-to-ltag
./compare.py --seed=2 --n-seeds=10 --experiment=CIFAR100 --scenario=class --pre-convE --freeze-convE --no-fromp --seed-to-ltag --eval-s=10000

./compare_replay.py --seed=2 --n-seeds=5 --experiment=CIFAR100 --scenario=task --pre-convE --freeze-convE --seed-to-ltag --no-fromp
./compare_replay.py --seed=2 --n-seeds=5 --experiment=CIFAR100 --scenario=domain --pre-convE --freeze-convE --seed-to-ltag --no-fromp
./compare_replay.py --seed=2 --n-seeds=5 --experiment=CIFAR100 --scenario=class --pre-convE --freeze-convE --seed-to-ltag --no-fromp


## Permuted MNIST

./compare_hyperParams.py --seed=1 --experiment=permMNIST --scenario=task --fisher-n=1000 --no-xdg --no-fromp --no-bir
./compare_hyperParams.py --seed=1 --experiment=permMNIST --scenario=task --singlehead --fisher-n=1000 --no-reg --no-fromp --no-bir
./compare_hyperParams.py --seed=1 --experiment=permMNIST --scenario=task --singlehead --xdg --gating-prop=0.6 --fisher-n=1000 --no-xdg --no-fromp --no-bir
./compare_hyperParams.py --seed=1 --experiment=permMNIST --scenario=domain --fisher-n=1000 --no-fromp --no-bir

./compare.py --seed=2 --n-seeds=5 --experiment=permMNIST --scenario=task --no-context-spec --no-fromp --no-bir --no-agem --fisher-n=1000
./compare.py --seed=2 --n-seeds=5 --experiment=permMNIST --scenario=task --singlehead --xdg --gating-prop=0.6 --no-context-spec --no-fromp --no-bir --no-agem --fisher-n=1000
./compare.py --seed=2 --n-seeds=5 --experiment=permMNIST --scenario=domain --no-fromp --no-bir --no-agem --fisher-n=1000


## Task-free Split MNIST

./compare_hyperParams_task_free.py --seed=1 --experiment=splitMNIST --scenario=task
./compare_hyperParams_task_free.py --seed=1 --experiment=splitMNIST --scenario=domain
./compare_hyperParams_task_free.py --seed=1 --experiment=splitMNIST --scenario=class

./compare_task_free.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=task --gating-prop=0.45 --c=10.
./compare_task_free.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=domain --c=10.
./compare_task_free.py --seed=2 --n-seeds=20 --experiment=splitMNIST --scenario=class --c=10.

#
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/compare.py b/PyTorch/build-in/other/continual-learning/compare.py
new file mode 100644
index 000000000..a07bfa2c2
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/compare.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+# -custom-written code
+import main
+from utils import checkattr
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt
+
+
+## Function for specifying input-options and organizing / checking them
def handle_inputs():
    '''Parse, post-process and validate the command-line options for this comparison script.'''
    # Indicator-dictionary for correctly retrieving / checking input options
    kwargs = {'comparison': True, 'compare_all': True}
    # Define input options
    parser = options.define_args(filename="compare", description='Compare performance of CL strategies.')
    for add_options in (options.add_general_options, options.add_eval_options, options.add_problem_options,
                        options.add_model_options, options.add_train_options, options.add_cl_options):
        parser = add_options(parser, **kwargs)
    # Should some methods not be included in the comparison?
    parser.add_argument('--no-context-spec', action='store_true', help="no XdG or Separate Networks")
    parser.add_argument('--no-reg', action='store_true', help="no EWC or SI")
    parser.add_argument('--no-fromp', action='store_true', help="no FROMP")
    parser.add_argument('--no-bir', action='store_true', help="no BI-R")
    parser.add_argument('--no-agem', action='store_true', help="no A-GEM")
    # Parse, process (i.e., set defaults for unselected options) and check chosen options
    args = parser.parse_args()
    args.log_per_context = True
    set_default_values(args, also_hyper_params=True)  # -some defaults depend on chosen scenario / experiment
    check_for_errors(args, **kwargs)  # -check whether incompatible options are selected
    return args
+
+
+## Functions for running experiments and collecting results
def get_results(args):
    '''Run (or reuse) the experiment specified by [args] and return its average test accuracy.

    If results for the corresponding parameter-stamp are already stored they are simply
    loaded; if only a trained model is stored, that model is merely evaluated; otherwise
    the full experiment (training + evaluation) is run.
    '''
    # -get param-stamp
    param_stamp = get_param_stamp_from_args(args)
    # -suffix distinguishing the evaluation-sample-size used by the generative classifier
    s_suffix = "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else ""
    # -check whether already run; if not, do so
    file_to_check = '{}/acc-{}{}.txt'.format(args.r_dir, param_stamp, s_suffix)
    if os.path.isfile(file_to_check):
        print(" already run: {}".format(param_stamp))
    elif os.path.isfile("{}/mM-{}".format(args.m_dir, param_stamp)):
        # -a stored model exists, so only evaluation is needed
        args.train = False
        print(" ...testing: {}".format(param_stamp))
        main.run(args)
    else:
        args.train = True
        print(" ...running: {}".format(param_stamp))
        main.run(args)
    # -get average accuracy (context manager guarantees the file is closed, even on a parse error)
    with open(file_to_check) as file:
        ave = float(file.readline())
    # -print average accuracy on screen
    print("--> average accuracy: {}".format(ave))
    # -return average accuracy
    return ave
+
def collect_all(method_dict, seed_list, args, name=None):
    '''Run the currently configured method once per seed in [seed_list].

    The average accuracy of each run is stored in [method_dict], keyed by seed;
    the dictionary is updated in place and also returned. If [name] is given,
    it is printed as a header before the runs start.
    '''
    if name is not None:
        print("\n------{}------".format(name))
    for current_seed in seed_list:
        args.seed = current_seed
        method_dict[current_seed] = get_results(args)
    return method_dict
+
+
+
if __name__ == '__main__':

    ## Load input-arguments
    args = handle_inputs()

    # Create plots- and results-directories if needed
    if not os.path.isdir(args.r_dir):
        os.mkdir(args.r_dir)
    if not os.path.isdir(args.p_dir):
        os.mkdir(args.p_dir)

    #-------------------------------------------------------------------------------------------------#

    #--------------------------#
    #----- RUN ALL MODELS -----#
    #--------------------------#

    # The [args.n_seeds] consecutive random seeds (starting from [args.seed]) each method is run with
    seed_list = list(range(args.seed, args.seed+args.n_seeds))

    # NOTE: throughout this section, method-specific options are set on [args] before each call
    #       to [collect_all] and reset directly afterwards, so that later methods start from the
    #       defaults again; the order of these mutations therefore matters.

    ###----"BASELINES"----###

    ## None
    args.replay = "none"
    NONE = {}
    NONE = collect_all(NONE, seed_list, args, name="None")

    ## JOINT training (using total number of iterations from all contexts)
    iters_temp = args.iters
    args.iters = args.contexts*iters_temp
    args.joint = True
    JOINT = {}
    JOINT = collect_all(JOINT, seed_list, args, name="Joint")
    args.joint = False
    args.iters = iters_temp  # -restore the per-context number of iterations


    ###----"CONTEXT-SPECIFIC"----####

    if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
        ## Separate network per context
        fc_units_temp = args.fc_units
        args.fc_units = args.fc_units_sep
        args.separate_networks = True
        SEP = {}
        SEP = collect_all(SEP, seed_list, args, name="Separate Networks")
        args.separate_networks = False
        args.fc_units = fc_units_temp

        ## XdG
        always_xdg = checkattr(args, 'xdg')
        args.xdg = True
        XDG = {}
        XDG = collect_all(XDG, seed_list, args, name="XdG")
        args.xdg = always_xdg


    ###----"PARAMETER REGULARIZATION"----####

    if not checkattr(args, 'no_reg'):
        ## EWC
        args.weight_penalty = True
        args.importance_weighting = 'fisher'
        args.offline = True
        args.reg_strength = args.ewc_lambda
        EWC = {}
        EWC = collect_all(EWC, seed_list, args, name="EWC")
        args.weight_penalty = False
        args.offline = False

        ## SI
        args.weight_penalty = True
        args.importance_weighting = 'si'
        args.reg_strength = args.si_c
        SI = {}
        SI = collect_all(SI, seed_list, args, name="SI")
        args.weight_penalty = False
    else:
        EWC = SI = None


    ###----"FUNCTIONAL REGULARIZATION"----####

    ## LwF
    args.replay = "current"
    args.distill = True
    LWF = {}
    LWF = collect_all(LWF, seed_list, args, name="LwF")
    args.replay = "none"
    args.distill = False

    ## FROMP
    if not checkattr(args, 'no_fromp'):
        args.fromp = True
        args.sample_selection = "fromp"
        FROMP = {}
        FROMP = collect_all(FROMP, seed_list, args, name="FROMP")
        args.fromp = False
    else:
        FROMP = None


    ###----"REPLAY"----###

    ## DGR
    args.replay = "generative"
    args.distill = False
    DGR = {}
    DGR = collect_all(DGR, seed_list, args, name="Deep Generative Replay")

    ## BI-R
    if not checkattr(args, 'no_bir'):
        args.replay = "generative"
        args.feedback = True
        args.hidden = True
        args.dg_gates = True
        args.prior = "GMM"
        args.per_class = True
        args.distill = True
        BIR = {}
        BIR = collect_all(BIR, seed_list, args, name="Brain-Inspired Replay")
        args.feedback = False
        args.hidden = False
        args.dg_gates = False
        args.prior = "standard"
        args.per_class = False
        args.distill = False
    else:
        BIR = None

    ## Experience Replay
    args.replay = "buffer"
    args.sample_selection = "random"
    ER = {}
    ER = collect_all(ER, seed_list, args, name="Experience Replay (budget = {})".format(args.budget))
    args.replay = "none"

    ## A-GEM
    if not checkattr(args, 'no_agem'):
        args.replay = "buffer"
        args.sample_selection = "random"
        args.use_replay = "inequality"
        AGEM = {}
        AGEM = collect_all(AGEM, seed_list, args, name="A-GEM (budget = {})".format(args.budget))
        args.replay = "none"
        args.use_replay = "normal"
    else:
        AGEM = None


    ###----"TEMPLATE-BASED CLASSIFICATION"----####

    if args.scenario=="class" and not args.neg_samples=="current":
        ## iCaRL
        args.bce = True
        args.bce_distill = True
        args.prototypes = True
        args.add_buffer = True
        args.sample_selection = "herding"
        args.neg_samples = "all-so-far"
        ICARL = {}
        ICARL = collect_all(ICARL, seed_list, args, name="iCaRL (budget = {})".format(args.budget))
        args.bce = False
        args.bce_distill = False
        args.prototypes = False
        args.add_buffer = False

        ## Generative Classifier
        # NOTE: the options changed below (iters, fc_units, lr, ...) are not restored afterwards;
        #       this is only safe because the generative classifier is the last method being run
        args.gen_classifier = True
        classes_per_context = 2 if args.experiment=="splitMNIST" else 10
        args.iters = int(args.iters / classes_per_context)
        args.fc_units = args.fc_units_gc
        args.fc_lay = args.fc_lay_gc
        args.z_dim = args.z_dim_gc
        args.hidden = True
        args.lr = 0.001
        GENCLASS = {}
        GENCLASS = collect_all(GENCLASS, seed_list, args, name="Generative Classifier")


    #-------------------------------------------------------------------------------------------------#

    #---------------------------------------------#
    #----- COLLECT RESULTS: AVERAGE ACCURACY -----#
    #---------------------------------------------#

    ## For each seed, create list with average test accuracy
    # -positions 0-9 are fixed; methods that were not run are stored as 0 (their ids are
    #  simply not selected when plotting below)
    ave_acc = {}
    for seed in seed_list:
        ave_acc[seed] = [NONE[seed], JOINT[seed],
                         0 if EWC is None else EWC[seed], 0 if SI is None else SI[seed], LWF[seed],
                         0 if FROMP is None else FROMP[seed],
                         DGR[seed], 0 if BIR is None else BIR[seed], ER[seed], 0 if AGEM is None else AGEM[seed]]
        if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
            # -position 10: XdG, position 11: Separate Networks
            ave_acc[seed].append(XDG[seed])
            ave_acc[seed].append(SEP[seed])
        elif args.scenario=="class" and not args.neg_samples=="current":
            # -position 10: iCaRL, position 11: Generative Classifier
            ave_acc[seed].append(ICARL[seed])
            ave_acc[seed].append(GENCLASS[seed])


    #-------------------------------------------------------------------------------------------------#

    #--------------------------------------------------#
    #----- REPORTING / PLOTTING: AVERAGE ACCURACY -----#
    #--------------------------------------------------#

    # name for plot
    plot_name = "summary-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
    scheme = "{}-incremental learning".format(args.scenario)
    title = "{} - {}".format(args.experiment, scheme)

    # select names / colors / ids
    # -[ids] are the positions of each method in the per-seed [ave_acc] lists built above
    names = ["None", "Joint"]
    colors = ["grey", "black"]
    ids = [0, 1]
    if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
        names += ['Separate Networks', 'XdG']
        colors += ['dodgerblue', 'deepskyblue']
        ids += [11, 10]
    if not checkattr(args, 'no_reg'):
        names += ['EWC', 'SI']
        colors += ['darkgreen', 'yellowgreen']
        ids += [2, 3]
    names.append('LwF')
    colors.append('gold')
    ids.append(4)
    if not checkattr(args, 'no_fromp'):
        names.append("FROMP (b={})".format(args.budget))
        colors.append('goldenrod')
        ids.append(5)
    names.append('DGR')
    colors.append('indianred')
    ids.append(6)
    if not checkattr(args, 'no_bir'):
        names.append('BI-R')
        colors.append('lightcoral')
        ids.append(7)
    names.append("ER (b={})".format(args.budget))
    colors.append('red')
    ids.append(8)
    if not checkattr(args, 'no_agem'):
        names.append("A-GEM (b={})".format(args.budget))
        colors.append('orangered')
        ids.append(9)
    if args.scenario=="class" and not args.neg_samples=="current":
        names += ['Generative Classifier', "iCaRL (b={})".format(args.budget)]
        colors += ['indigo', 'purple']
        ids += [11, 10]

    # open pdf
    pp = visual_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
    figure_list = []

    # bar-plot
    means = [np.mean([ave_acc[seed][id] for seed in seed_list]) for id in ids]
    if len(seed_list)>1:
        # -standard errors and 95%-confidence half-widths over the random seeds
        sems = [np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
        cis = [1.96*np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
    figure = visual_plt.plot_bar(means, names=names, colors=colors, ylabel="average accuracy (after all contexts)",
                                 title=title, yerr=cis if len(seed_list)>1 else None, ylim=(0,1))
    figure_list.append(figure)

    # print results to screen
    print("\n\n"+"#"*60+"\nSUMMARY RESULTS: {}\n".format(title)+"#"*60)
    for i,name in enumerate(names):
        if len(seed_list) > 1:
            print("{:27s} {:.2f} (+/- {:.2f}), n={}".format(name, 100*means[i], 100*sems[i], len(seed_list)))
        else:
            print("{:27s} {:.2f}".format(name, 100*means[i]))
        if i==1:
            # -separator after the two baselines ("None" and "Joint")
            print("="*60)
    print("#"*60)

    # add all figures to pdf
    for figure in figure_list:
        pp.savefig(figure)

    # close the pdf
    pp.close()

    # Print name of generated plot on screen
    print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/compare_hyperParams.py b/PyTorch/build-in/other/continual-learning/compare_hyperParams.py
new file mode 100644
index 000000000..009c86392
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/compare_hyperParams.py
@@ -0,0 +1,328 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+from matplotlib.pyplot import get_cmap
+# -custom-written code
+import main
+import utils
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt as my_plt
+
+
+## Parameter-values to compare
+# -EWC regularization strengths (lambda); the *_permMNIST variants are the coarser grids used for permMNIST
+lamda_list = [1., 10., 100., 1000., 10000., 100000., 1000000., 10000000., 100000000., 1000000000., 10000000000.,
+              100000000000., 1000000000000., 10000000000000.]
+lamda_list_permMNIST = [1., 10., 100., 1000., 10000., 100000., 1000000., 10000000.]
+# -SI regularization strengths (c)
+c_list = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1., 5., 10., 50., 100., 500., 1000., 5000., 10000., 50000., 100000.]
+c_list_permMNIST = [0.01, 0.1, 1., 10., 100., 1000., 10000., 100000.]
+# -XdG gating proportions (fraction of hidden nodes gated per context)
+xdg_list = [0., 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
+xdg_list_permMNIST = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
+# -BI-R decoder-gate proportions
+dg_prop_list = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
+# -FROMP tau values
+tau_list = [0.001, 0.01, 0.1, 1., 10., 100., 1000., 10000., 100000.]
+# -memory budgets (number of stored samples); which list is used depends on the chosen experiment
+budget_list_splitMNIST = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
+budget_list_splitCIFAR100 = [1, 2, 5, 10, 20]
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'comparison': True, 'compare_hyper': True}
+ # Define input options
+ parser = options.define_args(filename="compare_hyperParams", description='Hyperparamer gridsearches.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser = options.add_cl_options(parser, **kwargs)
+ # Should the gridsearch not be run for some methods?
+ parser.add_argument('--no-xdg', action='store_true', help="no XdG")
+ parser.add_argument('--no-reg', action='store_true', help="no EWC or SI")
+ parser.add_argument('--no-fromp', action='store_true', help="no FROMP")
+ parser.add_argument('--no-bir', action='store_true', help="no BI-R")
+ # Parse, process (i.e., set defaults for unselected options) and check chosen options
+ args = parser.parse_args()
+ args.log_per_context = True
+ set_default_values(args, also_hyper_params=False) # -set defaults, some are based on chosen scenario / experiment
+ check_for_errors(args, **kwargs) # -check whether incompatible options are selected
+ return args
+
+
+## Function for running experiments and collecting results
+def get_result(args):
+ # -get param-stamp
+ param_stamp = get_param_stamp_from_args(args)
+ # -check whether already run, and if not do so
+ if os.path.isfile('{}/acc-{}.txt'.format(args.r_dir, param_stamp)):
+ print(" already run: {}".format(param_stamp))
+ else:
+ args.train = True
+ print("\n ...running: {} ...".format(param_stamp))
+ main.run(args)
+ # -get average accuracy
+ fileName = '{}/acc-{}.txt'.format(args.r_dir, param_stamp)
+ file = open(fileName)
+ ave = float(file.readline())
+ file.close()
+ # -return it
+ return ave
+
+
+if __name__ == '__main__':
+
+    ## Load input-arguments
+    args = handle_inputs()
+
+    # Create plots- and results-directories if needed
+    if not os.path.isdir(args.r_dir):
+        os.mkdir(args.r_dir)
+    if not os.path.isdir(args.p_dir):
+        os.mkdir(args.p_dir)
+
+    ## Select parameter-lists based on chosen experiment
+    # (the coarser *_permMNIST grids are used for permMNIST; note the budget-list falls back to the
+    #  CIFAR100 list for any experiment other than splitMNIST)
+    xdg_list = xdg_list_permMNIST if args.experiment=="permMNIST" else xdg_list
+    lamda_list = lamda_list_permMNIST if args.experiment=="permMNIST" else lamda_list
+    c_list = c_list_permMNIST if args.experiment=="permMNIST" else c_list
+    budget_list = budget_list_splitMNIST if args.experiment=="splitMNIST" else budget_list_splitCIFAR100
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------------#
+    #----- RUN ALL MODELS -----#
+    #--------------------------#
+
+    ## Baseline (no replay, no regularization) -- reference point for all methods below
+    args.replay = "none"
+    BASE = get_result(args)
+
+    ## XdG (only defined for task-incremental learning); gridsearch over the gating proportion
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        XDG = {}
+        # -remember whether XdG was requested on the command line, so args can be restored afterwards
+        always_xdg = utils.checkattr(args, 'xdg')
+        if always_xdg:
+            gating_prop_selected = args.gating_prop
+        args.xdg = True
+        for xdg in xdg_list:
+            args.gating_prop = xdg
+            XDG[xdg] = get_result(args)
+        args.xdg = always_xdg
+        if always_xdg:
+            args.gating_prop = gating_prop_selected
+
+    ## EWC (run in "offline" mode, i.e. with a separate penalty per context); gridsearch over lambda
+    if not utils.checkattr(args, 'no_reg'):
+        EWC = {}
+        args.weight_penalty = True
+        args.offline = True
+        args.importance_weighting = 'fisher'
+        for ewc_lambda in lamda_list:
+            args.reg_strength = ewc_lambda
+            EWC[ewc_lambda] = get_result(args)
+        args.weight_penalty = False
+        args.offline = False
+
+    ## SI; gridsearch over c
+    if not utils.checkattr(args, 'no_reg'):
+        SI = {}
+        args.weight_penalty = True
+        args.importance_weighting = 'si'
+        for si_c in c_list:
+            args.reg_strength = si_c
+            SI[si_c] = get_result(args)
+        args.weight_penalty = False
+        # NOTE(review): args.importance_weighting is left at 'si' here -- presumably irrelevant for the
+        #  sections below (weight_penalty is False), but confirm it does not affect their param-stamps
+
+    ## FROMP; gridsearch over tau, for each memory budget
+    if not utils.checkattr(args, 'no_fromp'):
+        FROMP = {}
+        args.fromp = True
+        args.sample_selection = 'fromp'
+        for budget in budget_list:
+            args.budget = budget
+            FROMP[budget] = {}
+            for tau in tau_list:
+                args.tau = tau
+                FROMP[budget][tau] = get_result(args)
+        args.fromp = False
+
+    ## BI-R (generative replay with brain-inspired modifications); gridsearch over decoder-gate proportion
+    if not utils.checkattr(args, 'no_bir'):
+        BIR = {}
+        args.replay = "generative"
+        args.feedback = True
+        args.hidden = True
+        args.dg_gates = True
+        args.prior = "GMM"
+        args.per_class = True
+        args.distill = True
+        for dg_prop in dg_prop_list:
+            args.dg_prop = dg_prop
+            BIR[dg_prop] = get_result(args)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #-----------------------------------------#
+    #----- COLLECT DATA & PRINT ON SCREEN-----#
+    #-----------------------------------------#
+
+    # -extended parameter-lists with a leading 0 representing the no-regularization baseline (BASE)
+    ext_c_list = [0] + c_list
+    ext_lambda_list = [0] + lamda_list
+    ext_tau_list = [0] + tau_list
+    print("\n")
+
+
+    ###---XdG---###
+
+    if args.scenario == "task" and not utils.checkattr(args, 'no_xdg'):
+        # -collect data
+        ave_acc_xdg = [XDG[c] for c in xdg_list]
+        # -print on screen  (NOTE(review): the banner string below has an unbalanced extra ')')
+        print("\n\nCONTEXT-DEPENDENT GATING (XDG))")
+        print(" param list (gating_prop): {}".format(xdg_list))
+        print("  {}".format(ave_acc_xdg))
+        print("---> gating_prop = {} -- {}".format(xdg_list[np.argmax(ave_acc_xdg)], np.max(ave_acc_xdg)))
+
+
+    ###---EWC---###
+
+    if not utils.checkattr(args, 'no_reg'):
+        # -collect data (baseline prepended as the lambda=0 entry)
+        ave_acc_ewc = [BASE] + [EWC[ewc_lambda] for ewc_lambda in lamda_list]
+        # -print on screen
+        print("\n\nELASTIC WEIGHT CONSOLIDATION (EWC)")
+        print(" param-list (lambda): {}".format(ext_lambda_list))
+        print("  {}".format(ave_acc_ewc))
+        print("---> lambda = {} -- {}".format(ext_lambda_list[np.argmax(ave_acc_ewc)], np.max(ave_acc_ewc)))
+
+
+    ###---SI---###
+
+    if not utils.checkattr(args, 'no_reg'):
+        # -collect data (baseline prepended as the c=0 entry)
+        ave_acc_si = [BASE] + [SI[c] for c in c_list]
+        # -print on screen
+        print("\n\nSYNAPTIC INTELLIGENCE (SI)")
+        print(" param list (si_c): {}".format(ext_c_list))
+        print("  {}".format(ave_acc_si))
+        print("---> si_c = {} -- {}".format(ext_c_list[np.argmax(ave_acc_si)], np.max(ave_acc_si)))
+
+
+    ###---FROMP---###
+
+    if not utils.checkattr(args, 'no_fromp'):
+        ave_acc_fromp_per_budget = []
+        for budget in budget_list:
+            # -collect data (with and without the baseline prepended as the tau=0 entry)
+            ave_acc_fromp = [FROMP[budget][tau] for tau in tau_list]
+            ave_acc_fromp_ext = [BASE] + [FROMP[budget][tau] for tau in tau_list]
+            # -print on screen
+            print("\n\nFROMP (budget={})".format(budget))
+            print(" param-list (tau): {}".format(ext_tau_list))
+            print("  {}".format(ave_acc_fromp_ext))
+            print("---> tau = {} -- {}".format(ext_tau_list[np.argmax(ave_acc_fromp_ext)],
+                                               np.max(ave_acc_fromp_ext)))
+            # -collect data for each budget for plotting in one graph
+            ave_acc_fromp_per_budget.append(ave_acc_fromp)
+
+
+    ###---BI-R---###
+
+    if not utils.checkattr(args, 'no_bir'):
+        # -collect data
+        ave_acc_bir = [BIR[dg_prop] for dg_prop in dg_prop_list]
+        # -print on screen
+        print("\n\nBRAIN-INSPIRED REPLAY (BI-R)")
+        print(" param list (dg_prop): {}".format(dg_prop_list))
+        print("  {}".format(ave_acc_bir))
+        print("---> dg_prop = {} -- {}".format(dg_prop_list[np.argmax(ave_acc_bir)], np.max(ave_acc_bir)))
+        print('\n')
+
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------#
+    #----- PLOTTING -----#
+    #--------------------#
+
+    # name for plot
+    plot_name = "hyperParams-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
+    scheme = "incremental {} learning".format(args.scenario)
+    title = "{} - {}".format(args.experiment, scheme)
+    ylabel = "Test accuracy (after all contexts)"
+
+    # calculate limits y-axes (to have equal axes for all graphs);
+    #  pool all collected accuracies and add a margin around their min/max
+    full_list = []
+    if not utils.checkattr(args, 'no_reg'):
+        full_list += ave_acc_ewc + ave_acc_si
+    if not utils.checkattr(args, 'no_fromp'):
+        for item in ave_acc_fromp_per_budget:
+            full_list += item
+    if not utils.checkattr(args, 'no_bir'):
+        full_list += ave_acc_bir
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        full_list += ave_acc_xdg
+    miny = np.min(full_list)
+    maxy = np.max(full_list)
+    marginy = 0.1*(maxy-miny)
+    # -for class-incremental learning the y-axis always starts at 0
+    ylim = (np.max([miny-2*marginy, 0]),
+            np.min([maxy+marginy,1])) if not args.scenario=="class" else (0, np.min([maxy+marginy,1]))
+
+    # open pdf
+    pp = my_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
+    figure_list = []
+
+
+    ###---XdG---###  (accuracy vs gating proportion, linear x-axis)
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        figure = my_plt.plot_lines([ave_acc_xdg], x_axes=xdg_list, ylabel=ylabel,
+                                   line_names=["XdG"], colors=["deepskyblue"], ylim=ylim,
+                                   title=title, x_log=False, xlabel="XdG: % of nodes gated",
+                                   with_dots=True, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    ###---EWC---###  (accuracy vs lambda, log x-axis; baseline entry [0] excluded from the curve)
+    if not utils.checkattr(args, 'no_reg'):
+        figure = my_plt.plot_lines([ave_acc_ewc[1:]], x_axes=lamda_list, ylabel=ylabel, line_names=["EWC"],
+                                   colors=["darkgreen"], title=title, x_log=True, xlabel="EWC: lambda (log-scale)",
+                                   with_dots=True, ylim=ylim, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    ###---SI---###  (accuracy vs c, log x-axis)
+    if not utils.checkattr(args, 'no_reg'):
+        figure = my_plt.plot_lines([ave_acc_si[1:]], x_axes=c_list, ylabel=ylabel, line_names=["SI"],
+                                   colors=["yellowgreen"], title=title, x_log=True, xlabel="SI: c (log-scale)",
+                                   with_dots=True, ylim=ylim, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    ###---FROMP---###  (one curve per budget, shaded along the YlOrBr colormap)
+    if not utils.checkattr(args, 'no_fromp'):
+        colors = get_cmap('YlOrBr')(np.linspace(1.0, 0.5, len(budget_list))).tolist()
+        figure = my_plt.plot_lines(ave_acc_fromp_per_budget, x_axes=tau_list, ylabel=ylabel,
+                                   line_names=["FROMP (budget={})".format(budget) for budget in budget_list],
+                                   colors=colors, title=title, x_log=True, xlabel="FROMP: tau (log-scale)",
+                                   with_dots=True, ylim=ylim, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    ###---BI-R---###  (accuracy vs decoder-gate proportion, linear x-axis)
+    if not utils.checkattr(args, 'no_bir'):
+        figure = my_plt.plot_lines([ave_acc_bir], x_axes=dg_prop_list, ylabel=ylabel, line_names=["BI-R"],
+                                   colors=["lightcoral"], title=title, x_log=False, with_dots=True,
+                                   xlabel="BI-R: % of nodes gated in decoder", ylim=ylim, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    # add figures to pdf
+    for figure in figure_list:
+        pp.savefig(figure)
+
+    # close the pdf
+    pp.close()
+
+    # Print name of generated plot on screen
+    print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/compare_hyperParams_task_free.py b/PyTorch/build-in/other/continual-learning/compare_hyperParams_task_free.py
new file mode 100644
index 000000000..7f5695fa1
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/compare_hyperParams_task_free.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+# -custom-written code
+import main_task_free
+import utils
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt as my_plt
+
+
+## Parameter-values to compare
+# -SI regularization strengths (c) and XdG gating proportions for the task-free gridsearch
+c_list = [0.001, 0.01, 0.1, 1., 10., 100., 1000., 10000., 100000., 1000000., 10000000.]
+xdg_list = [0., 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'comparison': True, 'compare_hyper': True, 'no_boundaries': True}
+ # Define input options
+ parser = options.define_args(filename="compare_hyperParams_task_free", description='Hyperparamer gridsearches.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser = options.add_cl_options(parser, **kwargs)
+ # Should the gridsearch not be run for some methods?
+ parser.add_argument('--no-xdg', action='store_true', help="no XdG")
+ parser.add_argument('--no-si', action='store_true', help="no SI")
+ # Parse, process (i.e., set defaults for unselected options) and check chosen options
+ args = parser.parse_args()
+ args.log_per_context = True
+ set_default_values(args, also_hyper_params=False) # -set defaults, some are based on chosen scenario / experiment
+ check_for_errors(args, **kwargs) # -check whether incompatible options are selected
+ return args
+
+
+## Function for running experiments and collecting results
+def get_result(args):
+ # -get param-stamp
+ param_stamp = get_param_stamp_from_args(args, no_boundaries=True)
+ # -check whether already run, and if not do so
+ if os.path.isfile('{}/acc-{}.txt'.format(args.r_dir, param_stamp)):
+ print(" already run: {}".format(param_stamp))
+ else:
+ args.train = True
+ print("\n ...running: {} ...".format(param_stamp))
+ main_task_free.run(args)
+ # -get average accuracy
+ fileName = '{}/acc-{}.txt'.format(args.r_dir, param_stamp)
+ file = open(fileName)
+ ave = float(file.readline())
+ file.close()
+ # -return it
+ return ave
+
+
+if __name__ == '__main__':
+
+    ## Load input-arguments
+    args = handle_inputs()
+
+    # Create plots- and results-directories if needed
+    if not os.path.isdir(args.r_dir):
+        os.mkdir(args.r_dir)
+    if not os.path.isdir(args.p_dir):
+        os.mkdir(args.p_dir)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------------#
+    #----- RUN ALL MODELS -----#
+    #--------------------------#
+
+    ## Baseline (no replay, no regularization) -- reference point for the methods below
+    args.replay = "none"
+    BASE = get_result(args)
+
+    ## XdG (only defined for task-incremental learning); gridsearch over the gating proportion
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        XDG = {}
+        # -remember whether XdG was requested on the command line, so args can be restored afterwards
+        always_xdg = utils.checkattr(args, 'xdg')
+        if always_xdg:
+            gating_prop_selected = args.gating_prop
+        args.xdg = True
+        for xdg in xdg_list:
+            args.gating_prop = xdg
+            XDG[xdg] = get_result(args)
+        args.xdg = always_xdg
+        if always_xdg:
+            args.gating_prop = gating_prop_selected
+
+    ## SI; gridsearch over c
+    if not utils.checkattr(args, 'no_si'):
+        SI = {}
+        args.weight_penalty = True
+        args.importance_weighting = 'si'
+        for si_c in c_list:
+            args.reg_strength = si_c
+            SI[si_c] = get_result(args)
+        args.weight_penalty = False
+
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #-----------------------------------------#
+    #----- COLLECT DATA & PRINT ON SCREEN-----#
+    #-----------------------------------------#
+
+    # -extended c-list with a leading 0 representing the no-regularization baseline (BASE)
+    ext_c_list = [0] + c_list
+    print("\n")
+
+
+    ###---XdG---###
+
+    if args.scenario == "task" and not utils.checkattr(args, 'no_xdg'):
+        # -collect data
+        ave_acc_xdg = [XDG[c] for c in xdg_list]
+        # -print on screen  (NOTE(review): the banner string below has an unbalanced extra ')')
+        print("\n\nCONTEXT-DEPENDENT GATING (XDG))")
+        print(" param list (gating_prop): {}".format(xdg_list))
+        print("  {}".format(ave_acc_xdg))
+        print("---> gating_prop = {} -- {}".format(xdg_list[np.argmax(ave_acc_xdg)], np.max(ave_acc_xdg)))
+
+
+    ###---SI---###
+
+    if not utils.checkattr(args, 'no_si'):
+        # -collect data (baseline prepended as the c=0 entry)
+        ave_acc_si = [BASE] + [SI[c] for c in c_list]
+        # -print on screen
+        print("\n\nSYNAPTIC INTELLIGENCE (SI)")
+        print(" param list (si_c): {}".format(ext_c_list))
+        print("  {}".format(ave_acc_si))
+        print("---> si_c = {} -- {}".format(ext_c_list[np.argmax(ave_acc_si)], np.max(ave_acc_si)))
+
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------#
+    #----- PLOTTING -----#
+    #--------------------#
+
+    # name for plot
+    plot_name = "hyperParams-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
+    scheme = "incremental {} learning".format(args.scenario)
+    title = "{} - {}".format(args.experiment, scheme)
+    ylabel = "Test accuracy (after all contexts)"
+
+    # calculate limits y-axes (to have equal axes for all graphs);
+    #  pool all collected accuracies and add a margin around their min/max
+    full_list = []
+    if not utils.checkattr(args, 'no_si'):
+        full_list += ave_acc_si
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        full_list += ave_acc_xdg
+    miny = np.min(full_list)
+    maxy = np.max(full_list)
+    marginy = 0.1*(maxy-miny)
+    # -for class-incremental learning the y-axis always starts at 0
+    ylim = (np.max([miny-2*marginy, 0]),
+            np.min([maxy+marginy,1])) if not args.scenario=="class" else (0, np.min([maxy+marginy,1]))
+
+    # open pdf
+    pp = my_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
+    figure_list = []
+
+
+    ###---XdG---###  (accuracy vs gating proportion, linear x-axis)
+    if args.scenario=="task" and not utils.checkattr(args, 'no_xdg'):
+        figure = my_plt.plot_lines([ave_acc_xdg], x_axes=xdg_list, ylabel=ylabel,
+                                   line_names=["XdG"], colors=["deepskyblue"], ylim=ylim,
+                                   title=title, x_log=False, xlabel="XdG: % of nodes gated",
+                                   with_dots=True, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+    ###---SI---###  (accuracy vs c, log x-axis; baseline entry [0] excluded from the curve)
+    if not utils.checkattr(args, 'no_si'):
+        figure = my_plt.plot_lines([ave_acc_si[1:]], x_axes=c_list, ylabel=ylabel, line_names=["SI"],
+                                   colors=["yellowgreen"], title=title, x_log=True, xlabel="SI: c (log-scale)",
+                                   with_dots=True, ylim=ylim, h_line=BASE, h_label="None")
+        figure_list.append(figure)
+
+
+    # add figures to pdf
+    for figure in figure_list:
+        pp.savefig(figure)
+
+    # close the pdf
+    pp.close()
+
+    # Print name of generated plot on screen
+    print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/compare_replay.py b/PyTorch/build-in/other/continual-learning/compare_replay.py
new file mode 100644
index 000000000..a7bfcbf36
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/compare_replay.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+# -custom-written code
+import main
+from utils import checkattr
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt
+
+
+## Memory budget values to compare
+# -number of stored samples; which list is used depends on the chosen experiment (selected in __main__)
+budget_list_CIFAR100 = [1, 2, 5, 10, 20, 50, 100, 200, 500]
+budget_list_splitMNIST = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'comparison': True, 'compare_replay': True}
+ # Define input options
+ parser = options.define_args(filename="compare_replay",
+ description='Evaluate CL methods storing data as function of available memory budget.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser = options.add_cl_options(parser, **kwargs)
+ # Should some methods not be included?
+ parser.add_argument('--no-fromp', action='store_true', help="no FROMP")
+ # Parse, process (i.e., set defaults for unselected options) and check chosen options
+ args = parser.parse_args()
+ set_default_values(args, also_hyper_params=False) # -set defaults, some are based on chosen scenario / experiment
+ check_for_errors(args, **kwargs) # -check whether incompatible options are selected
+ return args
+
+
+def get_result(args):
+ # -get param-stamp
+ param_stamp = get_param_stamp_from_args(args)
+ # -check whether already run, and if not do so
+ if os.path.isfile('{}/acc-{}.txt'.format(args.r_dir, param_stamp)):
+ print(" already run: {}".format(param_stamp))
+ else:
+ args.train = True
+ print("\n ...running: {} ...".format(param_stamp))
+ main.run(args)
+ # -get average accuracy
+ fileName = '{}/acc-{}.txt'.format(args.r_dir, param_stamp)
+ file = open(fileName)
+ ave = float(file.readline())
+ file.close()
+ # -return it
+ return ave
+
+
+def collect_all(method_dict, seed_list, args, name=None):
+ # -print name of method on screen
+ if name is not None:
+ print("\n------{}------".format(name))
+ # -run method for all random seeds
+ for seed in seed_list:
+ args.seed = seed
+ method_dict[seed] = get_result(args)
+ # -return updated dictionary with results
+ return method_dict
+
+
+
+if __name__ == '__main__':
+
+    ## Load input-arguments
+    args = handle_inputs()
+
+    # -create results-directory if needed
+    if not os.path.isdir(args.r_dir):
+        os.mkdir(args.r_dir)
+    # -create plots-directory if needed
+    if not os.path.isdir(args.p_dir):
+        os.mkdir(args.p_dir)
+
+    ## Select correct memory budget list
+    budget_list = budget_list_CIFAR100 if args.experiment=="CIFAR100" else budget_list_splitMNIST
+
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------------#
+    #----- RUN ALL MODELS -----#
+    #--------------------------#
+
+    # -each method is run once per seed in this list
+    seed_list = list(range(args.seed, args.seed+args.n_seeds))
+
+    # -FROMP is only run up to this budget (larger budgets become too expensive)
+    budget_limit_FROMP = 1000
+    # -optionally use a pre-tuned tau per budget (per scenario), rather than one tau for all budgets
+    #  NOTE(review): these values are presumably the winners of an earlier gridsearch -- confirm provenance
+    if checkattr(args, 'tau_per_budget'):
+        if args.scenario=="task":
+            tau_dict = {'1': 100000., '2': 1000., '5': 100000., '10': 0.001, '20': 10000., '50': 1000.,
+                        '100': 0.01, '200': 0.01, '500': 0.1, '1000': 10.}
+        elif args.scenario=="domain":
+            tau_dict = {'1': 0.001, '2': 100000., '5': 100000., '10': 100000., '20': 100000., '50': 10000.,
+                        '100': 10., '200': 1., '500': 10., '1000': 0.1}
+        elif args.scenario=="class":
+            tau_dict = {'1': 100000., '2': 0.01, '5': 10000., '10': 100000., '20': 10000., '50': 1000.,
+                        '100': 1000., '200': 10., '500': 0.001, '1000': 1.}
+
+
+    ###### BASELINES #########
+
+    # -lower bound: no replay, sequential training
+    args.replay = "none"
+    BASE = {}
+    BASE = collect_all(BASE, seed_list, args, name="None")
+
+    # -upper bound: joint training on all contexts, with proportionally more iterations
+    iters_temp = args.iters
+    args.iters = args.contexts*iters_temp
+    args.joint = True
+    JOINT = {}
+    JOINT = collect_all(JOINT, seed_list, args, name="Joint")
+    args.joint = False
+    args.iters = iters_temp
+
+
+    ###### CL METHODS STORING DATA #########
+
+    ## Experience Replay (random sample selection, no distillation); one run per budget
+    args.replay = "buffer"
+    args.sample_selection = "random"
+    args.distill = False
+    ER = {}
+    for budget in budget_list:
+        args.budget = budget
+        ER[budget] = {}
+        ER[budget] = collect_all(ER[budget], seed_list, args, name="Experience Replay - budget = {}".format(budget))
+
+    ## A-GEM (same buffer set-up as ER, but replay used as inequality constraint on the gradient)
+    args.replay = "buffer"
+    args.distill = False
+    args.sample_selection = "random"
+    args.use_replay = "inequality"
+    AGEM = {}
+    for budget in budget_list:
+        args.budget = budget
+        AGEM[budget] = {}
+        AGEM[budget] = collect_all(AGEM[budget], seed_list, args, name="A-GEM - budget = {}".format(budget))
+    args.use_replay = "normal"
+
+    ## FROMP (only up to `budget_limit_FROMP`; optionally with a pre-tuned tau per budget)
+    if not checkattr(args, 'no_fromp'):
+        args.replay = "none"
+        args.fromp = True
+        args.sample_selection = "fromp"
+        FROMP = {}
+        for budget in budget_list:
+            if budget<=budget_limit_FROMP:
+                args.budget = budget
+                if checkattr(args, 'tau_per_budget'):
+                    args.tau = tau_dict['{}'.format(budget)]
+                FROMP[budget] = {}
+                FROMP[budget] = collect_all(FROMP[budget], seed_list, args, name="FROMP - budget = {}".format(budget))
+        args.fromp = False
+
+    ## iCaRL (only defined for class-incremental learning)
+    if args.scenario=="class":
+        args.replay = "none"
+        args.prototypes = True
+        args.bce = True
+        args.bce_distill = True
+        args.add_buffer = True
+        args.sample_selection = 'herding'
+        args.neg_samples = "all-so-far"
+        ICARL = {}
+        for budget in budget_list:
+            args.budget = budget
+            ICARL[budget] = {}
+            ICARL[budget] = collect_all(ICARL[budget], seed_list, args, name="iCaRL - budget = {}".format(budget))
+
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------#
+    #----- PLOTTING -----#
+    #--------------------#
+
+    # name for plot
+    plot_name = "summaryExactRep-{}{}-{}".format(args.experiment,args.contexts,args.scenario)
+    scheme = "incremental {} learning".format(args.scenario)
+    title = "{} - {}".format(args.experiment, scheme)
+
+    # open pdf
+    pp = visual_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
+    figure_list = []
+
+    # set scale of y-axis
+    y_lim = [0,1] if args.scenario=="class" else None
+    # NOTE(review): the line below unconditionally overrides the line above, so y_lim is always None
+    #  (auto-scaled axes); if the [0,1]-scale for class-incremental learning is wanted, remove this line
+    y_lim = None
+
+    # Methods for comparison: horizontal reference lines for the "None" and "Joint" baselines
+    # (standard error of the mean is only computed when there is more than one seed)
+    h_lines = [np.mean([BASE[seed] for seed in seed_list]), np.mean([JOINT[seed] for seed in seed_list])]
+    h_errors = [np.sqrt(np.var([BASE[seed] for seed in seed_list]) / (len(seed_list)-1)),
+                np.sqrt(np.var([JOINT[seed] for seed in seed_list]) / (len(seed_list)-1))] if args.n_seeds>1 else None
+    h_labels = ["None", "Joint"]
+    h_colors = ["grey", "black"]
+
+
+    # Different variants of exact replay
+    # -prepare lists of per-budget means (and standard errors of the mean, if n_seeds>1)
+    ave_ER = []
+    sem_ER = []
+    ave_AGEM = []
+    sem_AGEM = []
+    if not checkattr(args, 'no_fromp'):
+        ave_FROMP = []
+        sem_FROMP = []
+    if args.scenario=="class":
+        ave_ICARL = []
+        sem_ICARL = []
+
+    for budget in budget_list:
+        all_entries = [ER[budget][seed] for seed in seed_list]
+        ave_ER.append(np.mean(all_entries))
+        if args.n_seeds > 1:
+            sem_ER.append(np.sqrt(np.var(all_entries) / (len(all_entries) - 1)))
+
+        all_entries = [AGEM[budget][seed] for seed in seed_list]
+        ave_AGEM.append(np.mean(all_entries))
+        if args.n_seeds > 1:
+            sem_AGEM.append(np.sqrt(np.var(all_entries) / (len(all_entries) - 1)))
+
+        if not checkattr(args, 'no_fromp'):
+            if budget<=budget_limit_FROMP:
+                all_entries = [FROMP[budget][seed] for seed in seed_list]
+                ave_FROMP.append(np.mean(all_entries))
+                if args.n_seeds > 1:
+                    sem_FROMP.append(np.sqrt(np.var(all_entries) / (len(all_entries) - 1)))
+            else:
+                # -budgets beyond the FROMP limit were not run; NaN keeps the x-axes aligned across curves
+                ave_FROMP.append(np.nan)
+                if args.n_seeds>1:
+                    sem_FROMP.append(np.nan)
+
+        if args.scenario=="class":
+            all_entries = [ICARL[budget][seed] for seed in seed_list]
+            ave_ICARL.append(np.mean(all_entries))
+            if args.n_seeds > 1:
+                sem_ICARL.append(np.sqrt(np.var(all_entries) / (len(all_entries) - 1)))
+
+    # -collect the curves (plus error-bars, names and colors) that should appear in the graph
+    lines = [ave_ER, ave_AGEM]
+    errors = [sem_ER, sem_AGEM] if args.n_seeds > 1 else None
+    line_names = ["ER", "A-GEM"]
+    colors = ["red", "orangered"]
+    if not checkattr(args, 'no_fromp'):
+        lines.append(ave_FROMP)
+        line_names.append("FROMP")
+        colors.append("goldenrod")
+        if args.n_seeds>1:
+            errors.append(sem_FROMP)
+    if args.scenario=="class":
+        lines.append(ave_ICARL)
+        line_names.append("iCaRL")
+        colors.append("purple")
+        if args.n_seeds>1:
+            errors.append(sem_ICARL)
+
+    # -plot accuracy as function of memory budget (log x-axis), with baselines as horizontal lines
+    figure = visual_plt.plot_lines(
+        lines, x_axes=budget_list, ylabel="average accuracy (after all contexts)", title=title, x_log=True, ylim=y_lim,
+        line_names=line_names, xlabel="Total memory budget", with_dots=True, colors=colors, list_with_errors=errors,
+        h_lines=h_lines, h_errors=h_errors, h_labels=h_labels, h_colors=h_colors,
+    )
+    figure_list.append(figure)
+
+
+    # add figures to pdf
+    for figure in figure_list:
+        pp.savefig(figure)
+
+    # close the pdf
+    pp.close()
+
+    # Print name of generated plot on screen
+    print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/compare_task_free.py b/PyTorch/build-in/other/continual-learning/compare_task_free.py
new file mode 100644
index 000000000..cb4179f0b
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/compare_task_free.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+# -custom-written code
+import main_task_free
+from utils import checkattr
+from params.param_stamp import get_param_stamp_from_args
+from params.param_values import check_for_errors,set_default_values
+from params import options
+from visual import visual_plt
+
+
## Function for specifying input-options and organizing / checking them
def handle_inputs():
    '''Parse, post-process and validate the command-line options for the task-free comparison script.

    Returns:
        args (argparse.Namespace): fully processed options (defaults filled in, checked for incompatibilities)
    '''
    # Set indicator-dictionary for correctly retrieving / checking input options
    kwargs = {'comparison': True, 'compare_all': True, 'no_boundaries': True}
    # Define input options
    parser = options.define_args(filename="compare_task_free", description='Compare performance of CL strategies.')
    parser = options.add_general_options(parser, **kwargs)
    parser = options.add_eval_options(parser, **kwargs)
    parser = options.add_problem_options(parser, **kwargs)
    parser = options.add_model_options(parser, **kwargs)
    parser = options.add_train_options(parser, **kwargs)
    parser = options.add_cl_options(parser, **kwargs)
    # Should some methods not be included in the comparison?
    parser.add_argument('--no-context-spec', action='store_true', help="no XdG or Separate Networks")
    parser.add_argument('--no-si', action='store_true', help="no SI")
    parser.add_argument('--no-agem', action='store_true', help="no A-GEM")
    # Parse, process (i.e., set defaults for unselected options) and check chosen options
    args = parser.parse_args()
    # Per-context logging is always needed for the plots produced by this script
    args.log_per_context = True
    set_default_values(args, also_hyper_params=True, no_boundaries=True)  # -set defaults, some based on chosen options
    check_for_errors(args, **kwargs)  # -check for incompatible options
    return args
+
+
+## Functions for running experiments and collecting results
def get_results(args):
    '''Run the experiment specified by [args] (unless it has been run before) and return its average accuracy.

    Three cases are distinguished:
      - an accuracy-file already exists: nothing is run, the stored value is read back;
      - a trained model exists but no accuracy-file: only evaluation is run;
      - neither exists: the full experiment (training + evaluation) is run.

    Returns:
        ave (float): average test accuracy over all contexts
    '''
    # -get param-stamp (unique identifier of this experimental configuration)
    param_stamp = get_param_stamp_from_args(args, no_boundaries=True)
    # -file in which the average accuracy is (or will be) stored; build the name only once
    suffix = "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else ""
    file_to_check = '{}/acc-{}{}.txt'.format(args.r_dir, param_stamp, suffix)
    # -check whether already run; if not do so
    if os.path.isfile(file_to_check):
        print(" already run: {}".format(param_stamp))
    elif os.path.isfile("{}/mM-{}".format(args.m_dir, param_stamp)):
        # -a stored model exists, so only evaluation is needed
        args.train = False
        print(" ...testing: {}".format(param_stamp))
        main_task_free.run(args)
    else:
        args.train = True
        print(" ...running: {}".format(param_stamp))
        main_task_free.run(args)
    # -get average accuracy (context manager guarantees the file is closed, also on error)
    with open(file_to_check) as result_file:
        ave = float(result_file.readline())
    # -print average accuracy on screen
    print("--> average accuracy: {}".format(ave))
    # -return average accuracy
    return ave
+
def collect_all(method_dict, seed_list, args, name=None):
    '''Run `get_results` for every seed in [seed_list] and store each outcome in [method_dict].

    Returns the updated [method_dict] (keyed by seed).'''
    # -print name of method on screen
    if name is not None:
        print("\n------{}------".format(name))
    # -run the method once per random seed
    for current_seed in seed_list:
        args.seed = current_seed
        method_dict[current_seed] = get_results(args)
    return method_dict
+
+
+
if __name__ == '__main__':

    ## Load input-arguments
    args = handle_inputs()

    # Create plots- and results-directories if needed
    if not os.path.isdir(args.r_dir):
        os.mkdir(args.r_dir)
    if not os.path.isdir(args.p_dir):
        os.mkdir(args.p_dir)

    #-------------------------------------------------------------------------------------------------#

    #--------------------------#
    #----- RUN ALL MODELS -----#
    #--------------------------#

    # NOTE: each method below is selected by temporarily toggling attributes on the shared `args`
    #       namespace and restoring them afterwards, so later methods start from the same baseline.
    seed_list = list(range(args.seed, args.seed+args.n_seeds))


    ###----"BASELINES"----###

    ## None
    args.replay = "none"
    NONE = {}
    NONE = collect_all(NONE, seed_list, args, name="None")

    ## JOINT training (using a random stream, rather than what was selected)
    stream_temp = args.stream
    args.stream = 'random'
    JOINT = {}
    JOINT = collect_all(JOINT, seed_list, args, name="Joint")
    args.stream = stream_temp


    ###----"CONTEXT-SPECIFIC"----####

    if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
        ## Separate network per context
        fc_units_temp = args.fc_units
        args.fc_units = args.fc_units_sep
        args.separate_networks = True
        SEP = {}
        SEP = collect_all(SEP, seed_list, args, name="Separate Networks")
        args.separate_networks = False
        args.fc_units = fc_units_temp

        ## XdG
        always_xdg = checkattr(args, 'xdg')
        args.xdg = True
        XDG = {}
        XDG = collect_all(XDG, seed_list, args, name="XdG")
        args.xdg = always_xdg


    ###----"PARAMETER REGULARIZATION"----####

    if not checkattr(args, 'no_si'):
        ## SI
        args.weight_penalty = True
        args.importance_weighting = 'si'
        args.reg_strength = args.si_c
        SI = {}
        SI = collect_all(SI, seed_list, args, name="SI")
        args.weight_penalty = False
    else:
        SI = None


    ###----"FUNCTIONAL REGULARIZATION"----####

    ## LwF
    args.replay = "current"
    args.distill = True
    LWF = {}
    LWF = collect_all(LWF, seed_list, args, name="LwF")
    args.replay = "none"
    args.distill = False


    ###----"REPLAY"----###
    if hasattr(args, 'replay_update') and args.replay_update is not None:
        args.update_every = args.replay_update

    ## Experience Replay
    args.replay = "buffer"
    ER = {}
    ER = collect_all(ER, seed_list, args, name="Experience Replay (budget = {})".format(args.budget))
    args.replay = "none"

    ## A-GEM
    if not checkattr(args, 'no_agem'):
        args.replay = "buffer"
        args.use_replay = "inequality"
        AGEM = {}
        AGEM = collect_all(AGEM, seed_list, args, name="A-GEM (budget = {})".format(args.budget))
        args.replay = "none"
        args.use_replay = "normal"
    else:
        AGEM = None


    ###----"TEMPLATE-BASED CLASSIFICATION"----####

    if args.scenario=="class":
        ## iCaRL
        args.bce = True
        args.prototypes = True
        args.replay = "buffer"
        ICARL = {}
        ICARL = collect_all(ICARL, seed_list, args, name="iCaRL (budget = {})".format(args.budget))
        args.bce = False
        args.prototypes = False
        args.replay = "none"

        ## Generative Classifier
        args.gen_classifier = True
        args.fc_units = args.fc_units_gc
        args.fc_lay = args.fc_lay_gc
        args.z_dim = args.z_dim_gc
        args.hidden = True
        args.lr = 0.001
        GENCLASS = {}
        GENCLASS = collect_all(GENCLASS, seed_list, args, name="Generative Classifier")


    #-------------------------------------------------------------------------------------------------#

    #---------------------------------------------#
    #----- COLLECT RESULTS: AVERAGE ACCURACY -----#
    #---------------------------------------------#

    ## For each seed, create list with average test accuracy
    # -fixed slots 0-5: None, Joint, SI (0 if skipped), LwF, ER, A-GEM (0 if skipped);
    #  slots 6/7 depend on the scenario (XdG/SEP for 'task', iCaRL/GenClass for 'class')
    ave_acc = {}
    for seed in seed_list:
        ave_acc[seed] = [NONE[seed], JOINT[seed],
                         0 if SI is None else SI[seed], LWF[seed],
                         ER[seed], 0 if AGEM is None else AGEM[seed]]
        if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
            ave_acc[seed].append(XDG[seed])
            ave_acc[seed].append(SEP[seed])
        elif args.scenario=="class":
            ave_acc[seed].append(ICARL[seed])
            ave_acc[seed].append(GENCLASS[seed])


    #-------------------------------------------------------------------------------------------------#

    #--------------------------------------------------#
    #----- REPORTING / PLOTTING: AVERAGE ACCURACY -----#
    #--------------------------------------------------#

    # name for plot
    plot_name = "summary-{}{}-{}".format(args.experiment, args.contexts, args.scenario)
    scheme = "{}-incremental learning".format(args.scenario)
    title = "{} - {}".format(args.experiment, scheme)

    # select names / colors / ids
    # -each entry in [ids] indexes into the per-seed `ave_acc` lists built above
    names = ["None", "Joint"]
    colors = ["grey", "black"]
    ids = [0, 1]
    if args.scenario=="task" and not checkattr(args, 'no_context_spec'):
        names += ['Separate Networks', 'XdG']
        colors += ['dodgerblue', 'deepskyblue']
        ids += [7, 6]
    if not checkattr(args, 'no_si'):
        names += ['SI']
        colors += ['yellowgreen']
        ids += [2]
    names.append('LwF')
    colors.append('gold')
    ids.append(3)
    names.append("ER (b={})".format(args.budget))
    colors.append('red')
    ids.append(4)
    if not checkattr(args, 'no_agem'):
        names.append("A-GEM (b={})".format(args.budget))
        colors.append('orangered')
        ids.append(5)
    if args.scenario=="class":
        names += ['Generative Classifier', "iCaRL (b={})".format(args.budget)]
        colors += ['indigo', 'purple']
        ids += [7, 6]

    # open pdf
    pp = visual_plt.open_pdf("{}/{}.pdf".format(args.p_dir, plot_name))
    figure_list = []

    # bar-plot
    # -[sems] are standard errors of the mean (printed below); [cis] are 95% confidence intervals (plotted)
    means = [np.mean([ave_acc[seed][id] for seed in seed_list]) for id in ids]
    if len(seed_list)>1:
        sems = [np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
        cis = [1.96*np.sqrt(np.var([ave_acc[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in ids]
    figure = visual_plt.plot_bar(means, names=names, colors=colors, ylabel="average accuracy (after all contexts)",
                                 title=title, yerr=cis if len(seed_list)>1 else None, ylim=(0,1))
    figure_list.append(figure)

    # print results to screen
    print("\n\n"+"#"*60+"\nSUMMARY RESULTS: {}\n".format(title)+"#"*60)
    for i,name in enumerate(names):
        if len(seed_list) > 1:
            print("{:27s} {:.2f} (+/- {:.2f}), n={}".format(name, 100*means[i], 100*sems[i], len(seed_list)))
        else:
            print("{:27s} {:.2f}".format(name, 100*means[i]))
        if i==1:
            # -separator line between the baselines (None / Joint) and the CL methods
            print("="*60)
    print("#"*60)

    # add all figures to pdf
    for figure in figure_list:
        pp.savefig(figure)

    # close the pdf
    pp.close()

    # Print name of generated plot on screen
    print("\nGenerated plot: {}/{}.pdf\n".format(args.p_dir, plot_name))
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/cover.txt b/PyTorch/build-in/other/continual-learning/cover.txt
new file mode 100644
index 000000000..a8e2f23f4
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/cover.txt
@@ -0,0 +1,5 @@
+all api: ['_copy_from_and_resize', '_has_compatible_shallow_copy_type', '_index_put_impl_', '_local_scalar_dense', '_log_softmax', '_log_softmax_backward_data', '_pin_memory', 'add', 'add_', 'amax', 'argmax', 'as_strided', 'clone', 'copy_', 'empty', 'empty_strided', 'eq', 'fill_', 'fused_adam', 'index', 'is_pinned', 'matmul', 'max', 'mm', 'mul', 'neg', 'nll_loss_backward', 'nll_loss_forward', 'relu', 'resize_', 'slice_backward', 'sub', 'sum', 'threshold_backward', 'uniform_', 'view', 'zero_'], total: 37
+
+fallback op: ['_index_put_impl_'], total: 1
+
+coverage rate: 97.30%
diff --git a/PyTorch/build-in/other/continual-learning/coverage.py b/PyTorch/build-in/other/continual-learning/coverage.py
new file mode 100644
index 000000000..264e48bcd
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/coverage.py
@@ -0,0 +1,28 @@
+import argparse
+import re
+
def get_api_info(path):
    '''Scan an operator-log file and collect the names of ops dispatched to 'sdaa' and of fallback ops.

    Args:
        path (str): path of the log file to scan

    Returns:
        (all_apis, fallback_apis): two sorted lists of unique operator names
    '''
    # Raw strings for the regexes; whitespace after the backend-prefix is tolerated but only the
    # operator name itself ('\w+') is captured (the original lookbehind also captured the whitespace).
    pattern_api = re.compile(r'sdaa::\s*(\w+)')
    pattern_fallback = re.compile(r'default::\s*(\w+)')
    all_api_set = set()
    fallback_api_set = set()
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            # Guard against lines that mention the prefix without a following name
            match = pattern_api.search(line)
            if match:
                all_api_set.add(match.group(1))
            match = pattern_fallback.search(line)
            if match:
                fallback_api_set.add(match.group(1))
    return sorted(all_api_set), sorted(fallback_api_set)
+
+
if __name__ == '__main__':
    # Command-line entry point: report which ops in the given log ran on 'sdaa' vs fell back,
    # plus the resulting coverage rate.
    parser = argparse.ArgumentParser(description='suanzifugailvtongji')
    parser.add_argument('--path', type=str, help='rezhilujing', required=True)
    args = parser.parse_args()
    all_api_set, fallback_api_set = get_api_info(args.path)
    print(f"all api: {all_api_set}, total: {len(all_api_set)}\n")
    print(f"fallback op: {fallback_api_set}, total: {len(fallback_api_set)}\n")
    # Guard against an empty log: avoid ZeroDivisionError when no 'sdaa' ops were found
    if all_api_set:
        print(f"coverage rate: {(1 - len(fallback_api_set) / len(all_api_set)) * 100:.2f}%")
    else:
        print("coverage rate: n/a (no 'sdaa' ops found in log)")
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/data/__init__.py b/PyTorch/build-in/other/continual-learning/data/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/data/available.py b/PyTorch/build-in/other/continual-learning/data/available.py
new file mode 100644
index 000000000..8bc047a9d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/data/available.py
@@ -0,0 +1,53 @@
+from torchvision import datasets, transforms
+from data.manipulate import UnNormalize
+
+
# specify available data-sets.
AVAILABLE_DATASETS = {
    'MNIST': datasets.MNIST,
    'CIFAR100': datasets.CIFAR100,
    'CIFAR10': datasets.CIFAR10,
}

# specify available transforms.
AVAILABLE_TRANSFORMS = {
    # -MNIST at its native 28x28 resolution
    'MNIST': [
        transforms.ToTensor(),
    ],
    # -MNIST zero-padded to 32x32 (pad 2 pixels on each side)
    'MNIST32': [
        transforms.Pad(2),
        transforms.ToTensor(),
    ],
    'CIFAR10': [
        transforms.ToTensor(),
    ],
    'CIFAR100': [
        transforms.ToTensor(),
    ],
    # -per-channel normalization (applied after ToTensor) with dataset-specific statistics
    'CIFAR10_norm': [
        transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616])
    ],
    'CIFAR100_norm': [
        transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.2673, 0.2564, 0.2761])
    ],
    # -inverses of the normalizations above (e.g., for visualizing normalized images)
    'CIFAR10_denorm': UnNormalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]),
    'CIFAR100_denorm': UnNormalize(mean=[0.5071, 0.4865, 0.4409], std=[0.2673, 0.2564, 0.2761]),
    # -data augmentation applied to an already-converted tensor (round-trips through PIL)
    'augment_from_tensor': [
        transforms.ToPILImage(),
        transforms.RandomCrop(32, padding=4, padding_mode='symmetric'),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ],
    # -data augmentation applied to a PIL image (before ToTensor)
    'augment': [
        transforms.RandomCrop(32, padding=4, padding_mode='symmetric'),
        transforms.RandomHorizontalFlip(),
    ],
}

# specify configurations of available data-sets.
# -'size' is height/width in pixels, 'channels' the number of color channels, 'classes' the label count
DATASET_CONFIGS = {
    'MNIST': {'size': 28, 'channels': 1, 'classes': 10},
    'MNIST32': {'size': 32, 'channels': 1, 'classes': 10},
    'CIFAR10': {'size': 32, 'channels': 3, 'classes': 10},
    'CIFAR100': {'size': 32, 'channels': 3, 'classes': 100},
}
diff --git a/PyTorch/build-in/other/continual-learning/data/datastream.py b/PyTorch/build-in/other/continual-learning/data/datastream.py
new file mode 100644
index 000000000..1566652a9
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/data/datastream.py
@@ -0,0 +1,73 @@
+import itertools
+import torch
+from torch.utils.data import DataLoader
+
+
def repeater(data_loader):
    '''Yield items from [data_loader] forever, re-iterating the same loader whenever it is exhausted.'''
    while True:
        for batch in data_loader:
            yield batch
+
+
class DataStream:
    '''Iterator for setting up data-stream, with context for each observation or iteration given by `label_stream`.'''

    def __init__(self, datasets, label_stream, batch_size=1, per_batch=False, shuffle=True, return_context=False):
        '''Instantiate the DataStream-object.
        Args:
            datasets (list): list of Datasets, each on representing a context
            label_stream (LabelStream): iterator dictating from which context (task, domain or class) to sample
            batch_size (int, optional): # of samples per mini-batch (default: ``1``)
            per_batch (bool, optional): if ``True``, each label from `label_stream` specifies entire mini-batch;
                if ``False``, there is separate context-label for each sample in a mini-batch (default: ``False``)
            shuffle (bool, optional): whether the DataLoader should shuffle the Datasets (default: ``True``)
            return_context (bool, optional): whether identity of the context should be returned (default: ``False``)
        '''

        self.datasets = datasets
        self.label_stream = label_stream
        self.n_contexts = label_stream.n_contexts
        self.batch_size = batch_size
        self.per_batch = per_batch
        self.return_context = return_context

        # To keep track of the actual label-sequence being used
        self.sequence = []

        # Create separate data-loader for each context (using 'repeater' to enable looping through them indefinitely)
        # NOTE: when [per_batch] is False, each loader yields single samples (batch-size 1) which are
        #       concatenated into a mini-batch in `__next__`; `drop_last=True` avoids incomplete final batches.
        self.dataloaders = []
        for context_label in range(self.n_contexts):
            self.dataloaders.append(repeater(
                DataLoader(datasets[context_label], batch_size=batch_size if per_batch else 1, shuffle=shuffle,
                           drop_last=True)
            ))

    def __iter__(self):
        return self

    def __next__(self):
        '''Function to return the next batch (x,y,c).

        Ends (by letting StopIteration propagate) when the underlying `label_stream` is exhausted.'''
        if self.per_batch or self.batch_size == 1:
            # All samples in the mini-batch come from same context.
            context_label = next(self.label_stream)
            self.sequence.append(context_label)
            (x, y) = next(self.dataloaders[context_label])
            c = torch.tensor([context_label]*self.batch_size) if self.return_context else None
        else:
            # Multiple samples per mini-batch that might come from different contexts.
            x = []
            y = []
            c = [] if self.return_context else None
            for _ in range(self.batch_size):
                # -draw a separate context-label for every sample in the mini-batch
                context_label = next(self.label_stream)
                self.sequence.append(context_label)
                (xi, yi) = next(self.dataloaders[context_label])
                x.append(xi)
                y.append(yi)
                if self.return_context:
                    c.append(context_label)
            # -concatenate the single-sample batches into one mini-batch
            x = torch.cat(x)
            y = torch.cat(y)
            c = torch.tensor(c) if self.return_context else None
        return (x, y, c)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/data/labelstream.py b/PyTorch/build-in/other/continual-learning/data/labelstream.py
new file mode 100644
index 000000000..7de5bb99f
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/data/labelstream.py
@@ -0,0 +1,122 @@
+import random
+import torch
+
+
class LabelStream:
    '''Abstract base class for iterators that decide from which context the next sample(s) should come.'''

    def __init__(self):
        # Number of contexts in the stream; concrete subclasses must set this.
        self.n_contexts = None

    def __iter__(self):
        # A label-stream is its own iterator.
        return self

    def __next__(self):
        # Concrete subclasses return the next context-id (an int, counting from 0).
        raise NotImplementedError
+
+
class SharpBoundaryStream(LabelStream):
    '''Label-stream with strictly separated contexts (as in the academic continual learning setting).

    Emits context 0 for [iters_per_context] iterations, then context 1, and so on; raises
    StopIteration once all [n_contexts] contexts have been completed.'''

    def __init__(self, n_contexts, iters_per_context):
        '''Instantiate the dissociated-stream object.
        Args:
            n_contexts (int): number of contexts
            iters_per_context (int): number of iterations to generate per context
        '''
        super().__init__()
        self.n_contexts = n_contexts
        self.iters_per_context = iters_per_context
        # Progress trackers: iterations emitted within the current context, and the current context-id
        self.iters_count = 0
        self.context = 0

    def __next__(self):
        self.iters_count += 1
        if self.iters_count > self.iters_per_context:
            # Current context is finished: reset the counter and advance to the next context
            self.iters_count = 1
            self.context += 1
            if self.context >= self.n_contexts:
                raise StopIteration
        return self.context
+
+
class RandomStream(LabelStream):
    '''Completely random label-stream: every iteration an i.i.d. uniformly-drawn context (never ends).'''

    def __init__(self, n_contexts):
        super().__init__()
        self.n_contexts = n_contexts

    def __next__(self):
        # Uniformly pick a context-id from {0, ..., n_contexts-1} (both endpoints inclusive).
        last_context = self.n_contexts - 1
        return random.randint(0, last_context)
+
+
+def _linear_line(number, direction="up"):
+ if direction == "up":
+ return torch.FloatTensor([(i+0.5) / number for i in range(number)])
+ return torch.FloatTensor([1 - ((i+0.5) / number) for i in range(number)])
+
def _probs_per_context(n_contexts, iters_per_context, context_id, fuzziness=3):
    '''Return the (unnormalized) probability of sampling from [context_id] for every iteration of the stream.

    The stream has n_contexts*iters_per_context iterations. Around each context-switch the probability
    ramps up / down linearly over 2*[fuzziness] iterations, producing "fuzzy" context boundaries.
    Context 0 is special: it is active at the very start AND at the very end of the stream
    (each for half a context-length), so every context is active for one full context-length in total.

    Raises:
        ValueError: if 2*[fuzziness] exceeds [iters_per_context]
    Returns:
        probs (FloatTensor): length n_contexts*iters_per_context, values in [0, 1]
    '''
    if (2 * fuzziness) > iters_per_context:
        raise ValueError("Fuzziness must be smaller than half the number of iterations per context.")

    # Start with zero probability for every iteration
    probs = torch.zeros(n_contexts * iters_per_context)

    # Depending on which context, add non-zero probabilities
    if context_id == 0:
        # -first period of seeing context 0 (first half-context, with linear ramp-down at its end)
        end = int(iters_per_context / 2)
        probs[0:(end - fuzziness)].add_(1)
        probs[(end - fuzziness):(end + fuzziness)] = _linear_line(2 * fuzziness, direction="down")
        # -second period of seeing context 0 (last half-context, with linear ramp-up at its start)
        start = int(iters_per_context / 2) + (n_contexts - 1) * iters_per_context
        probs[(start - fuzziness):(start + fuzziness)] = _linear_line(2 * fuzziness, direction="up")
        probs[(start + fuzziness):(iters_per_context * n_contexts)].add_(1)
    else:
        # -context [context_id] is active for one context-length, offset by half a context-length
        start = int(iters_per_context / 2) + (context_id - 1) * iters_per_context
        end = int(iters_per_context / 2) + context_id * iters_per_context
        probs[(start - fuzziness):(start + fuzziness)] = _linear_line(2 * fuzziness, direction="up")
        probs[(start + fuzziness):(end - fuzziness)].add_(1)
        probs[(end - fuzziness):(end + fuzziness)] = _linear_line(2 * fuzziness, direction="down")

    return probs
+
class FuzzyBoundaryStream(LabelStream):
    '''Set up a label-stream for an experiment with fuzzy context boundaries.'''

    def __init__(self, n_contexts, iters_per_context, fuzziness, batch_size=1):
        '''Instantiate the fuzzy-boundary stream.
        Args:
            n_contexts (int): number of contexts
            iters_per_context (int): number of iterations per context
            fuzziness (int): half-width (in iterations) of the linear transition between contexts
            batch_size (int, optional): # of samples drawn per iteration; the same per-iteration
                distribution is used for all samples within an iteration (default: ``1``)
        '''
        super().__init__()
        self.n_contexts = n_contexts
        self.batch_size = batch_size
        self.total_iters = iters_per_context*n_contexts
        # Progress trackers: samples drawn within current iteration, and current iteration-id
        self.batch_count = 0
        self.iters_count = 0

        # For each context, get a tensor with its probability per iteration
        context_probs_per_iter = [_probs_per_context(
            n_contexts, iters_per_context, context_id, fuzziness=fuzziness
        ) for context_id in range(n_contexts)]

        # For each iteration, specify a probability-distribution over the contexts
        # (i.e., transpose the per-context probability curves into per-iteration context-weights)
        self.context_probs = []
        context_probs_tensor = torch.cat(context_probs_per_iter).view(n_contexts, iters_per_context*n_contexts)
        for iter_id in range(iters_per_context*n_contexts):
            self.context_probs.append(context_probs_tensor[:, iter_id])

    def __next__(self):
        self.batch_count += 1
        # -move to next iteration when all mini-batch samples of current iteration are done
        if self.batch_count > self.batch_size:
            self.batch_count = 1
            self.iters_count += 1
            if self.iters_count >= self.total_iters:
                raise StopIteration
        # -sample a context label using the probability-distribution of current iteration
        # (`random.choices` accepts unnormalized weights)
        context_label = random.choices(range(self.n_contexts), self.context_probs[self.iters_count])[0]
        return context_label
diff --git a/PyTorch/build-in/other/continual-learning/data/load.py b/PyTorch/build-in/other/continual-learning/data/load.py
new file mode 100644
index 000000000..a21da5e4a
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/data/load.py
@@ -0,0 +1,155 @@
+import copy
+import numpy as np
+from torchvision import transforms
+from torch.utils.data import ConcatDataset
+from data.manipulate import permutate_image_pixels, SubDataset, TransformedDataset
+from data.available import AVAILABLE_DATASETS, AVAILABLE_TRANSFORMS, DATASET_CONFIGS
+
+
def get_dataset(name, type='train', download=True, capacity=None, permutation=None, dir='./store/datasets',
                verbose=False, augment=False, normalize=False, target_transform=None):
    '''Create [train|valid|test]-dataset.

    Args:
        name (str): key into AVAILABLE_TRANSFORMS / AVAILABLE_DATASETS (e.g., 'MNIST', 'MNIST32', 'CIFAR10')
        type (str): 'train' or 'test' (anything other than 'test' loads the training split)
        download (bool): whether torchvision may download the data if not present in [dir]
        capacity (int, optional): if given, the dataset is replicated until it has at least this many samples
        permutation (array, optional): pixel-permutation to apply to every image
        target_transform (callable, optional): transformation applied to the labels
    Returns:
        dataset: a torchvision Dataset (or ConcatDataset, if replication was needed)
    '''

    # 'MNIST28' and 'MNIST32' both load the underlying MNIST data
    data_name = 'MNIST' if name in ('MNIST28', 'MNIST32') else name
    dataset_class = AVAILABLE_DATASETS[data_name]

    # specify image-transformations to be applied
    # NOTE(review): AVAILABLE_TRANSFORMS has no 'MNIST28' entry, so calling this with name='MNIST28'
    #               would raise KeyError here -- presumably callers only pass 'MNIST'/'MNIST32'; confirm.
    transforms_list = [*AVAILABLE_TRANSFORMS['augment']] if augment else []
    transforms_list += [*AVAILABLE_TRANSFORMS[name]]
    if normalize:
        transforms_list += [*AVAILABLE_TRANSFORMS[name+"_norm"]]
    if permutation is not None:
        # -bind [permutation] as a default argument so the lambda does not late-bind
        transforms_list.append(transforms.Lambda(lambda x, p=permutation: permutate_image_pixels(x, p)))
    dataset_transform = transforms.Compose(transforms_list)

    # load data-set
    dataset = dataset_class('{dir}/{name}'.format(dir=dir, name=data_name), train=False if type=='test' else True,
                            download=download, transform=dataset_transform, target_transform=target_transform)

    # print information about dataset on the screen
    if verbose:
        print(" --> {}: '{}'-dataset consisting of {} samples".format(name, type, len(dataset)))

    # if dataset is (possibly) not large enough, create copies until it is.
    if capacity is not None and len(dataset) < capacity:
        dataset = ConcatDataset([copy.deepcopy(dataset) for _ in range(int(np.ceil(capacity / len(dataset))))])

    return dataset
+
+#----------------------------------------------------------------------------------------------------------#
+
def get_singlecontext_datasets(name, data_dir="./store/datasets", normalize=False, augment=False, verbose=False):
    '''Load, organize and return train- and test-dataset for requested single-context experiment.

    Args:
        name (str): key into DATASET_CONFIGS (e.g., 'MNIST', 'CIFAR10', 'CIFAR100')
        data_dir (str): where the data is stored / downloaded to
        normalize (bool): whether to apply per-channel normalization (denormalizer added to config)
        augment (bool): whether to apply data augmentation to the training set
    Returns:
        ((trainset, testset), config): the two datasets plus a config-dictionary describing them
    '''

    # Get config-dict; take a copy so the shared DATASET_CONFIGS entry is not mutated in place
    # (fixes state leaking into later lookups; consistent with how `get_context_set` handles this)
    config = DATASET_CONFIGS[name].copy()
    config['output_units'] = config['classes']
    config['normalize'] = normalize
    if normalize:
        config['denormalize'] = AVAILABLE_TRANSFORMS[name+"_denorm"]
    # Get the train- and test-datasets (augmentation only applies to training data)
    trainset = get_dataset(name, type='train', dir=data_dir, verbose=verbose, normalize=normalize, augment=augment)
    testset = get_dataset(name, type='test', dir=data_dir, verbose=verbose, normalize=normalize)

    # Return tuple of data-sets and config-dictionary
    return (trainset, testset), config
+
+#----------------------------------------------------------------------------------------------------------#
+
def get_context_set(name, scenario, contexts, data_dir="./datasets", only_config=False, verbose=False,
                    exception=False, normalize=False, augment=False, singlehead=False, train_set_per_class=False):
    '''Load, organize and return a context set (both train- and test-data) for the requested experiment.

    [exception]: ; if True, for visualization no permutation is applied to first context (permMNIST) or digits
    are not shuffled before being distributed over the contexts (e.g., splitMNIST, CIFAR100)'''

    ## NOTE: options 'normalize' and 'augment' only implemented for CIFAR-based experiments.

    # Define data-type
    if name == "splitMNIST":
        data_type = 'MNIST'
    elif name == "permMNIST":
        data_type = 'MNIST32'
        if train_set_per_class:
            raise NotImplementedError('Permuted MNIST currently has no support for separate training dataset per class')
    elif name == "CIFAR10":
        data_type = 'CIFAR10'
    elif name == "CIFAR100":
        data_type = 'CIFAR100'
    else:
        raise ValueError('Given undefined experiment: {}'.format(name))

    # Get config-dict (copy, so the global DATASET_CONFIGS entry is not modified)
    config = DATASET_CONFIGS[data_type].copy()
    config['normalize'] = normalize if name=='CIFAR100' else False
    if config['normalize']:
        config['denormalize'] = AVAILABLE_TRANSFORMS["CIFAR100_denorm"]
    # check for number of contexts
    if contexts > config['classes'] and not name=="permMNIST":
        raise ValueError("Experiment '{}' cannot have more than {} contexts!".format(name, config['classes']))
    # -how many classes per context?
    classes_per_context = 10 if name=="permMNIST" else int(np.floor(config['classes'] / contexts))
    config['classes_per_context'] = classes_per_context
    # -number of output units: shared head ('domain', or 'task' with [singlehead]) uses one head of
    #  [classes_per_context] units; otherwise every context gets its own set of output units
    config['output_units'] = classes_per_context if (scenario=='domain' or
                                                    (scenario=="task" and singlehead)) else classes_per_context*contexts
    # -if only config-dict is needed, return it
    if only_config:
        return config

    # Depending on experiment, get and organize the datasets
    if name == 'permMNIST':
        # get train and test datasets
        trainset = get_dataset(data_type, type="train", dir=data_dir, target_transform=None, verbose=verbose)
        testset = get_dataset(data_type, type="test", dir=data_dir, target_transform=None, verbose=verbose)
        # generate pixel-permutations ([exception] keeps the first context unpermuted)
        if exception:
            permutations = [None] + [np.random.permutation(config['size']**2) for _ in range(contexts-1)]
        else:
            permutations = [np.random.permutation(config['size']**2) for _ in range(contexts)]
        # specify transformed datasets per context
        train_datasets = []
        test_datasets = []
        for context_id, perm in enumerate(permutations):
            # -for 'task' (multi-head) and 'class' scenarios, shift labels into the context's own label-range
            target_transform = transforms.Lambda(
                lambda y, x=context_id: y + x*classes_per_context
            ) if scenario in ('task', 'class') and not (scenario=='task' and singlehead) else None
            train_datasets.append(TransformedDataset(
                trainset, transform=transforms.Lambda(lambda x, p=perm: permutate_image_pixels(x, p)),
                target_transform=target_transform
            ))
            test_datasets.append(TransformedDataset(
                testset, transform=transforms.Lambda(lambda x, p=perm: permutate_image_pixels(x, p)),
                target_transform=target_transform
            ))
    else:
        # prepare permutation to shuffle label-ids (to create different class batches for each random seed)
        classes = config['classes']
        perm_class_list = np.array(list(range(classes))) if exception else np.random.permutation(list(range(classes)))
        target_transform = transforms.Lambda(lambda y, p=perm_class_list: int(p[y]))
        # prepare train and test datasets with all classes
        trainset = get_dataset(data_type, type="train", dir=data_dir, target_transform=target_transform,
                               verbose=verbose, augment=augment, normalize=normalize)
        testset = get_dataset(data_type, type="test", dir=data_dir, target_transform=target_transform, verbose=verbose,
                              augment=augment, normalize=normalize)
        # generate labels-per-dataset (if requested, training data is split up per class rather than per context)
        labels_per_dataset_train = [[label] for label in range(classes)] if train_set_per_class else [
            list(np.array(range(classes_per_context))+classes_per_context*context_id) for context_id in range(contexts)
        ]
        labels_per_dataset_test = [
            list(np.array(range(classes_per_context))+classes_per_context*context_id) for context_id in range(contexts)
        ]
        # split the train and test datasets up into sub-datasets
        # -with a shared head, labels are shifted back so each context's labels start at 0
        train_datasets = []
        for labels in labels_per_dataset_train:
            target_transform = transforms.Lambda(lambda y, x=labels[0]: y-x) if (
                scenario=='domain' or (scenario=='task' and singlehead)
            ) else None
            train_datasets.append(SubDataset(trainset, labels, target_transform=target_transform))
        test_datasets = []
        for labels in labels_per_dataset_test:
            target_transform = transforms.Lambda(lambda y, x=labels[0]: y-x) if (
                scenario=='domain' or (scenario=='task' and singlehead)
            ) else None
            test_datasets.append(SubDataset(testset, labels, target_transform=target_transform))

    # Return tuple of train- and test-dataset, config-dictionary and number of classes per context
    return ((train_datasets, test_datasets), config)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/data/manipulate.py b/PyTorch/build-in/other/continual-learning/data/manipulate.py
new file mode 100644
index 000000000..13c4d356b
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/data/manipulate.py
@@ -0,0 +1,119 @@
+import torch
+from torch.utils.data import Dataset
+
+
def permutate_image_pixels(image, permutation):
    '''Permutate the pixels of an image according to [permutation].

    [image] 3D-tensor containing the image
    [permutation] of pixel-indeces in their new order (or None for a no-op)'''

    if permutation is None:
        return image
    channels, height, width = image.size()
    # Flatten the spatial dimensions, apply the same pixel-permutation to every channel, then restore shape
    flat = image.view(channels, -1)
    shuffled = flat[:, permutation]
    return shuffled.view(channels, height, width)
+
+#----------------------------------------------------------------------------------------------------------#
+
class SubDataset(Dataset):
    '''Dataset exposing only those samples of [original_dataset] whose label is in [sub_labels].

    After this selection has been made, [target_transform] can additionally be applied to the labels,
    which is useful when doing continual learning with a fixed number of output units.'''

    def __init__(self, original_dataset, sub_labels, target_transform=None):
        super().__init__()
        self.dataset = original_dataset
        self.sub_indeces = []
        # Determine each sample's (possibly already-transformed) label; keep the indices that match.
        has_targets = hasattr(original_dataset, "targets")
        for index in range(len(self.dataset)):
            if has_targets:
                # -use the cheap `targets` attribute (avoids loading the image)
                raw_label = self.dataset.targets[index]
                label = raw_label if self.dataset.target_transform is None \
                    else self.dataset.target_transform(raw_label)
            else:
                # -fall back to indexing the dataset itself
                label = self.dataset[index][1]
            if label in sub_labels:
                self.sub_indeces.append(index)
        self.target_transform = target_transform

    def __len__(self):
        return len(self.sub_indeces)

    def __getitem__(self, index):
        sample = self.dataset[self.sub_indeces[index]]
        if self.target_transform:
            sample = (sample[0], self.target_transform(sample[1]))
        return sample
+
+
class MemorySetDataset(Dataset):
    '''Dataset built from a list of arrays with shape (N, C, H, W) (i.e., N images each).

    The images in the i-th entry of [memory_sets] get class-label [i], unless [target_transform] is given.'''

    def __init__(self, memory_sets, target_transform=None):
        super().__init__()
        self.memory_sets = memory_sets
        self.target_transform = target_transform

    def __len__(self):
        # Total number of stored images over all classes
        return sum(len(class_set) for class_set in self.memory_sets)

    def __getitem__(self, index):
        # Walk through the classes until the one containing [index] is found
        offset = 0
        for class_id, class_set in enumerate(self.memory_sets):
            if index < offset + len(class_set):
                label = class_id if self.target_transform is None else self.target_transform(class_id)
                image = torch.from_numpy(class_set[index - offset])
                return (image, label)
            offset += len(class_set)
+
+
class TransformedDataset(Dataset):
    '''Wrap [original_dataset] so that [transform] / [target_transform] are applied on-the-fly.

    This is useful for creating different permutations of MNIST without loading the data multiple times.'''

    def __init__(self, original_dataset, transform=None, target_transform=None):
        super().__init__()
        self.dataset = original_dataset
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        (input, target) = self.dataset[index]
        # Apply the optional transforms; unchanged if not provided
        input = self.transform(input) if self.transform else input
        target = self.target_transform(target) if self.target_transform else target
        return (input, target)
+
+# ----------------------------------------------------------------------------------------------------------#
+
class UnNormalize(object):
    '''Invert a torchvision-style Normalize: per channel, multiply by std and add mean (in place).'''

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """Denormalize image, either single image (C,H,W) or image batch (N,C,H,W)"""
        # For a batch, move the channel-dimension to the front so we can zip over channels
        channels = tensor.permute(1, 0, 2, 3) if tensor.dim() == 4 else tensor
        for channel, m, s in zip(channels, self.mean, self.std):
            channel.mul_(s).add_(m)  # inverse of the normalize step: t.sub_(m).div_(s)
        return tensor
diff --git a/PyTorch/build-in/other/continual-learning/eval/__init__.py b/PyTorch/build-in/other/continual-learning/eval/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/eval/callbacks.py b/PyTorch/build-in/other/continual-learning/eval/callbacks.py
new file mode 100644
index 000000000..16e8b5f80
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/eval/callbacks.py
@@ -0,0 +1,198 @@
+from eval import evaluate
+
+
+#########################################################
+## Callback-functions for evaluating model-performance ##
+#########################################################
+
+def _sample_cb(log, config, visdom=None, test_datasets=None, sample_size=64):
+ '''Initiates function for evaluating samples of generative model.
+
+ [test_datasets] None or of (if provided, also reconstructions are shown)'''
+
+ def sample_cb(generator, batch, context=1, class_id=None, **kwargs):
+ '''Callback-function, to evaluate sample (and reconstruction) ability of the model.'''
+
+ if batch % log == 0:
+
+ # Evaluate reconstruction-ability of model on [test_dataset]
+ if test_datasets is not None:
+ # Reconstruct samples from current context
+ evaluate.show_reconstruction(generator, test_datasets[context-1], config, size=int(sample_size/2),
+ visdom=visdom, context=context)
+
+ # Generate samples
+ evaluate.show_samples(
+ generator, config, visdom=visdom, size=sample_size,
+ visdom_title='Samples{}'.format(" VAE-{}".format(class_id) if class_id is not None else "")
+ )
+
+ # Return the callback-function (except if visdom is not selected!)
+ return sample_cb if (visdom is not None) else None
+
+
+def _eval_cb(log, test_datasets, visdom=None, plotting_dict=None, iters_per_context=None, test_size=None,
+ summary_graph=True, S='mean'):
+ '''Initiates function for evaluating performance of classifier (in terms of accuracy).
+
+ [test_datasets] of ; also if only 1 context, it should be presented as a list!
+ '''
+
+ def eval_cb(classifier, batch, context=1):
+ '''Callback-function, to evaluate performance of classifier.'''
+
+ iteration = batch if (context is None or context==1) else (context-1)*iters_per_context + batch
+
+ # Evaluate the classifier every [log] iterations
+ if iteration % log == 0:
+
+ # If needed, set the requested way of doing inference as attributes of the classifier
+ if (S is not None) and hasattr(classifier, 'S'):
+ classifier.S = S
+
+ # Evaluate the classifier on multiple contexts (and log to visdom)
+ evaluate.test_all_so_far(classifier, test_datasets, context, iteration, test_size=test_size,
+ visdom=visdom, summary_graph=summary_graph, plotting_dict=plotting_dict)
+
+ ## Return the callback-function (except if visdom is not selected!)
+ return eval_cb if (visdom is not None) or (plotting_dict is not None) else None
+
+
+##------------------------------------------------------------------------------------------------------------------##
+
+########################################################################
+## Callback-functions for keeping track of loss and training progress ##
+########################################################################
+
+def _classifier_loss_cb(log=1, visdom=None, model=None, contexts=None, iters_per_context=None, progress_bar=True):
+ '''Initiates function for keeping track of, and reporting on, the progress of the classifier's training.'''
+
+ def cb(bar, iter, loss_dict, context=1):
+ '''Callback-function, to call on every iteration to keep track of training progress.'''
+
+ if visdom is not None:
+ from visual import visual_visdom
+
+ iteration = iter if context==1 else (context-1)*iters_per_context + iter
+
+ # progress-bar
+ if progress_bar and bar is not None:
+ context_stm = "" if (contexts is None) else " Context: {}/{} |".format(context, contexts)
+ bar.set_description(
+ ' |{t_stm} training loss: {loss:.3} | training accuracy: {prec:.3} |'
+ .format(t_stm=context_stm, loss=loss_dict['loss_total'], prec=loss_dict['accuracy'])
+ )
+ bar.update(1)
+
+ # log the loss of the solver (to visdom)
+ if (visdom is not None) and (iteration % log == 0):
+ if contexts is None or contexts==1:
+ plot_data = [loss_dict['pred']]
+ names = ['prediction']
+ else:
+ plot_data = [loss_dict['pred']]
+ names = ['current']
+ if hasattr(model, 'replay') and not model.replay=='none':
+ if model.replay_targets == "hard":
+ plot_data += [loss_dict['pred_r']]
+ names += ['replay']
+ elif model.replay_targets == "soft":
+ plot_data += [loss_dict['distil_r']]
+ names += ['distill']
+ if hasattr(model, 'reg_strength') and model.reg_strength>0:
+ plot_data += [loss_dict['param_reg']]
+ names += ['param reg']
+ visual_visdom.visualize_scalars(
+ scalars=plot_data, names=names, iteration=iteration,
+ title="CLASSIFIER: loss ({})".format(visdom["graph"]), env=visdom["env"], ylabel="training loss"
+ )
+
+ # Return the callback-function.
+ return cb
+
+
def _VAE_loss_cb(log=1, visdom=None, model=None, contexts=None, iters_per_context=None, replay=False,
                 progress_bar=True):
    '''Initiates functions for keeping track of, and reporting on, the progress of the generator's training.

    [log]                int, plot training loss to visdom every [log] iterations
    [visdom]             None or <dict> with names of "graph" and "env" (if None, no visdom-plots are made)
    [model]              the VAE being trained (its [label] and [lamda_pl] decide whether accuracy is shown)
    [contexts]           None or int, total number of contexts (only used for the progress-bar text)
    [iters_per_context]  int, needed to compute the global iteration number when [context]>1
    [replay]             bool, whether losses on replayed data should be plotted as well
    [progress_bar]       bool, whether to update the provided tqdm progress-bar'''

    # Import the plotting utilities once at factory-level (the callback itself may run every iteration)
    if visdom is not None:
        from visual import visual_visdom

    def cb(bar, iter, loss_dict, context=1):
        '''Callback-function, to perform on every iteration to keep track of training progress.'''

        # Convert the within-context iteration number to a global iteration number
        iteration = iter if context==1 else (context-1)*iters_per_context + iter

        # progress-bar
        if progress_bar and bar is not None:
            context_stm = "" if (contexts is None) else " Context: {}/{} |".format(context, contexts)
            # -only a conditional VAE with an active prediction-loss also reports a training accuracy
            bar.set_description(' |{t_stm} training loss: {loss:.3} |{acc}'.format(
                t_stm=context_stm, loss=loss_dict['loss_total'], acc=' training accuracy: {:.3} |'.format(
                    loss_dict['accuracy']
                ) if model.label=='CondVAE' and model.lamda_pl>0 else ''
            ))
            bar.update(1)

        # log the loss of the solver (to visdom)
        if (visdom is not None) and (iteration % log == 0):
            if contexts is None or contexts==1:
                # -single-context setting: only losses on current data
                # (NOTE(review): this branch checks only [lamda_pl], not [model.label] — presumably
                #  intentional, as a plain VAE is expected to have lamda_pl==0; confirm against callers)
                plot_data = [loss_dict['recon'], loss_dict['variat']]
                names = ['Recon', 'Variat']
                if model.lamda_pl > 0:
                    plot_data += [loss_dict['pred']]
                    names += ['Prediction']
            else:
                # -multi-context setting: losses on current data ...
                plot_data = [loss_dict['recon'], loss_dict['variat']]
                names = ['Recon', 'Variat']
                if model.label=='CondVAE' and model.lamda_pl > 0:
                    plot_data += [loss_dict['pred']]
                    names += ['Prediction']
                # ... plus, if requested, the corresponding losses on replayed data
                if replay:
                    plot_data += [loss_dict['recon_r'], loss_dict['variat_r']]
                    names += ['Recon - r', 'Variat - r']
                    if model.label=='CondVAE' and model.lamda_pl>0:
                        if model.replay_targets=="hard":
                            plot_data += [loss_dict['pred_r']]
                            names += ['Pred - r']
                        elif model.replay_targets=="soft":
                            plot_data += [loss_dict['distil_r']]
                            names += ['Distill - r']
            visual_visdom.visualize_scalars(
                scalars=plot_data, names=names, iteration=iteration,
                title="VAE: loss ({})".format(visdom["graph"]), env=visdom["env"], ylabel="training loss"
            )

    # Return the callback-function
    return cb
+
+
+def _gen_classifier_loss_cb(log=1, classes=None, visdom=None, progress_bar=True):
+ '''Initiates functions for keeping track of, and reporting on, the progress of the generator's training.'''
+
+ if visdom is not None:
+ from visual import visual_visdom
+
+ def cb(bar, iter, loss_dict, class_id=0):
+ '''Callback-function, to perform on every iteration to keep track of training progress.'''
+
+ # progress-bar
+ if progress_bar and bar is not None:
+ class_stm = "" if (classes is None) else " Class: {}/{} |".format(class_id+1, classes)
+ model_stm = " " if (classes is None) else " "
+ bar.set_description('{m_stm}|{c_stm} training loss: {loss:.3} |'
+ .format(m_stm=model_stm, c_stm=class_stm, loss=loss_dict['loss_total']))
+ bar.update(1)
+
+ # plot training loss every [log]
+ if (visdom is not None) and (iter % log == 0):
+ plot_data = [loss_dict['recon'], loss_dict['variat']]
+ names = ['Recon loss', 'Variat loss']
+
+ visual_visdom.visualize_scalars(
+ scalars=plot_data, names=names, iteration=iter,
+ title="VAE{}: loss ({})".format("" if classes is None else "-{}".format(class_id), visdom["graph"]),
+ env=visdom["env"], ylabel="training loss"
+ )
+
+ # Return the callback-function
+ return cb
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/eval/evaluate.py b/PyTorch/build-in/other/continual-learning/eval/evaluate.py
new file mode 100644
index 000000000..1f1d10832
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/eval/evaluate.py
@@ -0,0 +1,222 @@
+import numpy as np
+import torch
+from visual import visual_plt
+from visual import visual_visdom
+from utils import get_data_loader,checkattr
+
+
+####--------------------------------------------------------------------------------------------------------------####
+
+####-----------------------------####
+####----CLASSIFIER EVALUATION----####
+####-----------------------------####
+
def test_acc(model, dataset, batch_size=128, test_size=1024, verbose=True, context_id=None, allowed_classes=None,
             no_context_mask=False, **kwargs):
    '''Evaluate accuracy (= proportion of samples classified correctly) of a classifier ([model]) on [dataset].

    [batch_size]        int, number of samples classified per forward-pass
    [test_size]         None or int, maximum number of samples to evaluate (if None, the full dataset is used)
    [context_id]        None or int (zero-based), context that [dataset] belongs to (used for the XdG-mask
                          and for selecting the correct sub-network with "SeparateClassifiers")
    [allowed_classes]   None or <list> containing all "active classes" between which should be chosen
                          (these "active classes" are assumed to be contiguous)
    [no_context_mask]   bool, if True any XdG gating-mask is removed rather than applied

    Returns the accuracy as a <float> between 0 and 1.'''

    # Get device-type / using cuda?
    device = model.device if hasattr(model, 'device') else model._device()
    cuda = model.cuda if hasattr(model, 'cuda') else model._is_on_cuda()

    # Set model to eval()-mode (the original mode is remembered so it can be restored at the end)
    mode = model.training
    model.eval()

    # Apply context-specifc "gating-mask" for each hidden fully connected layer (or remove it!)
    if hasattr(model, "mask_dict") and model.mask_dict is not None:
        if no_context_mask:
            model.reset_XdGmask()
        else:
            model.apply_XdGmask(context=context_id+1)  # -contexts are numbered from 1 here

    # Should output-labels be adjusted for allowed classes? (ASSUMPTION: [allowed_classes] has consecutive numbers)
    label_correction = 0 if checkattr(model, 'stream_classifier') or (allowed_classes is None) else allowed_classes[0]

    # If there is a separate network per context, select the correct subnetwork
    # (NOTE: [model] is rebound here, so everything below operates on the sub-network)
    if model.label=="SeparateClassifiers":
        model = getattr(model, 'context{}'.format(context_id+1))
        allowed_classes = None

    # Loop over batches in [dataset]
    data_loader = get_data_loader(dataset, batch_size, cuda=cuda)
    total_tested = total_correct = 0
    for x, y in data_loader:
        # -break on [test_size] (if "None", full dataset is used)
        if test_size:
            if total_tested >= test_size:
                break
        # -if the model is a "stream-classifier", add context
        if checkattr(model, 'stream_classifier'):
            context_tensor = torch.tensor([context_id]*x.shape[0]).to(device)
        # -evaluate model (if requested, only on [allowed_classes])
        with torch.no_grad():
            if checkattr(model, 'stream_classifier'):
                scores = model.classify(x.to(device), context=context_tensor)
            else:
                scores = model.classify(x.to(device), allowed_classes=allowed_classes)
        _, predicted = torch.max(scores.cpu(), 1)
        if model.prototypes and max(predicted).item() >= model.classes:
            # -in case of Domain-IL (or Task-IL + singlehead), collapse all corresponding domains to same class
            predicted = predicted % model.classes
        # -update statistics (targets are shifted so they match the re-indexed predictions)
        y = y-label_correction
        total_correct += (predicted == y).sum().item()
        total_tested += len(x)
    accuracy = total_correct / total_tested

    # Set model back to its initial mode, print result on screen (if requested) and return it
    model.train(mode=mode)
    if verbose:
        print('=> accuracy: {:.3f}'.format(accuracy))
    return accuracy
+
+
def test_all_so_far(model, datasets, current_context, iteration, test_size=None, no_context_mask=False,
                    visdom=None, summary_graph=True, plotting_dict=None, verbose=False):
    '''Evaluate accuracy of a classifier (=[model]) on all contexts so far (= up to [current_context]) using [datasets].

    [current_context]   int or None; if None (task-free stream setting), all contexts are evaluated
    [iteration]         int, current training-iteration (used as x-value for plotting)
    [plotting_dict]     None or <dict> (as created by [initiate_plotting_dict]) to which results are appended
    [visdom]            None or <dict> with name of "graph" and "env" (if None, no visdom-plots are made)'''

    n_contexts = len(datasets)

    # Evaluate accuracy of model predictions
    # - in the academic CL setting: for all contexts so far, reporting "0" for future contexts
    # - in task-free stream setting (current_context==None): always for all contexts
    precs = []
    for i in range(n_contexts):
        if (current_context is None) or (i+1 <= current_context):
            # -for Task-IL (with separate heads), restrict predictions to the classes of context [i]
            allowed_classes = None
            if model.scenario=='task' and not checkattr(model, 'singlehead'):
                allowed_classes = list(range(model.classes_per_context * i, model.classes_per_context * (i + 1)))
            precs.append(test_acc(model, datasets[i], test_size=test_size, verbose=verbose,
                                  allowed_classes=allowed_classes, no_context_mask=no_context_mask, context_id=i))
        else:
            # -contexts not yet trained on are reported as 0
            precs.append(0)
    if current_context is None:
        # -in the stream setting, average over all contexts
        #  (NOTE: [i] is the loop-variable leaked from the for-loop above, so i+1 == n_contexts)
        current_context = i+1
    average_precs = sum([precs[context_id] for context_id in range(current_context)]) / current_context

    # Print results on screen
    if verbose:
        print(' => ave accuracy: {:.3f}'.format(average_precs))

    # Add results to [plotting_dict]
    if plotting_dict is not None:
        for i in range(n_contexts):
            plotting_dict['acc per context']['context {}'.format(i+1)].append(precs[i])
        plotting_dict['average'].append(average_precs)
        plotting_dict['x_iteration'].append(iteration)
        plotting_dict['x_context'].append(current_context)

    # Send results to visdom server
    names = ['context {}'.format(i + 1) for i in range(n_contexts)]
    if visdom is not None:
        visual_visdom.visualize_scalars(
            precs, names=names, title="accuracy ({})".format(visdom["graph"]),
            iteration=iteration, env=visdom["env"], ylabel="test accuracy"
        )
        if n_contexts>1 and summary_graph:
            visual_visdom.visualize_scalars(
                [average_precs], names=["ave"], title="ave accuracy ({})".format(visdom["graph"]),
                iteration=iteration, env=visdom["env"], ylabel="test accuracy"
            )
+
+
def initiate_plotting_dict(n_contexts):
    '''Initiate <dict> with accuracy-measures to keep track of for plotting.'''
    return {
        # per-context test accuracy, appended to each time the model is evaluated
        "acc per context": {"context {}".format(i + 1): [] for i in range(n_contexts)},
        # average accuracy over all contexts so far: Task-IL -> only classes in context
        #                                            Class-IL -> all classes so far
        "average": [],
        # total number of iterations so far
        "x_iteration": [],
        # number of contexts so far (i.e., context on which training just finished)
        "x_context": [],
    }
+
+
+####--------------------------------------------------------------------------------------------------------------####
+
+####-----------------------------####
+####----GENERATION EVALUATION----####
+####-----------------------------####
+
def show_samples(model, config, pdf=None, visdom=None, size=32, pdf_title="Generated images", visdom_title="Samples"):
    '''Plot samples from a generative model in [pdf] and/or in [visdom].'''

    # Remember the training-mode of the model, then switch to evaluation-mode
    previous_mode = model.training
    model.eval()

    # Sample from the model and reshape the result into image-format
    generated = model.sample(size)
    image_tensor = generated.view(-1, config['channels'], config['size'], config['size']).cpu()
    if config['normalize']:
        # -undo the normalization so images are plotted in their original value-range
        image_tensor = config['denormalize'](image_tensor).clamp(min=0, max=1)

    # Arrange the images on an (approximately) square grid and plot them
    nrow = int(np.ceil(np.sqrt(size)))
    if pdf is not None:
        visual_plt.plot_images_from_tensor(image_tensor, pdf, title=pdf_title, nrow=nrow)
    if visdom is not None:
        visual_visdom.visualize_images(
            tensor=image_tensor, title='{} ({})'.format(visdom_title, visdom["graph"]), env=visdom["env"], nrow=nrow,
        )

    # Restore the training-mode the model was originally in
    model.train(mode=previous_mode)
+
+
+####--------------------------------------------------------------------------------------------------------------####
+
+####---------------------------------####
+####----RECONSTRUCTION EVALUATION----####
+####---------------------------------####
+
def show_reconstruction(model, dataset, config, pdf=None, visdom=None, size=32, context=None):
    '''Plot reconstructed examples by an auto-encoder [model] on [dataset], in [pdf] and/or in [visdom].'''

    # Remember the training-mode of the model, then switch to evaluation-mode
    previous_mode = model.training
    model.eval()

    # Fetch one batch of [size] test-examples
    data_loader = get_data_loader(dataset, size, cuda=model._is_on_cuda())
    (data, labels) = next(iter(data_loader))
    data, labels = data.to(model._device()), labels.to(model._device())

    # Run the examples through the model to obtain their reconstructions
    with torch.no_grad():
        recon_batch = model(data, full=False)

    # Stack the originals on top of their reconstructions, reshaped into image-format
    originals = data.view(-1, config['channels'], config['size'], config['size'])[:size]
    reconstructions = recon_batch.view(-1, config['channels'], config['size'], config['size'])[:size]
    image_tensor = torch.cat([originals, reconstructions]).cpu()
    image_tensor = image_tensor.view(-1, config['channels'], config['size'], config['size'])
    if config['normalize']:
        # -undo the normalization so images are plotted in their original value-range
        image_tensor = config['denormalize'](image_tensor).clamp(min=0, max=1)

    # Arrange the images on an (approximately) square grid and plot them
    nrow = int(np.ceil(np.sqrt(size * 2)))
    if pdf is not None:
        context_stm = "" if context is None else " (context {})".format(context)
        visual_plt.plot_images_from_tensor(
            image_tensor, pdf, nrow=nrow, title="Reconstructions" + context_stm
        )
    if visdom is not None:
        visual_visdom.visualize_images(
            tensor=image_tensor, title='Reconstructions ({})'.format(visdom["graph"]), env=visdom["env"], nrow=nrow,
        )

    # Restore the training-mode the model was originally in
    model.train(mode=previous_mode)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/example_DGR.ipynb b/PyTorch/build-in/other/continual-learning/example_DGR.ipynb
new file mode 100644
index 000000000..2988512fb
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/example_DGR.ipynb
@@ -0,0 +1,628 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a00a09c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load required libraries\n",
+ "import torch\n",
+ "import tqdm\n",
+ "import copy\n",
+ "import numpy as np\n",
+ "# -custom-written libraries\n",
+ "import utils\n",
+ "from data.load import get_context_set\n",
+ "from models.classifier import Classifier\n",
+ "from models.vae import VAE\n",
+ "from eval import evaluate, callbacks as cb\n",
+ "from visual import visual_plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "cb0539a1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Enable plotting in the notebook\n",
+ "%matplotlib inline "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "57e3b84a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CUDA is used\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Is cuda available?\n",
+ "cuda = torch.cuda.is_available()\n",
+ "device = torch.device(\"cuda\" if cuda else \"cpu\")\n",
+ "print(\"CUDA is {}used\".format(\"\" if cuda else \"not \"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "320f9e43",
+ "metadata": {},
+ "source": [
+ "## DATA: Prepare the data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "c6ead555",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify what kind of continual learning experiment we should run\n",
+ "experiment = \"splitMNIST\" #--> create context set by splitting up the MNIST dataset\n",
+ "contexts = 5 #--> split the dataset up into how many contexts?\n",
+ "iters = 1000 #--> number of iterations per context\n",
+ "batch = 128 #--> number of samples per iteration (i.e., the mini-batch size)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "ee01ec88",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify according to which scenario the continual learning experiment should be performed?\n",
+ "scenario = \"class\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "2746c66a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Where is, or should, the data be stored?\n",
+ "d_dir = './store/datasets'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "d9b6f329",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " --> MNIST: 'train'-dataset consisting of 60000 samples\n",
+ " --> MNIST: 'test'-dataset consisting of 10000 samples\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load the context set (both train- and test-data) for the specified continual learning experiment\n",
+ "(train_datasets, test_datasets), config = get_context_set(\n",
+ " name=experiment, scenario=scenario, contexts=contexts, data_dir=d_dir, verbose=True, exception=True,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "51fc95ea",
+ "metadata": {},
+ "source": [
+ "## CLASSIFIER: Specify the classifier network"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "06a212fa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify the architectural layout of the network to use\n",
+ "fc_lay = 3 #--> number of fully-connected layers\n",
+ "fc_units = 400 #--> number of units in each hidden layer\n",
+ "fc_bn = False #--> use batch-norm\n",
+ "fc_nl = \"relu\" #--> what non-linearity to use?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "a9749091",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define the model\n",
+ "model = Classifier(\n",
+ " image_size=config['size'], image_channels=config['channels'], classes=config['output_units'],\n",
+ " # -conv-layers are not used\n",
+ " depth=0,\n",
+ " # -fc-layers\n",
+ " fc_layers=fc_lay, fc_units=fc_units, fc_bn=fc_bn, fc_nl=fc_nl, excit_buffer=True,\n",
+ ").to(device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "d6b15c63",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Indicate to model what scenario it will be trained on and how many classes there are in each context\n",
+ "model.scenario = scenario\n",
+ "model.classes_per_context = config['classes_per_context']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "6b08f0cf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Indicate to the classifier model that it will be trained with generative replay\n",
+ "model.replay_mode = 'generative'\n",
+ "model.replay_targets = 'hard'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "749dc0cd",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-------------------------------------------------------\n",
+ "Classifier(\n",
+ " (convE): ConvLayers(\n",
+ " (pooling): Identity()\n",
+ " )\n",
+ " (flatten): Flatten()\n",
+ " (fcE): MLP(\n",
+ " (fcLayer1): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=784, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " (fcLayer2): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " )\n",
+ " (classifier): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=10)\n",
+ " )\n",
+ ")\n",
+ "-------------------------------------------------------\n",
+ "--> this network has 478410 parameters (~0.5 million)\n",
+ " of which: - learnable: 478410 (~0.5 million)\n",
+ " - fixed: 0 (~0.0 million)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Print layout of the model to the screen\n",
+ "utils.print_model_info(model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "10f44613",
+ "metadata": {},
+ "source": [
+ "## GENERATOR: Specify the generative model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "67d36f29",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify the architectural layout of the network to use\n",
+ "g_fc_lay = 3 #--> number of fully-connected layers\n",
+ "g_fc_units = 400 #--> number of units in each hidden layer\n",
+ "g_fc_bn = False #--> use batch-norm\n",
+ "g_fc_nl = \"relu\" #--> what non-linearity to use?\n",
+ "g_z_dim = 100 #--> number of units in latent space VAE"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "654bd91d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define the model\n",
+ "generator = VAE(\n",
+ " image_size=config['size'], image_channels=config['channels'],\n",
+ " # -conv-layers are not used\n",
+ " depth=0,\n",
+ " # -fc-layers\n",
+ " fc_layers=g_fc_lay, fc_units=g_fc_units, fc_bn=g_fc_bn, fc_nl=g_fc_nl, excit_buffer=True,\n",
+ " # -prior\n",
+ " prior='standard', z_dim=g_z_dim,\n",
+ " #-decoder\n",
+ " recon_loss='BCE', network_output='sigmoid'\n",
+ ").to(device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "3a2575d3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-------------------------------------------------------\n",
+ "VAE(\n",
+ " (convE): ConvLayers(\n",
+ " (pooling): Identity()\n",
+ " )\n",
+ " (flatten): Flatten()\n",
+ " (fcE): MLP(\n",
+ " (fcLayer1): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=784, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " (fcLayer2): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " )\n",
+ " (toZ): fc_layer_split(\n",
+ " (mean): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=100)\n",
+ " )\n",
+ " (logvar): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=100)\n",
+ " )\n",
+ " )\n",
+ " (fromZ): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=100, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " (fcD): MLP(\n",
+ " (fcLayer1): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " (fcLayer2): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=784)\n",
+ " (nl): Sigmoid()\n",
+ " )\n",
+ " )\n",
+ " (to_image): Reshape(channels = 1)\n",
+ " (convD): DeconvLayers()\n",
+ ")\n",
+ "-------------------------------------------------------\n",
+ "--> this network has 1069684 parameters (~1.1 million)\n",
+ " of which: - learnable: 1069684 (~1.1 million)\n",
+ " - fixed: 0 (~0.0 million)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Print layout of the generator to the screen\n",
+ "utils.print_model_info(generator)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6448262e",
+ "metadata": {},
+ "source": [
+ "## PREPARE FOR TRAINING"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "680da4f9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Settings for the optimizer to use\n",
+ "lr = 0.001 #--> learning rate to use"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "8e3602ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For the classifier model, set up the optimizer to use\n",
+ "model.optim_list = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': lr}]\n",
+ "model.optimizer = torch.optim.Adam(model.optim_list, betas=(0.9, 0.999))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "fe5483d1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For the generative model, set up the optimizer to use\n",
+ "generator.optim_list = [{'params': filter(lambda p: p.requires_grad, generator.parameters()), 'lr': lr}]\n",
+ "generator.optimizer = torch.optim.Adam(generator.optim_list, betas=(0.9, 0.999))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "c0403c75",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Prepare for keeping track of performance (on test set) during training, for later plotting\n",
+ "plotting_dict = evaluate.initiate_plotting_dict(contexts)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "fe52ca66",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define callback function for keeping track of performance throughout training\n",
+ "eval_periodicity = 50\n",
+ "eval_cb = cb._eval_cb(log=eval_periodicity, test_datasets=test_datasets, plotting_dict=plotting_dict,\n",
+ " visdom=None, iters_per_context=iters, test_size=100)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "1e4e4e84",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Set the models in training mode\n",
+ "_ = model.train()\n",
+ "_ = generator.train()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "81b4b344",
+ "metadata": {},
+ "source": [
+ "## TRAIN: Train the classifier and generative model on the continual learning experiment"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "c6013758",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 1.02e-06 | training accuracy: 1.0 |: 100%|████████████████████████████| 1000/1000 [00:28<00:00, 35.00it/s]\n",
+ " | Context: 2/5 | training loss: 0.00302 | training accuracy: 1.0 |: 100%|█████████████████████████████| 1000/1000 [00:35<00:00, 27.94it/s]\n",
+ " | Context: 3/5 | training loss: 0.0424 | training accuracy: 1.0 |: 100%|██████████████████████████████| 1000/1000 [00:36<00:00, 27.69it/s]\n",
+ " | Context: 4/5 | training loss: 0.0366 | training accuracy: 1.0 |: 100%|██████████████████████████████| 1000/1000 [00:36<00:00, 27.38it/s]\n",
+ " | Context: 5/5 | training loss: 0.14 | training accuracy: 0.977 |: 100%|██████████████████████████████| 1000/1000 [00:37<00:00, 26.70it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Indicate that on first context, there are no stored models yet for generating replay\n",
+ "previous_model = previous_generator = None\n",
+ " \n",
+ "# Loop over all contexts.\n",
+ "for context, train_dataset in enumerate(train_datasets, 1):\n",
+ " \n",
+ " # Find [active_classes]\n",
+ " if scenario==\"task\":\n",
+ " # -for Task-IL scenario, create for each context so far a <list> with the active classes\n",
+ " active_classes = [list(\n",
+ " range(model.classes_per_context * i, model.classes_per_context * (i+1))\n",
+ " ) for i in range(context)]\n",
+ " else:\n",
+ " # -for Domain- and Class-IL scenario, always all classes are active\n",
+ " active_classes = None\n",
+ " \n",
+ " # Initialize # iters left on current data-loader(s)\n",
+ " iters_left = 1\n",
+ " \n",
+ " # Define tqdm progress bar\n",
+ " progress = tqdm.tqdm(range(1, iters+1))\n",
+ " \n",
+ " # Loop over all iterations\n",
+ " for batch_index in range(1, iters+1):\n",
+ " \n",
+ " # Update # iters left on current data-loader(s) and, if needed, create new one(s)\n",
+ " iters_left -= 1\n",
+ " if iters_left==0:\n",
+ " data_loader = iter(utils.get_data_loader(train_dataset, batch, cuda=cuda, drop_last=True))\n",
+ " iters_left = len(data_loader)\n",
+ " \n",
+ " # -----------------Collect data------------------#\n",
+ "\n",
+ " #--> Sample training data of current context\n",
+ " x, y = next(data_loader)\n",
+ " # Adjust y-targets to 'active range'\n",
+ " y = y-model.classes_per_context*(context-1) if scenario=='task' else y\n",
+ " # Transfer them to correct device\n",
+ " x, y = x.to(device), y.to(device)\n",
+ " \n",
+ " #--> Sample inputs to be replayed from stored copy of generative model\n",
+ " if previous_model is not None:\n",
+ " x_ = previous_generator.sample(batch, only_x=True)\n",
+ " if not scenario=='task':\n",
+ " with torch.no_grad():\n",
+ " scores_ = previous_model.classify(x_)\n",
+ " _, y_ = torch.max(scores_, dim=1)\n",
+ " else:\n",
+ " # With Task-IL, [x_] needs to be evaluated according to each past context\n",
+ " scores_ = list()\n",
+ " y_ = list()\n",
+ " with torch.no_grad():\n",
+ " all_scores_ = previous_model.classify(x_)\n",
+ " for context_id in range(context-1):\n",
+ " temp_scores_ = all_scores_[:, active_classes[context_id]]\n",
+ " scores_.append(temp_scores_)\n",
+ " _, temp_y_ = torch.max(temp_scores_, dim=1)\n",
+ " y_.append(temp_y_)\n",
+ "\n",
+ " # Only keep predicted y/scores if required (as otherwise unnecessary computations will be done)\n",
+ " y_ = y_ if (model.replay_targets == \"hard\") else None\n",
+ " scores_ = scores_ if (model.replay_targets == \"soft\") else None\n",
+ " else:\n",
+ " x_ = y_ = scores_ = None #-> when training on the first context, there is no replay yet\n",
+ " \n",
+ " # Train the classifier model on this batch\n",
+ " loss_dict = model.train_a_batch(\n",
+ " x, y, x_=x_, y_=y_, scores_=scores_, rnt = 1./context,\n",
+ " active_classes=active_classes, context=context\n",
+ " )\n",
+ " \n",
+ " # Train the generative model on this batch\n",
+ " _ = generator.train_a_batch(x, x_=x_, rnt=1./context)\n",
+ " \n",
+ " # Update progress bar\n",
+ " context_stm = \" Context: {}/{} |\".format(context, contexts)\n",
+ " progress.set_description(\n",
+ " ' |{t_stm} training loss: {loss:.3} | training accuracy: {prec:.3} |'\n",
+ " .format(t_stm=context_stm, loss=loss_dict['loss_total'], prec=loss_dict['accuracy'])\n",
+ " )\n",
+ " progress.update(1)\n",
+ " \n",
+ " # Execute callback-function to keep track of performance throughout training\n",
+ " if eval_cb is not None:\n",
+ " eval_cb(model, batch_index, context=context)\n",
+ " \n",
+ " ##----------> UPON FINISHING EACH CONTEXT...\n",
+ "\n",
+ "    # Close progress-bar\n",
+ " progress.close()\n",
+ " \n",
+ " # Update the source for the replay\n",
+ " previous_generator = copy.deepcopy(generator).eval()\n",
+ " previous_model = copy.deepcopy(model).eval()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "41bcf17f",
+ "metadata": {},
+ "source": [
+ "## EVALUATION: average accuracy throughout training and samples from generator"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "ca4edf2d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAakAAAGzCAYAAACVYeimAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9d5gdV30+/t7e6/aVVqtqybJNDMYY00wxCGJTAgQIKTYQ4AfGYEwJ9hOw5RIDSbCDCwGSAAEcCAESEgIkOJQETPnagA2y1etq+97ey/z+EO/RZ87OXRXv7p2V5n2e+9zduW3mzDmf8n7KcRmGYcCBAwcOHDiwIdzdPgEHDhw4cOCgExwl5cCBAwcObAtHSTlw4MCBA9vCUVIOHDhw4MC2cJSUAwcOHDiwLRwl5cCBAwcObAtHSTlw4MCBA9vCUVIOHDhw4MC2cJSUAwcOHDiwLRwl5WBR4HK5cPPNNy/rb05OTuLVr341enp64HK5cNdddy3r7zs4NRw4cAAulwuf/exnu30qy461a9fi6quvPq3PdmNt2QmOkrIRHn30Ubz61a/G6OgogsEgVq1ahRe+8IW4++67u31qtsS73/1ufOc738ENN9yAz3/+83jxi1/c7VNyAOD+++93DAYHiwZvt0/AwTH8+Mc/xvOe9zysWbMGb37zmzE4OIjDhw/jJz/5Cf7mb/4G1157bbdP0Xb4n//5H7z85S/He9/73m6figOB+++/H7/+9a9x3XXXmY6Pjo6iUqnA5/N158QcrEg4SsomuP3225FIJPDzn/8cyWTS9NrU1FR3TsrmmJqamjdWViiVSohEIkt/QmcoyuUywuHwE/4el8uFYDC4CGfk4GyCQ/fZBHv37sV5551nKXT7+/tN/3/mM5/B85//fPT39yMQCGDr1q34xCc+Me9za9euxZVXXonvf//7eOpTn4pQKIQLLrgA3//+9wEAX/va13DBBRcgGAzioosuwi9+8QvT56+++mpEo1Hs27cP27ZtQyQSwfDwMG655RacTPP8sbExvPGNb8TAwAACgQDOO+88/MM//MO89919990477zzEA6HkUql8NSnPhX3339/x+/97Gc/C5fLBcMwcO+998LlcsHlcple+8EPfoC3v/3t6O/vx+rVq9Vn77vvPpx33nkIBAIYHh7GNddcg2w2a/r+5z73uTj//PPxyCOP4LLLLkM4HMbGjRvxL//yLwCAH/zgB7jkkksQCoWwefNmfPe73z3hWADAwYMH8bKXvQyRSAT9/f2KrnS5XOqeED/96U/x4he/GIlEAuFwGJdddhl+9KMfmd5z8803w+VyYc+ePbj66quRTCaRSCTwhje8AeVyed7vf+ELX8BFF12EUCiEdDqN173udTh8+LDltT/00EN4znOeg3A4jBtvvBEA8G//9m+44oorMDw8jEAggA0bNuDWW29Fq9Uyff6b3/wmDh48qO7L2rVrAcyPSf3VX/0VXC4XDh48OO9cb7jhBvj9fmQymVMaEyt8//vfh8vlwj//8z9j+/btWLVqFWKxGF796lcjl8uhVqvhuuuuQ39/P6LRKN7whjegVquZvqPZbOLWW2/Fhg0bEAgEsHbtWtx4443z3mcYBm677TasXr0a4XAYz3ve8/Cb3/zG8ryy2Syuu+46jIyMIBAIYOPGjfjIRz6Cdrt9wms6q2A4sAVe9KIXGbFYzHj00UdP+N6LL77YuPrqq40777zTuPvuu40XvehFBgDjnnvuMb1vdHTU2Lx5szE0NGTcfPPNxp133mmsWrXKiEajxhe+8AVjzZo1xoc//GHjwx/+sJFIJIyNGzcarVZLff6qq64ygsGgsWnTJuOP//iPjXvuuce48sorDQDGBz/4QdNvATBuuukm9f/ExISxevVqY2RkxLjllluMT3ziE8bLXvYyA4Bx5513qvd96lOfMgAYr371q41PfvKTxt/8zd8Yb3rTm4x3vvOdHa9/7969
xuc//3kDgPHCF77Q+PznP298/vOfNwzDMD7zmc8YAIytW7cal112mXH33XcbH/7whw3DMIybbrrJAGBcfvnlxt1332284x3vMDwej3HxxRcb9Xpdff9ll11mDA8PGyMjI8b73vc+4+677za2bt1qeDwe40tf+pIxODho3HzzzcZdd91lrFq1ykgkEkY+n1/wnhWLRWP9+vVGKBQyPvCBDxh33XWX8bSnPc34nd/5HQOA8b3vfU+994EHHjD8fr9x6aWXGn/9139t3HnnncaTnvQkw+/3Gz/96U/V+3g9T37yk41XvvKVxn333Wf86Z/+qQHAeP/732/6/dtuu81wuVzGa1/7WuO+++4ztm/fbvT29hpr1641MpmM6doHBweNvr4+49prrzU++clPGv/6r/9qGIZhvOIVrzBe85rXGH/5l39pfOITnzB+//d/3wBgvPe971Wf/6//+i/jwgsvNHp7e9V9+frXv24YhmHs37/fAGB85jOfMQzDMA4ePGi4XC7jox/96LzxWr9+vXHFFVec8phY4Xvf+54BwLjwwguNSy+91Pj4xz9uvPOd7zRcLpfxute9znj9619vvOQlLzHuvfde44//+I8NAMb27dtN33HVVVepeXrvvfcaf/Inf2IAMF7xileY3vfnf/7nBgDjd3/3d4177rnHeOMb32gMDw8bvb29xlVXXaXeVyqVjCc96UlGT0+PceONNxp/+7d/a/zJn/yJ4XK5jHe9612m79TX1tkGR0nZBP/1X/9leDwew+PxGJdeeqnx/ve/3/jOd75jEp5EuVyed2zbtm3G+vXrTcdGR0cNAMaPf/xjdew73/mOAcAIhULGwYMH1fFPfvKT84QlF+a1116rjrXbbeOKK64w/H6/MT09rY7rC+lNb3qTMTQ0ZMzMzJjO6XWve52RSCTUNbz85S83zjvvvBOMjjUAGNdcc43pGJXUs571LKPZbKrjU1NTht/vN170oheZFPE999xjADD+4R/+QR277LLLDADG/fffr449/vjjBgDD7XYbP/nJT9RxjicFbyf89V//tQFACXzDMIxKpWJs2bLFNO7tdtvYtGmTsW3bNqPdbqv3lstlY926dcYLX/hCdYxK6o1vfKPpt37v937P6OnpUf8fOHDA8Hg8xu23325636OPPmp4vV7TcV773/7t3867Bqt599a3vtUIh8NGtVpVx6644gpjdHR03nt1JWUYhnHppZcaF110kel9P/vZzwwAxj/+4z+e8phYgUrq/PPPN62nP/iDPzBcLpfxkpe8xPT+Sy+91HT+v/zlLw0Axp/+6Z+a3vfe977XAGD8z//8j2EYx+fYFVdcYTrPG2+80QBgUlK33nqrEYlEjF27dpm+8wMf+IDh8XiMQ4cOqWNnu5Jy6D6b4IUvfCEefPBBvOxlL8OvfvUrfPSjH8W2bduwatUqfOMb3zC9NxQKqb9zuRxmZmZw2WWXYd++fcjlcqb3bt26FZdeeqn6/5JLLgEAPP/5z8eaNWvmHd+3b9+8c3vHO96h/na5XHjHO96Ber3ekeYyDANf/epX8dKXvhSGYWBmZkY9tm3bhlwuh4cffhgAkEwmceTIEfz85z8/qXE6Wbz5zW+Gx+NR/3/3u99FvV7HddddB7fbbXpfPB7HN7/5TdPno9EoXve616n/N2/ejGQyiXPPPVeNFbDwuEl8+9vfxqpVq/Cyl71MHQsGg3jzm99set8vf/lL7N69G69//esxOzurxq1UKuEFL3gBfvjDH86jg/6//+//M/3/7Gc/G7Ozs8jn8wCO0brtdhuvec1rTPdicHAQmzZtwve+9z3T5wOBAN7whjfMuwY57wqFAmZmZvDsZz8b5XIZjz/++ILX3wmvfe1r8dBDD2Hv3r3q2Je//GUEAgG8/OUvP+0xscKf/MmfmJI2LrnkEhiGgTe+8Y2m911yySU4fPgwms0mAOA///M/AQDXX3+96X3vec97AEDNHc6xa6+9VtHPAOYl
kADAV77yFTz72c9GKpUy3ZPLL78crVYLP/zhD094PWcLnMQJG+Hiiy/G1772NdTrdfzqV7/C17/+ddx555149atfjV/+8pfYunUrAOBHP/oRbrrpJjz44IPzYg+5XA6JREL9LxURAPXayMiI5XEZAwAAt9uN9evXm46dc845AI7FGKwwPT2NbDaLT33qU/jUpz5l+R4mg/zZn/0Zvvvd7+JpT3saNm7ciBe96EV4/etfj2c+85mWnztZrFu3zvQ/4x6bN282Hff7/Vi/fv28uMjq1atNggY4NkYnO246Dh48iA0bNsz7zo0bN5r+3717NwDgqquu6vhduVwOqVRK/a/fY76WyWQQj8exe/duGIaBTZs2WX6fnm23atUq+P3+ee/7zW9+gz//8z/H//zP/ygFKM/pdPD7v//7uP766/HlL38ZN954IwzDwFe+8hW85CUvQTweB3B6Y2KFU1kL7XYbuVwOPT09OHjwINxu97x7NTg4iGQyqeYOn/Vx7uvrm3duu3fvxiOPPIK+vj7Lc3WSpY7DUVI2hN/vx8UXX4yLL74Y55xzDt7whjfgK1/5Cm666Sbs3bsXL3jBC7BlyxZ87GMfw8jICPx+P/7zP/8Td9555zyLUnoTJ3PcOImEiBOB5/BHf/RHHQXLk570JADAueeei507d+I//uM/8O1vfxtf/epXcd999+FDH/oQtm/fftrnIK3+00E3xg04PnZ/+Zd/iQsvvNDyPdFo9JTOqd1uw+Vy4Vvf+pble/Xvsxq7bDaLyy67DPF4HLfccgs2bNiAYDCIhx9+GH/2Z3922sH+4eFhPPvZz8Y///M/48Ybb8RPfvITHDp0CB/5yEfUe05nTKzwRO+pbmA8EbTbbbzwhS/E+9//fsvXaQg6cJSU7fHUpz4VADA+Pg4A+Pd//3fUajV84xvfMFmGOmWzWGi329i3b59p0ezatQsAVNaWjr6+PsRiMbRaLVx++eUn/I1IJILXvva1eO1rX4t6vY5XvvKVuP3223HDDTcsWsry6OgoAGDnzp0mz7Ber2P//v0ndZ5P9Pd37NgBwzBMwm7Pnj2m923YsAEAEI/HF+2cNmzYAMMwsG7dutMWft///vcxOzuLr33ta3jOc56jju/fv3/ee09VmL/2ta/F29/+duzcuRNf/vKXEQ6H8dKXvtR0/sDijsmpYHR0FO12G7t378a5556rjk9OTiKbzaq5xefdu3eb5tj09PQ8T3vDhg0oFotduZ6VBicmZRN873vfs7TGyYeTpqLVJ9+by+Xwmc98ZsnO7Z577lF/G4aBe+65Bz6fDy94wQss3+/xePCqV70KX/3qV/HrX/963uvT09Pq79nZWdNrfr8fW7duhWEYaDQai3QFwOWXXw6/34+Pf/zjprH7+7//e+RyOVxxxRWL9ltW2LZtG8bGxkzxxWq1ik9/+tOm91100UXYsGED/uqv/grFYnHe98ixO1m88pWvhMfjwfbt2+fNMcMw5t0DK1jNu3q9jvvuu2/eeyORyCnRf6961avg8XjwT//0T/jKV76CK6+80lTXthRjcir43d/9XQCY10XjYx/7GACouXP55ZfD5/Ph7rvvNo2TVfeN17zmNXjwwQfxne98Z95r2WxWxcMcOJ6UbXDttdeiXC7j937v97BlyxbU63X8+Mc/xpe//GWsXbtWBbJf9KIXwe/346UvfSne+ta3olgs4tOf/jT6+/uVt7WYCAaD+Pa3v42rrroKl1xyCb71rW/hm9/8Jm688caOfDoAfPjDH8b3vvc9XHLJJXjzm9+MrVu3Ym5uDg8//DC++93vYm5uTl3P4OAgnvnMZ2JgYACPPfYY7rnnHlxxxRWIxWKLdh19fX244YYbsH37drz4xS/Gy172MuzcuRP33XcfLr74YvzRH/3Rov2WFd761rfinnvuwR/8wR/gXe96F4aGhvDFL35ReYr0PtxuN/7u7/4OL3nJS3DeeefhDW94A1atWoWxsTF873vf
Qzwex7//+7+f0m9v2LABt912G2644QYcOHAAr3jFKxCLxbB//358/etfx1ve8pYTdu14xjOegVQqhauuugrvfOc74XK58PnPf97SsLrooovw5S9/Gddffz0uvvhiRKNRk2eko7+/H8973vPwsY99DIVCAa997WtNry/FmJwKfud3fgdXXXUVPvWpTyna82c/+xk+97nP4RWveAWe97znATg2x9773vfijjvuwJVXXonf/d3fxS9+8Qt861vfQm9vr+k73/e+9+Eb3/gGrrzySlx99dW46KKLUCqV8Oijj+Jf/uVfcODAgXmfOWuxrLmEDjriW9/6lvHGN77R2LJlixGNRg2/329s3LjRuPbaa43JyUnTe7/xjW8YT3rSk4xgMGisXbvW+MhHPmL8wz/8gwHA2L9/v3rf6OioqdaEgEXqNtOD//Iv/1Idu+qqq4xIJGLs3bvXeNGLXmSEw2FjYGDAuOmmm0xp3PxOPU12cnLSuOaaa4yRkRHD5/MZg4ODxgte8ALjU5/6lHrPJz/5SeM5z3mO0dPTYwQCAWPDhg3G+973PiOXy51wzKyugynoP//5zy0/c8899xhbtmwxfD6fMTAwYLztbW8z1QkZxrE0bKu0+FMZTyvs27fPuOKKK4xQKGT09fUZ73nPe4yvfvWrBgBTWrthGMYvfvEL45WvfKUal9HRUeM1r3mN8cADD6j3MAVdlgLIMZBzwTAM46tf/arxrGc9y4hEIkYkEjG2bNliXHPNNcbOnTtPeO2GYRg/+tGPjKc//elGKBQyhoeHVZkEtNKFYrFovP71rzeSyaQBQKVzW6WgE5/+9KcNAEYsFjMqlYrl75/MmFiBKehf+cpXLMdJnytW49poNIzt27cb69atM3w+nzEyMmLccMMNptR7wzCMVqtlbN++3RgaGjJCoZDx3Oc+1/j1r39tjI6OmlLQDcMwCoWCccMNNxgbN240/H6/0dvbazzjGc8w/uqv/sqUKm+1ts4muAxjkSK+Ds44XH311fiXf/kXS4rFweLgrrvuwrvf/W4cOXIEq1at6vbpOHBgOzgxKQcOlgmVSsX0f7VaxSc/+Uls2rTJUVAOHHSAE5Ny4GCZ8MpXvhJr1qzBhRdeiFwuhy984Qt4/PHH8cUvfrHbp+bAgW3hKCkHDpYJ27Ztw9/93d/hi1/8IlqtFrZu3YovfelL8xIFHDhwcBxOTMqBAwcOHNgWXYtJ3XvvvVi7di2CwSAuueQS/OxnP+vWqThw4MCBA5uiK0qKNRQ33XQTHn74YfzO7/wOtm3b5vSrcuDAgQMHJnSF7rvkkktw8cUXq04G7XYbIyMjuPbaa/GBD3zghJ9vt9s4evQoYrHYovbTcuDAgQMHywPDMFAoFDA8PGzamUDHsidO1Ot1PPTQQ7jhhhvUMbfbjcsvvxwPPvig5WdqtZppB8yxsTHVEdyBAwcOHKxcHD582LR7to5lV1IzMzNotVoYGBgwHR8YGOi4J80dd9xh2RH78OHDqp2/AwcOHDhYOcjn8xgZGTlh+7MVkYJ+ww03mDYc48XF43FHSTlw4MDBCsaJQjbLrqR6e3vh8XgwOTlpOj45OYnBwUHLzwQCAQQCgQW/d2ZmBjt27JhX1X+2Ih6PY+vWraYNEGdnZ7Fjx455GyWerYjFYti6dSuSyaQ6Njc3hx07dqBUKnXvxGyEaDSKrVu3mjbty2Qy2LFjh9Mu67eIRCLYunUr0um0OpbNZrFjxw4UCoUunpl9EA6HsXXrVvT09JzyZ5ddSfn9flx00UV44IEH8IpXvALAsUSIBx54wLRN+anisccewwc/+EEcOXJkkc50ZWPr1q24/fbb1eaCAPD444/jpptumrcL7dmKzZs347bbbsNTnvIUdWz37t3Yvn37CbeDP1uwceNG3Hbbbbj44ovVsb179+LWW29VO+ae7Vi3bh1uv/12XHLJJerYgQMHcPvtt3cMYZxtGB0dxa233npaO253he67/vrrcdVV
V+GpT30qnva0p+Guu+5CqVRS21GcDsrlMg4fPmy5CdvZiEQiYUo2AY71jjt8+LAjgH+LSCTijNEJEAgEUK1WTceq1SqOHDnijNFv4fV6LfsyOmN0HG63+7RZrq4oqde+9rWYnp7Ghz70IUxMTODCCy/Et7/97XnJFA4cOHDg4OxG1xIn3vGOdzwhes+BAwcOHJz5WBHZfQ4cOHDg4HgmnMvlgtvttsyMc7lcMAzD9AAw73mlwFFSDhw4cGBzSOXEh9/vh9frNSkidm5ot9tot9swDAOtVku9p91uA1hZispRUg4cOHBgY+jek9frhdvtRigUgt/vh8vlgsfjAQCljJrNJhqNBtrtNur1OtrtNlqtlvouR0k5cHCSkHTFSlo4DhwsNaiUgGMektvtRjAYRDqdRiAQQG9vL2KxGKLRKHp7ewEAuVwO1WoVhUIBmUwGtVoNMzMzqFarqNfrKpuVlOBKgKOkHCwrFuLQV6KV58DBUkB6T8BxJeXz+RCNRhEKhdDb24tUKoVUKqV6301NTaFUKmFubg7AsdKcQqGAVqtl8qRWEhwl5WBJoS828ukSVFBWyslRWAujk9CRx/Wx7fT3ckM/R/lslRQgA/+dkgLOBMikCFJ7wWAQwWAQ8Xgcg4ODCIfDGB0dRV9fH4LBIKLRKFqtFkKhEFqtFoLBIPx+P5rNJvx+PxqNBur1+oryoAhHSTlYElgFeuXDKsArwf/PZu/qZKzeToJeF/pSoOv/d2Ns9XPUlROFszw/PjO+wkSAM0lRcW1Iz8nj8SAejyORSCCZTGL16tWIRqPYtGkTVq1ahVarhUajgUajgUgkAsMwUKlUEAwG0W63lZKq1WqWRqLd4SgpB4sKK+VEwePxeODxeDoqKT7Lh8xGOpOEEdHJ05Rj1EmgdzIA5PfJsdXHudVqLXu2l1UKNZ85R5i55vF4LO+7TArgNRJnytyQ4+H1euHz+dSz3++H3++HYRio1WpoNBool8toNBrI5/MolUooFosolUqoVCqo1+toNptotVqm7wdWxng5SsrBKaNTXEk+MwuJAofZSKFQyFJJtdttNBoNGIaBRqOBVqulhJEUqFZUz0pCJwVD4UxB5Ha71bhRucvMLqn4+X7pfQBAq9VCpVJBs9lEvV5HvV5Hq9VCrVZDq9VSwXRmgwFLJ7R05eR2uxEIBNS18cFjzFyjlwBAZbAVi0Xk83nTtXAOLeU1LDX0NeTxeBAOh+H3+xEOhxEKhRAOh5FIJBAOh00KaWZmBvV6HTMzMyiXy8jn85ibm0Oj0UCxWESj0UCz2ZzHUAD2Hy9HSS0h9IWpHyOsiu1WkuegKy2dsvB6vUoAhUIhk+VM2qbVasHj8aDdbsPlcqHZbCr+nAJIWn8rkVu38pqksqFiohcRDAZNlI/H4zGlHHNsqcx0iqzVasHn86HRaKBarcLr9SpBReHvdrvV2C71eMp5QeVKT4GPYDAIj8eDSCSCQCCgDBXgOBXYbDZRqVTgcrlUnMWK3lzpkEYL1xDHyePxoFarodlsIp/PY3Z2FrVaDXNzc6hUKigUCsjn88pAkV4zvxtYGePkKKlFgrSKKSwSiQQikQiSySRGRkYQDAaRTCaVoPZ4PMrabTQaKBQKKBQKKJVKOHLkCMrlsnLbJfXVzWvU/5eeEwCT8AwEAvB6vYhEIvD7/YjFYmrrEF5Ls9lEu91Wi4l1Ha1WC/V6HZVKxTRGsjBxpSw0PW4k4y4syKTFHIvFkEwm4ff7kUgk4PP5EIlE1Jyh18TvoeICjnulFOycU41GQ1E/1WoVc3NzyoNi1pekgp7oeFpRkjxnJgD4fD7EYjH1HA6H1ZzhNbFQlR4050U4HEYwGESlUsH09DSq1apSvnJu2H1edIIeO5RxNzZA9nq9qFar6t5mMhl1v2u1GqrVKqrVqil+J+8L7/dKGCdHSS0SpJXo9/vh8/nQ19eHvr4+rFmzBk9/+tORSCQwMjKCVCqlLKJms6msn4mJCYyP
j2NmZgY/+9nPVBopJ5tdKK6F4k608vx+P0KhEHw+HxKJhFLQ6XRaUXoUqOTLeZ1URrVaDR6Px6ScgPkxCLsutE5JDRwrn8+nsrBSqRRCoRD6+vowODio/g4Gg0ilUojFYsqS1g0in88HwzBQLpfRbDZRrVZNMYp6vY5CoaAMHpfLhUqloh48r8Uaw07xSLfbjUgkgng8jkAggGQyqep9UqmUmjdUvvI72u22EsCkAkn7kRKmQLfrfDhV6NQ2EyKOHj0KwzBQKpVQq9VQq9VQLBZN9K5ueADH09j177T7eDlK6jTBBURqhpk3gUAAqVQKgUAAAwMDSKfT6O/vx6pVqxCNRpXAobfBCcVMHJfLhVAohNnZWaRSKZPwqVQqlplwdoKMsfh8PgQCAVXXEY1GEQ6HVQyk3W6jXC6brDoZn2FmEoUxKQsr2s9uC62TguL1kf6MRqMIBALo6elBJBJBX18fent7lRD3+/3zPCn5nZxHhmEozxU4psg9Ho/aZoPevX5eVvHFJ3rdVEhWiRDhcBixWExdO+cIjbZAIGCKq0nBGgqF4PV6lXEDAKFQCM1mE7VazTKRYiVBsgPAccOB18vsPI5HpVJBrVZDvV5XhpyM3RLSKOL4kbVYCWPlKKnTACeKz+dTFvCFF16IJz/5yUin09i6dSui0agSzj6fD6FQyBTk5vcYhoFEIoFWq4Xh4WFUq1WUSiWsXbsWuVwO//3f/41KpYJSqaSCn3ZVVLo3GYlEEA6HMTw8jHg8jkgkgkgkAuA4zTc1NYVarWaisDg+9BJIXQFQCs4qjmcHRWVFiQLHhS0TBvx+P5LJJAYGBhCNRrFhwwakUimk02n09fXB5/MhHA4r70IqaypwXcCQ9qtWq/D7/SpmUa1WUalUlPKQ1DEpZyqsJzJ+/A6fz2eKN1E5kV0YGBhQ1+f1ehEKhRQFGI1GVayp2WyqOeByudTcicfjiMViyOVyyOVy8Pv9yOfz8xJs7LpOTgSdlnO73SiVSqjX6wBg8qqYOCITjHjNUjm53W6Ew2GEw2FFC3IN8v7bdawcJXWK4E2XgiYSiWBwcBCrV69Gb28v1q1bh1gshmAwqLa9161WKVwpOGhJ+nw+9Pf3K4osHA6ryboSIJU4aT/GETgeFI608NvttulZHy+Z4bbY1v9SwSpRQnpSzNoiBZZIJBCPxxGNRpWnJbMkpQLn90kKWB83fb7JTMqlFOCSpuO1yvRpznF6V7Iswcq7kwk4brdbZX7W63UEAgHliXEd2VngniwkW6ArEh6TqeUyVis9MnkfKLc4B8lK2H28HCV1kiBl4fV6sXnzZjzlKU9BPB7Hhg0bkEgkMDQ0pGIJqVRKLUDCKnXaKsbC3yF1uGbNGmzevBkTExOYnZ01Tcblhk5HSOiCKRKJoL+/H7FYDENDQyoJwOfzodlsqlYt9LiYiUalTWEkEygohEjt2Lk+xkox0bugwu7p6cGaNWsQi8WwevVqRe+xPkgmzFB501uiV00FAByfY8zsqtfryGazqn/b7OwsKpWK6ukmLfEnqrSkkpHnLIWmy+VSsUcaXVTAvKZyuWy6FjIS9LiksvN4PMrrZCyT1BexEr0pqYhIa8pYEu8XlbWVbOH90JNy4vG4SrhgjRU9NLvCUVInCXpQfr8fo6OjeM5znoPe3l6cf/75SKfTilPne4mFMnXks7SGvV6v4uv7+/uxevVqNJtNU2aXXSEbYSaTScRiMaRSKSSTSbVwarWaCuAz7ZhCyzAM5TFxIZIupYDT6VL9bzvAKrtNxl2CwSBisRj6+voQj8fR09ODRCJhspxJ17HGid4VACVgpHVM+iaXyyGTyaBer6vEgtnZWZWmTOqI3y8posWg+wCYFJR8jUaHYRgqvsb7zWsmqyBrqTgHpPdFqtwwDBSLRRQKBTV/JKVpt7lxMpCKSi+6tvKIdcNVGoz0Num1u91uFItFNVZ2h6OkTgBmYIVCIZx33nkYGBjABRdcgJGREcTj
cRVr0l3xdrutqr1rtZoqPmSAMxgMIhKJqBRtKiB90jDATP7e4/Eo68oukAuCdE4kElGWGwP/fC+TQ+S1yFRZKbQYMA4EAqjVavNoHSoz6eXZQSBZBcFlwgQp0EgkopQ0LVw+WOdChSQTJziP6FkAUAonn8+rlGQWcmazWRSLRdTrdfU7enzziY6bbnRJQcq6N3boZtIQlbbf7zd5DoxRMfuTigeAKXs0mUyqBBGmY0vvQMYvF+Malxr6uXJNMHFIKn35Pqvr4vrgmmTSCpU/k2vstG6s4CipEyAYDKK/vx99fX14/etfj4svvhjxeBy9vb2mRAjeYJkGOjk5ibm5OczOzmLPnj0qgF2v19Hf34/R0VFEIhGsW7cO8XhcKUNphUejUfT39yObzSruXdIZ3UAnAUyFGw6HkU6nMTg4qDypWCymBBYFUqPRUJ4Uv0dmhNGLKJfLqraHlBkAU7af7lXxPLsJPTZATzwajSKRSKhEiUAgMK+lDSk6Kigrw0RSo4ZxvFMHlRQVHOck64mYJcqYhpUlfjrXynshn5m5yfNoNpsolUqm7hKkgWXcjV0VmGItszhlcevIyIhKFAkGg8jlcmrO8Lwo7OW5rgQwSUgaOHrMUTcw9FgUadJIJIJUKqXKGvL5vBpLq++zExwl1QGkZmKxGAYGBtSjv79fZSNJaoOUTL1eVwtlfHxcKamJiQlVYEfqjoquk4XE/6W1002cTDyKBaq03vg3U1+pWJg2rQsoeb1SGUnBJLsknGhcOlGvywkpfOlx8lp4rZwD9HRYkEklpRsmupFAJdVsNlEul1VqP5UDv0dPU14sL0p+h1R6smibrzG+BsBE68pEC16zTAzg33KucR1GIhFEo1E0m02Ew2EAmOctriSPCpjfr/JkjDDdMNLnnv5wEidWKDweDwYHB5FOp3H++efjyiuvRE9PDzZv3oxEImGi98jr7927F3v37sXc3Bx+/etfI5/PI5vNolAooF6vo1gsKi+CVEd/f78SuuTbpVdGS4o91kh92Km2gZOcyikej6ui3XQ6rdLOmfpKYcv4nWyTxHGlt8TgLhNJSInlcjlVDKx3oJD0lZXCB5a3marevojxAXoRvA563zLpgdQVjRupuDnuMt2a31EsFpHNZlX8hx4slRO9p6Xodcc5K7uIuFzHUsp1Rc3sRZmuLulzlibE43E0Gg14PB5UKhVEo1FFW7F8w+12q7R0v9+PUqmEw4cPY2ZmBo1GQ3mPHG95/XaDjKXJ+0wF3amwX8oNGa+S32HVw6+Td2YXOEpKAy2PeDyO/v5+bNiwAc94xjMULSNjRjKjaGpqCrt27cLExAR+9KMfYW5uDsViUdEOwDHlx9T0vr4+xaNTiFl5TLQgaUXaKVvJKs2V3Ld8MOVYLh6mwNKjkl0GqMhlTRCpUPldukfVKYliOZWTFApybKzSsaUHRUOkXC6jVCopAU/KjpSXTM7h2MjCaHpSbCclu4VL72YxKL5O0Ocor1O36ulRUQFJZcXuI/QmAShFw0QRGne8DhoClUoFxWIRuVwOpVJJ/Yaeqm1HgUzoHq6VV9XJk+oEzhW9E4Xd4SgpAaYFRyIRPO1pT8O5556LjRs3qgQHCh62ncnlcnjooYcwPT2NvXv3Yt++fcjlcqoTMVOlZVLB8PAwent7sWnTJpxzzjlIpVKIx+OmzD3SPmyBsmPHDoyNjanUXTstLFr3zFhLJpPo6+tDT08Pksmkqo3Sm5tKKqvZbKoxIg0kLXIpgPgAYCpuBGASxlYCSMY1lmoMdRqOBghpTabc01jRqRlSWHKs6G3I75ZjI70WPvi6rpD0x1JBNxhkuyI+eN9572XsSE+z5uuSwpLv4/czUYAbAbI/oIzvUFB3KgrvNnRaUjdcZeIQ32MVk+Lf7OXIjD62w6KXzbG0kwEs4SgpgXA4jPXr16O3txfbtm3Ds5/9bJUqzEViGMfqVyYnJ3Ho0CF88YtfxGOPPYZ8Po9CoWCioAgK
n1AohPXr12P9+vU477zz8JSnPEVl3HAhAVA0Yrlcxt69e/GTn/xE9V3Ts5W6CT02wLY+w8PDGBwcRF9fn/KKSPlQWLDhKYWFLNaUHhfHklmW9KiYyg4cX6T6otWV0XJZz3qMjgpKpp6nUilEIpF5HpdMt2dGVzAYnLdlSblcNo2npAXpoVspKFlfttSKir8hn+n9WcXoeC28TnqCpArlOHGeSO8IgNrmw+VyIR6Pq3owl8uFRqOh5qHs96crbTusLyvlJKm5TuyArqQMwzDVRDFjlLKE90L/PTuMAeEoKRy/oeyh1t/frzqYS8FJAZHNZnHw4EEcPnwYs7OzyOVyqjBSgpRVOBxGT08PotEohoaGMDQ0hHQ6regrvQiyXq9jbm4OmUwG2WwW5XLZdk1mAXM2HoWxpOX0rSMkdcWeYzJDTxasys7NVlSZpAyZfKIXReuKaznpHStFxZgU99Vic1kASlHzXHn9Ml4nqT2Z7UeFpAfD9fiFrrC7DSsPgNCFMXCc+qbiIuPA2Bsf9M5pHHLsSftJAa7H6OxIA3ai/k72HGWWJf8nOEf1tWInOEoKx1vu9PX14dJLL8XIyAhGR0fnZfDl83lUq1U8+OCD+PKXv4y5uTns27dP1UBJoep2u1Ux65o1a3DppZeip6cHT3rSk7Bq1Sq1bYVs9cMY1/T0NH7wgx9gbGwMjz76KKanp+cVXXYTkpqiF8WaKKnkKRhkken4+DgqlYrapE1ej2z7w+4C7ChPCoxUmc/nM8VqSONIYaOf81IHyqVg5ZxiPRRjnNFoFKtWrcLQ0JCipAAo6oUxKdlfjeMiG6lGIhGVEOD1epXSl/NIUmFWXlU3IAWtlbCUnhXvPePA7Pote9VRQTMOx61IeJz1hay7i0QiKhmJ3gS3M1msoubFgjwHqWikV6wbY1Z0HxUzlbVMwPH7/fNi3nZTVo6SwvG4SigUwuDgIIaGhlT/NIIeDrfU2LFjh9r+QKYH6wKKXQU2bNiA3t5erF27FoODg6bguYypMPB99OhRHDx4EHNzc8qLsks2kpUwlnQWkyWkh0hrV+5rRKEqv5cZb+wsoAsuLjQmXTBeIRWcvnCB5V1wusDlOTORhOnSHC8AKkVfxqFksoRsBMtnUmTcboPNeGU/RAk70VpWRoR8SO9ZClvSoLx2qaQYB2aWo2EYpg7x0uvmOLKThxTQdhHOEjplu5AXavVZPeuV3hPH+WRLOroBR0kByqJNJpPqEQgETMqDrWXm5uZQKBSU4KG3wP/j8bhqMLtu3ToMDQ1hYGAAW7duVRsg6u2NOOEY5zpw4AAef/xxHD58GHNzc7YIaOoWmqT5WPmfTCZV53e3260SPbLZLKrVKo4ePYrx8XHVGaDVapk8SUk7SMVHytQwDEWRkc6RwXjgeFAZsO6XuNRjJBUTi5sTiQR6enowPDys6u56e3tNWW4y0YHKRW67QVqLHgSTA0gfMm5TLBbh8XhU+rae3Wcn8Lx4/UwuYp85tgajEuYY0RCS4yLpvnw+j3a7rWhn9pKU9KBhGMr4k/HgpagfeyKQ5yUpO7kerRQ+k5kY4+RaCgQCpuQUdvWgwpJ0p10U9lmvpFyuY7uFJhIJtScUqSrguBXSaDQwMzOD8fFx1dZFKqloNIpIJIKhoSE861nPQn9/P84//3ysX78egUBAJV/IDDZ+P39jcnISjz76qFJSR48enRdr6SZ0S1fGoaikuMsqlVStVsPU1BSKxSLGx8cxMTFhop8ozLmwuDj43bSAmWpMJcXUfcJKSS2nwNE9ACZLsMN5Op1WW5b09/ejt7dXKR3ZBUJXUi6XS3mdfD+AeQXlpAvZYokdBZg0oI9TNyEFoEx+AGDKzEsmk2psWP8l+xWyNkzPBGTZR09Pj5pf9Fhl5wt6r2wrJePOOh3ZLehKinMMmN+7Uj5LhU9jhg96pM1mU8V/ZQKF3TzKs15JATClP/Mm6Tw+cNzjGhoawrnnnqu6ArRaLUXjcCfe
dDqttoonry69J+D4ouCOoxMTEzhw4ACOHj1qSpSwG6wSJrggZNBf1pFRyOrfw++gNyqVv0z7lx6RVNydKAp93JZSUVnFA/S0c8ZXSPHSe2CBM9OCGVuRwpmxN37G5Tq+iSDHiwq80WhYbomij1G355Uem6KiovAktc6/2dqpXq/D6/WqeKbsiG8Yhno/ADXvAKgxkIaEVFL8rExtt1tshspdZzWkYUYZI+cf1xX/1sfebglZOs56JSW9AtIjnNz0poBjN5+B74GBAVx00UWmSSOz2rjlguwiIbOugONCt1qtYv/+/ZidncX//u//4t///d9RKpWQyWRslyihxwpkZ+VEIoFYLAbDMJQl63Id6xpQKBRUkJoxAiosWneM2XAr9Xg8rn5PdkrQt2PQs9kWWmzLsRApILgdBxNKYrGY8jLpAbB4t1arYXJyUsXreIy9+xjYJqh4mAhAWiyVSilFVSgUVBKGThXZDbw2dpRotVqYm5tTrZ14/blcTtGYsmUWwTgTY3nMAqUXpRtVTM4h7cXv01tHdQtyvuqp4lJRSY+PcozxcHrz0qNyu93I5XKmbEnZtkrCDsbNWa+kJKQg5ESVE4LWKfeMokByu91qAsh2L1ZWD0FBzQ7VMzMzmJqawuTkpKmOo9vQJ6lMCpAWKYP70pMCjvdPk7y3LCCU1Bg5dFp+wPyO2p16z/G98nk5oQf+5bjwwTnB+y7781Gh0KNit3B66pLqYcypXq8ryk8KZXpt8jftRvkBnb0p6Q1xTdKj5FYtVFKS7uLY0tukl0qlDhynUPU5LFP47UDzSXSa17ynkvaWbbikJ8X5IEtqOiViyO+3w1w565UULf/Z2VlMTk7iwIEDynplBhktMQbxJS/MSaFnInXKvOHxUqmEubk5TE1N4b/+67+we/du1SndLh6UhPSkJM8vt/5mZl65XJ5H99FSo3dK4cmtPGjx6VtP8PPM3uKDHeVJhekdFpYz5drKy5T0MVOe8/k8arUaisUi3O5jW4Jns1nVWaRYLColJVPQef40imgQML6SSqXU/WCM0ErR2xEy9sMel+z0zyxOKjCOi1U2mlXshv0OOfe4TxsTb/T5IZXXQokJywmel4Qe1yZcLpeSUeyf6fP5VEcbGa+yotHlPDmZLL/lGhdHSYksn7m5OUxMTACA2ucnGAyaYib0knij9TgTv1PPkuFxPrNW6MiRI/jpT3+KX/7yl8o6tisoFGRSAxMZuHAokClcZDdr6XlSqcnWNUy40LPaqIhkRwV6IPQ6ZU+2TgtvKcekk6LidbAtjYyh5PN5TE9PKyVFz0l24pBWvfTOmJXFRAkGygEoj14WB8tnu0AqFgAqNufxeBQ1zPe1222llKUg1Vv6yMQTWTcGQHkUkkKWY0IPWCpAuY67Cf2arQq3SfP5fD5Eo1HE43GVKcnyBCp5fudCCsrq2vVkjeUYl7NeSQHHra9isYi9e/cim80qGo4xKElHyRonPrg42DGBz1xoBBfHxMQEHn74YRw9elQVH9o5bkBYKWRZb1KtVgGYG+PSk5K1KHoNkfQQZPCaSop0GLc60QsQrTyobowNx4fnRG+Qe2J5PB41HsViUe2WK7uWyCQJaeToxaZWAks/D10AdVvYErrxxgfXgL5/lry/fI80EuV9p1JiIgQTTRhzphK0opC7HYfqBJ1Klinmch7QqGYs1OfzIRaLqYJvPvTv5Xq0YiGkMtLvm5XHudjj5ygpmBXH97//fQSDQYyOjmJgYACDg4M499xzVRyKyRHsD8abQ2Hk9/tVx3RZEMz30fL/9a9/jX/8x3/E7Oysah5rx8UBdG5Zw0nMXmAUHuwTxqwsaQEzLiWD15FIBB6PR8VXZKYWO8nPzc0hm82ahLlsgWPlSQHzu0cv9pjonhTHRcadSE/m83kYhqE8qnK5rHqpsYO5bHxqZeXL7VpkqyhpDEkqiELYjsJXno9ULLyX0mjjfJBzg2Mi56L8nPwMMyCllyrjX7Ip
rzQArAR1N6DHOn0+n6pL1DNk2T1naGhI0egej0f1/2T5Bq+LMoqZf3rMt5PCkp4m/6YRSizGmDlKCscHkg0Yq9UqotGomqg9PT0Ih8NoNpuWSgo4bjmHw2EkEglTirT8HdI52WwW09PTyGazqsZlJUEXCqRWONEpoEl1yfGgopKJFzo9Rk+CAsRKKenps8tB70l0SorRr0E2Nm2320pJMZOP4yfrfXT6RQoCwJxwwrGTn+lURmF3kI3otH7067AySOT/+hhYjclyz5vThe710NAj1et2uxXVy/6Q8j1UZJImlEkoTLzhb0mvaiH51MmDXyw4SgrHJynjKfV6HUeOHMHMzAwOHTqEPXv2mCq3GTehUPZ4POjt7VXWy+DgoHK75U2rVqt45JFHcODAAfzqV7/C3NycSs1eKZB0C70BWlO02hgvYZo4J6+k9phyzhRqjimFONPwp6enUSqVVCPffD6vvI6FdpollkrwWHlQ/D0Z5G+32ypGJzc4pMKl4lqo76AEBVJPTw9WrVqFgYEBpNNpeDwe1RmdXRfs2pOO0C1zCkKOgV60yuOy5lB+Vqf7rK7Vaoyt7qVueHR73KQHxZINPgeDQUXtJRIJ1QGmt7d3nvyhUuLnXa5j5TPSg5Jd9HmMRpRO0Uro475YHuiiK6k77rgDX/va1/D4448jFArhGc94Bj7ykY9g8+bN6j3VahXvec978KUvfQm1Wg3btm3Dfffdh4GBgcU+nVMCBQlwvCmj2+3G2NiYErKSSqHl4vV6sX79emzYsEFZtDL9VyrBsbExPP744zhy5IiKsXR7AZwq6NJTyAJQSoY76ZLukuPGXVc5PrIzOMeaFAy9DNb7UHFRwNObkh4V0FkwLQU6KSnOIxlfkbQn6VDpNUnBKT0oHRQybLEVj8dVwolMImFiiV2pPqKTouLfVoqCn9GPnazX2Ok9nRRUN6EnRliVNrBeMRAIqN2w4/E4EokEACgFIzfM5PrjmgOOjyEpQEmLyjgg36t7o5JCXGg9nioWXUn94Ac/wDXXXIOLL74YzWYTN954I170ohdhx44diEQiAIB3v/vd+OY3v4mvfOUrSCQSeMc73oFXvvKV+NGPfrTYp3PKkIPMG0JPR9b3AMcpFwAYGBjABRdcoJrT0v0GoCzrXC6H/fv34ze/+Q3GxsZWjAclBYm0+qlguQiYOMGCXY6hzDZjrRkpU44Tv6fRaCCXy2Fubg6zs7OYmZlBpVJRBcG08hbynvRzX2xYCQ4ZvJaGiVRGkv7spFzl+Uq+n2PHOERPTw96e3tVBhfvB5U542B6rM6OsLK4ZYadVBiSXteP0wAi9G4oUrDT0Om0P5sugLsNricqDj1DUfeQqIBkcggTKyKRiCr8lnE44Ni4BoNBNT6k19mSSq45qbhcruMNe3lvFmvcFl1Jffvb3zb9/9nPfhb9/f146KGH8JznPAe5XA5///d/j/vvvx/Pf/7zAQCf+cxncO655+InP/kJnv70py/2KZ0ydOtMr2rnM2tTXC4X1q5di+c+97mIxWJIp9OqJT4AJXinp6fxyCOP4H//93/VJLDDAjgZyEUr40LValVRTRwTCmoKbVb2ezwe1SiWCRP0RNkGp1qtYnp6GhMTEypFv1arqW7wjHXRM+0k3JcK8v7z+uQ1y/dIT0ku4E5JHp3A3wiHwxgYGEA8Hsfw8DBWr16tyiRYS5bJZJDL5VQ6uxQsdoYVdaTHOuQY6+Mms0L5fs4tNqSVxaxMx+5Uk2g3xS4VFNkerkHgOB3Ia5Xyh5/h+gOAoaGheXtv0TgvFovqNcaCyZLw/XIeS8UvvbLFik8teUwql8sBANLpNADgoYceQqPRwOWXX67es2XLFqxZswYPPvigpZLS0ybz+fwSn7UZVtYtcMxS4zbp3NSQHcCllVepVDA5OYmJiQmVmLESBIeEtHatqBk9Y0jWC9GKAzCPspAeGunDarVq2h+Ii4O0gx7IXS4FpUNmXEkPW3oAUiHp
lN6JhCC/gzE8uSU66R2ZkcVkDBY36wkYKw2daD0duhLj33r/RLmli5zLVvfEjmMmPRgWzss2YWzxpCeHWCVcBINBNUbSI+JnZTIOcCypQq49OX4LxakWA0uqpNrtNq677jo885nPxPnnnw8AmJiYgN/vRzKZNL13YGBAFdLquOOOO7B9+/alPNVTAm94T08Pfu/3fg/nnHMOzj33XPT19ZmEL2/q/v378aUvfQlHjx7Fnj17VpwHpQsKBlBJf1IgSu+CC0COB2Mp0WhUPWSXitnZWZRKJUxOTmJ8fByZTAYzMzNqa3RpwQFmJblc4FhIJUyLXY5XpzgL33Mi8Pvdbjfi8Tji8bgqh0gmk1i1ahVSqZRK9ikUChgfH8eRI0cwPT2Ncrls6206TgTdKJIUvE63cm7JLDZmuAUCAUQiEaTTaUWRGYahjF6plEhl202561QfY425XE5lvrpcLrW5pmzzRCNHlsLI9mV6wo9MnMjn85idnYXP51O9TNlVn/eCSk2m8C+2Ab6kSuqaa67Br3/9a/zf//3fE/qeG264Addff736P5/PY2Rk5Ime3mmDNzocDmPjxo140pOehP7+/nlp6bzxuVwOO3fuxNjYGLLZ7IpLN5fQA6X0kkgZyIUhKQQuGsYH2F+OY0RhW6lUUCqVVGyFC6ZTVwliMTnwTtDpJ91il+fE89HjjicT59AVIQVtLBZTnjs3TeRvsKUQE03sKGxPFadipUujgRm3bF5MSpSF1PRE9OQI6e3a0aPSww9UtF6vF6VSSR2r1+sqw1Z6jtI7CgaDymPiGqXxzHo8botiGIaptEQqPMmELFW5w5IpqXe84x34j//4D/zwhz/E6tWr1fHBwUHU63Vks1mTNzU5OYnBwUHL7+JkswNcLhcGBgawdu1arFmzBqtXr0Zvby8ikci8DK+xsTFMTU0pBTU1NaU6MqwkSGFhdVxSAPJBgRGPxxEKhdDT04O+vj5l7XHScxPEbDaLQqGgYir0BmRdFH/X6lyWElbxEb2YVu4Yqwe0ZbKEVcdpfr8UtEyKGBkZwdDQEPr7+zE8PKy6ntfrdeRyOWX8TExMYHJyEvl8fsUrqJPJrpOeuxw3lookk0lEIhGVKOByudScqlarCIfD82LDOi3bbcg1BpjjnNyuxOPxqD6Pk5OTqNVqSCQSioKnwci15nK5VMKXjEvJcgg+y/gqE030zFSZBagr+MUYx0VXUoZh4Nprr8XXv/51fP/738e6detMr1900UXw+Xx44IEH8KpXvQoAsHPnThw6dAiXXnrpYp/OooICamhoCE9/+tOxatUqrF27FgMDA/OCuq1WC4cOHcKOHTvwm9/8BocOHVrRXpTuIeivyT5xXFikYLhVRV9fHwYHB1UiBWkCZqVlMhnk83nVWYI1azqF0E0BogtG2UuPBZRUvrRU9aCzLhCB410iGH8KhULo6+tDJBLBhg0bsG7dOqTTaYyMjCAQCCjaJ5vN4uDBg8hkMjhy5AjGx8dVV4WVrKQ6Qc5DwOxBseOC3PIlmUwiHA4rg5gZqEzUYRxPUsh286T086GHxNgjcGwcqtUq/H6/MvA4/+QeZu12W80vfoZlHdx4k5AJFTJpg8pRGl1WGbeLNXaLrqSuueYa3H///fi3f/s3xGIxFWdikVkikcCb3vQmXH/99Uin04jH47j22mtx6aWX2iKzrxNofXi9XiQSCQwPD2NgYEAFIHWLo1qtYmpqCvv378fk5KQpPnAmQfeypJdBmoVUFfdYkrse04KrVqsolUoolUqmOih9sttBaADWBaCyJIEelewMIdvw6Km/pF2Y9RgKhdDf349wOIxUKoVoNKq68bdaLVUzxlR9dlNfaG+glQzdk9X/lrQrY1HhcFjNOdJVelNWwByPsZuCImTcTBY9u1wu09YmpP1o7HD+yZR1dpd3u90mephZs3zojZz1uJPskNJp7GzpSX3iE58AADz3uc81Hf/MZz6Dq6++GgBw5513wu12
41WvepWpmNfOcLvdSCQSCIfD2LJlC573vOchmUwqGkGme5Ku+slPfoKvf/3rqm7lTEAn6o+g5xCJRFTG45o1a5BKpTA8PKwC2LTO2FliZmYGY2NjKBQKJoG7FJbZE4Eej5IeFTc5ZDxEJpVUKhXMzs6iXq+jVCopgcCYHpMvent7MTAwgGg0irVr16pxTCaTKlZQq9Wwf/9+TE1N4ciRI3jkkUdQKpUwMzOj2iytVINIp7d0pcRnPeWfyikWiynlPjAwoMaNWaZMr5aev2y7pW/h0W1Img8w98ujESQz8hiPmp6extGjR01zkdmxXJ9MxWcCBb+T84deZqPRQCaTUWMkGyB3UlLy3J8oloTuOxGCwSDuvfde3HvvvYv980sG1vvEYjGkUin09fUhFouZ6hF4g9kpgRNFVmyfyZDCm2mu9AzoCdCL4sSWm//JvZRWgqC1yjCjt83uGhQgpPxo8coMK1lLlkgkkE6nVZIEvVC2rqHVXCgUkMlkMDc3h0wmo6irE8XuVgKsDCGrpBUrT4pUFr0o1gXp3yM9kxO11rIL5LnJDDt2d6Gy4d9MctCVFClPFvvSw2SGKpUPGQ5ZL0XDUSonq6SJxRxDp3ffCUCBm0gksG3bNmzduhVbt25FPB5XNx843tBzYmIC3/zmN1X7I31PoDMNumfBJJdYLIbe3l5V3MwO8hQMVEbcw2t6ehr5fF7RfXYVGHocSSZP0JMKBoPo6elR3TTYsqhYLKqO8UwbluPm9XqRSqXQ09Nj2gRS7qVFL3Pnzp1qm5dCoWC5r9aZBCtaWVLLpEXT6TRWr16NaDSqsiAZM2EZA9tssaO+bCFlJy9KQp6PpPn0LhT0EmUsiokTUiHxNRlD1uPK/N52u60oQamQ9JZeSxUzdpTUCcDgfyKRwLOe9Sw873nPU56Bzmu3223MzMzge9/7Hvbs2YOjR4+umNZHTwRSYMii01QqhVgshmQyiUQioVKmWXjKfZRmZmaQyWTUzrR6lwQ7CQx5LlZ0XzgcRjQaVSUJ9KyktctAtexWQSVFz8kwDEWrsCVUoVDAkSNHUCwW1fxi0oneyWKlw4r2IyTVKuOf8XgcqVQKg4ODiEajauNSUqyyzohsBw2AlaDgrWKzkgbksxU9qkMqJKLT++nBW9X7LRXFJ+EoqRMgFothzZo1GBwcRDqdVoJHwjAMVd8j29KwfcnZAAoM2aWZDxmDYpYb2/YUCgW1Z5RsuWJnYQEcp3blQ27GyGugkQNAUXahUEhRM0yY4LPX61WWP7dxYdZjsVjExMSEScA2Gg3bBvsXC3osik1V2SqK6fosdQCON4t2uVxKkdODKhQKylCQXjtgL4PoRLA614WoUqvP6t9hpaikt2T1+aUeO0dJdQBv0qpVq/DiF78Yg4ODWLdunQqM667x3Nwcjhw5gv3792NsbAzj4+NnvBclPShmVoXDYcRiMcRiMUQiESVEGKRl7Gl8fBylUglHjx5Vf3OrCStFZRfhwfNgLz4G3pnOWyqVVF1Us9k0bV/CtF896E/+n4kkVNz79u1DsVjE+Pi42maeyRfFYlG117K7B/BEIMeJ646xYXb9DgQCGBkZwfDwsDIESN9RQU1NTan5RmqZG04uVaeEpYK+Jk6mpswKejmJ1fdIpkg+W53PUsFRUh1AARwMBtHX16cyhjpVqrOtTzabVXUYK2HCnyo6LQiZhi0f7Ocn6ymYNSTjAXLTP7spJmC+hcprkoF32UtNVv3rhb26kSPLF9gotlgsqj2hcrmcqhtjXIvjdaYqp07gHJP1aexewrUpi03dbjfK5bIqb+Cck2UOK0U5dcLpnrv+OSuF1Q2lpMNRUh1AqmpgYADnnXceBgYGkEgk5sWhKJgeffRRfPOb38TExARyudyKnvQng4VSsaXAoAKnl0CPaWpqSnXtplfQSXDYZSzledCTarVairqkV5TP59FuH9ulOZfLKWs/kUiY2vbIrRByuRxqtRomJycxNjaGcrmM
o0ePqm1KuFUCCy5XCi36RNDJUyAtyngeAJTLZczNzam5KJHP51WR88TEhIkqteM8swPsNBaOkrIAF0EgEEA8Hlc7oLI3H8G4RKPRwOHDh/Hzn/9cFcedLdAVlWyMSiVF6oVeQaVSQT6fVxsb0pvSFZQdBbAMWMt9tZrNphKatVpNBfM5R4LBoKpRYTIFU3zr9Trm5uZQqVRw5MgRHDhwwETt0dOUv2nHsVkO0PPUO88zOYJBfhlHYesoJuro3T/O1rFcKXCUVAfIbsq6YgKgit2OHj2KfD6vLDS9tciZBpl1xYfs81Uul011Un6/H9FoFF6vF4VCQW1VMjc3h1qtpryoU9nIsNuQ1y6z9kht+v1+NJtN+P1+lEolTE9Pw+/3IxKJmOYVqc9ms6nSyGdnZ5Vykh0k7JwevdSQY02jp1AoqBpFn8+n5hITJmT9DulTsh5n+3iuNDhKygL0pMh1WxUAMjV4x44dOHr0KHbv3o2ZmRkViD2TIRUVFTKpr0wmg3K5rASDz+dTSordzev1OvL5vEoAKJVKSrisFOpF0kQs1K5Wq8jn80pJ6zEoPZNPJl9QsMrdYh1Bas4e47riHPJ6vahUKsqDZYcPvSOCTC7Ri1H133JgPzhKqgMogEulEsbHx1Gv11WHCaYbk0bgZoZyp8wzHXo6KgsMZdpvqVRSjWS9Xq9SXrKwUgqNlaKgCHm+erEkj+kb8clnWSxpVRh5NisnK0ijgIqcVDLHmGUOerseft6KSj7d7DgHywNHSVmAFEGj0cBDDz2EcrmMcDiMdDqNcDisBHC5XMbBgwdRKBQwMzOjBM3ZAp3yo1Bwu90oFovIZrPzPAe5DQCfqdhX8thxHKis5IZynZ6tlNFKU9TLCamkOM/YaUG+Z6E+co7yX3lwlFQHMM7AWJPf70d/fz8ikQhKpRLy+TxqtRpmZmZQrVbP6okvr122DOLeWbqAkMf0v1c6HCWz9JBdwKUxQFjdg4VSqh3YG46S6gAZa3G5XKprtd/vV1lZknZwJv8xSG9CHjvR3w4cnCpOxiCQ8VP9c53+d2AvOEqqAxi4ZnDb5XIhl8udcMI7MNMyDhwsJU5m/TlrdGXjjFFS8XgcW7duNW1JfzZj8+bN87YpiMVi2Lp1K2KxWJfOyl7YuHEjIpGI6Vg0GsXWrVvnjd3ZinXr1iEajZqORaNRbNmyRfUkPNsxOjo6b4wikQi2bNkCj8fTpbOyF0ZGRk5b7riMFWhm5PN5JBIJ5HI5xONxAEAul8PBgwdRq9W6fHb2QDgcVpvmEfl8HgcOHHDG6LcIhUJYu3atScDk83kcPHhQxdPOdgSDQaxdu9YkYAqFAg4ePIhKpdLFM7MPgsEgRkdHlSwCjqXJsyjbARAIBLB27VrTGFnJcSucMUrKgQMHDhysHJysHHd3fMWBAwcOHDjoMhwl5cCBAwcObIszJnFidnYWO3fudDjg3yIWi2HLli0mN3pubg47d+48qxrgLgQmACQSCXUsk8lg586dKJVKXTwz+4AJADIhKZvNYufOnSgWi907MRshHA5jy5YtSKVS6lgul8POnTtRKBSW/Xw61Y11E+FwGJs3b0Y6nT7lz54xSmrHjh340Ic+hCNHjnT7VGyBrVu34tZbb8WTnvQkdezxxx/Hhz70IRw8eLCLZ2YfbN68Gbfeeiue/OQnq2O7du3CTTfdhH379nXxzOyDjRs34tZbb8VTn/pUdWzv3r3Yvn07du/e3cUzsw/WrVuHW2+9FZdccok6tn//ftx66614/PHHu3hm9sHo6ChuueUWPOMZzzjlz54xSootivbv39/tU7EFYrHYvCw+jtGePXu6dFb2QigUmpfFV6lUnDES8Pl889iJSqWCQ4cOOWP0W7jd7nljVK1WnTHScLoMjhOTcuDAgQMHtoWjpBw4cODAgW1xxtB9Dhw4cODgOGQChd6BH5jf+9AOCRZWcJTUCoDeL9CBAwcOFgL315J/c2NIwLzDuN33cnOU
1BLD2VDNgQMHywWpmKiQ2D/Q6/XC6/Wa9tTibsdsqG1Hg9hRUksIWjD6Dq069J1D9desthtw4MDBwrCits5U8FqpmHw+H3w+H9xuNwKBANxuNyKRCMLhsNqHq9VqoVgsolKpoNlsqh2N7bYxpKOklghSQelKyqrYTp8UVnveOIrKgYOF0Ym5sGOB61KA8sXj8cDn88Hj8SAQCMDr9SIajSIWi6HdbqtNXfkAjm30Cthvix1HSS0RrJSMYRjK9T6RV8W/uVW27mWdqYvsTIZVIFv+faIdZp19zI5BH7tOxp+OTuO5ksdVjzt5PB643W74fD6lnMLhMLxeL/x+P7xeLxqNBprNpnpIGWPH63eU1BJDBiilJ+R2u+HxeCwDnPKznFB6cNPxquwNK4FpdZ8XErJy7pyMt32mw2r8yFBQOAPW4yLHkv/Lv+26W+9ChqxVYgTjTsFgUCmnSCQCn8+nlBQ3cq3X68qjkvLFbnCUVBcgrR4rKpALiIpNfg6wzwJ6IljIkzhRmqzdrv9EConPFKK838y2knNAgoJDChFSMbrla7cxWSx0Uuy6cvJ6vab1JD+vJwpwHPn3Ql6EHcd1IU/S4/HA4/HA6/WquFQwGFRKyu/3wzAM+Hw+xezIeWjH63WU1BLDKjZFdzwYDMLtdsPv95u8qmaziXK5rBaSbmXbdTKdCFbeA8dDepZcOBQm9CipuHkM6K4Q6aRcpRCVwpSBbMYKSMFQyMrP0NpttVpoNBrK4q3X62i32+o1mUJsRwXeCQtRc528TI4PBTDjLR6PB6FQCMFgEF6vF6FQyGT4yUSBSqWCer2OSqWCUqmEVqulEgZ02qtb3tXJZATrc43KKRQKwe/3IxaLIZFIIBAIoKenR1F/Xq9XtXCqVqtq/tCrkkrdLnLGUVLLBKsFx4mlKykpqE60mO0wiU4FusDhGFB4Uwh5PJ55gkN6EYA9FJT+v359VveZtAvjBjRUpEVLgUrF5PF4TJb/QrUtdp8TC3me8n+rsaRC51gGg0F4PB6Vueb3+xGJREzrqd1uq/iL1+tVPS3r9TrcbrcpDVun1Pm3nbCQsSc9qEAggEAggHA4bFJSwLHdhAEoZS89ecno2AGOkloiWCkkCmAKpXA4bLKwJW+uexgA1HvszB9bQV9ILpdLLQ4Kl2AwiGQyCZ/Ph1AoBJ/Ph2q1ilKphHq9jqmpKZTLZVSrVZOgJrph5epxEfnM6/P7/UpgxONxFSPw+/0IBoOIRqMmT4poNBrI5XJoNBrKym00GiiVSmg0GigUCqhWq2g0GqjVaorKkrFPO86RTspd/1+OpVwzHo8H4XAYsVgMPp8PyWRSeQupVAo+nw/hcNi0fhqNhhqryclJFItF5HI5uFwupag4vvTYaRwB9lFSnWg+euVUUJQt8XgcgUAA0WgUgUBA0X1+vx/VahU+nw/ZbNa0Pq3KYLoNR0ktAXRXXFc4pCm4mKjAJJ2lC3YAJsFsJ0tnIXTyHmnZpVIppFIpxONxrF69GoFAALFYDMFgEIVCAbOzs4qW4bjW63UAy9/OxUpIAPOD99LCpwIOh8Po6+uD3+9HIpFAKBRCJBJBMpk0WbJULrVaDVNTU6jVakp41mo15PN5NBoNeDweFAoFpaB0CtROdA2xkPepH5fzhUYcjZdEIoF0Oo1QKIT+/n6Ew2EMDQ1hcHBQGTlyzdVqNZRKJVSrVfj9fmSzWfh8PpOCr9fr6jzkWOoJT92CFeWvK3J6mH6/H6FQaJ6SCgaDCAaD8Pv9KJfLah1KOSOvU/5eN6/fUVJLBHnjaVHLhRaJRBCJREyeEq1mq6w/+b1cNHalIwhd0ZJ+oKDx+/3o7e1FKpVCLBZT3DnpCVIx9LgosDle3fAoreJP8jqlsGCWVSgUQjgcRjQaVXQUj4VCIVPwWhopkUgEXq9XZWKR8ms0GiiXyyqG0Gg0FD1MS3ilzBEJOec5JhS49JqCwSBSqRR6e3sRDAbR39+PUCiEnp4eJBIJ
NeaSvvJ4PDAMQxW0cvyCwSBcLhfK5TLa7bZlIpM8N7uMoZVS16lQrjV6UFT0NHTl2tGTdqzovm5e/5IrqQ9/+MO44YYb8K53vQt33XUXgGMBu/e85z340pe+hFqthm3btuG+++7DwMDAUp/OkkJa1gBMXoOkJ2hFJxIJ0wQpFArI5XLqs0ya4HfT0rG7xQyYFRRpmmQyiWQyiVgshrVr1yIajaK3txfJZBLhcBi9vb0mjrxQKCAej6NYLKJYLMLv9wMAisWiitssl7WnKyX5N4UbhQMD11TGkUgEsVgMw8PD8Pv9iEajyqpl/IRChAqJ8ZNGo4FKpaLoqlAopJRTIBBQOwg3Gg3lEVBZSc/MbnPEygiTYykLUFOpFMLhMNauXYtkMom+vj4MDQ0hGAyip6dHGQL0Xv1+P1wul6I/a7UaisWiMnI45vV6HeVyWVF+jF1JJkM3CrtlFFkdt/I4o9EootEokskkenp61Hyj5yhTzylfvF4vms2mGgMpZyTl2S2DZ0mV1M9//nN88pOfNO0OCwDvfve78c1vfhNf+cpXkEgk8I53vAOvfOUr8aMf/WgpT2dJYbXgOHl0N5xxGNISwLEbX6lUTHGbhegR+bedhI+EjM/QwmXVeyqVUouJFnI0GjV5Fc1mE+FwGK1WS9EUMguu0wJermvTH5LOZPBat2p161bGrvSMRiaSUHACUB4mv6PRaMxLJ9YFqp3nCGE1jlw34XBYGXWpVArpdBrpdFp5VVxbHEufzwcAyrtst9vw+Xxot9smQ4Jer0zesVrHfLaLorLyeuS8k7Enrj3KGWY56qEDO143sWRKqlgs4g//8A/x6U9/Grfddps6nsvl8Pd///e4//778fznPx8A8JnPfAbnnnsufvKTn+DpT3/6Up3SkkFXTHwEg0EVMF+1apVaVCyuCwQCSjkxc4tWDt1xXRDyd+wseLhomGa/atUqRKNRDA4OYnh4GKFQCAMDA0rxkL6rVqsmnpzV8q1WyxSXs1vxIa9Xp3eZHMG4lFUGpxQgTH6oVqumZ6afA1AeVyQSUYpJpqZ7PB5TDzarFjfdGjc5n4HjjIOM6XE+MG7X29uL0dFRRKNRbNq0Cel0GrFYzJRkIxUNcLytD2N59EzZn65araJWq6m1JpmJTvVn3Rgzfd3LYwBMNHosFlMxOiryWCymxphlLcViEdVqVY0JaVAZdiBdbJcGAkumpK655hpcccUVuPzyy01K6qGHHkKj0cDll1+ujm3ZsgVr1qzBgw8+aKmkarWaaSv0fD6/VKd9yrCKTUg+PRgMIhaLKe6c1IX0FmRHCf1/+d0y+8au3pRcQFRS/f39SKfTWL16NUZHRxUdJtPMgeMpwVLoBINBdVxSEHaqDdJTpGUxJQPWeoq5btBw3Khwms2mSbjS6qVSCwaD6li9Xlf1LxwfKi2dIrYLdGUt4ymM2ZIOXrVqFWKxGFavXo10Oo1AIKBYCHqbcgz1tURPlApfp7zk2NhlrDqxJvKZ48aMxkgkglQqhZ6eHsTjcYTDYRMFzAQSziuZICLlC5UVcLzcAzB3uljOMVoSJfWlL30JDz/8MH7+85/Pe21iYgJ+vx/JZNJ0fGBgABMTE5bfd8cdd2D79u1LcaqLArlIZIFhNBpFPB5XcRjGIUg9cAHV63Vl2UlqR1IftGwWOge7LC6Xy4VwOKyy9latWoXe3l709fUhFovB5XKZMtaYDEErmtYfU4er1SrK5TJKpZLJS7AD5LhLJUqOn5Y705wBcycJHuNnmWZPgWrlURuGoepcmAno8XhQLBbV56R3xvOU/9sJcp5LJRWLxdSDSSd6FqQet5WedqVSUQYu504ul0M+n0ehUFBxKs5BaRzqj6W67oWO6x4Un6UxTMVO2UI6mckiUs7IhywQ141hK+9Nj0ktp7xZdCV1+PBhvOtd78J///d/q4X0RHHDDTfg+uuvV//n83mMjIwsynefLqQ1YxW89Pv9GBwcRE9PD5LJJIaG
hkwxFQreer2u2uWXy2XUajWTdceJKBeRPpnsIngkN55Op7Fhwwak02k85SlPweDgoFpM1WoVk5OTqFQqKBQKKJVKyhr0er1K0NTrdVSrVeRyOczOzmJ6ehqFQmGep9nN6yUozGixV6tVtFotFS9qtVpqm4RQKKSuj5/lfWcnBCksdTqM8y0UCqkYFecTf4u/b5dsPyshq3uTnANMiOjp6cHg4CAGBwcRCoUUtSdjdyzEpXEnr53rinOoVqthfHwc+XweMzMzmJqaMtGqtVptXhcPHYs1biejoPT36B47M2UZr6NiZ2yX64hKmAqZCSNUVFLWAJiXOMI5qHucy4VFV1IPPfQQpqam8JSnPEUda7Va+OEPf4h77rkH3/nOd1Cv15HNZk3e1OTkJAYHBy2/k9aBXWC14CTNw/OVWUeMUcgbz0WmBzOtLBs9WcCOdJ+07licm0wmEY/HVdEqLXx6GHwwuO1yudRYyJZAuuVnB1pGz6CTHhItfN2CZdYmBYHMQKNXTcEhr08mRHCcDcNQwX/50GNfVgaNHeaMPpf1xBOZXOL1ek0KlqyCFbVHZVOtVpWSYhyK8RhJ98lO4BJWc2wpx+1kKD75ul7Ey3HS6y6lXNENK/mdnGN6WIHv7fT3UmPRldQLXvACPProo6Zjb3jDG7Blyxb82Z/9GUZGRuDz+fDAAw/gVa96FQBg586dOHToEC699NLFPp1FgW7RWMWfqJQikQhGRkZMtRvRaFSlvXJBsHsAqT7d9aYFLq0n2VuLQk63cLoFChgGcNetW4eLL75Y0X3RaBTFYlGl2R85ckR5RfQywuEwgOPjy64L2WxWFWNKCgxYfo+gU/xC0mu09vWmp+zH6PF4FCVH6peWLecDPwPAJLiZOCE9K3YuIZXabDZNafo8LztRflbGHWOYNOxkFh7XDpNrqKTJOshkAHpGmUwGxWJR9eoja1EqlZTyomEgG87K+9pNZW5FuzHWTU86FAohFoupGBR79+nXxXnH2jAm8tA4ohHAOCbpaj2JRBpmnXbyXewxW3QlFYvFcP7555uORSIR9PT0qONvetObcP311yOdTiMej+Paa6/FpZdeuqIy+3S6gkHycDiMZDKJSCQyb+IwJRaACm5zIumWnMx80usXOnHV3V5MVNbhcBjpdBpr1qxBNBpVjS5Jb5bLZWSzWeTzefVZKmX5fexfJwVQJ09qOa9d/y09bkT6jpSeDNa7XC7V2omGR7vdVu2OGC+gEpKeGueBLuA5doFAAM1mU6ViS0vYrp63XENSEUuvkEpZrh2OOceP3hI9KSpqGkKyhZSsRZOxv04x36WmSq2oPyvPCTguD2RZi+wmwbGjTJFeItcZWQvpieuel1RYUknpc0qPT3WiMZ8IutJx4s4774Tb7carXvUqUzGvHWFlzei8MDsikA+mBai3u+FiqlQqKBaLSmjTytO9BELnjKUwtAP1RYEaCAQwNDSEgYEBrF69WhUuA8eSIIrFIqanp5HNZpVFy7HiWLKIk9ZcoVBQfepkoarVOSzlGMjFqD/zd2Wmoi5Q2+22Uj4yI43Wu9W+PvwNCk8KYD0bkJC92xirYvq+bKKqt9bqRAEtNaTy1WlKGn265ygpY1rz+rhJQU5lze+w6nYuz0Vff8thDHUyPCV0ZU6FLkMLfr9fzTMrA1gqEsZ+ZW2d/jcwv5kzFR+/z4pGXGwsi5L6/ve/b/o/GAzi3nvvxb333rscP39akNYCFwXBSeLxeEz9xJheHolE5i0wTnY2t+RDNg1l5hqAeZ/j71qdT7chg94bN27Epk2bsH79egwMDMDj8ShlnMlkcOTIEeTzeczNzaFarcLlcimFzlZB9ATq9Trm5uZMdJ+eMiyxnIpKP64vVL1RqUym0D0i+X5dcFo1OZUCWwoJCi2ZZKBnEHaKMywn5O9TScnWPbJ8Q6buy1oofpbKXU82AqC8DXmdFOLSI9cz+jp5VIs5t6zGvtO94Htlb0gahaTJ2QGe
hjCTJDj39OJddkOXxgF/i4pdjrWcp3KbGL6uj9lirkWnd98C0CkS6XJLHl3vhKDX8cg9a0qlkuLEZfNQmb0nf19a6VYLyg5eFK1WKirGpTj5KURkIaUUrlII0ZIjDUMa52QKeJd7HKy8Kh6Xf/NZnxedzlsXVtKjopLTqScKMRlIpwKQSTfdUEoSVqyEnANWNVO8Jkl9A+axtbo+ehukt/gsEwx0Gs/qHsrf6hbkOOnjJq9ZJuRI5dTJc+zESuhUsvSk9CSepaRCAUdJzYPO/fKYtGTY3sjr9SKZTCp6hQKZQXAutmKxiGw2i0ajgSNHjiCTyaBaraJYLM7jjbkApbstrXFagiezq+hyjBUt+Hg8jlQqheHhYYyOjiKVSgE4NqlLpRLK5TIymQymp6dNHgW7NNMS9Hq9ihKdmZnB0aNHUSgUUKlUFlRSy3X9OvduRQ1ZUWiMT3Wy1KUA1i16OQ9JGTJTlIoIgNr0T6Ybyy7pzDi0EvTLAV1BSYUkd5ENhUJIJBIq8YiF31x/TC6RApjePOktAIrRYM9DGoWkQmXXCZmm38kIXOyxOllvSioNyabQKOH9lEYuDULee2mwMJtUN445bvwNqaSk5yTfR1hlRi6WN+UoKQtIS0V6Ulwk+sZisrUPKQgA6sYyWYDFhLlcTsWiODk6CTjJKcvJpHtU3QSzzAKBgIrNMYtIcuRUPqwPIu2gp87SCuR+UuVy2ZR5ZQd0osp0Wk4e75QKLN8rj1t5DDKoLRUmx1LSfjLmYDWfuz2WuhdFIcwyDmb4yabD8tz1NSOVPABT8gWFdCgUQrPZVOtWxquI5VBQJwsrr0k3pOV4UPZI5aMnZEmjxSo+J0MK0muS80nS1dKjWgo4Suq3kDdeKiT+zcVPLphWP5WHFBzSYqnX6yiVSpiZmVFKqlQqKTpL58KlRQfA5GFJZUZ0m+qjVc99obhNAjPz6vW6qu5vNptKodPzZC1VOBxW110ul1WhL+nBTgkTQPepmIXOQSoWUsPMxOLcIb3ZKRYilRu/Swp0dodn/IVxTqYdSwEPzFeISz1+VgrZSjEFg0FVshGPxxVtzHOnBy6z87h7sTTmaATJuLDf71eGIeOd+XwebrfbVArRDXSiYHUloFPrfNCwY/xR1hnq3wNAhR9k6YvVb1pR1XwP752E9P4X06B0lBTMi1a6upKOkJl8MnuKgobvlUqKk4FZbax8Z2cAedMBmCgZCiNdSXWKa3QDnKzBYNDUmZqBV8bd2IaGVAtwPGCbSqUUZcrxLJVKyOVylll93b7mE0GPi8j7LJVUOBxW1wVAeY+SZiYkrcV5wXiNy+VShgFRrVYRCATQarXmBcZ5jnxe6niCrhCt4lCyRor0XiKRUPtDcV7QYJGFu0wtp+Kil0DPkhSo3+9X69Hn86FWqyGTySjjgf0h7QYZp+P/VkqKHpTcZ4yfkwkpANRYytIO3VPjmEvjSNKGhmFYKinpjTlKapFhZbVIy4U3W25oJ5WZnjILmLsx65XtVlSCflNP9L8dIMeIY2JFQ0kKR9IzehIAvQrGCmgh2/HarWB1z3hMv17ArNSsLFHdy6YAZr9DZmlJikZ6TzrV18lqX+7YlBXVJ4UvBaGMh1AZyWcaPzzG99N74rPMWJNbWTQaDVM9lh2wkCyScoj3Vp9T/A59bUmqWO9yI9+r/yZwnPaTxrhUZpICXOy5dFYrKXlT9BsuaxFY0R+JRNSEp3Dg++PxOOLxuOn7mTIta6LoYsvf1+MYVpa4nTwoYH7gm2nPnKwcI8MwEIlElGCV6di0xqQwZqJFLpdTDUDZ0dsuMThCjy1JY0Me5+KlEKagAObXvMnYI5MD+F2ksvh57gwQj8dNQXQaA2zDpCurbo6jFH4UtrLWR3Zn0elQJtBwy4laraZodJ1xcLuPdfiIx+NqWxOpsEqlElKpFLxeLyKRiBLaTDTpJnQ5xLEig8MkI16LTM2XSl8mPcg5Sa9T32BTrmk9aYyhDv4v
27kB5gSvxcZZraQIKwtCLiY93dzKQpU1GTLBQfackwvJSrjJc+GkkoLPTgKakGNEC0sP6uvj53K5lOKRkEJa8uUnSj1fKdCD3Facv/xbbyhMOhiAUlayPkgaV/oc5evdRCdrX49NSY+cY0Lvmgk47BZPZSUNOSopNixmSyUZZ5Z0mcyaszrPbmEhj1PvxiE9Hf2zunGr08dy7um/D5gznXkOlE/S29Jl1mLhrFVS+uK14sllmxa54yVfl++XbY/Im3MhsS5K788n0Yna04WZHZSVPm6MPwUCARQKBeTzeRUT4XhYNYqVHoaM7xUKBWQyGRQKBWXp2cmLlJAKgtAXL99HJcO4AQUvvW09kUbvQELlLb9TdhWgAOE9OZkxWwqhshB0T0o2kZU0HK9fbmUzOTmJYrGo2h3VajXMzs6q9SbXltt9vGMJ96BiPFDGwOr1+rxOLlbj0o1xknJIGnnMemQRL//nfGi326ZaOZ47KVJpNMsELqns9O4eHBs5VjImpbMHi4mzTklZCRNdQekWC2kJaXnpMSnZyoeToVwuq4fVQrJCp5iGPN4phrWckGPGQCypGGaWAccr/GWfNDkOHEPJlZfLZZVgImvC7KagFoKcV8Bxb6nVapkC1qSq2HWE75U0jfSsqtUqgOM79MrqfzmWukFzorFbqrGVFrccG6s4lCy0pfdEb6larWJmZgb5fB6lUkkpqUwmYxK+ch1zLhmGgVQqZeqhKctHrOKE8u/lnndy7ugept4KidfEkgOv16u8Ro6jVFIyNi4fBMdCFlDrnrpUZHrcdClw1ikpYiHlJBeQ5GEB82LmZJcxKmkx616DvNmdLDZdoHSKR9lBYHNiMuXZ7XZjbm7OxJ23223lJTBjT1IypF4ohPUkE/lbKwVWcURZzEvBQ+qKr+lduOW84ZgxE0vOJRnM1utiJGUmP0PhtVSChVjIutYpdUnx8Vqr1arK8mS/R3pTsu8l54wcD90D1ykqPQGhWzSf7oUsZFToc0uGFeS957oEoIwdXTHp88uKvZFUnv7bgHXNmkP3LQI6JUmQ3tNroawyf7iw2GuOk5wWi/QsZOBf0kD6xNQnmZwY0lKxg8Dm+TSbTZRKJUxOTiKfz8Pv9yOTyaiuHIZhqFoWepUej0d1bGZCiqRF2fm8U8NdO0MqGOB4MTYVuZxjFLwUKHqpge5BSuUu5xKtXvk9PBddGOmKilhOj0EKQJ4/kwCkcnK73cjlchgfH0elUjHRfbLvpbxGXgvXJg0lGQtmhh/r1az61+ljstSepnzWocsJdrORjASvXTYs1qlh2QFeJk1wruqyhgakPp9lbRXPbSkbDJyVSorQ6T6rQK6VlSXfKwXEQgFJ/aYtZLVZWSZ286CkBcW4k8vlQqlUUnEGeg6kpFjnIgtQ5ZgDmCeoVxKs7plu7UqFQ09bXq/8Wx8DPelGMgBSwOpzxA5zZiHI9Sc9KY6PbPEkH7JeSo6NFeshjVKrtHyrz1j9v5Sw8qj0ZyuPRmde5BhK6PPMip3RPXS9v6H8DXmeS+VFAWexkpL8vQxOcquIcDisCk/1G0JPwO/3q605MpmM4scl/cAFJOsI+D0L3VCdplmqCXC6kIuFXSUqlQoAYGZmRo2lfC/B3mz0VtlzjQpfJlusFE9KKm4KAypoWr2GYZioLTbQ7XSvJU3DOcS5GgqFVM9DWVukK3sA84SyPGc+L/UYWwlgKShJPXI86IFnMhkVn2QyDVkK0qR6vQ+pd3qtMskgFAqponyr1kDy/JbLMJRjI70YzhHGmpilSOOvXC4DMGfc6QX/Us7Rcye7o3tiVp4m0/c5trJnqDSq9L3eFhNnnZKyCuBKz0kW7DIYSW5Xfk5mCAWDQczNzZkC/VaWju49dXLtO3lRdgPPTdIzhmGgUCioMZTCUfbokzEBHuODVNdKi0lJoSbpF9K90gN3uVzzWmPp36XHEjg/9cC/7BDOOSaFlFW8ZTkEsIy7WkG33DmXJG1OirhS
qagdBOReSfpak9dEgSs3CJRdGuT61GMyyw1dRkhFAMDkXZPy5d5ilF/yfRxbaaDQKJKK3UrOyISWYDCIVqul4sZW8kl6aI6SWiR0ovYoMLnoAfPeUbFYTHXtTiaT6r1WLrOVYpI3VUK/4VaWtR2hew8ULFQybPMjg+LAMQEqixPZqVp+Tt/zZyVBjodscwWYvRpZ97QQfSItYgpbZnbJBqzSqpXGUidKZzlhpaikIJYp0ZwrbrdbeU2VSmVeVppuBHI9c25xbzc+GH/i98tOFXqCUzfnnW7keDwelbloGMd3fZZUXCcqE4DqZSjng4yNc43S4OG6pEfKc5IySqcOrYysxcJZqaQIq0Jd9p6jZcL+Xz6fD729vQiHw2obAbrcdL+lEKbVIVM4+Rpb2+hWr+SR7ZQksRB4fvK8pQEga8h4Te12WwkRChLSD6yJId1n9+uXkAqF95hWK6kSPX6kB6B1hSLHkp092JkhFosp4et2u5UnL7dJt4pBWP3OcqAT3WdVs8P3sCbKqm+fTmtxjFg3FI/HkU6nkUwmFb0sa/Lk/mayeFw3GJYTuqyQdCQND8osXgNwnBXiFkL0GEnHk0KX1LH0wDi3GOpgOMPKeNYpvqWuYzxrlZSkIqQ3JR+yLkHudcP/9QC3lQXcibrrZDnL11aKgLa6Fvm/VGB8TdJ8MkNIFz7699sRFJL6MeC40pYeZyeFpB+TWV+davc4hvysVeKFVbylWzgR/UjwnK3iRvoc08dHeuhyuw96UQBUPEvW7UmGoxtjJJkJQsazJdMAQHlXkhmih2UVapDvk0W5AEw1alZdP/T70ClxYylw1ikpOcklPUDLgV4T+2PRavX7/ejv70ckElE3k0WYDEZKK0/WSVltywHM323XildfKZDXxP+56Gih+nw+AFBp57FYTFl7svhZ58xXIvTxAGBSyHyP1f3muNHqJ8UXCoUQj8cRCoUQi8UQjUZVMScFCT1RpvFbCWIro2mpxkAqWoLCUhbF8zgFJEsQ9Gw+fV7IPbTonff39yMcDmN4eBirVq1COp1W/Q35vdw+J5vNmortu20c6YpK3i/eY8ouJnfJek093sb5I49xzNrttpqTfr9f9UwMhULw+XymLvOyPIRjRWp/qRX8WaekJHTLQlqqFArcVoEb+rFZKkGhoAdwO/G2cgGspPjTyUK3AmU8Rlp1sg8i3ytjUStZQREn8qIXUha6p693G5CJAHIMJXUmYzjLqZwWgn5dercH/q9b8FaUkozByE1IuU5jsRhisZhav8xwIwXGhAw55+wEfS0RTJiQXW84ZrwOUs4A5sk2rknDON7+KBAIIBQKmZLHZI0fk1WkwaOvU4fuW0RYCQkdnOxUUrRQDON4T7FyuYxsNot8Pq/atTQaDbXBH6vhdSWl87zdFhxLBX2cZV2Z3iATgKlDx0obDyvKT4ek/gAznadTe7SAWe4QCoWU4A2FQiqdmrEJWr0UvHoygJWhtNRYaDykgSj70VGg0oAJh8OIRqPK4+Zr3KqEn4lEIsrTHB0dRSQSweDgoNqIk3OK3jp3fJYJGXYxFvW5JL0rSReT7pO0uVxrnD/0skKhEIBj85Bb5ujlN1RuVEyMbeqtzZZzTp2VSgroXDDLwQ4Gg4jH4yY3mJwvrbBisYi5uTlFGTBwzZ0+mQxg9ZtW1N5KEsonglxocuFI7ltuC07lr3dHX6ljIpWPHAcA85QSoXv1FMBUSpFIBPF4XAlubo3COVmpVBT1vFCqsTyXboDXLJWxrF+SHV+q1SpisZjatFBa+F6vV70Wj8cRjUaRSqWwadMmRCIR9Pf3I5FIKA+DBmShUEChUFCKSm9ibAfoVKmkz/k61wrHSioOjqvs5kGFJmlm2RpKlpBwLeqUsSxA15mgpcJZq6QAdPRw9Nf03mi1Wk3VbJD7t7I4pCIidKvDDkJjqSFjEJKm0jt2d2rhc6ZC96o4RrJpsUwEYE2eTDs3DEPNt4XoGCsPoVsxl05M
hvQKeG30jphGzQa7jK0wrskYXSKRQCQSUdt0AObt0sl6cIsPWTBu1/kmx0c/ZmV86ApNp1jla5Jele249IbQVpTrco3ZWamkqGw4QfUdYHmsUqmYWtbTupibm8ORI0dUzzpOeKZ4yoCifhOtrFm7Lo7FgKR1QqGQSgeWm9vRyqXCl3skrURYUX/6gpZeE5WTjD3Rm/B6vUilUojH44jFYujp6VGdAPx+vzKYKpUK8vk8stms6hout0iRFOpyja1u+euv6enL7Lcni8Cj0Sh6enrUthtsq0VlxtonljMwcYL0fKvVQrFYxOTkJMrlMvbv34+5uTkcPXpUbfNBj9NuisqK9pPPpP7kcdlbT1dcsjZKKj4aOmx2zLBFuVxWXhWPdWMunZVKipCBfd1CkEWD8pjuSck6C1nIqrvBVmnH/P9Mh/QSZCskqzRXfXGtVJwoPsX30JIl5SITSkjX6F4UaTFaxbR4ZXDbKmHHTuOp0948N333ASZDkN6r1+umLXMkJUrKkAkAcj8qrtdcLodsNotSqWRaszwnu0Gn/Tq9x2o85TE9tV2XR1RuktKT2clW9VDLtU7POiVlZZ3Q+2G6tMvlUmm8vCm0ygzDwPT0NLLZrOpyzuAr08xP5kbacUEsJmQ8hpZvOBxWHgFT/lmvAWDe2K00LCRIJPS6HrfbrTwCuTEfyx16enqQTCYRiUSQSCSU8qLHzzgUYy1MqV4oU7KbVJ/+qNVqKBQK8Hg8qFQqMAxDzRG5DU4qlUK73VaKS2a2WcU3M5kMisUiZmdnsX//fpRKJYyNjSGbzSKXy5mSJuw833SPVPekZA9Dl8ulisY5x5imbkUL8m/GNJkQxnhUsVic1yNxucfqrFNSVuBNIKXndrtVZgv7XUkllclkkM/nlSIjzSfpQuDMV0Qngi6MWXNGqoq0DrBwpuVKgx4L0OMDshuHzKySBeNMBvD7/Uin02rTvlgspja2k6nU9BRI/ckGonZLCNCtfjITTJTg2NBrjEQipixQxujkdxJcy81mE7lcDjMzM5iensbhw4dRLpcxMTGBfD6vqOVuZD2eLqRXxf/JQNAgoZKS84wGuP5d8h6QbufWO7LeTrJI/Oxy4qxVUvpioTvLFkelUkmlvLIIlROBk1xmBa2Uib7ccLlciqqKRqMqC4vxFr2mzO5W7UI4kRdllSRB2pOeJbP42GGfmW9yvyV6H4wbyCJL6UFZxQ66NUetvAHZColCkUqqVqspj5JjJilRmTTCZ65fbog4NTWF6elpzM3NmRSTVV/DlTLnpKKSnhTbJbE2iuMq+/Ppyk1XUjS29djTQkaOE5NaIkh3GYDqcs7/PR4PSqWSKbvKMAyTG0wlxhu70ib7UoOC2OfzIZVKIRqNYtWqVRgdHVWUXygUMnVGkEHsMwGcD1ZeFDMdqZDS6bTylBKJBHw+n6oPYi81AIqCptDN5/OYnp5W21rweK1Ws4XxZEWvy7quRqNhSpTIZrOo1+uqH6HL5UIsFlOJN3IfMlnoK3ftHR8fV0kSk5OTKJVKyGQyqjzEKjXf7pCxbSm/pBKhQpKNYanUWarA91Jmydg754zs7qEnlMixWq5xOyuVlISVN8Ub2Wg0lLUvlRSD03YOTNsBUlHJXmqybkNacyvdk7KCTv3xb6stYmRneP4tkwhkJiq9Dip5Cnyrbt4S3R5bfb2RnpIdyb1e77zu5DIRhN8j++9xK49isagK63O5HHK5nBon2YXDThToqcBKWckQg2QmZENjqaRk5jHlnZR9neZPNxQUcJYrKbrLVhsTypYtLICTPL9VkoSDY5CxKJ/PZypCpYIib06Kit5pqVQyFUCvBOgKSP9b3z7BqkmsVcNdACZqj5tLNhoNlaGWy+UwOTmpkg9kxppeo9ctWMVSpDVPdsLr9ap92ZjJx5IE2aVbtuvJ5/Oo1+vI5XIoFAooFosYGxtDtVrF9PQ08vm8iivzM1JBrdR1K8dUKn69fEPKNKns+T7+fzJK
Sv7ecuKsVlK6G8uFbVVkKV/X02ZX6kRfSkhFJfsfMq6gW9EUSLJOaiWMa6c4lD5/5HHWQ1l13SdlAxyfnxSs1WpV0WFSSc3NzansrE77SNkJupLi/a9UKoqOC4fDan4AUPQoDRx6XtlsFrVaDblcThXpjo+Pq8w0qZys4nMrGbqiAo6HLGR9IqlAqYiseo3K0hl+vx1w1iopq0DuiSzibtQIrGRw3Gq1Gsrlssq2CoVCigbkluBzc3Oq7mwlxaT0eSOPA8eFBoUEPSM5h0jrUUnXajVT6jBpqmq1qmIrFMAcM53Kkl6UHeao7k3J2h1mn5HN4DY4mUxGFS7LHnTS8qdnyZhUtVpV3pPe2fxMW7fyGqS3dCqelBwbu47TWaukAOuODydDkdjhxtkdtOTa7TZKpRIAYHJyUjUDbbVa8Pv9mJiYwMzMDMbHx1VdCzMsVwp0g0d/lkKCY8L03nK5DI/Hg3K5rGJ29CJYQya3q8jlcvOKLDleVntx6efZTegxDY4bew263W7k83m4XC6MjY2ZUvRldh/nFmNzHAPSnDoVr//2Soc+jjqVSg+KlDrnplWz4U7fqR87mfq/pcJZraSscCZN5m5CWmf0DIrFohKyzNTKZDLIZrOK5qGgXWno5FHxNcBs7UpwnyMZC6CgAY4rKdZE0YuQ8YSFhLLebaDbsPKqeJzQa32kkgJg8sJklh/HQ/++MxmdFIyk/gB0nCsn+s6TOb6UcJSUg0UH4w2kWyYmJuD1ejEzM4ODBw+qhqBUXKzzyWazppjKSsNC3ouMUVkl51QqFSWImTxBgSyVEb0mnZrpRNF00wJeCLrlrytwq5ieHt/Tkwb0cTibwHGkt85nwLol26mO00JG2FLDUVIOlgSyhoNWMetV5H5BMm2adM2ZKGg6KRAZt+MxWYDJzyzkdaz0sToRJadTqQ6scSZSm4CjpBwsIXRBKmszpMVn1Qz1bECn6+TYWAmdhQSRHeNQi4Ez4RrOBHTrPrhP/JZTx9jYGP7oj/4IPT09CIVCuOCCC/D//t//U68bhoEPfehDGBoaQigUwuWXX47du3cvxak46DLoTcnWK8xKY2NUfV+fswk6XSeLLfUt4Dt1Nj+bFLuDsw+L7kllMhk885nPxPOe9zx861vfQl9fH3bv3o1UKqXe89GPfhQf//jH8bnPfQ7r1q3DBz/4QWzbtg07duxQ7V9OFbFYDJs3b0Y0Gl2sS1nR2Lhxo9oumohGo9i8efO842crNm3ahHA4bDoWiURwzjnnqPY7ZzvWr1+PSCRiOhaJRLBp0yZTTdfZjLVr184bo3A4jE2bNjnGw2+xZs2a05bNLmORR/EDH/gAfvSjH+F///d/LV83DAPDw8N4z3veg/e+970AgFwuh4GBAXz2s5/F6173uhP+Rj6fRyKRQC6XQzweBwBks1ns27dPcftnOyKRCNavX2+aGLlcDvv27VO7m57tCIfDWL9+PWKxmDqWy+Wwf/9+VCqVLp6ZfRAKhbB+/Xq1zoBj62/fvn3OGP0WwWAQ69evRyKRUMcKhQL27duHcrncxTOzD6zGyEqOW2HRldTWrVuxbds2HDlyBD/4wQ+watUqvP3tb8eb3/xmAMC+ffuwYcMG/OIXv8CFF16oPnfZZZfhwgsvxN/8zd/M+06m4BL5fB4jIyMnvDgHDhw4cGBPnKySWnR/fd++ffjEJz6BTZs24Tvf+Q7e9ra34Z3vfCc+97nPAQAmJiYAAAMDA6bPDQwMqNd03HHHHUgkEuoxMjKy2KftwIEDBw5siEVXUu12G095ylPwF3/xF3jyk5+Mt7zlLXjzm9+Mv/3bvz3t77zhhhtUR+NcLofDhw8v4hk7cODAgQO7YtETJ4aGhrB161bTsXPPPRdf/epXAQCDg4MAjrXIGRoaUu+ZnJw00X8S3OZhIczNzWHXrl1OvOW3iEajOOecc0xudCaTwa5du5xYwm/BJAnJk2ezWezatcuJJfwWTJJIJpPqWC6X
w65du1S7q7Md4XAY55xzjmmM8vk8du3ahWKx2L0TsxFCoRDOOeccUwLdyWLRldQzn/lM7Ny503Rs165dGB0dBQCsW7cOg4ODeOCBB5RSyufz+OlPf4q3ve1tp/27O3bswM0334wjR46c9necSdi6dSu2b9+OCy64QB17/PHHcfPNN+PgwYNdPDP74JxzzsEtt9xiMo527dqFm2++Gfv27eveidkIGzZswPbt2/HUpz5VHduzZw+2b9+OPXv2dPHM7IN169Zh+/bteNrTnqaO7d+/H7feeus8WXi2YnR0FDfffDMuvfTSU/7soiupd7/73XjGM56Bv/iLv8BrXvMa/OxnP8OnPvUpfOpTnwJwrHr8uuuuw2233YZNmzapFPTh4WG84hWvOO3fLZVK2LdvH/bv379IV7KyEQ6H53mVHCNHuByD3++f51WWy2Xs27fPES6/hdvtnjdGlUoFBw4ccMbotzAMY57n7YyRGa1W67Q970VXUhdffDG+/vWv44YbbsAtt9yCdevW4a677sIf/uEfqve8//3vR6lUwlve8hZks1k861nPwre//e3TrpFy4MCBAwdnJpakLdKVV16JK6+8suPrLpcLt9xyC2655Zal+HkHDhw4cHCGwCkZd+DAgQMHtoWjpBw4cODAgW3hdEF34MCBA5vCapsSfU+tMx2OklpELLQp2NkwmRw4cHD64P5i+uaO3LaFGw/ysdDWLFabYK5UGeQoqUWCPqmIlToxHDhwsLyQCogd5uXW7wBMuzkvtEULt3QhqOBWojxylNRJQN+GWbd4dOtHTizu/cNtsZ29fxw4OHl02kZeHtMZDCsPYqFj3QaVkt/vh8fjQSAQQDAYhNvthtfrVa/zf/max+MxeVa1Wg3NZlPt29ZqtVCr1dRzrVYzbUBqp3HoBEdJdYCVZ0QLxu12q8mhTyL5/kajgWazadry22r7bwcOHJghFZFcd9LTkOuP0NeY3EzS6rnbVJjL5YLH44HH40E4HIbf70c8HkcymYTX60UoFILX64Xf74ff70cgEFCvRSIRBAIBJY/a7TYymQwqlQoKhQIymQzq9Tqy2SxqtZrqfdpqtVCtVk3elp3lkaOkNOjKSV8QHo8HXq8XPp9PWT+cJFRSnPzVahX1eh3NZhPAMc9KV1pnOxaK43WiUIkzhXN3MJ+dkEagXGtcf16vF263Gz6fzzQ3KHi51rijcb1eV7tD81iz2TQpNWD55xBlBz2kYDCIcDiMSCQCn8+HWCwGn8+HYDCoPKxkMgmfz4dwOGxSUq1WCx6PB9VqFYFAAF6vF7VaDW63G9VqVXlQ3AWbYyBZHjvCUVIwC0DpDblcLvh8PnXDablEIhEkEgn4fD5Eo1Fl5fj9frRaLZTLZTSbTWQyGRSLRVQqFWQyGTQaDZTLZbVgqLzsOjkWC7pVbPUa/5b0jbScKbQIqfA7bal+tuNEcdJuj5F+n+kx0KsIBAKIRqNIp9MIBALo6+tDOBxGLBZDPB6H1+tFIBBQXgQFLxmMTCaDUqmEUqmEubk51Ot1ZDIZVKtVlMtlFAoFkwJbTs+C1xwIBNS1rlq1CrFYDLFYDMlkEqFQCMPDwwiHw4hGo4hEIggGg0ilUvD5fPD5fCZPstVqoVgsolaroVQqoVAooFqtYnJyEuVyGZOTk5icnESxWMTY2Biq1SpKpZJJgXV7TljhrFdSVgtZek8ejwd+vx9erxfhcBjBYBCJRALpdBp+vx/JZFJ1aQ8Gg2g0GigUCmg0GiZqsFKpwO12K89qJQcyTwWdlI7+utVD0qqSTpXWLwUTj7daLcu0XTtgIa+RWKxz1uM3nc6jmzSXvt7kWqNATiaT6OvrQygUUkI8mUwinU4r74NKih4S4zJTU1PI5XIoFArKw2i32/B6vab4DYXzQllzSzUGHo9HGcKRSEQpqVgshkgkgt7eXkQiEcTjcUSjUQSDQXXtZHZ4vq1WC6FQCPV6HZVKBdFoVPXvLJVKynPyer3I
ZDIAgHq9rpS0XXHWKylCp/ak90SF1Nvbi3A4rBZOIBBAKpVCIBBQnhSVVL1eRzKZRKFQQC6XQyAQUIrK5XKpiSH58TMJUtlIaob0KI+RxvD5fAiFQipwTCvR7/ebhGq1WkWlUlFce71eR7FYVN4rA8N2oVStMrZ43Ap6nGQhr4eCVf8t/Xfl67rFvNyGkjw/UuShUEh5FL29vQgGgxgeHkYymUQikcDAwAACgQB6enqUoRgOh9WccrvdSkFJJeXz+ZBMJlEqlZBMJlGr1dDT04NSqYSZmRmMj4+jVqshk8mgVquh0Wig0WjMW5NLMT68dlJ58pFOpzE0NIRIJIL+/n6EQiFEo1GEw2HlQbnd7nkMAunMRqMBAMrL7OvrQzweRzgcRk9PD7LZLDweDwqFAg4fPqyUl/w+O+GsVlJWC5oWOz0jLpJQKITBwUHEYjGk02m1cLioaNk0Gg3kcjn1XCwWMTs7C5fLhWKxiGq1qrwscsNnIj0lx9Ln8ykKJxwOq4Cwx+NBMplENBpFKBRSxkAsFkMwGITf70coFILL5VKCNZvNIpPJoFwuY2xsTAkc4JhVyAVH4dstIUxYJdvI9+leX6fgvn4des2M/C39mPwd+d3ye5Z7jGR2Gj2IZDKJtWvXqr3QBgcHEY/H0d/fD7/fj0gkYjJapOKXcSZms0WjUdTrdVSrVRSLRdTrdfT29qJUKmFsbAwulwulUgnNZtOU1m2V6LRU10/jTD7S6TRGRkYQCoXQ19eHYDCIUCiEYDColDsAU7yb51qv15V8CQaDaLfb8Pv9aLfb6O3tRb1ex9zcHNrtNrLZLCqViqII6/U6ANhOUZ3VSkpCp5iodGi50AuQWTbS4peuNy0YqYxSqRS8Xi+i0ShKpRLa7bZaYO12+4yh/qRw5LjE43EEAgGVtcRYntfrRSwWU9ZxPB43BYRpZUrBz/Eul8uo1WoIhUKKW6fApZXZrWvn39Izl+nEMm1YKikKGhlfOZFVb+Wxcg7Lc5LfKanSbiQK8PyYuZZMJpFKpRRDEYlEkEwmFeVFI5DjZhV/lEkStVrNlCQBAF7vMVFHD4yUGj05jgu9EP7GUlGjuqwh5UfjzO/3m5JCGDuTc4SMAr3jdrutvEh+h5yH9MBCoRDi8TgMw1AemsvlQqVSsWUYwlFSMC8cKphQKKSybCKRCEKhkPo7Go0iHo8rS58Ulszya7fbiEQiaDQaSCQSCAQCKBQKihv2eDyKolpqamG5wYQTek7r1q1DIpHA6tWrsWbNGgSDQfT09KiFSOFKIU6BRL5eZlEWi0U1jn19fSrewGBxuVyGYRhwu90dKbKlumY+S0XE+cC5RSFBAURDhXHKZrOpBC4pYQog3eM2DEMJObfbrbxUCjyp4KrVqhJg9Ez1xJ2lnHscH97TYDCovIR169Zh9erVSKVSWLdunaL9YrEYAoEAQqGQup8cF14La4HoQdCb4NhIAc0H41LAsQ1X6/U6crkc5ubm1PhLRb4UYyNjcJQzjLUlk0nlNVKWlMtldW35fB6NRgNzc3MoFotoNBqo1Wrqe91utyluTlnFEIbH48G6detQKBSQzWZRrVYVO0Gj2k7sjqOkBCRFxcVPYSmFjX5MjwXI4x6PB81mE9FoFACU284sJjsHLE8H0qonTUFLua+vD0NDQyYlJRMiCKt7wAC5pBDz+bwS+nwfx10/p6WMwVhReDp9LK+FMUx57RSMjUZDxVhoMVOpAJg3Xyj05fdSIPF7qazomVHgLzROSwHde2BsiVQfHzQI6U1LSo/eQqVSQbPZVHSdVFK8ZiZh8CGpMiqGVqulxk16ovIeLoV3oc8R6VHJecz7zVhTtVpFPp9HrVbD3NycUlhMkKBcId3XarUQDodN848UY7vdRjgcRigUQqVSmbd+7OJRnfVKSqdodIEiM8posZHnbrfb8Pl8JoHC9+pCgJMimUwq62d2dhZut1stLn52JYILjtba8PAwNmzYgGQyiSc/+cno
6+tDT08Pent7TQkRrIKv1+uKBiWYpELIQHO73UYoFFIB8oWSBuS9WC46QypTCmRSnYlEAsFgUF2LYRgqnlapVJSHLTOyqGDo/fB6mHDg8/lUYgGVFCmhZrOJubk5lX5NOkwfo6UEf8vv9yMajSIajWJgYADRaBSDg4Po7+9XsUmWcrBrAhUsi1KLxSJyuZzygEjr0Uvk2KfTaUSjUcRiMVNRrPS8GROr1Womw0FPOpFjvhhjxfPlvHe5XCiXywiFQiiVSigWi6bfy+fzikGYnJxEtVrFzMwMCoWCGh/ppSeTSczOziIUCmFoaAjhcFhRqo1GQ3lxyWQSg4OD8Hg8KkaVy+VsFSs/65UUIWMp0mvSlVSz2USj0VCUAhcUaQL5XbTM+L52u41YLIZEIoFisagoGRm0XckgzcdMyHPOOQe9vb14ylOeopJPwuGwGkspeBqNhrIKuTgY/JbxFS6uZrOJQCCAer2uXud7dCVFLIeC6mQd06NkxwDGWiKRCAAoZVIsFtU4+Hw+VfMjYyZyzjC+EAgEMDw8bKrbY90MaUMGxund83yXGnpchJ5SOp1WtFQqlVJZfkwRp3dExToxMYFisYhsNovZ2VnlTcg6J84Pn8+Her2Onp6eeZl0jIkmEgll7EhPysrgWew5Q8VCg5cp8sxe5VbrNF6mpqYwOzurMvIqlQpmZ2dNyszj8aj7z9T7SCSiYk+cLxwnFgv39PSg2WyqGB3jvXaBo6Q0yEpsutjNZlNVdJNO4MJgLIVKSgpI0hX0DkiDcRF1oqdWGqRA7u3tRTwex8jICEZHR1UAnMFvegPlchmNRkMtvlKphKmpKbRaLZOwSCaTMAxDKTcZOOY9kp7siay/5TAE5Byg0AwGg6rOhUqKxaiksPhZOZ7yu6S3LeNQ8XgcoVAIqVQKsVhMKSnOW7bE8fv9qNfrHZX4Uo+JHBsab7IAl+uI1whAtfih55DP55HJZDA7O2uKzxCsZ+Tn9aQnObf4OzJ93apd0lLNGZmR5/V6VXyV3jdjic1mE7Ozs6o4mXQfjTsAKnbFEgwqPCodt9uNSqWCSqWifpvyiONDepAsEmsOu208O0pKgAKUNxk4NumZ9UKBSMrB7XYrV52LTcakmLkGHPcCQqGQoh8oSPSYzEoDExvC4TA2b96MNWvWYOvWrbj00ksVzSA9zlqthqmpKZRKJTz22GPYv3+/ojFarZaKS1QqFcTjcZWw4vV6TYYCrU69KJOQC0wXPEsBq1gU6bhEIqFqX/r6+hCNRk0CulKpqGxQeoa0ekkxU6ACUB5HKpVSXQlGR0dVJxTW7M3OzioKkX3bWCqhewlW47JYikxXvhSqXE/lchnAsTo4KqtWq4V8Po/p6WkUi0Xs3r0bc3NzyGazmJmZMfWgk7FLemUul0t1a2B7Iekpyd9naQiVlcwclGOzGHNHGluk+5rNJo4ePYpcLodMJqMMNoYGSPc1Gg31fiopOd8MwzAZPDIDlnVYMmuW6zYajapOFqT9gKVLHDkVOErqt5CTUqbrytRW+aDXxGcZ5KaXJLP2JJ0oa2ZWuhclaZxAIKDoG92DcrvdpngeFx3rnkqlkuLCGWOiYJaQ94i0q6TDrCzf5fKe+CyFBseF6cUM0vt8PlMmGb1zOd90QSm/X1KJFD6kU2XshcaB9Mq6DelBcQzkA8C8cWFGHz0EmdUn77s0FDn2skyEvy89KJlRqae36+e9mGPAZ2moSGUulVSpVFKUsGQPdCNMLzXQZZasISRkQpiMxdvFeD7rlRRvLMHsKpkYwfRPus3AcaEkW6oAUDyvrI+RE17P+NIzBO3gXp8seC3RaBSrVq1CKpXCBRdcgM2bN2NgYACRSETVi9XrdczMzGBmZgazs7N4+OGHkclkcPjwYUxOTirLkMFflgLIdjAUuKQ1pqam1PcVCgWTR7UctI1UTMD8ZqGMzfX29qrMNWbd0Rufm5tDpVLB2NiYKZ1YCkrOUTl3GCBnckk0GlW/
w+slFU2vhfS1pEdPVCu1WGPGNUYjxe12q44hrJNjnNHn86nrpuKW1DrXDXCM6WDBqt/vV6UO8Xgco6OjWLNmjaq3YpISPbDZ2VnMzMyopJJSqTRPASwV3ScNLZk2z5hdJpNRCkwW6XIcZS2cTPaQNZucLzSQAKikGRotkn2QpRLhcFh5lzzXbsmls1pJyUGX1h1pP1rzVFy06GilcRExECnro3QPykpJSWvFDhbuqULG3tLpNHp7e7Fq1SqMjo6qGhcAilcvFAqYmprC5OQkdu/ejZmZGUxPT6s+YvQ8GCxmGi0pCi483h96YqVSSWUI6h7IcmUo6XEoUm6kUUj90ppvNpuoVCrIZrMol8tK0Vol4EjvTAogelKyni8ajZra3MikHKnAuyF0pNfA7DrDMFAul1XLMHpIvHZZR6ivHZlAIr1KGgTpdBrpdFoZPcDxzhSNRgPFYlG11OIcIt2vz5vFHivpzdFjZIYeaWxeL4B590y+JpWU9ILknJHzTmeApJLiGpShiG6XyZzVSkqCN0oW8Uk3vN1uo1gsqvfSEpE3WG4hQCtFVonTipSV4sB8BbUSvCnSmuwaMTw8jN7eXiQSCZUSzUWYz+eVt7Bnzx5lwZL7ZnEzaapUKoWBgQGk02llZVMwMwmAvP3k5KQKJOv9x5YjDsWxkAKByikUCiGRSCCZTCpLttVqqU74TABgckC5XFZCVH4v/5ZZjDJTjindjFORIuP3klpl/E4KneVQ4vI3uCYAqNhKPp9HOBxWGY2ywJsUF1kNNlClQcJ1RCXd29uLoaEhJBIJlfXIJIR2u418Pq/in2NjY6rWiNvq6HNnqRMn+CwTY3j/rJSNBM9VMjNce2xKy5ZTnB/8HT3BotVqKaaCJQw0zvlb8pyXE46SgvW+RLRma7WaopJo5TNYKbl/Jg/IrD0G9ymsGSTl91nFG1YCKDyZEcR0876+PtVvjZZuvV7H1NQUstksHnvsMfziF79APp/HoUOHTEKZ38ceievWrcPw8LDq1kGjoFwuY2pqCuPj4zhw4ADGx8cxOzurKFnpSQHLV/8jPSj2X2Mt0ODgoDoXJjPk83nMzc2pJqdMxZfUnu5JyW0ZmCTAVG62twkEAsoQKhaLyhjIZDJKGC+UGLBU4PczG41riAKRSQ6tVssUS6LAZtEtx4MxGyqWVCqFeDyOgYEBbNy4EYlEQm3tQW+gXq9jenoac3Nz2L9/Px577DEUCgU1f/RY4FKOi9XalzQjvSome5A+tpoXMkbJGjym2IfDYTUOBEMXjHnRWGBnDxoMzLiU49INA9pRUr+FfgNkLGChAC89Bjlh5ESiZS/T2RcKjK8UcGFQUdGS5aLSA8LlclnRKkw/lx0QmI4tN32TXSa4eLk3Vy6XUzSRrJNZTu9J/s3xoLDgtch0XsbmuJ8R639kwgTP30oY8W89IUP2eaMw5lizjRQ9El0ILyckxSUpJ5ndVyqVVNmGDPiTupTp0oZxrP4QgKo/ZOYsW1DR4GTSBWusSBNLT02nQJdyjGQcW46N/Fse4/zi/Zf/y7R1evCkfmWyDuWZNOb43TKeKnsHsh+pHltfTjhKSoOkJQAo693v96uiO9l4lhNEWr8yAaLdbqNarSKXy6lgOQWT3FuK4OfsSvlJaisSiSCVSimKpbe3V6X+SrppYmICMzMzmJycxOzsrLLmgeOp1LFYDENDQ4jH41izZg3Wrl2rOsy7XC7lfe7duxc//elPMTs7i7GxMeWBdEvpywXOTfoSiQT6+/uVwGTnh5mZGdRqNRw9ehTZbFYVpsrO3fp3AzAJIyrxnp4eDA4Ooq+vTyVkkEqcmprCrl27kMlk8Otf/1opdZlmLeM8yzHf+BvyPtHA4LgEg0EUCgXVMUGWb5Belq22mBTAVPxUKqVigKw/4+aG09PTKBQKePjhhzE2NoajR49ifHxczVMZi+L5LjWs1j2P6/KEykNu9MjkGRqJfr9fUeRUVCxcZmccGkvc8JE0
NL0mn8+nUtFdLpdKJpFhkOWm/hwlJSCtG+lJyaacstCXx8gV614UwWwi1jXIwsFuZs2cLqyEBukmjgXrTxggZ4Ca3D/HWi925a6r3P+Glhw7D2SzWYyPjyshz6SMbnLmuiBh7QnHg9Y8PT8ZsKdy4nzg9/FapBLhmMtYlOwYz3EolUrIZDKYm5tTRaB6LVm3PCngeHyDwo9JAlxT9Ao4FvRQmUBD74meA+OYqVRKJdrIrhVMTMnlcjh69CiOHDmiujVwPduB1bDypAjp6VBJ8d6zDs/v95v23OJ2OMFgUIUf9FpCfh9lGQBV1CszLTu1iloOOErKAlJBdQqkSosCgApWyiaRFE6y1oPfR+Wne1LLacmdLug9ULnIPX6oTKQw5hbe7XZb9VAjhccU83Q6jfXr16tYAjsn0Ms4ePAgZmdnceDAAdUeR3pQwPKPme45U3nQ+zMMQyUqcPNLWYDM9F79uwgKBo4xYy5MVBkeHlZbwBiGgbm5OZRKJRw5cgQHDx5EoVBQ3Rr0HVi7Gf+UtB/HCDieeUaKlIkUFJZskUVqT3b45l5MpBGZ/VmpVDA9PY1du3Yhn89jbGwMMzMzKBaLpszBbq83XQboVB8pcc4xek2sRaSyZjYjY31ketxutzIaqewMw1DPjUZD7SzOlH4WEXOdyQ4XlGFW57/YcJSUBmm9yloSqbD0ie1yuRTVw0lBy1Cm3EqhKuNcdlkoJ4Kk+uQ2A7TEaLmyE4fMLCsWizAMQwXHaQ339fUhlUqhv78f55xzjhLEiURCjWGxWMS+fftw6NAh7NmzB2NjY4qmofXXjbHT69uYDk7LFTi+k3Aul1PZjKVSSVFvegowwXFm0JydO1atWqXaTo2MjCjvolarqTq0AwcOYN++fahUKmorCrnrrB4PAZZv/KQ3Jc9DJkjQi6pWq8pjJH3ndruRTCaxbt06lW7OuQRAtUqq1+uKXh4bG8MjjzyCYrGIsbEx1blBCly7rD1J9cnMPnqT3PyRW51wOw5m9dGbpEKj0ieazaYpJZ0UIpNQOCasWysWi4qWlan/krnQGajFhqOkOsBqIctUT7kFgCygk50k6C3ICSe7WCx1weBSwCo5hEpXUqHyurknUCwWU4KI1nMikUAikVABX1p/9ELJn5O6krVEdvE69bGQBg7PVSaKSPpOBqb172PSCMcuHo+r8ZLGgVSEmUxGNZXlfTgZSrlbMVA9mM+sNnnOsgZK7mArNx7VY2xMsybFKvfTktSqndaeVbjBivaTW8gDMMkX3bgGjhsEMlGH81GnVQGoOBVLGzweD0qlkvo93bBe6pCFo6QWgLRwZeYLm3pyt1laurSg9cwv2QaHKe16ttVKAS0zWSfGwlTZzogWWzgcxuDgIGKxGPr6+lS6K/shshN2PB5HT0+PGkNSDdPT05iZmcEjjzyiaqzoQXUzniez7WQHBNkln3U+FJY8Z0mX6rUv0nombbxq1Sr09/ejr68PW7duNSWZFItFjI+PI5/P4/HHH8fY2JjqdSc7TFARWnnt3UjWkb8j1wC9Pa4NJtVEIhH09/cjlUqphBFSX6QGpYHUaDSQzWYxMTGB6elp5cXKejo7rrtOoQWeK7cW4XVLhQtAsTVyCyFu/cIM21arpXrzSWOIuzJwxwZu+yK3EJmZmVGUKutHrcZzMcfWUVInAekZUWHJnT4Z6NX78cmqeOkqdzuA/UShx044USkUpdAjXWUYhho3tr8xDAPxeFx1Y6BVDBxPvCBlKHv86ZlYdoCVFUvPUs/ilJ6o3nFEn2NerxeRSER5UPQ6SelQCHGPpVwupzKyrIQHz1Oeix3GsRONLtPO2ZuQadWyeJ5p7dITkbHghdpl2RVW5ymVCmDuRMF1qBsAbF5Mr1IWVEvDSca+AKjGs9wCBDhWIsAYNL9DMkYO3bcMkBYyJ4RsZ8+ApNwFVS/ilW1raNXJXTHlTqB66xq7Qqf5uEhII0xPT6NcLqsO1AQ7BTABgJOdk1rW
cXCyM35z9OhRPP7445ibm8PU1JRKlrCDgrKydhl/rFarinKTtBsFAJNHCCl4ZHkDW0utX78eIyMjiMfj6O/vVzUvpVIJs7OzOHTokKr/YUcF9p7UMx+tHnaAjL0wSUKWN7BomR3yZSyLNLqk/GgcRaNRlMtlxGIxeDweVaMHYEkF62JBV96M0dIIofxhWylpDFNR8/2k+wgyFo1GQ8W6pPwKBALo6elRa65cLis5V6lUMDk5qb5Pn1OLOa6OkloA0rKQwkPfo4YKRxZzSmqF9VWypxtvtvS+uplxdbKQ/LfMYJubm1ONQ2nVyr5qHBMpiGTbF77fMAxVE3XkyBHs2rUL2WxWZa7Z0QPl+VBJSWEoYy281wCUx6hTe9IIYt3ZunXrsHbtWtXJgokp7P03NjZmikfJAmedTraKIdjFq5BGIdv6UFFFIhFTZ33OPSbO6EpKNkplcTgAU+at3RUUYO0pcW4x+5HbCUkvkkqp2WyatvYgDU2ZRdqeiRf8X3bQITtSrVbVmDFblXOMzxKLNb6OkuoA3WuQD8A64CutVj0AysXH7LZarYZoNKqy/mSQk7DjApLnJCv5WXNCas+q6aXMOJKKRh9T+b2yK4DdrH/deiS95HK5TJ0MpKcl5w1wfCHrPdJoybLuh2noAFQ/xFwup7p4y958MmHCSiHZRSkB8z0o0nnxeBzJZFL1n2MMRsZDgONJAVyD7OhBYcxC10QiAa/Xq7abl5m2do1PSegeO41BGsO6suY1UqmRjpddPjgejOlJY5x/yy1fDMNQRcIsH+E94bxdimxbR0lZQFdQcp8VWhrsEExetlqtKsqKE4GKioFM4FjKNWs8Wq0WSqWSKm6VKcl2t/QM41jHACYMUKA2Gg1Eo1FT2iwFDGMK+lYLtGy52MrlstqBdWZmRtX62EmYSEFP2oj3kXNCZk4BMCki3l++TkXFsWA9EL2JdDqtfq9Wq+HQoUM4dOiQ8jbpVVFJ6VssyPO1GsPlHle5xigYuSFkJBLByMiIqgNjkgSVNLMZGW+RbcaoqJh0kUwm1ZiSBgWgvHIK1W4Wg1tBZ1VkyQrpSioH2bFE7jklM2mtEo1YO5XNZhGPx5UMo5eVTCbR19en5mIoFFLjTEVVLpcxPj6uWATZpEAa3E8EjpI6SUhLRgbFm81jW3Tzb712RhdADHiz151hGIqCIE1mV+iCjguE1isAE7Up98siRcpr1IPjMnjLCc+FpitvO4ICkjy/3G5EGjzS6gTMSSiSIna5XKZCVo/n2CaajHOxpVI+n1cBcdkbUt4jnp/+LL39bkJ6UjLNnF6VpKFkkg7jLbrHqqevBwIBRffxu0nLAvaKTcn5cCJZoMdCJaXHdclUfJ0mp0xiH0lShrLjvNwyhuPPchJm/wFQxvlSdaVwlFQHyDgD6Thu0sb2PvSG2EaEFrBsbsmHDIZLZZXP55HP5wEct+7kgrPL4gGOW/y0PqmYXC6X6rDtch3rdShryZieTiVOoSHT+nm99ET1+hY7eVESFJpSgNJo0TP39MUrlZOMUbJzdyqVQk9Pj9o2gVtMFAoFPP7449izZ4+K10kKy8pDAOYro26Np0xK4nzg5o3Dw8MqQYTdE2QH/Ha7rbI9q9UqpqamVCyUwpRrK5VKqZ523EK9VqshmUxiYmJCKX2rzQ67NS7637wumXBFpU2KWTat5g7GVkoKwDwlVSwW1TiwyJ5bywSDQbX7A0tDuLFksViE2+1W2beZTEbRiIs9fouupFqtFm6++WZ84QtfwMTEBIaHh3H11Vfjz//8z02W20033YRPf/rTyGazeOYzn4lPfOIT2LRp02KfzmlB3khaKaQLJLUg658kl8sJQKtOcruRSMTUK4utXRgMpXUnvTA7KSrA3HuNwoa8tz4e9ABI15DuA8w1V1Khy75/MpXabuC9occklSwVl/5+wLqrOYuYSaO43cd2PI7H48oDZRZlLpfD+Pg4xsbGVLdzGRNdyIOS6IbXrl+3
bK/FhsUy1Z5WOmlO1j9NTk6qFlA0imggssEsWya5XC4V/83lcmrLndnZWXg8HpMRZCfDUI6VNPrkzgD6Q1KCVF6d6jHb7bZpKyHKNNKjTLyQCV+MF4ZCIRQKBZWtKlmSxfamFl1JfeQjH8EnPvEJfO5zn8N5552H//f//h/e8IY3IJFI4J3vfCcA4KMf/Sg+/vGP43Of+xzWrVuHD37wg9i2bRt27Nihqp27DakcaIUwKM4F1mw2lRclY1a8sfJmcbHJJpHct4XZR6TKAJiEnF0UlZ4osBAFyte4oJrNprJkmeknA7WyxoPtfKi4u91AdiFYLXwrj0m+TwofvkbFFAgE1JYTTJYgrTU3N4fDhw+rzRIZY5E1QFaeU6cx69a80hVVMBhELBZTD3YfkWUJjUZD1YBNTk4q5cQ0aBn/ZJeOUqmkjEKyG+l0Wnn3zBiloJbCuluwMmAkFUq6jXICgNotnN40QW9ej4VKSMqQcou/TTnFeDOLfGlcBwIB1Go19R69Vm2xsOhK6sc//jFe/vKX44orrgAArF27Fv/0T/+En/3sZwCODcpdd92FP//zP8fLX/5yAMA//uM/YmBgAP/6r/+K173udfO+k1Y1QXpsKSAXrrx57XZbcbaMC8j6H1ovsg6GAUhmxJDW4Q2lIkqlUiqbLZ/Pz+uzRnQzhqCPCwCTcpKb2XFh8ProIXKhyTgVJ7sUDtxmnpSObMxrR8hEF+JkFqr0JpiSHw6HMTAwgN7eXlW0y47phw8fxq9+9SvMzc3hyJEjmJubM3n6cnxOdqy6NabSYGM3Ej6YMBKJRNS6qlaraoPLQ4cOYe/evajVamq9yNT1ZrOpMvqCwSASiQR6enoUddbb26syMKn4OH40lLox33QFJZkGtsaiJyObBzBGSYOOioYyU1KDMk1cUtRMApMxdJltKRs+kxplpjLT/BmT1ePxTxTuE7/l1PCMZzwDDzzwAHbt2gUA+NWvfoX/+7//w0te8hIAwP79+zExMYHLL79cfSaRSOCSSy7Bgw8+aPmdd9xxh6nifmRkZLFPe0HoHoOsDZDutaxul+62nrEHmLeblwpNWj6AfXfrlZ6U/F9PuZYPOX5SQMuEApk+q/ees6uSIqzGY6EH30dhJGkvaS1TCMmu8owhWAXE7TpOekKR9BJkzSEpJwo5ri25aSZjlrIXnWykq7eEkgyG7P/H+BXnYDfHptPfcp3I8eK1yDIFeludHgxRyC45MhNX76bTqT8pP6ff06UYw0X3pD7wgQ8gn89jy5YtKmPt9ttvxx/+4R8CACYmJgAAAwMDps8NDAyo13TccMMNuP7669X/+Xx+SRWV9Bp0fl+nuehZlctllSABQHUWpoUsC95ozVAwxWIxtVeS3L9FZnrZSfBIZauPk/Q82+22aVJLSo/XzoVAxVQul1XqOTeK1LuF2x0nOk+5kGmxDgwMIB6PY/369TjnnHMUbVWr1XD48GHs27cPExMTqos3LWdd6dkVVhQWm8SybyPjUYxbkr1gL8IjR45gamoKU1NTyOVypuQAzrVaraa6L5CBMQxDFavSayd96vP5cOjQIVUArWeeLte4ynUux0h/kA5m8hEpN3Y4CQaDqtN+oVAwGYY0oqX3TbCjfDAYxMDAANLpNHp6etRvkO7jebAOjefMNS+zWBeL+Vl0JfXP//zP+OIXv4j7778f5513Hn75y1/iuuuuw/DwMK666qrT+k5aAcsJXVHxmJWiomDmgiD1RY/IKkWWr8suA7K10mK7zIsBndLSX+OzlbLia3zoXhQAE7VDK1l6C2cKdAqXCRPJZBLJZBLpdNpk3OTzeUxOTqqaMX231JU2NhRkVt3NZfyIa4lClwXMci8xq151OpMBQHkfVH4sAWGWLr0pnl+3YeWhUF7Qg4pEIgiHwyqrltcsvSSppOR2Qaw75INNsuXW84wLSo9Kxk/1mCrlg+0TJ973vvfhAx/4gIotXXDBBTh48CDuuOMOXHXVVRgc
HAQATE5OYmhoSH1ucnISF1544WKfzmlD54YBcz0HG1zqPf1YwEkBIieXDGCSJtS3DtAFt45uC6ROSRNSMUkvkGAard4tXnqklUpFpffTsrVjG6QnCs4p9kuLx+PYsGEDhoaGsHr1akSjUbRaLWQyGVQqFRw9ehSHDx9WmyZK4bvSxkUXuLJ9kRSKXCsUqBSqwPF1KIWkjN1wI00WQcdiMWUAynOQlr5ujHYDOmsiz4vGjO5V0cNhTIjb4ZCZIH1Or5Tzh93QGZ5grEvuU8WYnqxRo9eWz+cxNzenavVkh/nFNioXXUmVy2VTTAWA0ugAsG7dOgwODuKBBx5QSimfz+OnP/0p3va2ty326ZwWpILis5wktP7IcUtrUE8ukAFiqcA4QSiI+X6d77UrrBSUy+Wal3otvS9uDEk6RwZuZcyBcQcuspVE9Z0MOJ8CgQCSySR6e3tx7rnnYt26dUo4FAoFzMzMIJPJ4MCBA9izZ48ao5WsuKWSkp4U5wXjJrznNF4oXDmfmBFKj4vKjY1oY7EYent70dfXh2QyOW9Let0TsKJNuzG+nWK8egyb9WCsN2SYQV8rTDNnJxdmzLIIulQqqVoojj87fdC7peFsGIZKNpmbm8P09DQymQyy2ayiGzt1m38iWHQl9dKXvhS333471qxZg/POOw+/+MUv8LGPfQxvfOMbARybINdddx1uu+02bNq0SaWgDw8P4xWveMVin84Thq6oqKykFyW9KbkIrQKU0lPqFE+wuwBaiPaTVi7HgAFdGayWzVYBmJS2HQorlwqcP8zkS6VSquhU7o3EGOXc3JxpE8OVEoM6FXSitughsd1WOByeV6hKj5I0IZOrmMpOwSsTMSQlSIpMpw27iYVCClZeChWYlXHNOk1m3nF+seDc5/OhWq2qcdZDDzwfGozlclnFRJnEo+/RtdjjuOhK6u6778YHP/hBvP3tb8fU1BSGh4fx1re+FR/60IfUe97//vejVCrhLW95C7LZLJ71rGfh29/+tu1qpAiZWUPviSmZqVRKVbTTk6ISolXIxSJpQr1hqm45dVJedoHVuQNQ3LaskO/v70c4HMbq1auxevVqpbS4sFi8m8lklFAm3XcmCWTOC2ZgjYyM4OKLL0YqlcLq1auRSqVQr9cxMzODyclJPPLII5iYmMChQ4dQKpWUsFqp49GJyuJr8sEYUTKZVF5Au91GJpNRHQ6kkopGo6oYeOPGjYhGo1i1ahV6enqU4CX1Va1W1W7PnG/lcrnr803SfZJtMQxD7RnGruR6mIDxNhlyoAyjTJExKSp5MhaSgk0kEsqDItMxOzuLarWKyclJTExMIJPJYP/+/cqrore2FCn8i66kYrEY7rrrLtx1110d3+NyuXDLLbfglltuWeyfX3ScjCcl0z5lyqie7ilTOHVPaiHltFLAsaFXyUA4g9R8MFAtYwJUVNVq1dZdJk4XMq7JeRONRtHb26v2SKJVW6lUVPbZzMyMbTd6PF3oisoKcg2xxjAej5soPqmkuHkmE08ikYjaTJOeK+cZ09Vl13ir+dbNpCUrL4retN4KiecoKVSrBAbKLSoeeln6HlMyJkjFTiqebdzy+bzyqKigVowndaZBj73oVoJcTKRreJOpvEh1yTgMd51lEJJZW3rKtV2FklTGsv6kt7cX6XQa4XAY8XgcgUAAfX19CIVC6OvrU+39ZfYag+P0pGSnCbte/6mC3oHP51OKaXBwUAnURqOBQqGAo0ePqlTryclJlTxxJilsACbhWyqVkMlk4PF4VGEtxwo4JlzZWb9SqagEG+C4wmNCTjgcVhtDMl7TarVQKBRQr9cxPT2NfD6P8fFxHD16FLlcTs033UvtVkxKxrQpC7LZLIDjMf9wOIxSqaT6GyaTSdVXj015pdFMxoK/IVsmyea0wLEeoiwLYdLK4cOHUSgU1Brl3/TEdCVl68SJMwVWAUy9sJQBTXoP7DTBmIsMBDOjjZOOBZqFQkE9SHFJfteO
0GteeM2hUAg9PT0YGBhALBZDOp1Wu3uyYj4ej6vvkYuFWwZks1nV2WMlJwhIyHHy+/1Ip9MYHBxEf3+/Ei5sfTQ5OYndu3erwHQ2mz1jlLW+pqSSymaz8Pv9SghHo1FTxh7TxmnUsEM3BbBkLPSYJ9datVrFzMwMstksJiYmMDExoag+vW6om+B5SI8nn8+r62BTa/4fiURQKpVUI2J2gKDXSepUjr+uoNjthgaC2+1WnlOpVMK+ffvU3xxLuTWMXKuOJ7XMoIKiJUKqgDxuvV5XMSZmuOlcO4UxABVkZB87Ulwy5Vq/4XZZPJ2gNwmNxWJIJBKmRp9UYuTLKXi5saGkEOzeBulUICli2SWA6dYAlGfdbDaRy+VUWu9KK2I+GVgJSm6aGQ6HlaAkLUwhCxzfe6zdbquxI2RyEtcbBXy5XFbGDzeJzGazqtxBZy3sMN5SUQFQdJ/b7UY+n0etVlMJJaFQCMViEX6/H6VSSW1zIvd1k9mMrdbxfbhk1w7GPancmSRRqVSQyWRM3T6kclrqUIWjpDpAnyQUqKwDYsU894SS+6yQpmAgVC4YFiJmMhnkcjlTAJfbpssCWP18ugnJcUvOmzRDIpHA8PAwRkZGkE6nMTQ0pNKC5Xbp0oKbnZ3F5OQkDh48iAMHDiCTyaBQKKz47D7d22S3+3A4jHQ6rbZENwxDJUuUSiUcPHgQe/bsUcLgTPGiJGQMstVqYXZ2VinqcDiMWCwGAMpjIG0n+z7KQnk5PmQpWq2WovKYzl8qlfDYY49henoak5OTmJycNNUT8dzkczcgf1u2E2MJArfJGB8fV5l4NABJsetKiuuUqfuydopGIWWUNKQ5B2k8yp3EmdRhJa8WE46SOklYBTGZzil7hgHHO5hTGMvPsuZD7pcke/7pVslK8KRkDZnMdKS3wEw+mY0kC3gZm+OYnEmCWS9JYFIJU3wpsBlnYZ0Yg9Er4f6fKqQByEwzXn+xWAQA1cHcMI5vCqp3hLBKPKLyI31KSj2bzaJUKiGXy6mmslxzdlBMVtCVL41eejtcP16vV21XUqvV1BoslUqmkhfOPXqarKGSPTLlMd4XKizZXFtP9tLPdzHhKKmTAAefQqNWq6nWNFNTU6Z+WdzYUPam40JkAJdJApOTk2qPIKbALiW3u9iQsYVqtapS8CX1AhwfN1pi7MdWLpexa9cu7NmzBxMTE5iamlJ890r2ogiZCcpCZray4a6mvF5uwcE4iVUgfyVDKiYmzPAaC4WCCtyzRc/c3BwSiYRKK6e3wLiTLmzJVtTrdRQKBTQaDZVeXiwWVQo155j0IORcs9t4y/OSKeWk7WgAMZZXKBRM2cfA8bgdvSopX6zoOo6pbAxt1XtU9z6XCo6SOgE4OfSMG1q62WzWtPWG3+9XxXG0Yth/jAuSdB/pBn6XnAB2XTRWIH1AT1JSMrT8qNxJY2UyGRSLRYyNjWHfvn0qTiAtuzMBFBj0KJnxycJvehAzMzOm7Umk13mmQXoCHAMqKRaYFgoFRKNRlabPjgjSG5VClnEVGoD1el3NsVKphNnZWdUOiPOLcWLA3uvsZGRBp+J6qwLfhX7jRL/TDbnkKKkFIBUUYHa5KXzZEZiC2uv1KmsGOB6DkY0d2ZZENlC1szVHyPPjIqeQ4R5fExMTCIVCKJfLypPgGDIpgDuqsrP17Oys8h7OBA9Kh7SAOQdKpZJKxbdqBXWmjQEh15S03Pl3sVhUjUyLxSJCoRDy+bypUJWKHzjupcstc9jyh14qk5NkbHi5vIDlgn4dkhJd6H0n812n+vpiw1FSJ4AV7UaF5Ha7Ua1W4Xa7kc1mVYEqaS4ZVNRb2sjuASupkFd6lLLvINNkd+zYgUwmg8HBQRQKBdM2JePj4xgfH0ehUFBdFNj/SwawzyQPQiabkOKiEUP6hZY+d4o9U2NRhKTPgeO0EbP89N56XE8LdSmX46XXM8o5eyaPq8TpKCe7wlFSpwC5CKSHJRulSqEkg5EL
FQqutIWje1SMubndbuRyOdUrja1VaL1yLyAWbzJRQC8GPBMh4yZMnKEAlgk0K7W7+ROB3RkEB92Fo6ROAQsFMclv61beQh7SyfLAdoVUwuT7f/Ob32Dv3r2q+l9SO6wH07cKkBmQZxp4XYxDer1eVCoVBINB9Ro7ITB55kwcBwcOTheOkjpNONafmbahB1CpVAB0pmTONsi4CzOwGo2GyvpkEbjM6OPnHDhw4CgpB0sER8geL1oFYEoZbjab83ZNXcqtDhw4WMlwlJQDB0sAUsKMQ0nP8kSBfwcOHBzHGaOkotEoNm7caJs9qbqNdevWzRuLSCSCjRs3qiK/sx3r169HKBQyHYtEItiwYcO83aUXQqfak9N5n92ysjZs2KAKj4lwOIz169c7sbPfYt26dYhEIqZjoVAI69evV7WDZztGR0fnjdHJwmV0exWcBvL5PBKJBHK5nOqqnclksGfPHlSr1S6fnT1Apc0+aMCxMdq7d6+KG53toNKWndmz2Sz27NnjjNFvEQ6HsXHjRiQSCXUsl8up/oIOjimkjRs3IplMqmP5fB579uxBqVTq3onZCKFQCBs2bEAqlVLHrOS4Fc4YJeXAgQMHDlYOTlaOnzyn4cCBAwcOHCwzHCXlwIEDBw5sizMmcYLxFicmdQzRaBQbNmwwxaSy2awTkxJgkoSkGhhvccboGMLhMDZs2GCKSTHe4sSkjoExKTlGhULBiUkJMCYl43YnizNGSe3YsQO33HILxsbGun0qtsCWLVtw88034/zzz1fHHnvsMdx66604dOhQF8/MPti0aRNuuukmXHjhherYrl27sH37dhw4cKBr52UnrF+/HjfddBMuuugidWzPnj3Yvn079u7d28Uzsw/Wrl2Lm266CRdffLE6tm/fPtx6663YtWtXF8/MPlizZg0++MEP4tJLLz3lz54xSqpYLGL37t3Yv39/t0/FFvD7/fO8gVKphN27d2PPnj1dOit7gX3zJEqlEvbs2YOdO3d26azsBW6/LlEul7F371785je/6dJZ2QuNRmOex1SpVJwxEuAefKcDJyblwIEDBw5sC0dJOXDgwIED28JRUg4cOHDgwLZwlJQDBw4cOLAtHCXlwIEDBw5sizMmu8+BAwcOJBZq6LsCu8GdtXCUlAMHDs4onEy3+U7vcZSX/eAoKQcOzkCczrYgZwL06z7ZbVT098uxORPHaSXBUVIOHNgUpypgO33OSvBKnAlC2GqsFtpo0uVyzTtmtfEkN688E8ZopcJRUksAOfnPpMl9ukJT//zpjMmZNI46dGHKDRd5fKFxP9E9sfII+Mzt6leyED6RItL/PtEOycRKHY+ThRyPhYwaOWe6NSaOknqC0G92JwuNz3a46acCeU1ut9v0DMwXfBI8Jt9PgciHlUK3EqytVmvB31pJ6DRnXC4XfD4f/H4/3G73vIf+Hbrg1cfNMAzU63WljAzDQLvdVmPZbDbV39xldyWMra5o9Dlp9X6OnxxHfT7JeUmsNEV+IoOGD4/HA5fLBa/XC6/Xa/meZrOJRqOBdrutnrsxBo6SOk3ogkYKkoUWgi4w5Gt2g66g+OAEJ+TC1q0wKUDkZ1qtlul3rASs1eNM27JcFxo+nw+BQAButxter1e95vF45n2mk5KSY+VyudBut00P/Ri/w67zsBN0Ba8rcvk+OQfltXaaU3zPE/H+lxOdvG4rg0jOLb/fr/7m+zhW9XodhmGg1Wqh2Wyq15d7LBwldQrgjfR6vfD7/fD7/ejr60MoFEJPTw96enpME4ALoNlsolKpoNVqoVAooFqtolgsIpvNquaUjUbDNkJYKiSfzwe3241wOIxwOAy/349EIqEsfp/PB4/HA5/Pp4SfYRgmYevxeOB2u9FqtZTlzmcunFarpax+WviNRgOVSgWNRgOZTAb/f3vfGhzndZ737P1+B7ALgAQISaRJWY6siJGsODO1R5zasieOLbcZeVhFI2escSrZVpRxFDdV3cRxFdtJ
Y8vNSGl+1O1UbtrMRG7sGTvDWqoUz8iyRFp1LIkXSeANBLAL7P2+i/36g34O3u/gAwmSIHYX/J6ZHQB7w/nOd857fd73tFotNBoN1Ot1k0cwLNAFhsvlgtvthtPpRCAQgMfjQSQSQTQahcfjQSgUgtvtRigUQiAQMH0H5whYVUrSQ2q32+h2u6jVauh2u6ZHrVbDysqKahyrW8jy9ysN8W4W5DikwpHKnfuO7+V8cC2vl4NaWVkxGYx8Djg/z/qeHIQ1p3tF3IfhcBherxehUAiRSAQulwuBQAButxt+vx9+v9+0Jz0eDzweD4Dz82oYBhqNBrrdLnK5HM6ePYtms4lsNotms6n28FbCVlKXAC5yr9eLYDCISCSCPXv2IJVKYc+ePdizZ49aDBS8KysraDQaKBaLaLVaOHfuHEqlEhYXFzE7O4tGo6GEhNws/QQXr8vlgt/vh9vtRiqVQjKZRDgcxo4dO+D3+xGJRBAMBuHz+RAOh+F0OtWmDoVCiMfjJkXXbrfRarWUIpICpN1uo1qtotvtotVqodPpoFqtolAooNFo4NSpUyiVSigUCmvCVIMgNC4GK4vW5XIpSzYej8Pn82FkZARjY2Pw+XxIpVLw+/1IJpOIx+MmL6DRaCilzrnodDpYWVlBs9lEvV5Hu91GsVhEu91Wj1arBbfbrcI3/DzncRC9e6uohQxXORwOJYClkqJSpidq5UFx3nQlRYFt9f5+e51y/TidTni9XgQCAXi9XoyPjyMSiWB0dBSTk5Pw+XxIJpPw+XyIx+OIxWLrelIulwsrKytYWlpSpwEcOXIEpVIJ9XpdeVNbvedsJbVByLBLNBpFOp1GNBrFjh07kEwmMTIygkgkArfbrbwKbhIqLJ/Ph2azCY/Hg263i2KxCK/Xi3K5rDwJaR334xq58X0+HzwejxKemUwGY2NjCIfDmJychN/vVxa+x+NRlj4teHpeetjK7XabhCHntNPpIBAIYGVlBa1WC91uF+FwGH6/H/V6HY1GAx6PBysrK6hWq0ogD1OeisJTeql+vx8+nw+xWAyhUAgjIyMYHR1VyomvRSIRk5Lyer1rlBQVf6PRgM/nQ7vdBnD+nnAOKZgMw1BCTs8NDgr0EBa9IeZRKKCdTqeaR643YNXblM/JUCgfTqfTFIaXBqMepud39AtSUXOPRiIRjI2Nwe/3Y3JyEpFIBKlUCmNjY/B6vcozj0ajypiUnhR/Z7Sj2+3C6/UikUggmUwCAHw+H9xud1+M6EtWUi+88AK+9rWv4fDhw5ifn8czzzyDj370o+p1wzDwxS9+EX/913+NYrGI9773vXjyySexe/du9Z58Po/PfOYz+O53vwun04mPf/zj+MY3voFwOLwpF7XZ4KKg17B3717ccccdiMfjeMc73oF4PA6/368ENRc6PYJAIIBoNIper4fR0VG0223Mzc0hGAyiVCop66VcLitPpB8bgQs1GAwikUggHA5j9+7diMViuP7663H99dcjGAwqS59WmNzUlUpFWesMI3A+ZMhKChmv16veB0ApoFarhXq9jnq9jkwmg+XlZRw9ehTtdlt5oNJjld8xKLDyoLxerzJ2RkZGEAqFcMMNNyAej2N8fFxZwBQuDGUBq/lOri2rEHGz2USj0UCr1cLy8jJarRby+TwKhQJqtRra7TY8Hg+azSY6nY6abwrtfnsLVp6TXDM+n0+FsILBoDKqKEhpHHEv0TgCoK6TpIBut6s8eOlN0rtqNpvKaKJR0K/9Kb1It9uNZDKJaDSK6667Du95z3sQjUYxNTWFWCym5kjm6mRIVM4p852857FYDJ1OR3nc2WwWc3NzqNfriowDbN1eu2QlVavVcPPNN+OTn/wk7r777jWvf/WrX8UTTzyB//pf/ytmZmbw2GOP4QMf+ABef/11+P1+AMDBgwcxPz+PQ4cOodPp4P7778cDDzyAb3/721d+RVcJ3BwU4OPj44jH4xgbG0M0GlUbSy5wCk/phYVC
Ifj9flSrVaW4aAVyI+nJ3a2C7knxWpPJJNLpNCYmJhAIBDA6OqoWvMvlMuVAKDjl+K1yA5wPChfdAuY8BoNB1Ot1pFIpGIaBaDQKn8+HlZUVFcIZRC8AWL82h9fKsHEoFEIsFkMymUQymUQqlVIWssyz8LOcS+kdAFgjjOhteTwelcuj8OH8y3syCHOpe096iI8hUnoRwWBQ5Uf5CIVCal1SSVEQU0l1Oh2lfJj/tCKXyNCyTlbZyv0p/y8NQK6bkZER7Ny5E7FYDDt37kQ8HjdFLfTxy3vOtSAJIvSomB+t1+vK4+rH+rhkJXXXXXfhrrvusnzNMAx8/etfx7/9t/8Wv/EbvwEA+G//7b8hnU7jO9/5Du655x688cYb+MEPfoCXX34Z+/fvBwB885vfxIc+9CH82Z/9GSYmJq7gcjYf3BTBYBC33HILJicnccMNN2BmZgY+n08l9ZeWlpDL5dBoNJDL5ZTQXllZgdfrRTgchsfjwcjICMLhMBqNBkKhkFoMjUYDjUZDhR62GlIQ+P1+xONxpFIpXH/99chkMti5c6cKH1AIECsrKyiXy6jX6zhx4gRyuRxqtRrK5bLJ8uTG8Xg8aj7S6TRSqRRCoRDS6bTaDC6XC4ZhwOfzwTAMpFIpuN1uzM/PIxqNwul0qu/XadaDAKtQlSShMP80PT2NaDSK66+/HqlUSoX2KETWY+FRwPJ7dQEk/zcAJYglSwsYnALf9UJ7Mm9HDyEajSIWi5lCWVIxc33oOV6Zf2LertfrodVqqTCXzOt1u12Uy2XUajX1Hj0MuFXgfY1EItixYwfC4TD279+P66+/HmNjY9izZw/8fj/C4TB8Pp/JSGZ4XObofD6f8uaDwaAiX/AeUG5xPwLo2x7b1JzU7OwsFhYWcODAAfVcLBbD7bffjhdffBH33HMPXnzxRcTjcaWgAODAgQNwOp146aWX8LGPfWzN97ZaLbRaLfV3uVzezGFfEAxJhcNh7Nu3DzfeeCMymQx27NiBXq+HYrGIZrOJkydP4vjx4ygWi3jrrbeUawwAwWAQqVQKwWAQ+/btQyaTUeGvlZUVhEIhhEIhlEolU46gXxvB5/MhEokgkUhgamoKO3bswOjoKFKplBIaMpzCjVAqlfDWW2/h1KlTWFpawtmzZ1WehIw/sv4SiQT8fj+uv/567NixA6lUCuFwGIFAAIFAQG0MzkE8HlcEg1AohF6vB6/XqwyBflJk14O0vOn50AgIBAJIJBKYnJxEPB7H1NQUUqmU8gaYY+K18fpkKLnb7a7JKfB33eKl98Cw6Hpeno6tmEurkKi09Bla5r1PJpMYHR1Vz0mvnuxT6UnJvB2Nx0ajgUqlYjKiSCzpdrvK65RjJBtyqw1JaUCGQiFMTU1hZGQEv/qrv4pbbrlFpRMk2YPhYBKQGAJuNpuKMUoP1O/3m8glvEZpMAJr6zy3CpuqpBYWFgAA6XTa9Hw6nVavLSwsYGxszDyIX8RX+R4djz/+OP7oj/5oM4d6UfBmBQIBjIyMIJlMIpFIqE3RarXQbDYV62x2dhZnzpxR1HKpVGnxNptN5PN5Fa4IBAKmMIZeT7SVAlcyhvx+P2KxmErmk3oux0irtN1uY2lpCUePHkWxWMTs7KxiMBaLRZP1xYS3x+NBu91W1hzDeQ6HQ5EHSJ9lyIKWns/nUx4oPdlOp6M2KEM1/U5u63/LXILP54Pf70cwGEQ4HFY5FGnp09onoYbKnsKRVrIkQTAEWq1WUS6XFWOy2WyiVqsp8gTzVXouyspDuNrhHV1BUVByT9A7IJEkEokgHo8jHo+bIhR8vyRVSE8UgFobOjuO4L0hgUmGw/i+foXgGX4fGRnBzMyMMhrJ6uOaZ8lGsVjE0tISGo0GFhcXlYJqNpvwer2IRCIqzM5ohc/nM93vbrercnI6UWkrMRTsvi984Qt45JFH1N/lchk7d+68qv+TiyMej+PGG2/EyMgIbrjhBkxNTaHVaqlF8Pzzz+Ps
2bNYWFjA/Py8SvjLDeF2u5HP5+H3++H1elGr1TA2Nobp6WllBUqLuF+eFMdBDyqdTpvCcTKs1Ov1VEjv6NGjeOaZZ7C0tIQzZ86gWCyq8BKwurGlgKAgOXHihKozO3bsGKLRKG666SblYdCgIbU/FoupsOPS0pIS3NJqHgQavxS+9KLoEUSjUUSjUaRSKaTTaVUPxbVDxVQoFEz0cV4nALVeKLz4O8Og+XxefUer1UI2m0Uul1OGUrvdViQKWb+2leEcq7wTQ6LM0UajUYyOjiIYDOK6665DPB5HNBpVeRdZi6d73/z+TqejQlgsUGWEBIDK+3EvMgrSbDZRqVRM5AJ9/Fd7rjgviUQCo6Oj2Lt3L+68806VI47H4+q9nU4H+Xwe1WoVx48fxz/90z+hUqng5MmTKuRH45DlJGTiRqNRhEIhkzJuNpsoFAooFAqo1+sqBLrV2FQllclkAACLi4sYHx9Xzy8uLuLd7363ek82mzV9rtvtIp/Pq8/rYHJ9KyFpnvQqqGRYVFqr1VQ+qlAooFKpmAoBpUDnzac1Q+/KypPqF6SFybATLVNJUpCWfq1WQ6lUUjm5UqmEarVqGRqQ18ZrZfzf4XBgeXkZnU4HlUoFjUYDwWBQCRRayawJYdjCqpvFxfItWwk9hCXzcny4XC4VjiMJhQxGhqB0JcX6NQpXGkX02FknVa/XldfP79UZfVutnDgv+hzJInLOESMOLGkIBoOKaETvWn5OZztKEpKknvNv/d7IeyK/tx+kEjk2Gd1IJBIqXE4PmgqoXC6jWCxieXkZi4uLqFQqyGazqmFAp9NRBLZOp4Nms7nGsOPcSE9KZ39uJTZVSc3MzCCTyeCHP/yhUkrlchkvvfQSfud3fgcAcMcdd6BYLOLw4cO49dZbAQDPPvsser0ebr/99s0czmWDG9/j8SCVSuEd73iH8iY6nQ5yuRxOnDiBbDaL06dPY35+HvV63WSNAjBtgm63q6zlXq+n4sK0FlutlqLTcoFwLFu1KJj/sCro5OJksW2r1cKxY8fw1ltv4dixY5ifn0epVFKLXv+81fc5HA7lbdVqNSwtLaHVaqFUKqFWqyl2Gx+9Xg+ZTAalUgmBQABvv/02arWaUlZy3vtNn+bvUgBS8VPoejweZeVzHqQiITWaoRr5nWSFkiDAPIRhGMjlcpifn0ez2cTS0hKazSbK5TIqlYryoCRJYKsVlVWIjwqCyjcSiSgm6fT0NAKBAJLJJEKhkCksJSnjkknLPUfviV0UGA6j4nY4HOp/ckz8nPw+uQb1CMHVnCeumd27d2P//v3YuXMn0um06vrCLjbZbBbFYhEvvPACTp48iYWFBZw5cwatVgvlclmxbhlSJnmHOTx64wBUDq9UKuHUqVPI5XImmv5W45KVVLVaxZtvvqn+np2dxauvvopkMompqSk8/PDD+JM/+RPs3r1bUdAnJiZULdW+ffvwwQ9+EJ/61Kfw1FNPodPp4KGHHsI999wzUMw+bppIJIKJiQkkEgl4vV61KObm5rC0tITl5WUV3tLbhUgLTg+p0EtzOByKLCCtwq2GtDKtrCq+LuuX5ufnceLECZw+fRrFYlEt5I1sXklPB6BCKwCUUJaFmNxApNzKnBbnbBDCfITuHUhBJ5vIUujRyGm32+ra2UqrVqsp5qesL5NGD8MzXJ/s1MG8BOeUAloaJP1QUHKO5NzwuiQ1f2RkxMRck73mCHqgnBsqfnoYVFKMYpBIID0wjkHOqcxd6eSCrZgrzkkmk8E73vEOjI2NKaUCQOWhGMn42c9+htdffx3lchmFQsGyu4vT6VSGEOeasogyq9frqe/N5/Mmb2qrcclK6pVXXsH73/9+9TdzRffddx++9a1v4fd///dRq9XwwAMPoFgs4td+7dfwgx/8QLmYAPD000/joYcewp133qmKeZ944olNuJzNAS2YQCCgYt60sCigS6XSGgvlQt8naxvIUlovIdsvL4CLk9ZXIBBQsWgK
XAo5hqEo+C51zHrOhp4G2y2xmJg0WIJKUlL8+0kNvhisFJVUUu12W3XckCE/GV6ROTz+5Hv5/YZhqBBfPp9XrFPSp2XYRrIy+xHu0xW4rHOih0kqfjQaVWF2AGp9Ss8SMLdAkt45vX56j1RSUqkxytFqteD3+00hVoZOuc43aoRtBth3j8o6lUqZWHxUNIuLi3jttdeQy+WwuLiIcrmsDByr++v1epFOpzEyMqJo/VT8VE71et2UymAurx+4ZCX1vve976IC+Y//+I/xx3/8x+u+J5lMDnThLr0bKhPeQLJcCoUCFhYWFIvvQsJRJoTZjy2TySh2Ej2JCzGstgoUXLVaDdlsFk6nE6VSCeFw2FSpT8u+Wq2iVquh2Wwqq5PXvJFr0AUV2W7sO5ZIJBQdXf5//l8KElrMg1grZaWcGO5j/oi5OdZ8MXRFZULvQNZEUZhIz6tQKKDZbCKXy+HcuXOmkKFU6rJThR6avVCI9krngj918gzzTqzP8/l8SKfTSCQSiMViiEajqm6ORlKj0VARCnnPJVuNJIJWq4VqtWqqf+J4+H5ZWC7fV6lU1D7fSm+C88PrHx8fx86dO1XExTAM1Go1RZI4dOgQlpeXMTs7qyI7VrLEMAwEg0HccMMNGB8fRzqdRjAYNIWRi8UiisUizp07p0gX7PDSDwwFu68fsNpUupuvv2b1ui6kuCEpdCQRoV9JbMAc1mNndiqDWq2mPB3mMmQBLWDunL4RduKFyASStCEp7/QeSD6RgnfQlJMUgpIeLa9Vtqjh2KmQgdW8nZxThpskFV2GYSVlWBZw6t6mvHf9IE1IA0WeKsAHFYYkMVBpyxZFspsC55vXJOdDtjaikpKECPk5emuydZJkj26lx0mZoZNsHA6H8pRZcsCc43oKioqPxkAymTQ15aXSr1aryOfz6vsuJ1KymbCV1DqQC1qGo1hQx5ZGwWBQCQRZ/AesNk9leyHSpzOZjAplUeCS0SZrYrZ6YVDw5fN5vPnmmyiVStixYwfK5TJ27NiByclJRWtmaIkCxu/3q7i3XtNjdR3cGPxsOBxWndZjsZiqjaHwoNeUy+Xw9ttvY2lpCUtLS8rKHQRFpedbJPU8EAiomii2muJzMgepd4rv9XpKSMv2PRSePM6k3W5jeXlZsS3ZtZoegcyJbmVjXmnsAWalLWnmDDuxkwRroeg5AFAhN3bHl4JY1tQBUJ5ltVpVLEc5p9KwZJEuQ8ss5mUTaBb2yhq2qzlvXDts8spGADROqEDPnDmDubk5vPXWWzh37pwK8+kRBV4n19vk5CRuu+02TE1NYWJiQim+lZXzx7e8+uqreO211/Daa6+pa+9nvtdWUhaQJAHpKegeES0csn5kk1OdtURyRCgUUgJYWogyzNOvkJ9MmBYKBbhcLiwtLalWNCQssCiUxAad/QSsegG6Z6h7qHoYjHlAeYQA55YEg2KxiFKpZCJY9FtBrQc91MdCXoa35Cm80qKncpIJfQpPyTyj0JL3hZ6U7kXpTXj75UHpHjSVOPcUH3J+5J4kEULPs/BvXjMNQIYIpYLRSQIAVAcPRhKo5K1qybYCTqfTxAQFoK6v0+mgXC5jeXlZKWMq1vXGRy8qGo1iYmJCtVeSTEnmuGZnZ5HL5UyRin7BVlLrgEJRPmjVjYyMYM+ePSo5TQu2Wq2a4vy0zBKJBHbt2oVYLIZMJqOapTJEw8LgWq3W17yKtEJJnpidnVXdC+j2y+Q+GZD0skjxlSwyq+adVNBMkI+OjiKdTqsiQxna6Ha7KJVKqFQqWFxcxNzcnFJSenK435ACWIayAoGA8hDj8bhiizLcIj0C5hyoVKjAeA8oVJif4nNUahTmem3LVpNzrELmjEjI/o1UUDz+RYbEmQtmGIpKmGxPvSWUDHNyjqjo5VzI3BzngmtNhvlkOHWrwb3GfVWtVk3svKWlJczPz6sc1HoRC35mfHwcN9xwA2644QZV90nqfavVQqFQQC6Xw9zcHM6c
OYN8Pj8QjFlbSa0DWSBHy5Qu+NjYGDwej1JS2WwWy8vL6qYy7EePIJPJYM+ePYjH46pRK9snUcmx8K6fxAn+X6lUjh07hmAwqPJTbEVDtqM8qZeWPq36SqVi6oPGzc95dLlcqt0U+yEyLMpkNjdqoVBAPp/H3NycOiyStOJBUE6AtadA7ykYDKq2PiMjIxgZGVEeFq1jXZnoHg+vmbkKACoMyP8HmIWtVRHmVntPcm6otNkfkgdmsk0Un6OnyRxkp9NR9XO8brZNkkXw9LS4Bzm/LHjmg0xdmc/iPMu6K67bfhhCJNKQ+MHenlTcrIVaWlqyHKNcix6PBzt37sRtt92GnTt3mkhJXFuLi4tYWFjAqVOn8PbbbysPst/7y1ZSFqClxTBcvV5X8XJavpIuLUkEsi2SPE5A9sBjGEt6aXJB9HNRyPAcQyaGYaBUKiGXy8Hv9yuqr2z/xGsFoH5Ka5QNP/U6HzL6aEGzVoxjoUBh7Q9zDP2Ok+uwykcx3Cv7DkoygCxBYPhOCkyr3KTeBYFzKjuCbBT9Ikvo4T69HEGGPK0ULj0yvkcqX70kgWvkQnOjGwODwLTleCSbsVqtqnUjjWe9w40+nySj8DBENuCV4fh6vY5z585hYWHBdKjoIMBWUuuAoYB8Po+TJ0+quiFaZjxQ7LrrrsPY2JiJXVMsFk1tj9LpNMbHx1XPNr/fr9ooMYzFhdFvqwVY3fi9Xk/lpjqd84efBQIBpNNppaRpFScSCSU0V1bOH93B+H4gEDCRSqSSymQyGB0dVc17WT9GIcHi1OPHj2N2dhYnT55ULV76LUQIKwVFNpZsJJtIJFQnBSoqPbRHIcN8iFReUsnL830Y2tLPI7MSzP3ypqyKmmXukeD7ZGcT3m8KZXrh9ACkQqHwlkpKsiRlo1TmuiStXHpS64VKtwI0Ungo6tzcnDpnLBAIqNcYudDZinIdJhIJhEIhXH/99filX/olxGIxJb/ogZ06dQrf//73lSd1sdKarYStpCwghQZzM263W8XDpTUoT5uld8EwgWTVBINBdZooNwc3oQwpDAq4WbkBeGowvUHJ5iN5hEKIyXzmFKiwpbCicAqHw4r1xlyFtPAAmJLETGgPQhhCh7Rg9S4F9KQoYKUwkYpD96Tk3HF96ALfqov+RjyHrYQV5ZvXYDXOC+WHpELRn6cS0un2+jxzTDqxR/7dTwIT7zeVLsPnJIgwrGnFVpTzK6MV0WhUKSw9NFytVjE3N4eFhQVTbnwQYCupdSDp2K+99hrC4TAqlQpGRkYUU6vX6ymKL0N27A/W662euBsIBBCJRFSi2DAMlMtl1WOrVqv1reXIhSA3OCvOqXw9Ho/yLgOBACqVikn4kgothYVkKkUiEZWvI1mCCXSHY7VjNbt2Z7NZVU0/iN0lJDGASonKnG1n5EnGzIcAUAXUsvaHdGCGSmWHb12os36N5ANSsfs9P3p+Ts4Lw7qSYScp82zZw44PFKSdTgcul0sJUr0u6EJEHQDKmAJg6o/JNcdxWLH5tnI+eS1kMS4vL2N+fl6lDxit4L5kiJ0eJunm8Xgc7373uzE6Ooobb7xRUfypBIvFIsrlMubn57GwsKA65Q/C+iFsJbUOuOhZxe/z+VAoFJSrzLb2ei6AghyAYiixDigcDqvQRrVaxenTp9UptoPoGQDmQ/ZkyMXlcqFSqSjBWK1WTUeOULgAMIWoIpEI/H4/UqkU/H6/CvWRls0NJBmGZD9ms9m+k0t0WFmvsps250f2GWSuj3lPkgLYXFavk5I5LAp3mVPQ6e0yfCY9g34QJqzCfOsVatPAc7lcaDQaAKAa4urdIqSnYUUMkWE8hhbpWQCrOUAJXUHpXttWgnMiu4ksLi4qGrlhGIqYxGuU3iKP9kin03jPe96DHTt2YHp6Wp36zHVYLpexsLCAxcVFRQCTBtQgwFZSFwEXLgAlIOv1Our1uqmXms4wYiiQi1y3hlmLQWt5GMCNQ4XKxWwY5qPh
mcNiqFAqcMDcy1B2W5BCi6Ec2Y5mkDbOhSC9B3lN7BXH3AgLcZlfoJKiJct1ITtw6Ed08D1SCWxlE9QLwcqTkl3t9d6VvN9cE5wzlhrIZL5UbOtFIKRnyznjvEjjUpIl9FxWPz0KOS7m5mQOj/kpYDXyw3DyyMgIMpkMRkZGMDo6qgqjdcOgWq1ieXlZhfMHLZoD2ErqgpAut9PpxNzcnGnRSyUl29xIgR0MBtHr9ZSgoQVYrVaxsLCgDqUbdAEsQ1M8dZhC1+VyoVgsmhL6Ml9Ca1/mVOg5OZ1OJQz4GWD1VFAW7koGpZ7L6Qdk/snKm9LPJWL3AoZJnU4narUaisUiOp2OIt3ozVMdDodihvJMIXqiwWBQMVBXVlbg9/vh9/tNh1Na5Vy2am64P0gVl0XtnB92HWfeqd1uA4C6Jho78hrogcpro+KT94Lf73a7FVFA5jm59qicZNEvjaJ+KioqbsM4z65lfkk2q2boj6SasbExhMNhjI+PY9euXQiHw+onc+L83na7jbm5Obz++us4ffq0yRjs9/6SsJXURaALZ91KlkqKwoEbQ2cSSWotcy7cFMMAmdAl9Nh/r9czhaWkRc/50i1+PXzH+WEoTGe+DRto7MhTd51OJ6rVKiqVilJS8pwjwBwq43rT8yxybenz049cig49DCofcn3IEB29dNmtnN8FrHY8l9fFuZFzoXv3UmnrHr6V96SHD/sB/m/Wi9HYo+EnozNsNBCJRDA2NoaxsTFFmqCCknPQ653vLiO7pg9SLoqwldQGwJsmWUL8nbFt5mmkJU1qebvdVvkJWmzNZlO1MhmWcB9gfXCh3PjAaiiFLW1CoZA6wG7Hjh2qwSWTvQzzVKtVeDwe1ROQuQgACAaDiMVi6kReGfbZ6k21nhclBR/zktVqFQAUO0sqGvZs1HvsMe8iz9ECoDyCUChkOr6CZBaywGT9jMyp9GOeqJD0BrL0clwuc4d7matjbogKSYbTOUdS+QBQvTXJHGWxL0OILAwGYPKgJINQhlr7mf+USluefUVDkDVPrNf0+XwYHR1VTL5kMqm6ncj6Q7Ig2TH/1KlTyGazJmNwkLwpW0ldAuRG54bRlRU3DpPAzDnQopPHJchjwYcJ0sKUYSU5JwwrOBwO1XGBB9h5vV5Fg5XHbcizfni+D+eG38GYO/9PvzaRlWKSc8E8ZrPZXOMxUIHIxqd6MbfsykEvnU2JGTqjspZsNamg+mUZy3UhSRMyxCep8/wM94xOK5csPADq2mRYUeY1SfVnkTi/X4YVgfWLhXWyST8FNQ1iet+BQECtj2g0itHRUUSjUaTTafh8PoyMjCiWLMPpJI3I76SHWiqVkM1mUS6XB1YO2Upqk6DXKsiQjAxtyK7Vg1rvc6mQCorXzyQ/lRMtPbK0mNei5e/z+ZQnRWUkBa08hFLmrXRSxtWGDKdJQSx/yvdIujkFoGy5ozP5+L2cU+bvyA5lKYM8jJOeBRmVejPVfipyyXak4JTMWMJKKUijTxpGVEpSkbONEudKrhc9HM0wslUj3kFQTISeh+OYy+Wy8gp5XTQAdBIXv0fCqv7Kqnv6oMBWUleI9cJf3KAytOFwnD8Dplwuqyr6Qeo9dznQwwK8bnZOZxKXDVXlwZHtdlvNhc/nUwdBJhIJVVsUDAZhGAZCoRASiQQKhYI6PVVaxHI8Vwu6gqIwkAxPKSSYW5EkAElz5u9SMMoiTP4PHgRJplYoFEIoFILH4zF1+eYBfVxb/c4xyLxQMBhEMBhUBaW696j3GNTJKDqJgfMu6+4Y3kskEqZzmGSLMxpJjUbD1MmCRoJOOe/33pQ5KcM4Tzufn59HpVLB5OSk2gNU0JLar38PwRo8nhvF2qh+9Si8GGwltQlY74bSgpSUYG6WQV0QVwLpTVmFeQAoinWlUlFWYa1WU7kDyYJ0OByKYqtTrCWRYCti51YelM7o05WUnqTmPZcC0WoN8Dul90Ehz1AWv5uEDB4pIZUT0H8h
axX2YyiYORA5b7p3xZ9WORLZWolsUdlqSa4Rmd9iDkqvh9LvQ7/nTkKuIXaBp6IJhUKq6FfuHdm9RYKUdrlmBtlYtpXUFWK9BCMt4FQqhUgkokINPLCN1fODvDguBXq4T9LyKSxYmFuv1zE3N6dyds1mU+UQeBAkDwYMhUKKuURBxFg7WV9WjVivFmRoT3o9kjQjSxT0uhurehx+LwDT2Vrj4+MIhUK47rrrMDMzg0gkorwE5vAKhQJOnTqlCp7lESb9MoCkQpdMNFr7NDwYiuN8ssuEVORW10AjhUQBhviY59Rba/FAyEajgWw2q7qp6wchDopil+BYOD52YKlWq3jttdeQzWaRyWRQKBQQCoUwOTmpvFWe1qAX+tZqNZw+fRpLS0tqXgbZaLaV1CZAv6ncpF6vV1m/XCQsTpRNMAdtUVwJdO9C5uNarRZKpRKq1SqWlpaUgiL7kZ09otGomju9hoqeGenGOnFhKzwq/SE9O6msqKT0fIsVzRkwH6zJoyzI0mJCnApaFgMXi0UV5mPoahCEjRyD7kkxXEfBS2KAJDjoilYqcio9FqkyBCrB8CrbTrGTOH+XHu0gKigJKnOG/drtNhYXF1V+m4epMu/r9XoV61NffyRM8ITtQQgNXwi2krpMyByMhIzDh0IhxONxJWyZg1leXka1Wh3Ifn2XC2nxMQRFevXZs2eRz+fVmVBM8EvyCBvTMmfAMAbZcT6fDwCQTCaRyWTU5qLC5/++mhvNyluUhcv0ghi2pCBmYl4WiZKVR0+cbZOi0Sji8TgikQiuu+46xGIxTExMKA+KJAKeL5TL5bCwsIByubxG4AwCJIOOSkMqcQCq1ZjsewmYlZxhrJ65xRZQPp9P0ayl98T3kRBQLBaRz+dVHop5POnJ8n8MMuQeYy9D2XEkGo0iFArB4XAgHA6rgmWuNa6/RqOBpaUl5HI51Ov1gVVOhK2krhC6sqKS4vktrFtg/Js9uPTmq8MMPSlrGIY6gr5araoixHq9rrqY86eMtTNnUKvV4HA4EAwG0Wg0TJ0K+BkKZXpTMoSmj+lKsR6jT/bTYw0TD5KTp+bK88J0OjWwWtvj8XiQTqeVUrrpppsQi8VUPQznl8bOwsICzp07h1OnTqFWq6mC4KsxB5cCGQKXheusgWKIVNbRsSsHW0LJRrDAecFMJUzWqNvtRiQSgdvtNuVW+JNHq5dKJSwsLKgcKKMY/ay1u1xQCTscDnXMT6lUwtLSEmKxGJLJJFwuFyKRiGo+wE76NJbY7ebcuXPqNPFBngNbSW0iKLxkI03Za4veFC25QV0UVwIudoZsGM6R7CpdMeukAz20x3l0Op1oNBqIRCIwDEOdUyVDRRei3l5NyHvPfImkX8tcC+ubZLiQIc5kMolkMolYLKba3jA8RiFOL5VCStbbDZKwkeFNWTBLRS17EMq8HnNY9BplHo9eqiwENgxDEQFkrVWtVlOhPcmyHIbw3sUg51bvziINZ1miId8re2IOOmwltUmQ9UGkw8bjcUWnNgxDnYuUy+UGrpv3lYLXQSFAi9jlOn+KqGQ4ymQ5PSGpjJj4HRkZQTqdNlnOPDiSBIxisaio/cBqe5zNnFep/PT4Pl/jNbDjeSwWU4xGACo0Sa9iZWXFRJNmzokHZAYCAYyMjMDn86k8Qq1Ww+LiIqrVKl5//XXMzs6iUCgoOr88CHIQ1pW0+unlsU2PDIuyUJlKXv6UbFiPx6Ma0DKMVSgUVINeNkmlx0UiCZX6ermuQZiry4G+DgHrs8ZoNNJArtVqKJfLpqM+Bhm2ktpkcOMxZi6LTylstqsnJTc9LTapkGSbH50hJ/M6Ou2a7C127KAwDoVCaLfbSuBZFdNejevTIUOAvAbZiobCloQPKimSQ7xer+qzNjY2htHRUfUalTw9xmq1qliS+XxeeVKDmN+UXhDzI7TgWYTM9UHPmaUIXCvy1FnZpJiCmaSLSqWCcrmMTqejlFS5XFYsWnlW13ZQUMDa
s8WsSiIkcYJ7UqfhDzpsJbUJkDmKYDCIiYkJJJNJjI2NIR6Pq9g6PQyysIZ5g2wEFEDAWuo2lRXrWkKhEEZHRxEIBLBnzx6k02mMjY1henradPKvzEFJ9pxel3Q1IL9fD+3Si2Izz0Qiobwk2caJPw3DgM/nU33meAgkC6ABqNoXHkxXKpXw9ttvq5yCbAw6aOwsnc1IphkLSFl4Si+bIT7+zXljVwRJI5fd0UnEIWuP4T4y+pgHs+omMShzdbng3qInGolEEIvFVKhYp5/L3O8wXbutpDYJFFhUUuxCHIvFlKt9LSkpq3CY7jmxloytk6anpxEOh3HjjTdicnIS8XgcY2NjptwDlRTzUPJ5+b+vJqhsdGaf3+9XVGge5Mg+eyTTSEXL98u6KoY82+22OsaFB9Ll83mcOHEC1WpVnVIsG8kOovCVSoqsREkyYVK/1+spT5lz5Xa7TYXfy8vLau80m000m01ks1mVp6NHyXZBFMgcxyDOz5WCYWYqqWg0ilgspjpxyN6IsnHvMM2BraSuEBTALMCMRqNKQTFERcjk5TAtksuFvF4qKeC8Qmc4jMy1eDyuileTySQikYh6j7QEmeegJa33QNzKedUFHwWBXnsja59kvkUSQjg3vK5ms6k8h1wuh1wup4gS9CyslNMgrSvdm6KyajabyqtiSLjT6agiZam06SW1220Ui0VFguBP2fVdhvTWq38apPnZLMiyF9mAWNbyAauKymp+Bhm2kroCcHE4nU7EYjGMjo5i165d2L9/P8bHx7Fz505Fo2b9h7RmtjNkaIvWvsw9jI2NIRQK4YYbbsDU1BTi8Timp6cRCASQSqUQDoeVh8KNJCm07FPHIlbZBw64OsJIDyvqG52sKXZwZ+NT5lqi0ahSVhTCtHSZZysWi4q6//bbb6NSqWBhYQG5XA6NRgP5fN7UpFiv9RkkcI6A1a4gPJqGHhEJM/xJAgXnlJEHsvV47WTwyXCeXiQt18AwCONLhUwz+Hw+dSgmyVrSawdW2cXSwJNtxbaiGP5yYCupKwQXCvMLMi4sz7LR2+IM4mLYbPAa9VwMQxTsqpBIJJQ3xRCY7PItIRW9TrvdamtZ9xJkiEkSRwi9vkoKD36u0WgoEkChUFDKmCw15lusWvkM8pri+Mj2M4zztXRM4DPEx9CtpJbX63WlkOiJsaWRLMq1uv+DPCebBSvykVxfhFVLrmGYH1tJXSakcvJ6vUin09izZw8mJydVTJjFnBQu9XpdPfQjsbcLrK6HAkcWWlJZkTRBj0OvjaGCZ7sf5iPkOVS6FX21r49hEwpIenL0Enu9niIHGIahClDZm47hGHn+U6lUUjmW+fl5NBoNLCwsqK75DI0x3zIsxagyYS+FJO+bZPK5XC5Uq1XF/OPc8ZrlESey5dG14DVZQdbZsfSBBgDnSYaSmd/jYaxkPg56KNRWUlcA0mLZnmXHjh1Ip9PKE+Bmk8VzfGxHCroVZC7JKnFLujbbAsn+bToriSEKCikqvK1QUPK7eV+BVcYmFZHf7wcAVCoVpbgAqDOeqKS8Xq86urvdbiObzaJer2N+fh5zc3OqvxpDM/KYj/VCWoMKPVdH5cR7rDfnBdYaNvxd5jn53cMwB1cLnD/2M9T3DOdUeqWysfOg9Hm8EGwldQWQi4FNU51OJ3K5HDqdjjqIrVgsYn5+XnUvHpb6hCsBF72kbXPTMLfSbrcxOzurWiAtLCwohR8KhUxhvWKxiGazicXFRczNzWFpaUk1yJS5ma3ypoDVMKb0qFwul7L8vV4v6vW6yq+x7ok5GJICeL4WO1yz8JXeo35SLccwyIIFsF4DVq/RY5aJfj2EKo2dfrd9GiQwz1SpVOB2uzE3Nwe3241EIqHyojSUcrkcstkslpeXFeFkGOSQraSuABS8AJDNZuFyuVAoFFR+JRKJIBgMYmlpCSdOnFB9+1hsuN03mW7tc75kB4Hl5WX8/Oc/V0lfv9+P3bt3Y2xsDPV6XeVi
FhYWUK1WVUNVCnR6KNKyvtrXJK9N/m8KCzL3nE6nKQzDPAFJE7KfHa3a9QpPBz0ksx44dr09DwBVO2VVfG2Va7sWQ3oXAkkPtVoN8/PzKJfLCAQCOH36NCYnJzEzM6NyVA6HA6dOncLp06exuLiojCG748Q1AFp7tIp9Ph9yuZw6kiMQCCCfz68562dQGVlXA7rnwdwK29msrKzA5/OpY+QjkQh6vZ5JSS0uLqJWq6mmofQ0+t2kVwpRmWtjKIs1cvrRHjJfSWtY5pqGXTnpWG/8UoFZvc9WTBcGDT928WCbMHYsIS0dgIo+sMmuJPYM8tzaSuoyQYuQ+YJcLodyuYy5uTmcPXtWxYhJR2bh5fLysirkHeSFcTUgQzaslWH9i+w1dvLkSfj9fhN7j0qNOT09P9Ov65GQbX7IYmPRMbD2qA/phW1HxWQFq2u6kAKzsT44P8xf8sQBthUjnZ/rj6cQtFotZegNcgkDYSupKwATwgAUWwYAzp07ZxJI213wXApkmAyAOl1XYn5+fquHtSm4Fg2Py4E9R5sLyfSs1WoA1h4vAwyvV2orqasEqwUxTAvDhg0bwwudtKI/P0xwXvwtZrzwwgv49V//dUxMTMDhcOA73/mOeq3T6eDRRx/Fu971LoRCIUxMTOC3fuu3cO7cOdN35PN5HDx4UJ1C+tu//dvKC9kOkGGbQTo6wYYNG9cWhq1cwQqX7EnVajXcfPPN+OQnP4m7777b9Fq9XseRI0fw2GOP4eabb0ahUMDnPvc5fOQjH8Err7yi3nfw4EHMz8/j0KFD6HQ6uP/++/HAAw/g29/+9mVfSCgUwszMjKlX3rWM6elpVbNDBINBzMzMmJqxXsvYtWvXunM0rBt6szEzM4NAIGB6LhAIYNeuXeh0On0a1WBhZmYGwWDQ9Jzf78euXbvUOWfXOqanp9fM0UbhMK5gNzocDjzzzDP46Ec/uu57Xn75Zdx22204deoUpqam8MYbb+DGG2/Eyy+/jP379wMAfvCDH+BDH/oQzp49i4mJiYv+33K5jFgshlKphGg0CuC8d3b8+HF7UfwC4XAYe/bsUfMDAIVCAcePH0ej0ejjyAYHoVAIe/bsUUezA0CxWMTx48dRr9f7OLLBQSgUwu7duxGPx9VzpVIJx48fV/mPax3BYBB79uwxzVG5XMbx48e3VYToSsAjeBKJhHrOSo5b4arnpEqlEhwOh7qBL774IuLxuFJQAHDgwAE4nU689NJL+NjHPrbmO8joIsrl8pr3JJNJvOc979n8C9hGSCQSuP322/s9jIFGPB7Hbbfd1u9hDDRisRh+5Vd+pd/DGGhEo1GTjLNx+biqcZ9ms4lHH30Un/jEJ5SmXFhYwNjYmOl9brcbyWQSCwsLlt/z+OOPq6atsVgMO3fuvJrDtmHDhg0bA4KrpqQ6nQ5+8zd/E4Zh4Mknn7yi7/rCF76AUqmkHmfOnNmkUdqwYcOGjUHGVQn3UUGdOnUKzz77rCnemMlkkM1mTe/vdrvI5/PIZDKW3+fz+eDz+S74P4vFImZnZ01hwWsZJJLwKHLgfOh1dnbWztv9AiRJRCIR9Vy5XMbs7Kydt/sFAoEAZmZmTHu4Uqng7bfftufoF/D7/bjuuuvWzNHs7Kyd2/wF/H4/ZmZmTPnfjWLTlRQV1IkTJ/Dcc88hlUqZXr/jjjtQLBZx+PBh3HrrrQCAZ599Fr1e74ryJa+//jr+5E/+BHNzc1c0/u2CvXv34rHHHsNNN92knnvjjTfw5S9/GadPn+7jyAYHu3fvVkxU4vjx4/jSl76EkydP9m9gA4TrrrsOjz32GH75l39ZPffmm2/iS1/6Et56660+jmxwsGvXLjz22GOmHNTs7Cy+9KUv4fjx430c2eBgamoKf/iHf3hZvIFLVlLVahVvvvmm+nt2dhavvvoqkskkxsfH8S/+xb/AkSNH8L3vfQ8rKysqz5RMJuH1erFv3z588IMfxKc+9Sk89dRT6HQ6eOih
h3DPPfdsiNm3HiqVCo4ePYrZ2dnL/o7tBJfLtcbSrVarOHr0qOn+XcswDGONpVutVnHs2DEcO3asT6MaLHQ6nTUsvlqthuPHj+O1117r06gGC+zbKVGv13H8+HH87Gc/69OoBgv1ev2ymY6XrKReeeUVvP/971d/P/LIIwCA++67D//+3/97/P3f/z0A4N3vfrfpc8899xze9773AQCefvppPPTQQ7jzzjvhdDrx8Y9/HE888cRlXcBm4UKdmG3YsGHDRn9wyUrqfe973wWF90YEezKZvKLC3c2G1XEB+vEC8r228rJhw4aNrYHduw9rldBG32vDhg0bNq4u7P44v4DeBPZiysqGDRs2bFx92J6UAEN8trdko5+wCj3bsHGtwlZSG8B6J4eu523ZQsXGhSDPGuNPq9/1IxbkOVzX+hrT956cN3uOrKGvO/13K1gdM7TV82orKQ36DbDaDOv9bRUqtDeKDcJKILAjvTy9l79LAaE/rmWP34roxHm72HH01zp0g0j+tCKIWZ1HtdWevq2k1sGFLIz1Tr28EEOQf9sw43Jyf8M0j1IoOJ1OOBwOeDweuFwuuFwueDweOJ1OeL1euFwuddy3YRjodrvo9XpotVpot9vodrtotVrXrEfFuXQ6nWr+/H6/6e9ut4t6vY6VlRV0u12srKwM9VlKG4W+xjgnTqcTgUAALpdLde5xOp1wu91rvM+VlRX0ej01b91uF+12G71eT/3kKcDyrLyrbTDZSsoC6wlOq5AM/7baCFKBXevWL2Gl4K3CEBLrnWw8qHOprxNdsIZCISUwgsEg3G43IpGIUlRutxsrKytoNBrodrsoFAoolUpot9swDEMJkGtJUXEOHQ4HvF6veoyMjMDn88HtdsPj8aBeryObzaLdbqNer5uE6XaEXGMOhwNut1sZP5yj0dFR+P1+xONxxONxuN1uBINBuFwuNS/dbheNRgMrKyuoVqtotVqo1+sol8vodDqoVqtot9vq0ev10Ol01jUANnO+bSWl4VLitZfynu26SS4GfT6lxcefViEvQj/hWJ50LDfIoMzvegrK6XQqD8rv9yMQCMDn8yEcDsPj8ZiUFD0CwzDQ6XTg9Xrh8XjQ6/UsQ1rXEnSlT+Xk9/vh9XoBnO836HQ60el00O12AazmqbYTrNYY11AgEEAwGITf70cikUAgEEA8HkcsFlNKimuJ68ztdivjx+l0otfrqT6f/P5+rDtbSQmsl8gmLmYxrKfgDMNQC2K979kusBLSfFAAU2A7HA74fD4lvL1erwpFuFwurKysKKut2Wyqv1utlikEYRX+2uo51u+9y+VSP51Op1JItGwjkQhCoRDi8Tg8Ho96jeNutVrI5XJoNpvKenW5XCrsJ5X0dveopCDWFZTP54Pf70cymUQikUCj0YDf70ez2cTc3NyasN92gB7a4zy43W7EYjH4/X6k02ns3LkTgUAA6XQagUBAGUfcf8B5D4ph5Fqthna7DZ/Ph1qtptab0+lEuVxGr9czGYlbBVtJaVjP8icuZsVavbZeEnK7wYqlJgW22+1WCogKiR6F2+1W+QX+3el0lHLi3zJfs7KyAuC8lawnf7dyrq3WjJXF7/f7lbKKRqMIh8NIJBJKSXk8HqV0Go0GKpUKDMNQnhTngUbPdl9PVrAKn9IziEaj8Hg8aLVa8Hq9WFpagsvlUutju82Vvr68Xi+CwSCCwSBSqRQmJycRDAaVkvJ4PMo4ZNSi3W6j0+mg1WrB4XCg3W6j0Wgow5B7Vs6fraT6AClYufm5AYDVMJRVbPtC3pe08mn9btfNwp+cP92LkGEuj8eDUCgEj8eDeDyOSCQCj8ejrDx+jnmFTqeDfD6PZrOJcrmMUqmETqcDh8OhLEHOLzA4OUBdoEohwTXRarVUwpphPirn5eVlNJtNZeGurKyYQjtU/tvZQ7cyfOhte71epexHRkYwPj6OWq2GbrercjLbjTSh55/cbjcCgQCi0SgCgQCmp6eR
SCQwPj6OyclJeL1exGIxZQAxpykjFJ1Ox5Tn5PxKo1JGQayMd4nNnm9bSf0CVrFdWhBUUjJkoN8k3kD5mmTJ0OrfTpBzIPNKUsm73W6EQiF4vV5EIhFEo1H4fD4kEgn4/X5kMhmMjIwoxSXDohTQDN1UKhX4fD4YhqFIBLQC9fzUVibLrbxrGZKhQqFHBMCkpKiMgfOWbbPZVOG+VquFRqOhlJn0IBjmk3mq7SaQrX6nwvf5fOq07kwmg507d6JUKikPQIZPtwN0Y5CKOBQKIZlMIhKJ4IYbbkA6ncbIyAgymYzag06nE81mUyklEiIYTqZiopKiQeXxeNDtdpXConF4MZLTZsJWUlhr8cqbJJWUFIS610SLQ76P+QNgVWFtx4S3laVLIeLxeBCNRuH3+xGLxZBIJJSS4k+GaYLBoBK0DGlRufv9frTbbWU8UGBLy3LQBRLpu51OB+122xQK5TVTKVFZcQ1Jui/XKY2mYbj2zYT0pvx+vyIIeL1eteZI7d8usAqjkzRCTyoSiSASiSAcDquIhcPhUGuuVqspr7xSqSiKebfbNckuaWTq3pNuOGzFurvmlZSeO2FNQSgUMtVg6ItEhgIdDofaGLLOoF6vKyuYNQb8fZiFilUORq8BikQiSCaTCAQC2LFjB8LhMNLpNNLpNHw+nyIMBINBlYMiO4vhh3q9Do/Hg0ajoRK5zWYTfr8fwOrcA2sLXuX4tmKurWri5N+klHc6HZVzktY+FVGj0VDUc+bjJLPK7T6/ZZln4fdZzcOwQ1fA9JCdTif8fj/C4TDGxsYwMjKCkZERxONxAEAkEjHl77bLfEjZ43K5EAwGEQqFkE6nsXfvXkQiEezatQvJZFKF1tvtNgqFAhqNBhYXF5HNZtXa4rpxOBxqPmWI3u/3w+/3o9frqX3dDyP7mlZSurDVk5DrFb7pzzkcq7UbZJ0xz8ANQuEild2wb5z1lBVDW0zixmIxRKNRJJNJjI6OmuLk0uplKIxeAnML/MmNspG4+KCBuQDgvBIGVj0ret1UUvV6Xa0jwzCURSy9eob7pKDZDmvqQqDSkutM0vm5Rhia2i6elJWcknk5kkai0ShCoRACgYCJ7NBsNlGv11GpVFAsFpUHRbnE99L4k8XmfMg1ttXe1DWtpIBVujBDB8yZxGIxZVn4fD4AqxY+rQ55g3izpeXGcE2lUsH8/DyazSZyudxQV8FbhR2YI6AHRQrs9PQ0QqEQdu3apTZRNBo1CREKYxna4zyTeUTGUa1WQ7VaRblcRqvVUvF1eiHr1VD1C7T8+dCflxRy5pyYo5LXINcoQ6LyQS+93W6v+f5hh/SmpLfIPcSwMq1+euOyg8Iw10hZRXCojOPxOJLJpPIkSUYyDAPlchn1eh21Wg1vvvkmyuUylpeXkc/n1fpwOBwIh8Pw+/2mDihU+LVazUT7l/PPn3a4b4sgrRIK2UQioQosA4EAAKiq62g0ilQqpeK9hmGYLHyGAhn2y+fz6Ha7qNVqKJfLqNVqQy1ApICUzDW3241wOIxQKITR0VHs3LkTkUgE09PTKu+k5/mkN8B5Ywir0+moB61BKirWS3GOqeD6Pa+6UJWKSiolCk7+Tu+billC5vmopCSbkSFB2a6Gn+v3fGwUugC82Ps4n6wR4oOGojQOBsFguVJY5XsjkQhSqRRSqZQKrVNJVatVLCwsoFQq4cSJEygUCqhWq6hUKqZ9y/lhmoPzKSMcugdlU9C3ELqgZSKSiikYDKrfeWO4QKxCCRS0pFyTTOH3+9HtdlGpVJDP51XSctg2jx5e05lrksFHajDb/sj6JgBrBLYMV9E7qtVqKJVKqNfrKBQKyOfzqFaraDabSjlJgT8oVGzd0tQJDxQAfA/zLCTW6PNsFWaR/2e9UMywYaP3TXqWUjlJhq38zn6vh82ClC8kjLDFFq+bHSKKxSJyuRwqlYoyjJkTpaKjcc31SPapNK6kEahHKrYK16ySkpuabVUCgYASrIlE
AslkUrFnAKiENgWOrIPiInG73YjH4/D5fBgdHUUikUC1WsXMzAyKxSIKhQIKhYIK7wD9F6obgVWYjx6oz+dTdRo7duzA6OgoxsbGMD4+Dp/Pp+qfWM/EsJa8fqmwGMarVCpYXl5GrVbDiRMnsLy8jEqlglKpZArnSO+hn4pK954YipNrg1R7JqRlI09JMZfKTbdi5ZwBqxT0YWM7SmzUg5KCmqy2eDyuQl0yF8P5HaZ5kJDXLFtrRaNRBINBjI6OIpPJIJFIKPo4KfgnT57E8ePHUavVcO7cOdWXr9frKaORBAnJCiRzll49aev08OV86grras3zNaukCClwdY9KJusBmAox+eCNl4KB1kkwGEQ4HFZ9xAAgGAwqMsCwCRJCt9xlQ8tAIKBasFBoSIFBL4nMNULOJS0+hvf4qNfrKsSn53WAwVL2cizS45FrTCrpjXpD612jbkTI5wZpXjaCi+0Lq/yMDLdvlfDcakjDUCcdydxmvV5HvV5HtVpFvV5XCkbOg/wen8+nSju4LinfaFjK0Cmwtc2er3klBaytu2ACliEZJrLpSTFpT5o06a4ulwuxWEzlq+hlUFi4XC4VQy6VSmg0GgCsO1kMInRBKtlFyWQSoVAIsVhM5fK4uFmo2mq1lJIhg00KFvn+VquFUqmEbDaLRqOhYupSSVkpgn7Po57g1z0emVOgF8W1RwtWKmGCAqPRaJg8NHapkIaW9OiA4cpNARfukakrfIa9GGIHzoe8uFeH6bqtwLWkM49ZusD6p6WlJaysrGB+fh6VSgWLi4uoVCpKdkmmXiAQwOjoqIp8ZDIZhEIhRCIRAFDGYblcRqFQUAqP9XuS+GUTJ7YIer6AVgUTi0zks/CNfdWYN2ExnGSsAauV8RQevV5PudatVsv03mGCtGQZemHDVBZWMvxARS6VuzzzR84zhXStVkOr1UKhUFBNVrlJJEliUKErBz0vJdmN0oCR3ry+LqikJKOP60cPC1mtqWFTVBcDr1UW8EriDSn92wFWuXNGKLivKpUKOp0OlpeXUSqVUCwWlaLmd1C2BYNBFSJNpVLquJNgMKj2F4vKK5WKqvW0am68FbhmlZQeV9W7ATAsJW8EGWWy4JICmAunXq+jWCwCAEqlEoLBoBLATGrKnlgyHyPHNeiwCrfIqnUq816vh1qthk6no+aNha3Sk5JJWiqpUqmkmHyMh1sJnmGZMz0BDazWO8lQC1vPEFZ5KemBso5FFvbSo9KVJD8/zLAiOl0o3LcdIAu69fC5DM01Gg1lCPJzDLvTiGTEQ3qgjBS1220Ui0UsLy+jWCyqqIbcf+tFMa4WrlklBayGZmTyutFowOl0KstBWqcyzlsul9HtdlGtVk0NLR0OBxYWFlCr1RStEzgvaHgIG2PAfH8/4rxXAp1wwuuRzCAu7G63i1KpZIqVy84bwGrdC0MJVFJk9zGHNWyMSDlOChEWKstwHQDVk1B67oT0uOTnZEslCix+BwUPGYN6+HCYoedTGJ6notLzlcOyXi4EXjOvT3o7BHvysfURP8doTiKRQCKRQDgcxvj4OAKBgGruzH3aaDQwNzeHxcVF5HI5FItF1R/TSknZ4b4tgFzIFCQUrmRbATCFo1i7I9/L76JwdTqdSijTCuLCkfFhCh1diAy65StDEPKnFKacT3miJxURBbH0ZGVBqwwvyN51wwwroanPoW4x02uSoUAKLCoimYeS3wesMv/0PNUwQ4aapQcFrBo88uTiYYZOhLEi1VB26IYNjRh6UvIgRKY0AKiQIeUVH7IO0Yo0YYf7thDSki+Xy2i324jH46qrhMPhMAnbRqOhuggz3Mf31Wo1OBwOVR9EL8vn8ymGXygUMnlaDI/JNjkbFShbLXTkRtEtermB6P3w6Gkq71qtppSXVFKcS+aw9KOqhy0cSnC8km7O65G5Stn7MBAIqDkyDMPU6od0fr6fYdNut6vqZSi46ZXJMejjGkaQOUsWKTvCkN3Gmrpmszm0
npQe7rUyZLj3pEEbCAQUqUmSubxeL5LJpCqrIeu4Wq0qUlIul0Oj0cCpU6dQKBRURMOqa8pW4ppXUroXJQ//kklHWeND95c/ZRhqZWUFhUIBzWYThUJBdVoIhUIq4S97jDE0I+t9mKOhotI9Kvl3v7wtnemnP0g0YY6PD9lsV4ZlZAcJelGyLmPYhI3VvbHqgqBbygzPSKUsCy2DwaAKEQLn81wUVPzJz+klEvwfwzSPVuAcyVyoLAmR7L5hhm74yf0llZVcS1Zrh6cQ8Ph45pKB8110qtUqSqWSUlKlUgmVSmXdEB+/e6twTSsp3lgpOOQC0Be/DO9JpSUL3BiuoldVLpeVpUfBIePpAEyLQbK99HoXOW4ApnHL57cCVmFSPqhc9E0mQxBSSVnN7XqbY1hgFZbRO05wLmQHAPaek9azZJsyjykVHuvK6JFT2fN7rMYyTJCRBf7O+ZB7hMpbeuDDuHZ0SGYf2bRk49EjorKSuVvKmng8rlh9gUBA5dKZw+KjUqmYzpwaBAUFXONKipDhJ31B0DtiOFBa+XxISjT7zBmGoeoM/H6/SZHIRo4ATHkr5sB0916C30XBvpWbUU+aylwdxyPnRLf+yGCjcJGfo7K+UMujYYOcL72JJ7A6P1Q+NGDYB1GekCrnXdKBeewHu3pQWOne7bBD5ug4R3xe5qVIKBh2TwqwPt2ZZJFwOKxOE2BXHL3xLo8dknPFTi5sN1YoFFSHdHmemVUUox/70FZSvwAXg96iXno3+g3TQzXyJ2AmW0jBLOmztHbkhqLbLscjlZU8SpzFoP3wOGS4TicDSC9BepIy/0aPSve6BmFjXA3o1yFzSzSIZJ2e7AIgWV1yXuRnqaCkwOb7tsNcrhfukuuGeTidITns0A09ndnIxsPcj7LHKLuc6yeEX85a6Ee42FZSWF0AkgHDpCytUwoCKi3GdOU5UXoYgqE/MvtoDbHvWDgchsfjQavVMoUWaUXLXAQXomEYqnEkC4nXqx+6mpAsKioZuYH4c2VlBR6PR3lKfJDaWq1W1cLXcyrDGuoD1u88wTXE+8w6OpJH2FaKeUxpATO/p3dLZxEmsOrJy/oz3SIe1jmVe4wlHAydcj5Yp0ivYBhh5fXKsHA4HFYd0MfGxlQPQxqxegSG95sEJmnoyAYGNJqtCBn9zGXaSuoXkBuAQnY9mqtVSFB6YlY0Yt5g2RqHrU28Xq9JkFBAsZqedUiRSESFdxh338quFXLR8m891Ki3b+n1euoQNjmfFKQMn8o53g7J/YtBekGcM8Mw1vRlI7lGEmr0UCibqsqSBsAsWIZZORF66EvuNXqSkoU76J1JLgXy2ik76EkFAgGEQiFTTSZ/cl7k+pGRIMkG1WXdoISIbSWFVcUhLbRgMKiOoQ4Gg2rRRyIRU5EpKegUzC6XS7UFmpycxOjoKHw+n+loZofDgVKpBL/fr45DZxExw2NsVUKPjouG+Rv5PVJJbpUgWo/MoSspYPWkXQrYdrut5pTxcCZtqbgGZYNcLqxYWcBqNw52laBykgc90nsHzs8dw1c8QJOMPilY2MWDBAppQGwH6WvJCQAAH+1JREFUAoEUqKFQCGNjY0ilUqb+mixxYNeFYVZS0mh2Op2q8DYWiyGZTKqjcGRLKO5/PeRJEokkVTAEHw6HAazuUXY8dzgcJvq5HmLeyvVkKyms7d0nmTMEQyuRSATVahXhcFg1d5QdJ2RSd8eOHUin04o6zNdcLheKxSK8Xi+azSa8Xi/q9bqyngOBADKZjLKQAoGAaivExCaT5yQiOJ3OLduU67EO5VxSSTkcDpXU5cbrdDoIhULodDrKGnS73SgUCgBWmY3bwavSFZRsZ8PXdWsXgKlQl/edPdoMw1CHc9IgYG6S0BXUsM8hsGoAhcNhdQwOlRT3Yr1eR7PZNB0FM0yQe0oafH6/f42S4llSlDs0YqmYqtWqqbuLTAuQVCEbHFORsfkzAGUw6iSmrdyXtpISYK5JWiZU
XGToRSIRdSP13n1SuLpcLoyMjKizXpjfogUsz2uhsJY5K1pIsgGr7vExzKNb7Vd78ei5Fb0LhySL6CxFhiQoYIPBoMpXsTO4PMH3ShO9gwLJEm21WqoVDa9JFnEzfEUBUa/XlZfENjg86E8P2ehEgu0E3ZiUB0eyyF6WcwwbpPGnpx/8fr8yWKmYqDy4LpgKYG6uUqmYcpIyGiP3J09rYA0e9yTLG/j+fhk7tpISIGGB/ayA88J0ZGTEpBh4o1jhLmtW2GkBAOLxOCKRiNpcwKrgoEXUarWQSCRUtwWGDtmxgglNWtUMJ/LgPBk/vpoKSlpPfDBk1Wg0UK1W1aLm9TMUJavgeT3SA+TmyOVycLvdqn2UFDjDSkfnXNEo6fV6KBaLaDabSvjI+8fDEOVnWVzJ75DribnLXq+n5pVKbjtBeugMn3NdraysqI4JPFB0GNoirRfSlsYxm8CmUilkMhkkk0nVHJZhYTZj7nQ6yGazqNVqilIOwMTy8/v9AFa7j1DWUKawtIHnwbFpgQz5bbWHaispmIWerlBkopLnJMlkLQUPFYw8K4nHV8j/oz+4SBgGY05ivdCNDN/0UxBJb4qelKyTYihBjh8wW8MOx/n6IHqSbB3FvKDeO5FKcthCgFZzJa9FzgfXEACloHnECa1mCifJ0pLe6iAlvTcTvDZJbuJ1ynD4MIQ4L6Sg5EPmd2XPPUYkaLiQTEWDhidYM9yuE7xkWJmGN71xeaYec1X9XFOXrKReeOEFfO1rX8Phw4cxPz+PZ555Bh/96Ect3/vpT38af/VXf4W/+Iu/wMMPP6yez+fz+MxnPoPvfve7cDqd+PjHP45vfOMbphzQVkG3dCuVChwOBxKJhLIiuFjoEtOi4+ekAGICW1I56WHpyUdZL8Xv4XcUi0X0ej3leTAf0Wq1cObMGSwtLWFpaUklOrciOS49SApUhhjY85C5umAwqLyFVqulTu2VdHVgVbgyD9doNODxeFQVPAXzxQ47HHQwTyBr3UidpuB1OByqYFySKdjwk9/jdDqVQUMFBkB52QyVbidlJb0L5qRisZg6VLRcLuPMmTPIZrNrTqEdFujsRRrGwWAQ0WhUHRtPRi/zk+VyGQsLC2g2m+rQQ+auaQSzPVIkElHpB0aO6M3TsKzVairPWSgU4HQ6FRFFGotbhUtWUrVaDTfffDM++clP4u677173fc888wx+/OMfY2JiYs1rBw8exPz8PA4dOoROp4P7778fDzzwAL797W9f6nA2BZIRU6/XFUVa75xAa4bKhZ/VLWV56qoMV+k3ViZGGf6iJc0zmGgxtlotlMtltFotZLNZZLNZdfLmVoc2dCXVbrdVbo7KlSE7hk0l00+29wHOz63P50MikVA9Dvm5crmsPAcZ8gP6wzS6VMi1IQuvpTckWVz0RpmTYg6L8ymFCd8ra/D6mau82uBeCQQCKpTOa63VashmsygUCmvOgRsGyPsl826s25T5KN5Leo65XA4nT55EvV5XSorGrqy5pDzjXvR4PCo6xLWzsrKCaDQKh8OBarUKn8+nDKN+ralLVlJ33XUX7rrrrgu+Z25uDp/5zGfwD//wD/jwhz9seu2NN97AD37wA7z88svYv38/AOCb3/wmPvShD+HP/uzPLJXa1QKFHAUIOyi3Wi0Eg0EVuvP5fKoOodPpqCOr5U2SoRye2ktPjK8DqyELClySJxqNhqLQNptNLC8vK4sagFKg7NS+nge1lZtTKmYKUVpvAJSX6HQ61XHyLFzmZpM1QDQA2F+sXq8rK0+eP6UrqkEE1xZ/l8pKdo9gSFO+LqnDesE051b33vl+GS4c1Lm5HMgwn8xrynCf3BPA8Fy/zpaVJCO9OF4aN9VqFbVaTRUvNxoNxW6U64sGcKPRUIpIemQ6YUfPPfd7Hjc9J9Xr9XDvvffi85//PN75zneuef3FF19EPB5XCgoADhw4AKfTiZdeegkf+9jH1nyG9FuiXC5v2ngl/bdSqeDs
2bPweDwolUqIRCJIp9Oo1+uK1ZdKpRCPxzEyMmKyLpiTaTQaWFxcVOezNJtNJZQprOmF8drK5TKq1SqKxSIKhQIajQbm5+eV4iJzh8JIHmVxIU/takCGLPmT+YBOp4NisagUL0NaDPfJ2h5SYJm34+9UeAw3lEol9V3cQPr5UoPsJch5krF9aXxIdp7sKsE8FO8xvVAqdyowGgj0ZmVeRh/HsIJ7h91gmOynYG02myiVSuoomGG5XqmgKE9kdIXFuswTMfTW6/WQzWaRz+eRy+Vw5swZdRwO1wMAxSQmxbzb7SIUCiEUCql1xLmSSl9XUlspY3RsupL6yle+Arfbjc9+9rOWry8sLGBsbMw8CLcbyWQSCwsLlp95/PHH8Ud/9EebPVQTeCPk6bDA+fNZisUiut0u8vk8ACjrV1KqacGy1T1jwszLsACTC4bCigJdelP1el0pJ/7N8TG8NkgHAcpFLBvGUrHQI5Qxd84HSRLM8+khUHkujiSUDFuuRVcYupWqCwM5n1YWrbx+6c1fbF0Mwnq5XKzXAYYGAA3FQb/Gi61dvWRDGicAlIfEXC0NVl6/HvrXCVf8H1bjsFJK/Z7PTVVShw8fxje+8Q0cOXJkU4XIF77wBTzyyCPq73K5jJ07d27a9/MmyPOker2e6awVn8+HN998U8WGaf0zRCW/g3RyPhhD15uGer1eRYSoVCrI5XJYXFxEs9lUHgkXn5VV0y/rRs8FUZjSo5QhKeZPqKjpEbBVEj1Or9drOmSSRAvmHarVqgpjyaMI5P0bNKynVPS/pTcvPWNZ4CsFlxRgMnxsRTAZ1Lm5VMicMOsYZZi02WyiXC6bClEHEXpOx+p13mvmpNjDUe59Fi9Xq1XU63XFqtXXHI0/WQvFB3NcNJr5HfV6XX2vXnumG1ZbgU1VUv/4j/+IbDaLqakp9dzKygp+7/d+D1//+tdx8uRJZDIZZLNZ0+fopWQyGcvvJT35akFOtqRN08NhNwgeuyGVDZkzMoxDsgW/NxQKmbpSSEopm62yfT7ZScxrUdHx++RYB4WODphzJLKjOy1Ahu9YByYLoIHzHitDgnr+gd4nv0OGSPp93RvFhRSWDOXyfvM5kiykB6k/CJm/2m6Q7D5ZNA+shpxlWHgYoYf+ZPmLNPx00ozV2U9yfcgmACyCZgSDyh4w59VpIPN/9dMg3FQlde+99+LAgQOm5z7wgQ/g3nvvxf333w8AuOOOO1AsFnH48GHceuutAIBnn30WvV4Pt99++2YO55IgvQKHw2HKH5Dpx4JJGXJgrknPNVC4ADDRsYPBoFogbHdUKpVUEpT/Sy4OqwWy1dbMRmAVu6YAkZ4lw6r8yQaz8ngFhgJDoZAq+NU/qwvpQZoLHboA4hqyoovrBAu90wiLgFno6ff7VfjUKowzyPNyMUhBy36aLOLl2pJCdVjp58DaBs6AOYQLrCprlqdwHUQiEdNZbpRPbrdbMfhSqRSSySRCoRASiYTqDSp7PzKvRVmkH0DaD1yykqpWq3jzzTfV37Ozs3j11VeRTCYxNTWFVCpler/H40Emk8E73vEOAMC+ffvwwQ9+EJ/61Kfw1FNPodPp4KGHHsI999yzpcw+K+iCn8qKtSysoZKw+psPLpRAIIBcLgev14t4PK7cbDIIeWxzsVhErVYzdSe4WH5hEDakHAetMWn5kfTCa5FFmVTeNAZk92+/349oNAqv14tyuayEN2s26FUNwhxsBFwXMtcmlRShsxilQmORJTuj0NihZ2qV+CaGZZ50cC9Fo1GkUinFTGN+VuZ/B92TsiL7rBf2k16Tzg52OBwqFCjz1MypS68pkUjA7/cjlUohlUohEAio5rz8Lp7Wy5w6ZZFVukFex1bgkpXUK6+8gve///3qb+aK7rvvPnzrW9/a0Hc8/fTTeOihh3DnnXeqYt4nnnjiUodyVbAeY0y/KRdaYDJP4HK5FDmi1+up3muy1ojJT+k5XWxBDBJxQI+zyxCDBAWL
JEHodFtJlJC/WyXMhw16mE52mpBJcc4bc51WXQfYv02v19uu0HtWAuaO8tJLH0boe13mn/XOItwTTCNwnVBJ0WChkuLx8SwMln1BZf6TLGqdhNFPBQVchpJ63/ved0mDPHny5Jrnkslk3wp3N4qNWqD6azJUKBPhFDayBRALV+WxzXqvuvXGMWgCySruTUuNY5XJfnm0NR/JZFLVglCIk05LoWyVjxg2yPyKPFEVWJ0jJscNw4Df71f96jKZDHw+H6LRqPKgSJaQgloKlmFWYDL5HwqFEI1GVX662+2qwz9Z/C6bqQKDuT6swrFWRCSyhSk3gPNF8bFYDC6XC/F4XCkTWbZAxcW9xDy6zG+xoLdaraLZbKJSqSCbzaJeryOfzytPykpZbTXs3n2bDGltUJgyfEcBJNuL9Ho9kyc1rGGa9RhockPSM2JCWHazlt4Bv6/Vaq2hnw+rFyVh5UnRy5TFzVwH8qhwniHE2hmSe3TDZpgVkw49TCrnim2hmM8cdE/qQutXKitenwz3MVROhmMgEFjjcXEuZL5TGn78/5IZyZwe228xbKoTMvoFW0ldZcjFxkI8hiiolGgJ6y3xB1XIXKi+Qg9LSO+JhYmSgp5IJFRPsmQyqRh8TOTSoiTLUs7VoNRxbBQUoJKFJ2vu6GHxetg6imUPkUgEkUhEhWvYMYD5GD6kZzUsc7MeKFh7vfPd4wOBABYXFzE/P49Op6PKN5aXl5WiGmRCje7lWXlVlBdUHABQKpVQKBTQ6/VUxIGMWGA1PUGPSo82yPwW91On08Hi4iIKhQKKxaLq/1culxVxYqubBVjBVlJXEdJ954LkAmInBr6Pi2jYOwXwWmTsnLUY8hiUUCgEj8ejOnj4fD5EIhE4nU5V98HzcLipyD7Sjw4YZOg5BsMwlGKSdHG97ocdFpxOJ8LhMEKhEMLhsCKSSHJFo9FQD1rBg1TsvRlgWNzpdGJhYQHz8/NoNpt46623UCqVsLS0ZJpPveXPoM2DVY6bz8mCeBKpSqWSIozwGgOBgDrxm3k6qVD4Ow1jng1FRjEb0mazWdWklsd+8DODUHNnK6ktgJXSkQlyq/zBoG2qi8Fq7BQQknLPujIqKTIdZf6KtRo6tVjmHIZ1fmSimgKD1r8klshwoCTisLBStsii92RVLzPskPPG0B5biDGPyzZQVqUgg4iL5bilMUNmca1WQ7lchtvtxtLSkjppgeUb9Kh477lGZLNq2dsvn8+j1Wohn8+rdlKy/EXW6/V7Lm0ltUWQXhX/lrkHvtZPFs1GoYcp9I0lQ01OpxOxWEwlvck0ikaj6tgFHi/Pz7MtFKmw7PzBcN9GGZCDAmmYMNfGsJ5hGIpuT2Ej+zwyFMhQTjabVd/B7gCFQgHtdlsJGoZ8htnoIaTRwqNbzp49i1AopEKADG+yq4n87DCB+4qkmV7vfD9RtkUjuaFcLiMQCGBkZETtIzZi5r6gcddqtbC8vKwiEVTqPCSRxAkqMel5rVejudWwldQWQuZrqKT014dxY/Hnet4gk7j0qOSDeSvJaCJln5RYqzzUsEGOmfddhlNkuEZ+RnoSMsxJZSS9KL3wchjX03qQbZ/q9bpSWGxsTOU0bMSaCxl88p6zUbVhGAgEAqjX63A4HOo4Hyslxb2Ty+WUkuK6KZfLptwUDQGdkTwIsJVUH7DRmz+oYQurZC83lQzVkWXUbDZN1HQ+zxNFSTWvVqtot9tYWFhAuVzG8vIyzp49q5K5Mh81rOA8yftKYSProZjTk6w2mQyXJyEzwa13CJD/c1gh1xUP82O3eAAq/Lm8vKwE77CRavS8mRw7WcE0TCqVCqrVKtxuN86ePau8bRbPy3Af1wK748u5YURCRj340ypS0U/YSmqLsV4id1AV0kYhmWskBcj+YswnsBcflRU3A+szmMwlm2lhYcEkiPWWS8QwzJ+em5TeNE+Alkw/+VMvjNZzD9ICXm+OhhkUtlwHjUZDEZEMw1BtxTgnwHBd
uz5WuT5Y9N9sNuFwOFAsFgGsNg2Qhs96kQ39f+jvW28MgwBbSfUBuqKSvw/KwtgI5HVwo7C5LMMyZCYxRk6vqlqtqiJFegHMreTzeVOBpoyPrxcjH9Z5k/VyfI3XIokAF/JeJZtrGNfRxaDPF40WGd6SLDR+ZlhhZchaMRUlSeRCSudCSkr/3ervfsNWUn2CvkiGwRMgdOXKh0zoM+zndrvRaDRMRbvsOOFwOEzFg9VqVSk6CiP2Yut3J+bNhi44ZHhOV0gbybNsFwG9HqTXyTwKsDo326UujFgvWnA53zHssJXUgGAYF5SVJyhJASREyAJm5hRoCVNJkakmvYF+nmGz1bCybC+1tc92nRviQgp9kBL9Vwvb/frWg62kbFwx9FCDrAVyOBxKIcnwFXMsMqein6oqredBSeJuJa61670YpJJimFR/zcb2g62kbGwqdEIAANVZQ2KjoQtb+NjQsd29ahtm2ErKRl9gCxgbNmxsBM6Lv8WGDRs2bNjoD7aNJxUMBjE9Pa2K2q517Ny5U527Q3CObJzH1NQU/H6/6blAIIDp6WlTe51rGdPT06ptFREIBDA1NWUZxr0WYTVHfr8fU1NTqov5tY7p6WkEg8HL+qzDGMK4S7lcRiwWQ6lUQjQaBQAsLy/j6NGj6uTbax2RSAT79u1T8wMA+XweR48etTfOLxAOh7Fv3z7EYjH1XKFQwNGjR1Gr1fo4ssFBKBTC3r17kUgk1HPFYhFvvPGGPUe/QDAYxL59+0xzVCqV8MYbb6BarfZxZIODYDCIvXv3IplMques5LgVto2SsmHDhg0bw4ONynE7J2XDhg0bNgYWtpKyYcOGDRsDi6EkTjBCWS6X+zwSGzZs2LBxOaD8vljGaSiVVKVSAXCewWbDhg0bNoYXlUrFRF7SMZTEiV6vh2PHjuHGG2/EmTNnhpY8US6XsXPnTvsa+ohhHz8w/Ncw7OMH7Gu4HBjG+dOGJyYm1hxFIzGUnpTT6cTk5CQAIBqNDu2iIOxr6D+GffzA8F/DsI8fsK/hUnEhD4qwiRM2bNiwYWNgYSspGzZs2LAxsBhaJeXz+fDFL35xTeufYYJ9Df3HsI8fGP5rGPbxA/Y1XE0MJXHChg0bNmxcGxhaT8qGDRs2bGx/2ErKhg0bNmwMLGwlZcOGDRs2Bha2krJhw4YNGwMLW0nZsGHDho2BxdAqqb/8y7/Erl274Pf7cfvtt+MnP/lJv4dkiccffxy/8iu/gkgkgrGxMXz0ox/FsWPHTO9pNpt48MEHkUqlEA6H8fGPfxyLi4t9GvHF8ad/+qdwOBx4+OGH1XODfg1zc3P4V//qXyGVSiEQCOBd73oXXnnlFfW6YRj4d//u32F8fByBQAAHDhzAiRMn+jhiM1ZWVvDYY49hZmYGgUAA119/Pb70pS+ZmnMO2jW88MIL+PVf/3VMTEzA4XDgO9/5jun1jYw3n8/j4MGDiEajiMfj+O3f/u0tO0jwQuPvdDp49NFH8a53vQuhUAgTExP4rd/6LZw7d25gxn+xa9Dx6U9/Gg6HA1//+tdNz/f7GoZSSf3P//k/8cgjj+CLX/wijhw5gptvvhkf+MAHkM1m+z20NXj++efx4IMP4sc//jEOHTqETqeDf/7P/7npVNPf/d3fxXe/+1387d/+LZ5//nmcO3cOd999dx9HvT5efvll/NVf/RV+6Zd+yfT8IF9DoVDAe9/7Xng8Hnz/+9/H66+/jj//8z83naT61a9+FU888QSeeuopvPTSSwiFQvjABz6AZrPZx5Gv4itf+QqefPJJ/Kf/9J/wxhtv4Ctf+Qq++tWv4pvf/KZ6z6BdQ61Ww80334y//Mu/tHx9I+M9ePAgXnvtNRw6dAjf+9738MILL+CBBx7o+/jr9TqOHDmCxx57DEeOHMHf/d3f4dixY/jIRz5iel8/xw9c/B4QzzzzDH784x9jYmJizWv9vgYYQ4jbbrvNePDBB9XfKysrxsTEhPH444/3cVQb
QzabNQAYzz//vGEYhlEsFg2Px2P87d/+rXrPG2+8YQAwXnzxxX4N0xKVSsXYvXu3cejQIeOf/bN/Znzuc58zDGPwr+HRRx81fu3Xfm3d13u9npHJZIyvfe1r6rlisWj4fD7jf/yP/7EVQ7woPvzhDxuf/OQnTc/dfffdxsGDBw3DGPxrAGA888wz6u+NjPf11183ABgvv/yyes/3v/99w+FwGHNzc1s2dsNYO34r/OQnPzEAGKdOnTIMY7DGbxjrX8PZs2eNyclJ4+c//7kxPT1t/MVf/IV6bRCuYeg8qXa7jcOHD+PAgQPqOafTiQMHDuDFF1/s48g2hlKpBABIJpMAgMOHD6PT6ZiuZ+/evZiamhq463nwwQfx4Q9/2DRWYPCv4e///u+xf/9+/Mt/+S8xNjaGW265BX/913+tXp+dncXCwoJp/LFYDLfffvtAjB8AfvVXfxU//OEPcfz4cQDA//t//w8/+tGPcNdddwEYjmuQ2Mh4X3zxRcTjcezfv1+958CBA3A6nXjppZe2fMwXQ6lUgsPhQDweBzAc4+/1erj33nvx+c9/Hu985zvXvD4I1zB0XdCXlpawsrKCdDptej6dTuPo0aN9GtXG0Ov18PDDD+O9730vbrrpJgDAwsICvF6vWthEOp3GwsJCH0Zpjb/5m7/BkSNH8PLLL695bdCv4e2338aTTz6JRx55BP/m3/wbvPzyy/jsZz8Lr9eL++67T43Rak0NwvgB4A/+4A9QLpexd+9euFwurKys4Mtf/jIOHjwIAENxDRIbGe/CwgLGxsZMr7vdbiSTyYG7pmaziUcffRSf+MQnVAfxYRj/V77yFbjdbnz2s5+1fH0QrmHolNQw48EHH8TPf/5z/OhHP+r3UC4JZ86cwec+9zkcOnQIfr+/38O5ZPR6Pezfvx//4T/8BwDALbfcgp///Od46qmncN999/V5dBvD//pf/wtPP/00vv3tb+Od73wnXn31VTz88MOYmJgYmmvYruh0OvjN3/xNGIaBJ598st/D2TAOHz6Mb3zjGzhy5AgcDke/h7Muhi7cNzIyApfLtYY5tri4iEwm06dRXRwPPfQQvve97+G5557Djh071POZTAbtdhvFYtH0/kG6nsOHDyObzeKXf/mX4Xa74Xa78fzzz+OJJ56A2+1GOp0e6GsYHx/HjTfeaHpu3759OH36NACoMQ7ymvr85z+PP/iDP8A999yDd73rXbj33nvxu7/7u3j88ccBDMc1SGxkvJlMZg0ZqtvtIp/PD8w1UUGdOnUKhw4dMp3DNOjj/8d//Edks1lMTU2pfX3q1Cn83u/9Hnbt2gVgMK5h6JSU1+vFrbfeih/+8IfquV6vhx/+8Ie44447+jgyaxiGgYceegjPPPMMnn32WczMzJhev/XWW+HxeEzXc+zYMZw+fXpgrufOO+/EP/3TP+HVV19Vj/379+PgwYPq90G+hve+971raP/Hjx/H9PQ0AGBmZgaZTMY0/nK5jJdeemkgxg+cZ5Ppp5e6XC70ej0Aw3ENEhsZ7x133IFisYjDhw+r9zz77LPo9Xq4/fbbt3zMOqigTpw4gf/zf/4PUqmU6fVBH/+9996Ln/3sZ6Z9PTExgc9//vP4h3/4BwADcg1bQs/YZPzN3/yN4fP5jG9961vG66+/bjzwwANGPB43FhYW+j20Nfid3/kdIxaLGf/3//5fY35+Xj3q9bp6z6c//WljamrKePbZZ41XXnnFuOOOO4w77rijj6O+OCS7zzAG+xp+8pOfGG632/jyl79snDhxwnj66aeNYDBo/Pf//t/Ve/70T//UiMfjxv/+3//b+NnPfmb8xm/8hjEzM2M0Go0+jnwV9913nzE5OWl873vfM2ZnZ42/+7u/M0ZGRozf//3fV+8ZtGuoVCrGT3/6U+OnP/2pAcD4j//xPxo//elPFfttI+P94Ac/aNxyyy3GSy+9ZPzoRz8ydu/ebXziE5/o+/jb7bbxkY98xNixY4fx
6quvmvZ2q9UaiPFf7BqsoLP7DKP/1zCUSsowDOOb3/ymMTU1ZXi9XuO2224zfvzjH/d7SJYAYPn4L//lv6j3NBoN41//639tJBIJIxgMGh/72MeM+fn5/g16A9CV1KBfw3e/+13jpptuMnw+n7F3717jP//n/2x6vdfrGY899piRTqcNn89n3HnnncaxY8f6NNq1KJfLxuc+9zljamrK8Pv9xnXXXWf84R/+oUkgDto1PPfcc5Zr/7777tvweJeXl41PfOITRjgcNqLRqHH//fcblUql7+OfnZ1dd28/99xzAzH+i12DFayUVL+vwT5PyoYNGzZsDCyGLidlw4YNGzauHdhKyoYNGzZsDCxsJWXDhg0bNgYWtpKyYcOGDRsDC1tJ2bBhw4aNgYWtpGzYsGHDxsDCVlI2bNiwYWNgYSspGzZs2LAxsLCVlA0bNmzYGFjYSsqGDRs2bAwsbCVlw4YNGzYGFv8fwd6hjEkhzmgAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "size = 25\n",
+ "sample = generator.sample(size)\n",
+ "image_tensor = sample.view(-1, config['channels'], config['size'], config['size']).cpu()\n",
+ "nrow = int(np.ceil(np.sqrt(size)))\n",
+ "visual_plt.plot_images_from_tensor(image_tensor, title='Samples from generative model', nrow=nrow)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "75b21830",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAJwCAYAAAD1IyBAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOydd3gUVReH323pvZFe6b1XgdCLSJEiIL0oFkCx+1mwFxQbSlOqICIIUpQqHaT33iGBkN7r7s73x5KQzcxuNiEI6n2fJw/svXdm7szuzs6555zfUUmSJCEQCAQCgUAgEAgEAoHgvqO+3xMQCAQCgUAgEAgEAoFAYEIY6QKBQCAQCAQCgUAgEDwgCCNdIBAIBAKBQCAQCASCBwRhpAsEAoFAIBAIBAKBQPCAIIx0gUAgEAgEAoFAIBAIHhCEkS4QCAQCgUAgEAgEAsEDgjDSBQKBQCAQCAQCgUAgeEAQRrpAIBAIBAKBQCAQCAQPCMJIFwgEAoFAIBAIBAKB4AFBGOkCgUDwL+XKlSuoVCrmzZtX1DZ58mRUKtX9m5TAJlQqFZMnT77f05ARHh7OiBEj7vc0/nNs3boVlUrF1q1brY6bN28eKpWKK1eu/C3zKi+2no9AIBD8VxFGukAgEPzH+fDDD1m5cqWsvfCBX6VSsXPnTlm/JEmEhISgUqno0aOHWV/hdp9//rnF/R44cKCorXDxIDEx0Wzs6tWradu2LX5+fjg5OREZGcmAAQNYt24dANHR0UXHsvb3IBq8gv8Olr5jAoFAIBAoob3fExAIBALB38cbb7zBq6++atb24Ycf0q9fP3r37q24jYODA4sXL+ahhx4ya9+2bRsxMTHY29tbPN6UKVN46qmncHJyKvNcP/vsM1566SXatm3La6+9hpOTExcuXGDTpk0sWbKErl278r///Y8xY8YUbbN//36+/vprXn/9dWrUqFHUXrdu3TIfXyDn7NmzqNVifb+slPYd+6/Rpk0bcnJysLOzu99TEQgEggcSYaQLBALBfwitVotWW7Zbf/fu3fnll1/4+uuvzbZdvHgxjRo1knm/C6lfvz5HjhxhxowZTJo0qUzH1Ov1vPfee3Tq1IkNGzbI+uPj4wHo1KmTWbuDgwNff/01nTp1Ijo6ukzHFJSOtQWZe0V2dna5FnkEfx9ZWVk4OzvbPF6tVuPg4HAPZyQQCAT/bMRyuEAgEDxAZGRk8NxzzxEeHo69vT1+fn506tSJQ4cOFY2Jjo6mdu3aHDx4kJYtW+Lo6EhERAQzZswodf8lc9JVKhVZWVnMnz+/KDS8ZM7xoEGDSEpKYuPGjUVt+fn5LFu2jMGDB1s8VqtWrWjfvj2ffvopOTk5ZbgKkJiYSHp6Oq1atVLs9/PzK9P+HiRyc3OZPHkyVatWxcHBgYCAAB599FEuXrxocZurV6/y9NNPU61aNRwdHfH29qZ///6y3OOCggLeeecdqlSpgoODA97e3jz00ENm711cXBwjR44kODgYe3t7AgIC6NWrl015zCVz0gtTF3bt2sWkSZPw9fXF2dmZPn36kJCQINv+jz/+oG3btri6uuLm5kaTJk1YvHhxUX/xz3abNm1wcnLi9ddfByAvL4+3336bypUrY29vT0hICC+//DJ5eXlmx1CpVDz77LP88ssv1KxZE0dHR1q0aMHx48cBmDlzJpUrV8bBwYHo6GjF8967dy9du3bF3d0dJycn2rZty65du8zGFH6XLly4wIgRI/Dw8MDd3Z2RI0eSnZ1tNh9L3zFb39e75Y8//qB169Y4Ozvj6urKww8/zMmTJ83GHDt2jBEjRhAZGYmDgwP+/v6MGjWKpKQkxfM+deoUgwcPxtPTsyjKJjw8nB49erBz506aNm2Kg4MDkZGRLFiwwGwfSjnphe/9qVOnaNeuHU5OTgQFBfHpp5/Kzufq1av07NkTZ2dn/Pz8eP7551m/fr3IcxcIBP8ahCddIBAIHiDGjRvH
smXLePbZZ6lZsyZJSUns3LmT06dP07Bhw6JxKSkpdO/enQEDBjBo0CCWLl3KU089hZ2dHaNGjbL5eAsXLmTMmDE0bdqUJ554AoCoqCizMeHh4bRo0YKffvqJbt26AaaH/rS0NAYOHMjXX39tcf+TJ0+mTZs2TJ8+vUzedD8/PxwdHVm9ejXjx4/Hy8vL5m0fZAwGAz169GDz5s0MHDiQiRMnkpGRwcaNGzlx4oTs2heyf/9+du/ezcCBAwkODubKlStMnz6d6OhoTp06VeRpnjx5Mh999FHRe5qens6BAwc4dOhQUdRB3759OXnyJOPHjyc8PJz4+Hg2btzItWvXCA8PL9d5jR8/Hk9PT95++22uXLnCl19+ybPPPsvPP/9cNGbevHmMGjWKWrVq8dprr+Hh4cHhw4dZt26d2WJPUlIS3bp1Y+DAgQwZMoRKlSphNBrp2bMnO3fu5IknnqBGjRocP36cL774gnPnzsnyvXfs2MGqVat45plnAPjoo4/o0aMHL7/8Mt999x1PP/00KSkpfPrpp4waNYo///yzaNs///yTbt260ahRI95++23UajVz586lffv27Nixg6ZNm5oda8CAAURERPDRRx9x6NAhvv/+e/z8/Pjkk08A698xW9/Xu2HhwoUMHz6cLl268Mknn5Cdnc306dN56KGHOHz4cNF7vnHjRi5dusTIkSPx9/fn5MmTzJo1i5MnT/LXX3/JBCf79+9PlSpV+PDDD5Ekqaj9woUL9OvXj9GjRzN8+HDmzJnDiBEjaNSoEbVq1bI615SUFLp27cqjjz7KgAEDWLZsGa+88gp16tQpuvdkZWXRvn17bt68ycSJE/H392fx4sVs2bLlrq+VQCAQPDBIAoFAIHhgcHd3l5555hmrY9q2bSsB0ueff17UlpeXJ9WvX1/y8/OT8vPzJUmSpMuXL0uANHfu3KJxb7/9tlTy1u/s7CwNHz5cdpy5c+dKgLR//35p2rRpkqurq5SdnS1JkiT1799fateunSRJkhQWFiY9/PDDZtsCRefRrl07yd/fv2jb4vstOa+EhISitrfeeksCJGdnZ6lbt27SBx98IB08eNDqtfnll18kQNqyZYvVcfeLOXPmSIA0depUWZ/RaCz6PyC9/fbbRa8Lr11x9uzZIwHSggULitrq1asney+Kk5KSIgHSlClTyjX/sLAws89K4XvZsWNHs/k///zzkkajkVJTUyVJkqTU1FTJ1dVVatasmZSTk2O2z+LbFX62Z8yYYTZm4cKFklqtlnbs2GHWPmPGDAmQdu3aVdQGSPb29tLly5eL2mbOnCkBkr+/v5Senl7U/tprr0lA0Vij0ShVqVJF6tKli9m8srOzpYiICKlTp05FbYWf2VGjRpnNqU+fPpK3t7dZm6XvmK3v65YtW2z6XBe+H4Xnk5GRIXl4eEhjx441GxcXFye5u7ubtSvN5aeffpIAafv27UVthec9aNAg2fiwsDDZ+Pj4eMne3l564YUXrJ5P4Xtf/Lzz8vIkf39/qW/fvkVtn3/+uQRIK1euLGrLycmRqlev/kB/9wUCgaAsiHB3gUAgeIDw8PBg79693Lhxw+o4rVbLk08+WfTazs6OJ598kvj4eA4ePFjh8xowYAA5OTmsWbOGjIwM1qxZYzXUvTiTJ08mLi7OpnD84rzzzjssXryYBg0asH79ev73v//RqFEjGjZsyOnTp8tzGved5cuX4+Pjw/jx42V91krjOTo6Fv2/oKCApKQkKleujIeHh1kqhIeHBydPnuT8+fMW92NnZ8fWrVtJSUm5izMx54knnjCbf+vWrTEYDFy9ehUweWkzMjJ49dVXZbnIJc/b3t6ekSNHmrX98ssv1KhRg+rVq5OYmFj01759ewCZF7VDhw5mUQHNmjUDTFEErq6usvZLly4BcOTIEc6fP8/gwYNJSkoqOk5WVhYdOnRg+/btGI1Gs2ONGzfO7HXr1q1JSkoiPT3dyhUzYev7Wl42btxI
amoqgwYNMrtuGo2GZs2amV234nPJzc0lMTGR5s2bAyjOpeR5F1KzZk1at25d9NrX15dq1aoVXWNruLi4MGTIkKLXdnZ2NG3a1GzbdevWERQURM+ePYvaHBwcGDt2bKn7FwgEgn8KItxdIBAIHiA+/fRThg8fTkhICI0aNaJ79+4MGzaMyMhIs3GBgYEyoaaqVasCpvrohQ/XFYWvry8dO3Zk8eLFZGdnYzAY6Nevn03btmnThnbt2vHpp59afLC3xKBBgxg0aBDp6ens3buXefPmsXjxYh555BFOnDhxT8Sn8vPzSU5ONmvz9fUlPz+ftLQ0s3Z/f/8y7fvixYtUq1atzOJ9OTk5fPTRR8ydO5fY2Fiz8OLic3r33Xfp1asXVatWpXbt2nTt2pWhQ4cWqdvb29vzySef8MILL1CpUiWaN29Ojx49GDZsWNG5pKWlmWkI2NnZlZpuEBoaavba09MToGghoDDfvnbt2qWea1BQkEz1+/z585w+fRpfX1/FbQqFBC3Nx93dHYCQkBDF9sJ5Fi5uDB8+3OL80tLSis5P6VjFz93Nzc3ifsD297W8FJ5P4WJGSYrPLzk5mXfeeYclS5bIrqfSXCIiIhT3WfJ6gOma2LIoFBwcLFu08fT05NixY0Wvr169SlRUlGxc5cqVS92/QCAQ/FMQRrpAIBA8QAwYMIDWrVuzYsUKNmzYwJQpU/jkk0/49ddfi3Iy7xeDBw9m7NixxMXF0a1bNzw8PGze9u233yY6OpqZM2eWabtC3Nzc6NSpE506dUKn0zF//nz27t1L27Zty7yv0ti9ezft2rUza7t8+TJbt26VeXiLG1X3kvHjxzN37lyee+45WrRogbu7OyqVioEDB5p5dtu0acPFixf57bff2LBhA99//z1ffPEFM2bMKCpV99xzz/HII4+wcuVK1q9fz5tvvslHH33En3/+SYMGDZg4cSLz588v2mfbtm1LFePSaDSK7eW5PsU9uoUYjUbq1KnD1KlTFbcpaXxbmk9p8yy8llOmTKF+/fqKY11cXMq0T2vY+r6Wl8J9LFy4UHFBqfhi0YABA9i9ezcvvfQS9evXx8XFBaPRSNeuXRXnovQ+wd1dj4r8HAkEAsE/GWGkCwQCwQNGQEAATz/9NE8//TTx8fE0bNiQDz74wMxIv3Hjhqzs0blz5wDKLP5lLcy6OH369OHJJ5/kr7/+MhMEs4W2bdsSHR3NJ598wltvvVWmbUvSuHFj5s+fz82bN+9qP5aoV6+emRo6mDzmXbp0kbWXlaioKPbu3UtBQQE6nc7m7ZYtW8bw4cP5/PPPi9pyc3NJTU2VjfXy8mLkyJGMHDmSzMxM2rRpw+TJk83qyUdFRfHCCy/wwgsvcP78eerXr8/nn3/Ojz/+yMsvv2wWclzca1xeCoXSTpw4US6PZ1RUFEePHqVDhw42f17LQ+E83dzc6NixY4Xt19Kcy/K+lofC8/Hz87N6PikpKWzevJl33nnH7PtpKW3ifhIWFsapU6eQJMnsul64cOE+zkogEAgqFpGTLhAIBA8IBoNBFlbq5+dHYGCgrMyUXq9n5syZRa/z8/OZOXMmvr6+NGrUqEzHdXZ2tskocHFxYfr06UyePJlHHnmkTMeAO7nps2bNKnVsdnY2e/bsUez7448/AKhWrVqZ52ALnp6edOzY0eyvsFRayfay0rdvXxITE5k2bZqsz5q3UKPRyPq/+eYbDAaDWVvJclkuLi5Urly56POTnZ1Nbm6u2ZioqChcXV2LxtSsWdPsHMv6eVKic+fOuLq68tFHH8mOb4uXdMCAAcTGxjJ79mxZX05ODllZWXc9R4BGjRoRFRXFZ599RmZmpqxfqaycLVj6jtn6vpaXLl264ObmxocffkhBQYGsv/B8Cj3YJefy5ZdfVsg8KpIuXboQGxvLqlWritpyc3MVPxsCgUDwT0V40gUCgeABISMjg+DgYPr160e9evVwcXFh06ZN7N+/38zTBqac9E8+
+YQrV65QtWpVfv75Z44cOcKsWbPK5KEFk2GyadMmpk6dSmBgIBEREUWCWiWxlqtbGm3btqVt27Zs27at1LHZ2dm0bNmS5s2b07VrV0JCQkhNTWXlypXs2LGD3r1706BBg3LP5X4xbNgwFixYwKRJk9i3bx+tW7cmKyuLTZs28fTTT9OrVy/F7Xr06MHChQtxd3enZs2a7Nmzh02bNuHt7W02rmbNmkRHR9OoUSO8vLw4cOBAUUk/MEVbdOjQgQEDBlCzZk20Wi0rVqzg1q1bDBw48J6dt5ubG1988QVjxoyhSZMmRfW1jx49SnZ2tll4vRJDhw5l6dKljBs3ji1bttCqVSsMBgNnzpxh6dKlrF+/nsaNG9/1PNVqNd9//z3dunWjVq1ajBw5kqCgIGJjY9myZQtubm6sXr26zPu19B2z9X0tL25ubkyfPp2hQ4fSsGFDBg4ciK+vL9euXWPt2rW0atWKadOm4ebmRps2bfj0008pKCggKCiIDRs2cPny5QqZR0Xy5JNPMm3aNAYNGsTEiRMJCAhg0aJFRfoU9zLSQiAQCP4uhJEuEAgEDwhOTk48/fTTbNiwgV9//RWj0UjlypX57rvveOqpp8zGenp6Mn/+fMaPH8/s2bOpVKkS06ZNK5fC8dSpU3niiSd44403yMnJYfjw4RaN9Ltl8uTJsnxvJTw8PJg9ezZr165l7ty5xMXFodFoqFatGlOmTGHChAn3ZH73Go1Gw++//84HH3zA4sWLWb58Od7e3jz00EPUqVPH4nZfffUVGo2GRYsWkZubS6tWrdi0aRNdunQxGzdhwgRWrVrFhg0byMvLIywsjPfff5+XXnoJMOVuDxo0iM2bN7Nw4UK0Wi3Vq1dn6dKl9O3b956e++jRo/Hz8+Pjjz/mvffeQ6fTUb16dZ5//vlSt1Wr1axcuZIvvviCBQsWsGLFCpycnIiMjGTixIlFookVQXR0NHv27OG9995j2rRpZGZm4u/vT7NmzcwqKpQFS98xW9/Xu2Hw4MEEBgby8ccfM2XKFPLy8ggKCqJ169ZmGguLFy9m/PjxfPvtt0iSROfOnfnjjz8IDAyssLlUBC4uLvz555+MHz+er776ChcXF4YNG0bLli3p27fvPRGTFAgEgr8blSTUOAQCgeAfRXR0NImJiZw4ceJ+T0UgEAgeCL788kuef/55YmJiCAoKut/TEQgEgrtC5KQLBAKBQCAQCP4xFC8RCKac9JkzZ1KlShVhoAsEgn8FItxdIBAIBAKBQPCP4dFHHyU0NJT69euTlpbGjz/+yJkzZ1i0aNH9nppAIBBUCMJIFwgEAoFAIBD8Y+jSpQvff/89ixYtwmAwULNmTZYsWcJjjz12v6cmEAgEFYLISRcIBAKBQCAQCAQCgeABQeSkCwQCgUAgEAgEAoFA8IAgjHSBQCAQCAQCgUAgEAgeEP5zOelGo5EbN27g6uqKSqW639MRCAQCgUAgEAgEAsG/HEmSyMjIIDAwELXauq/8P2ek37hxg5CQkPs9DYFAIBAIBAKBQCAQ/Me4fv06wcHBVsf854x0V1dXwHRx3Nzc7vNsBAKBQCAQCAQCgUDwbyc9PZ2QkJAie9Qa/zkjvTDE3c3NTRjpAoFAIBAIBAKBQCD427Al5VoIxwkEAoFAIBAIBAKBQPCAIIx0gUAgEAgEAoFAIBAIHhCEkS4QCAQCgUAgEAgEAsEDgjDSBQKBQCAQCAQCgUAgeEAQRrpAIBAIBAKBQCAQCAQPCMJIFwgEAoFAIBAIBAKB4AFBGOkCgUAgEAgEAoFAIBA8IAgjXSAQCAQCgUAgEAgEggcEYaQLBAKBQCAQCAQCgUDwgCCMdIFAIBAIBAKBQCAQCB4QhJEuEAgEAoFAIBAIBALBA4Iw0gUCgUAgEAgEAoFAIHhAEEa6QCAQCAQCgUAgEAgEDwjCSBcIBAKBQCAQCAQCgeABQRjpAoFAIBAIBAKBQCAQPCAII10gEAgEAoFAIBAIBIIH
BGGkCwQCgUAgEAgEAoFA8IAgjHSBQCAQCAQCgUAgEAgeEISRLhAIBAKBQCAQCAQCwQOC9n4efPv27UyZMoWDBw9y8+ZNVqxYQe/eva1us3XrViZNmsTJkycJCQnhjTfeYMSIEX/LfP8OEjLy2Hz6ls3jfVzsaRLhhbujrszHKjAYOXQ1hcuJWWXeVvDfQaWCCB8XGoZ6oNX8y9f1Uq9BwlnwjACfyuXbR8YtuHEI3AIhoF7593F1J+RlWh/n4AZhrcDFr3zHqQhunYKEM6Zz9Y66N8cwFMDlbaDPh/BW4OB+b47zN2CUjJxOOs2NrBs08GuAj6NPufZzPuU8l9MuU8unFkEuQRU8y38et7JucSThCJWcKlHPtx4qlarCjyFJEpfSLnEs4RhGyWh1bKhbKPV966PTlP23OTM/k1NJp5CQaOLfBLXqX37f/Qdx4chWEk4cJLhZO0KqNLzf0xEIBP9i7quRnpWVRb169Rg1ahSPPvpoqeMvX77Mww8/zLhx41i0aBGbN29mzJgxBAQE0KVLl79hxveea8lZvPrr8TJt42SnYXDTUMa0jsTf3aHU8dn5en7ef53Z2y9xIy23vFMV/McI9nTkiTaRDGgcgoNOc7+nU/EcXwa/PQP6XFCpofFo6PoxaMpwmzy0EP54BQpuL3zVGQA9vwFd6d/LIk79BiufgfwM28bbucCABVC5g+3HuFuMRji/AXZ+Adf/utMe1QEeeh7CHzKt7lQEKVfhp0EQf9L02s4VGo+E5k+DW0DFHONvoMBYwLrL65hzYg4XUi8A4Kh15LO2n9EmuI3N+5Ekic8OfMaCUwuK2qJDohldezT1/epX9LQfeM4mn2XOiTmsu7KuyHDuENqBj1p/hKPWscKOI0kSH+/7mMVnFtu8jZ+TH8NqDqN/1f446ZxKHZ+QncDC0wtZenYpWbfvITW8ajCv6zybthfcW/7433DCl+/DA0hTfc/Fif2IHvfe/Z6WQCD4l6KSJEm635MAUKlUpXrSX3nlFdauXcuJEyeK2gYOHEhqairr1q1T3CYvL4+8vLyi1+np6YSEhJCWloabm1uFzb+iOHg1mb7T95RrW51GxaMNgnmibSRRvi6y/tTsfObvvsq83ZdJyS6426kK/qN4O9sx6qEIhjQPK1cExwNJzEGY0xmMevP26j2g3xzQ2pe+j11fw8Y35e0RbWDgT2Av/07KOLQQVk+AUrx0MuzdYNxO8Awr23ZlxVAAJ36FXV9C/CnL44KbmIz1qt1AfRdewISzsKA3ZNyQ92nsoN4gaDXx3nnwK4AcfQ6/nv+V+SfnczPrpqzfSevEskeWEeIWYtP+lp9bzuQ9kxX7GldqzOg6o2kV2OqeeJIfJA7eOsgPx39gR+wOxf6Gfg2Z1mEarnauFXK8tZfW8uqOV8u1rZudG4NrDGZw9cF4OnjK+q+lX2PeyXn8duE38o35sv5B1QfxerPXy3VsQcVw5qdZSO98YdZmUEHAgjl4N2lxn2YlEAj+aaSnp+Pu7m6THfqPMtLbtGlDw4YN+fLLL4va5s6dy3PPPUdaWpriNpMnT+add96Rtf8bjfRCVCroWsufp6KjqBvswc20HH7YcZnF+66RnW+ooJkK/uu42Gt5vFkoox6KoJJbGTzFDxp5mTCzNSRfUu6PbAcDF4Gds3K/JMGf78OOzywfI6gxPP4LOHlZHrPnO1j/mu3zLklIcxixtmyef1vJz4Yji2D316aUAFvxqQYPPQd1+kNZw35vHIEfH4XsJOvjVGqo2cu0KFDe9IJ7QFpeGkvOLGHR6UWk5KVYHVvXpy7zus1Dp7Z+ja6kXWHAmgHk6HOsjqvuVZ3RtUfTKawTGvW/J+rFKBnZEbODH078wOH4w6WOr+FVgxmdZuDlYOV7ZwMFhgJ6ruxJTGbMXe3HQeNA36p9GV5zOAEuAZxOOs2cE3PYcHWD1fB5R60jm/pvws3uwXtm+S+QHxPLmUe6ocuROzcMAT7UWL0OjYuF
3weBQCAoxr/WSK9atSojR47ktdfuPMj+/vvvPPzww2RnZ+PoKA9t+y950pWoE+TOmbh0CgwPxNss+Bdip1HTt1EQDULlHqLihHo50TDUEzvtA5Zf+duzcHih9TEhzWDwUnD0MG83GmHdK7BvVunH8asFQ1eAayXzdkmCbZ/A1o/KNG1F2r0BbV+yffzNY3DrJEhWFu9Sr8P+7yE7sdzTynUKILbGGEI7PonO0QbP5tXdsPgxyEsv24GiOkDrSaZw+/tEQnYC80/O55dzv5Ctz7Z5u3H1xvFM/Wcs9hcYCxj6+1BOJp20eZ8hriGMqDWC3pV7Y6exs3m7snAj8wYXUi8Q6hpKuHt4ufZxM/MmRxKOkKu3nH6Vrc9m2bllRakCthLhHsGsTrPwd/Yv19wAlpxZwgd7Pyj39iXRqrRU86pWpvfypcYvMazWsAqbg8A2JIOBK0OHkXvokMUx7n36EPjRhzbv83r6dS6nXybCPYIQV9siaMqKlJ9PztGj5F+7bn2gRo1D1arY16jxr4++EQgeBMpipN/XnPS/A3t7e+ztbQhV/ZdyPFY5wkAgqCjyDUZ+2nedn/aV8jAA+Ls5MKZ1BIOahuJs/wDcfk6tKt1AB7i+F+b1MBnZLr6mNoMeVj0LR3+y7VjxJ2FuVxj2G3iEmtqMRtjwP/jru/LNvyRbP4Ko9hDcyPo4QwGseQ4O/1gxxy0Fh+ybRB18j7RDX2Jo8iRe0c9Yjio4vwl+HgKleIsVubjZ9PfQ89Bx8l3NuTwcjj/Ms5ufJT2/jIsLwKxjs2gV2MpiXvn0I9PLZNQBXM+4znt/vceSs0v4tv23BLhUXA6/JEn8cOIHvjvyHQXGAtQqNcNqDuP5Rs+XSehs6dmlfLLvE8Uw74rgctplhv8xnNmdZxPqFlrm7bMLsplxdEaFzkkv6cv8Xi45u4QhNYcIEbm/maTZs60a6ABpK1bg0rYNbl27lrq/JWeW8Mn+T9Ab9WhVWl5s8iKP13i8oqaLMSuL1GXLSJo7D31cnM3b2deogc/YMbh26YJK8++JvhEI/sn8ozzp5Ql3L0lZVjDuBzn5Bq6n2OZ9ScspYMGeq6w9dgNjOd/FhqEePNEmikhfEaolUOZifCYztl/i6PXUCtunu6OO4S3DGdEyHC/ne+PhK5X0GzC9JeRYD0U2w7syDF1pUlNfNgrOrCn7cd2CTPvwioTVE+GIFUO5zctQW0FU06g3GbIpV+R9nhGm/HRLOfAFObB0OJxfX/a5K+EWDNW6wclfSw9Nv41R64S6ySiT+Jt7MWXykytg+VgwWtDMcA+F9Bjbcvaf2g2Vatk0n4pAkiQeXfWoTZ5ejUqDQSF6IcgliGWPLMPFzvy9OxB3gFHrRyEhv9Fb2ldJ/J39md1pdrm93cWRJIkpB6aw8JR8geuRyEd4t9W7aNXWF+EkSeL749/z9eGv73o+YAoJd9Q6kpybrNjv7eDNrM6zqOpZtUz7nXVsFt8c/kbWXs2zGh+3/lhxm5jMGOaemMuheOvGXVn5tsO3ZRIZFNwdOceOcWXQYDCU/v1Su7kR+dtKdAGWF8IScxLptrwbuYY7ESN2ajvW9Flz1wto+pQUUn5cRMqPP2Kw8XlYCV1oKN6jRuHepzfq/7CDSyC4V/xrw91feeUVfv/9d44fv6N+PnjwYJKTky0Kx5XkQTfSy8PVpCxmbb/ELwdjyNfbJjgVXc2Xp9pG0TTCS4Q4CUpFkiT+upTMd1svsON8+cOeS+KgUzOwSShj20QS5FFxSsylYjTCwt6msl4lsXMBnSNkJShv6xYMXhFwRVmwCpUGOrwF+3+ANAv5204+ENgALmy0PMeuH0Pzpyz3X98Pc7ooh6o3GAK9vpW352WYlNItzb0sFOab1+4HWjtT3vrhH2H3N5bPuyRqHdR7DFo9B9f+si6aV7mTScU+46YpN/7IYjBY8b52eNsU+v43cSHlAn1W9bE6pq5vXcbUHoOPow/D/hiGXtLLxjwS
+Qgftr4TOpuen07fVX2Jy5J7xSLcI5jTZQ5rL61lwckFxOfEWz2+l4MXszrNoppXNRvPSo7BaOCdPe+w4sIKi2Pah7Tn07afYq9RfsiXJIkvDn7B3JNzyz2PQjzsPYpE2bIKshi7YSzXMpQ/f252bkzvOJ26vnVt2ndqbirdfu1GZoG8FOJ3Hb6jdXBrq9sfjj/MnONz2Bqz1abjadVaekX1ItwtnM8Pfi7rbxXYihmdKtarL1DGmJXFpT6PUnDNdg0Op6ZNCZ07x6IneuWFlby5Sy4u+nKTlxlac2i55llw4wZJ8+aR+ssypJxyRB9ZQOPrg/fw4XgMHIjGxQbRU4FAYBP/GCM9MzOTCxdMXocGDRowdepU2rVrh5eXF6Ghobz22mvExsayYIGp1Mzly5epXbs2zzzzDKNGjeLPP/9kwoQJrF271uYSbP9GI72Q+Ixc5u66wo97rpKRJ3/4U6ugR91AxrWNombgv+vcBX8fJ2LTmL7tIr8fv0lF3T20ahU965s+m1UrlVONOemiKb+6Uq3S1b53fwMb3lDu6z3DpE6+oJfJa1sWNHbQby7U6AFpsaaFgMRzZduHSg09p0EDG0Igt34CWy3kQg5YYBJUKyQ7GX7sa6rhfjcENTYZv5aU2w0FSCeWE7P6Q0L0V23cqQoUvMRF1OoDfWaZFgMKyYiDPd/CgTmQr1BPvnoPk+BfGUjJTeFk0kl8HX0tGrLHY9JIyymgTpA77k53hN6+P/49Xx36SnGbVkGtGF17NI0rNS5aFLU2/tM2n9ItohuSJPHK9lf448ofsjFatZbF3RdTw7sGAPmGfFZfXM3ck3O5mm75urvaufJdh+/KVa6twFDAqzteZcPVDaWObRbQjK/bfS0rHWYwGnh/7/ssO7eszMcvjr+zPyNqjaBP5T5mx0jMSeTJjU9yLkX5e+eodeSb9t/QLKBZqcf4/MDnzDs5T9beqFIj5naZa/MC9/mU88w9MZffL/+uGPXgpHWif9X+DK05lErOldAb9XRd3pVb2bdkY1f1XkWEe0Spx8zLz+HkxqVIkpFaHfrj4Fh2Q0syGMjef4CCOHlVguKoNBrsq1bFvmrVci3665OSyDl8GEOmwve4GGonJ5waN0brVT4hwNzTp8mPicGxbl10lSpZHXvjf/8jbfmvsnaDCn5vouKRfcr3K78XX8B7zBjFvrd3v82v5+X7bOrflB+6/GDDGdwh7+JFkr7/gbTVq0Evf96rKNSurngOGoTX8GFovb3v2XH+KeRfu0bu6TPYV6mCfWTp38MHGWNuLtn79qELCsIuMrJc392CfAMJV9PR2WvxDa2YShr/dv4xRvrWrVtp166drH348OHMmzePESNGcOXKFbZu3Wq2zfPPP8+pU6cIDg7mzTffZMSIETYf899spBeSnlvAj39dZd6uK8Rn5OGgU9O3YTBPtIkkzFuEtQsqhsuJpgiOXw/FkGdjBIctdKpZiaeio2hYihBdEZIE2z41ia8VPgBHtjPlJUe0kdfrvnkMvu+g7IWt9aip5JpKZRJMW9gbkmwUqtI5m4zCqGL3tKxEWNgH4o7Ztg+1Dvr9YG5cW8Ogh3ndTTnzJXHwMIV8uwdB+k3TuSScsW2/JTBKKk44NiSyz5u4VI0utQb61rPxjJy7l/bqwzytXUUj9flyHReAhsOgx5dgSaU8JwXmdIOE0+btrgHwgu3nu+fGHl7Y+gIZBab69I9Ve4w3mt9ZyJEkifE/HWbNMZPB4utqz+xhjakf4gHA478/zrEE8/dZq9ayqPsianrXlB3PYDQwesNoDt46KOtz1bmyvOdyDtw6wOs7lUtvvdDoBUbUHqG4383XNjPtyDQup11W3NZR68iX7b6kZWBLxX4lcvQ5PL/1eXbF7rJ5m7q+dfmuw3e427sDJvG7/+34n+Kig61U9qjMiFoj6B7Z3aIaflpeGk9vflr2fhRip7bjs7af0S5U/vxRSFxWHA//+rBirvzCbgvLtcgR
mxnLgpMLWHFhBTn6HLwcvBhcfTADqw8sukaFzD42WzEVYHD1wbzWzHoViPhblzn52KP4x5nCqhN87Yj6biZBdZrbPNeC+Hhinh1P7jEb712AQ506eI8dg2vHjqhsKL2Yd+nSHUOzwLaysGoXFwI//gjXjh1tnpeUn8/NtyeTtuJ29IdajVu3bniPHYND9eqy8enrNxA7caLivn5urWb5Q2rG/2ag9SmFx2edjvAlP+FYS55q88iKR7iSfkXWrlFp2PbYNtlnQImco0dJnD2bzE2bSx1bkWjc3QmZNRPHeg9OBY2/E0mSSJo5i4RvvilKf3Bq0RyfsWNxatHiHxeRmnv2HFeHDMGYYfq9c+/dm4APP7Dpe1tIWkIOK6ceIjPFJM4dVseb7k/VRa3+Z12Lv5t/jJF+P/gvGOmFSJLEjbRc/Fzt0WmE2Izg3pCvN3I1KQuDlVtJTHIOs3ZcYt9l5XxRJZpFePFUdBRtq/pa/gE0GuGPl2H/bOX+oEYmY73awyavb342zIqGxLPysW7B8NROcCy2OJAZDwsfhVvH5eOL4+AOjy+DkKbyvtw0k1L5tVKqNmgdYeCPUNn2h0/AlJc+/SHIz5D3RbSFR740LRQo5a8DOHrBo7PB7U5O5LGYVN5edYrsfJOHJlbyIRMnutfx59vBDa0+kBiNEj2+2cmpm4XiaRJNVWcYp11Ne82Rsp1bi2eh8/ulLgrw+0vKCvuTToNbYKmH0Rv19FjRg9jMWLP2GR1n0CqoFQC/Hoph0tKjZv0NQz349elWJOYk0n5pe1nOeIuAFszqbFn5/2bmTfqu6lu0MFCcOj51uJR2iayCLFlfM/9mzOo8y6qIWHp+Os9seoYjCUcU+3VqHVPaTKFDWAeL+ygkIz+DZzc/W64c66qeVZnZaSYuOhde2PYC22O2K45ToeKVpq/QxL+JxX152Hvg5+Rn03GzC7KZsGUCe28qLGBhMozef+h9ekT2UOy35PVsF9KOr9vfXR59gbGAhOwE/J39Lb6HybnJdPqlk2yRwFnnzOb+m3HWKS+4GyUjK4ZGU/OAebpOtrOGavMW4VKndCMrPyaWa6NGlSnUuzh2ERF4jxmN+yOPoLKTa47kHD9O0qzZZGzaRLnCsdRqAj74AI8+vUsdaszJIWbiRLK2K6f4OLdpjc/YsTg2NkW5FMTFcalXb4wKed1ngmDyEA1GtQqnXIlP5xjwU0j/touIIOLX5aiLVRxKzEmk3VLLi0Ift/6YhyMfVuyTJImsnbtImj2b7H37SjnjOzg2boT3yJHogi0ryOccO0ry9z+Qf7X0qCf7KlWIWPXbP84gvVsko5FbH39MygJlkVmH2rXxHjsW144d/hGie5LBwKVHepJ/ybzsbKX//Q+voUNs24dRYulH+0m8bh798tCAKtRrf28qFvxbEEa6Ff5LRrpA8KBx8Goy07deZNNp67mzxakZ4MZT0VF0rxOApvgKrUEPvz0Dx5aUvhPvKqb86diDpvBoGSoYsUaxdNeV6zFkzHmUOpKCYQ/oHX3QDl8J/nUsHz8/G5YOhQublPvt3eHxpRBqu6fLjKNLYMWTyn1aR8tK6a4BJhE7vzvepC1n4xm38KDF6Igp/erSv7HlH+FVR28w4SflGtY1VFcZp11ND/UeNKpSfnravQFtXizdQAc4+jOseELePmAh1OxZ6ub74/Yzav0oWXunsE5MjZ5q2tWMPey7Il9k2v+/juyIW8vbu9+W9b3W9DUG1xhs9dh/XP6Dl7e/XOocC3Gzc2N5z+U2lRTLLsjmuS3Pseem8gKRRqXh3Vbv0jPK8jVKzk1m3MZxnE4+rdjvoHHg2QbPMuvYLIuq9mFuYfg4+ihGDYCpJNlHrT+ia0Tp6thlIc+Qx0vbXmLL9S2K/SpUvN7sdQZWH2jWfintEn1+6yOrXa5WqVn+yHIqe1au0Hla4n87/8eqi6tk7dY+V0vXTqHWC3NQMv31jnZEzf4Bp8aNLR4z7+JFro0a
jf6WPNS+rGgrVcJr5Ag8+/dH5eRE1u7dJM3+nuy//rrrfUPpRoUhM5OYcU+RfeBAqftybNAA77FjSF64kOw98vll28HLozTEe965H1W/LjF5kQG1wq3MY+BjBEyeXPR609VNPL/1eYvH7xrelSltp5i1SQYDGRs2kDh7NnmnlL9/SrhER+P9xFicGja0abxkMJCxcZNJyf6k9aoDoXN+wLml7RE4/3QkvZ6bb71N2q/yBbuS2IWH4z1mNG49e6JWWJx6UMjYsoWYp56WtWu8vIjasAGNS+kRt+f2xbFxzilZu1egM4PeKj2V6L+MMNKtIIx0gaCCSb4EsYfAWLoCbiE303L480w8B6+lYLz9HJyLHUeMUcShnPcW5u3EE20i6dcoGHupAJaPLp+6uhIWSnadvJHG8Dn7yMpMZ6buC9pozD3qMZIPi6t+xcuPK3vjzNDnw69j4dRK83YnHxj6KwTcRRihJJmux4nltm/jGW4qB+cZXtS05tgNnltyBL2VchHOdhrWTmhNuI/8hzxfb6TTF9u4miSvUPFWj5qcvJHO8kMxhKhu8YRmLQM027BXKYS5dv0Emo+z/VwSL8A0hbJzrZ6DTu+Uuvln+z9j/qn5snY7tR3bHttGWraGVh//qbjt98Ma81vc+2y9vlXWt77vegJdSvfkv77jdVZfWl3qOICp0VPpFNbJprFgylV/ZfsrbLpmYYEIeKLuE4S7hcvaJUwK7JbC5l10Lnzb4VsaVmrI2eSzPLnxSZJybVP4L8ReY8/U6Kn3TLVcb9Tz1q63rF7fiQ0nMqbOGNJzCzh9I53ZZyezN36rbFyvqF68/9D792SeSpxMOsnANQNl7eFu4fzW+zeZF/5M8hkODetLgwuW048kOx2h06bh0kZ+vXNOnuT6mLEYUspQ8cIG1O7u6AICyDtTvnQba/hOnID3uHEy764+JYXrY8aWanTayjc91OyoI1/6eGybgb67le+Xvs89hy7QFKG05uIadt2wnCpir3HgzeZvoLmd1qNPTiblp58ouGpjNINGg1v37niPGYNDtbJVMChEkiSy9+whcfZsxYUKAJf27Qn5TkGU1Mo+C65eJef4iVKrcuiCQ3BsUL9cnnpJryf7wEHUzs441KxRIR5tY34+N158iYwNpWtwFEdbqRJeI0bgOaA/auf7k2IqSRJJsVlkpeURWMUDnd2d63Ft9Biydil/Fn3GP4vvM89Y3bdBb2Tx5L9IT8xV7H/sjab4BAuxQUsII90KwkgXCCoISYLN78DOLypsl0ZJxXpjY6bre3JMUhZ/q+unYan7NByuV4BCOUBAfRi90VyUDJPXf8Tc/WTkmkK+7Sjgde0ihmg2oVUZ2W6ow6sFY0nS+vHXax3wtKWUnNEAf74Pe6aZcuJDmplU2H2q3P155KTC9Fa2id351jDVfC8W4v7z/mu89utxm8o51g/x4JdxLWRpNAv/usqbK0/Ixod4ObJ5UjRatYp315xi3u4rpmmQykjtOoZoNuGmyiZFcmF3tdd4ePCzpU+iOJIEn4SZUguKE97aFCFhdVOJHit6WFQEf6/Ve9yMqcMn65QNjKfahbAsYbRZWSUwhXkv72nboklmfib9VveThduXpHfl3rzX6j2b9lkcvVHP27vfVvTKlhdPe09mdJphlm9/Nf0qYzeM5WaWdaGxQpx1zkxrP43G/pY9uxWBUTLy0d6PWHLWctRNz7DH2bS7MQn5F3COkBshOrWONX3W2LToUpEM/X2oYsrCzI4zaRl0x6OZq8/llW8eYdxMG77/Wi1Bn00xq+udfeAA18c9hbEU8bYHEa+RI/F7+aUi467g1i2ujRpN/sWLFbL/XTVUfNVLDSoVjlpHcopFJmkMEu8tNFDZto98hZOvhZSODWn54ifYBQdX2H5zjh3j+tPPYEgsUc1FpSJq4wabjiVJErfee4+UxT/ZfFyn5s0J/upLNO6l5+cXzfXkSW688CL5V64A4Ny6NUFTP0fjWn4h
M2N2NjETJpK1c2e596F2d8fr8cF4Dh2K1tNGjZ0KwKA38ueC05zbZ4qGsXfS0ntSQ3yCXci7dJlL3btb3Fbt7EzUxg1WxRmPb41h+xLLgrgNOofS8tG/J9ron4gw0q0gjHSBoIKIOw4z5OHhFcUuQy2mG3qy01gbk/o3uJPJXLtPaai2IOamsQN7N8i2rUxctmTP3Nrz6dMpmsBiJeB2nE/giQUHySmQRwe4k4kdehLwKGp7pWt1noouRVG+OLlpphB4V3/bwrlt5cpOmNcDq0rpgQ1gyK/gdOdH+Psdl3h/re3hlAAT2ldmUuc7CujZ+XraTtlKQkaebOyXj9WndwNTPXRJkvhi4zm+/vPOe6jBQJAqkTjJi3x0zB3ZhHbVbMs9LmJhH7hYwttt5wKvXrMsOocptLnXSstCfc0DmnP91FDOxCnk/AN1q8ZwWTNN1v5E3ScY32C8bXPHVK5rxLoRshDrQkJcQ/jlkV8s5iKXhlEy8sm+T1h8ZnG5ti+On5MfszvPJtI9UtYXlxXH2A1jFQWyiuNh78GMjjOo5fP31LKXJIlvDn/D7OMW9CuA/JRmqO0S0TrLjbshNYbwStNX7uUUFbGUDtE2uC3TOtz53H341wfU+N+PVLe+znMHtZqAd9/Bo18/MnfsIGb8BKRcZc+YQ61a+L/9FiqdslBfzpEjJP0wh4KYMlbDuI3K3h6Pvo/i3rMnKgu1udPXrydpxkyL+/Do3w//yZMpuHGDayNHWZyL2t0dqaAAKVse7aNEohu8NEpDlqPpPv10/aeZfmS6mf6Ef7LE5/NAl2d7NNndkmUP6xup+L2xmnRnFXO6zLGq51AeEr6ZRuK38gUrr1GjqPTyS6Vun7piJTdfsy5yqIR9tWqE/vA9Wh+fUsdaWlxy7daVoKlTy+WVN6Snc33cU+QcUtbgUOl0qN3cMCTZFjWkcnDAo39/vEeOQBd47xf59qy4wKH15ovObr6ODH67GQkffUjKIutVT7yGD6fSa68q9uXn6vnxrb/ISbdc/tTF055hH7REJQTkFCmLHSrUxAQCQfm4VjF5hZZopTnJj3YfscruDbqp9+JHCkvs3rNsoOuc4fFf4PkT0P0z8Agt9Rjv6ocy5aCRNp9u4cVfjnIhPoN1J+IYPe+AooEOkIaLmYEO8ONfV9EbyqBw7+Bu8mJXtABP+EOm0H1LhD0Ew1YVGeiSJDF1w1mrBnqwp3L9+mlbLnCgWI723F1XFA306v6u9Kx358FEpVIxqXM1/te9RlGbAQ3XpErkYzICXvrlGImZ8n1ZJUjBG5ufCQnKWgKFKIWpF2fvzX2cTbRs+VzO3q/Y3i7EskiUEg38GvBEXYW8eky54x+3/rjcBjqY8qlfbfoq4+qVIY1AgVDXUBZ0W6BooIOpNNr8bvOp4VVDsR/Az9GPeV3n/W0GOpg+dxMaTuD5Rpa/H3aeexUNdGedM2Prjr2X07NIx7CO+Dr6ytq3x2znesb1ov+fW7PYdgMdwGjk5htvcuO117n+9DMWDXSnxo0JnT8Px7p1cahRQ/HPc9Agotb9QeBnn2FfTbl0oRJqV1e8n3ySyps34f/WWzjWr2/xGH7PPYffq5YXSVJ/WUbMs+O5Ovhxiwa6tlIlwhcvosqfm/GZMB6Nh4f1SwR888gdA12tUtO/an9ZCbw4LxUrevw95cnyPZxZ2E7N089oWNJWQ7qzaW5fHvqSiva5eTw2ALRaWXvq8uUYS6nJbszLI+Gb8gks5p09y9XHh1AQa/0DnbljB9fGjFWM/sj4Yx1pv/1W5mPrk5K4OnyEZQPd0ZHgGdNNn9nJb6MLKV0kTcrNJWXhQi507sKNV18j74KNFWPKQezZFA5tkEeFpSfkcGLz5TsVDqyQsnixxWt/7M8YqwY6QGZKHjcupNo0X4F1hJEuEAjKR4H1H+mKoq76MtPtvmKX/QRqqK8rD3LwgOGrIDIadI7QdCwHem7mFSZw2qj8I7re0JglBpMhpTdK
LDsYQ8ep23l60UHyLRjcrav40DxSHgYWm5pTJjG8e0r0ayZveUmqdIEhy8DBtHJrNEq8s/qUmUe7JE+0iWT9c22IVMg/N0rw3M9HSM8tICUrnxlblUNLX+laXbEky9g2kXz8aB3FdYrEzDxeWXasTA+daV51lTtilcXKCinNSJcwonU7aqHXiNFRLp7j6+irWHatNJ6s+yR1feXn8VS9pxTby4pKpeKZ+s/wYuMXy7V9ZY/KzO82nyCXIKvjvBy8+L7L9zTwk38Og12Cmd9tPlEeZYg8qUBG1R7Fm83fRIXtC2TDaw3Hy6F8tbnLQ26BgT0Xk/jtSCy/H4unjptcUE9C4uOdP3AyLoa3drzBoG3K96xt3YPZXM/yuaatWGGx/Jlzm9aEzJ6FxqX0/FKVVot7j4eJWLnCVKqrsYJGxG00vj74vfgClf/cjN/zz2HwdGXPjT3EZMRY/c57jxhBwPvvmSp1KJC5ZQv6hATFPl1oKGGLFmEfFYXGwwPfp5+m8p+bqfT662gDAhS3+a2FitOhd65d84Dm+Dj6UNuntmzssipJOHQs28JcWdCFheL3ztu8OtGD1c3V5Nibv6fHEo5ZFEgs9zH9/HDr0kXWbkxLM5XNs0LqkiXob5Q/ByD/6lWuDBlK3iVlPYz0deusLi4B3Hr3PfLLUKGg4OZNrg4ZSt5p5UVrtasroT/8gEurVqgdHPAcOJCoP34n8PPPsL9dyk8C0l3DSHOLwKAuscCh15O2ciWXejzC9WeeJeeopd+V0jFmZ5O5cxd5l+9cn9ysAjbNO2UxkO7AmksU3E7fs4ZUUEDCNHkERW5mAYc3lF4FACgKta8oMpJzuXgonnP742z6u3aqbNooDyoi3F0gEJSPHVNNOen3mQTJnSvdfqRJ8ztCSNvPJfDkwsJwdYlo9RGe0q6mmdqUV/ynoT4TCp4lEyebj9O1lj9fDarPjnOJjFkgVwtuEenNT0+UU529osmMh19GwNVdoLGHpmNNwniaO+Gq7605xQ87lR+AAF7sXJVn2lVGpVJxPCaNPt/tUhSU69MgCD9Xe2ZuvyTraxruxc9PNrcacrhwzxXe/E1Z3Om93rUZ2jzM8nne5uf91/hm9R52qhU80Y1GwCNfKW6XnJtM9M/RstJpJTHkBJF9RR66rna4rpi/3K9qP95uIVd7t4XEnEQmbZ3E4fjDqFVqHq/xOC80eqFIUKqiWH1xNR/u/ZDMAttykFsHteaj1h/ZVMu5kBx9Dh/t/YiVF1YiIdEqqBXvtnzX5jJq95LfL/3O6ztfxyBZD1F2ULuxdeCGu4pisJXU7Hzm777KvN2XScm+YzirNBk4V/kYlcp8rpLBAWNuCNGXzjJ+jYKR7uuN24oF9F//OL3Xp/HIPtsf91y7dSXok08US6jZSvahQyTN/p7MrVtBktCFheI9ajTuvXuhvh3Wfib5DKPWjyLjdgnJwdUH82rTV63eM9LXrSP2pZdtrq1uX6UKIT98j85P+XMnFRSQtnYtyT/8QN75C0gqFRvrw5zOaozFFhg/eOgDekb15OczP/P+XrmA4KcNp5D98jxqXi6/8VUSh5o1TbXnO3dmU8yfTNo6yeLYKPcolvdcXqH3iuzDh7k6SF5JwL5qVSJ+W6n4PhkyM7nYqXOFCBBqvLwI/eF7HGrcicxJXb6cm2++RZHqrBUc69cn7MeFqBQiAoqTf+UK10aNpuDGDZvnURxJkkjduoM/FseSoqkEgHNmLPWOT8chz/J18B73JH7PPVfqeRQn+9Ahro99AmOWqTyn18iR+L70Ihu+P8XFQ9adBZGXVhF+bb15o0olL4WoVhP520rsq9zRy9m17DxHNllwlJTA3knLyE8eQqO7e1/w0c3X2bnsvNUsvpJUinCj3yv3VuukvJTFDrX+qRUIBAJLWFJzH/abKZy7TPsympTa938PecplnJSIkXwYkv8asasz+dLpJg/XDWDdiZtM+OlIMW+4iq3GBmzNb4A7mQS5
qjmVa7txDtC3YTCf9K2DVqOmXXU/QrwcuZ5sHkmw51ISZ+MyqOZffrGaCsPFD0ashdxU04+vk7kX8HhMmlUD/Z2etRjeMrzodZ1gdyZ1rsqn6+Sh4ysOx5qXxivGK92qlZoTOKR5GNvOJbLptHzl/YO1p2gR6UVlP8vXdPrWi7dF3Vy4budLiLqEJ82KJ317zPZSDXQAjWMsKrsEpHzzsGOtq9yLDmUPdS+Oj6MPC7otIC4rDhedCy5290Yl95GoR+gU1omr6VfRS9a9Kz4OPvg5+ZU5v9NR68i7rd7l+UbPozfq8XWSh23fLzqFdWXq+qvEOcxCpbZ8/nYZnXHSlu1+UVZupuXw/Y7L/LTvGtn58vuqZHBFn14HnfsRs3aVJhd7+3M8tkPZWPF/dgKePpG80eJNXil4mSwHiYHbSzdsCvO771Yh26lhQ5ymf4chMxNjWhrawECzz5DeqGfS1klFBjrA4jOLqe5VnT5V+ljcr1vXrqidnIiZMNGqJxXAoW5dQmfNtBrartLp8OjdG4/evcm/FUe/DY9z2Whu7DhoHOgQ2gGA2r5yTzrAwjP72V1vKB7VeuNzW8RycNMg/kh6izyDeYhwZY/KXEm/gt6ol7V/8NAHAGj9fM0WFhaftq4ncTHtImsuraFXZcsaG2XFsX59HGrVkqnk5507R/b+/Tg3bSrbJnnOXEUD3aF2bfyLlaUrRMrL5eYbb5J/Wf6bZEhO5uqw4YTMnIFTw4YkzZtH/Mef2Dz/nCNHSJwxE99nLSuW5549y7XRY+QiebfRBgQQ+sMP2EdGKPaDKUrp2M1KpGjuvJ9ZLkEcrzWWRoc/Q21BayRpxkxco6NxrF/fpvMx5ucT+/ykIgMdIHnuXK5ro7h4vnRxuquhnQi6sROd3rS9Y6NG2EWEk7ashMip0Uj8V18RMs2ke5GRnMvxrcoh8G5pl0gvkf6Ul63n8sFYKje/u5rpV44nsvOX83e1j38ywkgXCATlw2jBixHYoOxGOkBwI1Mt8wNz4a/vINN6uNRFYwBD8l/nJt5gkBj/0yG2nwvhl4PXLSqU16sawcwhjbianMWMrRdZfewmhlLkzEe0DOetHjWLQrY1ahXDmofzwe/ykLj5e67wYR8r9dL/TlQqcFT+0V5xWPnHVqNW8WnfuvRtJFfufbJNFNvOJrD3srxWuNI17FijEo3CSg8RVqlUfNK3Dl2/SpXltOcWGJnw0xFWPNMSe625wSBJEp+uP8v0YmH2R6QoQihhpN86ZRLps5MbWtuubyt1foXo3I6Qn2he+kzrIv8MOGodaeovf3AtK7bUQb9bHLQOVPOyPYe4vHg6/H3Kxrby1abzXLgaisZpJI7B81Fp5HmWxnxPYq/X5/TNDGoGVnzk3YX4TGZtv8iKw7EUGKzfh/KTW8qMdIBOhyX80uTjdWFheDxqMnS7R3ZnR+wOflWtIdseRm20bKiXVEqvCDQuLooh81uubynKqy/Od0e/o3tkd+w1yiJyAC5t2hD6/WyuPznOzGApjlOzZgR/+61NdZ8LOSbFyAx0gOiQ6KJoiqqeVbFT25FvNP/MnEk5CTQk1cGVVAfTwuJPuUnE+RVAifSKrvU6k514jF2x5qWwLnOR1yL9ZAtaZ5PPcuBW6fXevz3yLV0julq9dmVBpVLhOWSIogBcyo+LZEa6PimJpHnzFPfl9+ILONZW1qEI+3Eh18aMVQw1N2ZkcG30GNy6dCFt5UqLc9UFBip6whO/+w7nli1xaihPv8k5coRrTzyJMV3ZOWAXFkbo3DmlCr6l3srm5E75sTPcwrgc0YOoS5YrayQvWEiQjUZ6+u+/o79l/myU7eDDgTOOYMOamkHryNXQzlS+ZMpN9xryOI7165O+ajVSvvnnOXPTZrIPH8apQQP2r72MQS+/b/gkHCE05k8ONZBHeBz+YjluLcBr6FC03mXXbMhOz+fPBWUTtP23IXLSBQJB+TBa8D6VzMMqCw7uJkN94jHo8SV4
Kq9cnzSGMSD/LZOBXjgdCX4+YNlA71bbn9nDGuFop6G6vxtfDmzA1hejGdYiDHut8q1wQvvKvP1ITVlO9YDGITjq5L+IKw7FkpZdegjmqRvp7L6QSK4FcTpbuBCfwe6LiaTn2hbyWYgkSWw8HSdrt9Oo+XZwQ0UDHUwG/BeP1cfNofT3V6WCl7rYbvx5u9gzpZ9yzvWpm+lM3WBe7sVolHjztxNmBjrAEaNCnrNkgJtHZM15hjzFusXBLsHo1HIVa5OBdOfDpdIlo3GQX8cWAS1w0DoonovgwWDvpSS+3WrSYjBkR5F9bSySXr6Ik5fQBSQtvx0piyJb6RyLSWXcwoN0+mIbSw/ElGqgAxhzQzHkmH83HfIkHt2tbHD7PTfRTI399WavE+gcyLrGar59WI1RwQb3nTihzAZ6am4qe27s4WZm2fOPLXmG47Li+PnMz6VubxK1m49GobyVS7t2hMyaaWag38y8yb6b+0jLU1jVuM3ay2sV2x+OfLjo/zq1jhre8rDnfM1VSsbkxmQrR9s0rNSQ6OBoxb5tMfLFw5/O2FbG7GbWTZaeXWrT2NIwSkZOJZ3iVH1P1J4esv6MzZspuGn+vifOmKmonO/csiXOzZXTwbLy9JzI1pD36Tc4NmyoOEbKybFqoHuNHEnEqlXowhQEY41Gbrz8MoYSAnNZe/ZwddRoiwa6fbVqhC360SZF9n2rLyFZePi4GtoFfb+nLFcu2LCBglula9pIkkTKj+bK7EaVmlM1hmPQyH9zdA7KVntMcFty7T3QVqqEa8eO6AIC8Hz8ccWxCZ9PJelGJmd2K3y/JSORl9fgnnYJh1x5DniiW3VuzZ7PhfYdiHv3PfJjbL+PSpLEnwtPk5NRtuebfxvCSBcIBOXDopGuXKanTOgcoPFIePYA9JsD/ncMOKnaw/xSezpJ2O6t798omG8GNZB5Y0O8nHi3V212vdqeZ9tVLjJAHXUa3utdm0mdlcO13Z10RSXFipNTYOCXg5ZztrLy9IxbeJDuX+9g8Pd7af3pFs7dUi7tZQlJknjrtxN0nLqdwbP3Ej1lK/uvyL3bljgfnykL1QfoXsefrrWte28DPRz58NHSIwX6NAgqc9h/dDU/RrYKV+ybteMSuy+YQhELDEYmLT3Cj3/JBYGOGC3UZlUIed93c59ZveNCukV0o3VQa1m72i4JtcP1osUZJS86mDxuggeXtJwCJi09apaCacwNIfvqk2j1JiNYkjTkJbZDn14PgFVHb2AsJeLGVqZvvUjPabtYdzJOlgZaGvnJLc1e99gn4a5QScyhVi1cSwh+udq58lHrj1Cr1Gyrq+bTvmoSbwcHZDjA749FcKm3ZbG3klxMvcj/dv6Pdkvb8cTGJ+i8vDPTDstLEVqiNM/w7OOzzcLgLeFYuxZhPy7Eod7t3witFq/hwwn++quivHeAbw5/Q5flXRi9YTTRS6N5c9ebXEoz19EoMBSw4coG2THc7d1pFdjKrK2Oj/w+qNJmotKZh3lLDvIQbo1KQ12fuhbvFSUjfNLy0lh7Sb54YEn8cPax2WTml7/efYGxgNUXV9N3VV8eW/MYT25/lt/rKBhLBgMpPy0pepkfE0PKkiXycYDv88pVFc7fyuDhr3fQ+9tddPvhCF90fganVq0Ux1qicHFJ4+JM0GefKSrSF8TEcOv9D4peZ2zezPUnnrRYis+xXj3CFsy3qQxcwrUMzh+wbmQfMjQiZO0GXLvKRSDR60n9ufRFqdyjR8k9ccKs7UpYV1moeSEdh9ckop58/ka1jsvh3fEc+FjRQp73E2NRK0S7ZB84wO65+xXvVf5xe3HJvokKiUoK32Wjxo4E3/pIeXmkLF7MpZ49ydhim7jhiW2xXD3+7xB/uxtEuLtAICgfhnvgSS+JRgu1+5r+Uq+DWoPKLZC3JQmd82lm77CcV13IyFbhvPmw3BteHB8Xe17sUo2JHatwNSmLAHdHnO2tn8fwlmH8tE9uKC7Yc5WRrSJk
edqp2fmMmLufI9dTi9oSMvJ4bskR1k54yGbv1YZTt1iw547CanJWPi/9cpTNL0RbzA0vzsZTymkEHWtWsun4PeoG8ueZeH49pLwqbqdR83zHqjbtqySvdK3O7gtJnC2xcCFJMGnpUX57thX/W3FCMX8d4KQUjl5So1WV8C7GyB8gLKm6R4dEU92rOn9e/1PWp3M/wlN1OzJ14zm0rnIjXYWK1sFyA1/wYCBJEm+sPEFsqnxxRmPwZ36XRXyz/SCbTqYgGYp5YNNy2Xs5mRZRd1dma/XRG7f1E0qnfXU/hrUIw9PpjnBbgbEJE3dtJL0gBddsiUf2KXvRfSc9j0pBAb1hpYaMrTOWmcdmcqiKmokRKtyzIMMR8uyuM2/DaOr61GVUnVG0C2mHWiXfx7GEY/xw/AfF78fMYzOp61uXNsFtZH0lKc0znJqXyvyT83m2wbOl7ss+KorwJUvQ37iByskJbQnP+t6be5l1bFbRa71Rz8oLK/ntwm+0D23P6NqjqeNbhx2xO0jPl3tVu4R1QacxX3xWMtIBNA4x6AsK03wkNI5XZGNqeNXASeeEk86JGl41OJ1sfi/Zc3MPOfocHLWm8pcrzq8g1yDPve9XtR+nkk5xMsk8XzwlL4UFpxbwdP2nFedoiRx9DivOr2D+yfncyDIP3V5ZJ5dOO0BTwlhL/eUXfJ55GrW9PYnffKMo5ufatSuOdeR5/Bm5BYyav99s0fi3MylEP/E6TZy/ImODfMGkJJVefx2vYUOLXjvWqYPvs8+S8OWXsrFpK1fi0qY1kl7PjddeB4NyJJtzyxYEf/MNamfb0iT+Wqlc2aQ4mSl57FoXT9sXXjCdVwnRu5SlS/EZ96RVocbkEl70NLcIroR1Uxxbs1UAkQ18cXODy0fiocR3+aZ/C6R2d6JBtJ6eeI8ZTcKX5iKr6a5hXLsut9BVxgIir9xZOAoquMBV5JUA4vwaExBnKtcrZWcT+9zzRPy6HPsoy9U9km9msWu5ctWZ8Lo+NO4ebnHbQnR2FSu0er8QnnSBQFA+FD3pKovlce4ajxBwM4WdqVQqXu9egxc6WTcGJ3aoYpZPXho6jZrKfq6lGugA1f3dFMuxXUvOZutZ81X1+IxcBs76y8xAL+TUzXT2XLJ9xfgHhYWJK0nZ7LVxH0oGrk6jok1V20W93ulZixAv5frpjzcPJcSrfEJbDjoNXw2qj51C+kFcei7tP9tq0UAHyMWeM5JCuGOsec1bSZIUjXRvB29q+9SmoW9LJIM8fNDe/RhDWwSj0uSicZKr2df1rYuPY+meF8H9YeWRWFYfVVZvfrFzNeoGezGwQV0zA72Quw15v5Gaw/9WHLc6Rq2CXvUD+WNia+aMaEJ0NT/qhXgU/TUO82Ngjf4APLrbiKNCuWKnFs1xseKJfLLek0UGZoFWRaK7ijy7O/fHY4nHeG7Lc/T+rTcrL6ykwFCAJEnsit3FqPWjePz3xxUN9EK+OvQVRgsiWYVY8gyXZMGpBSTmKIt5lUSlUqELCpIZ6IX7UUJCYvO1zQz+fTCj149m3sl5iuOKh7oXUsfXgpHueGfhVm2XgFor99Q2rHQnpFvJm55nyOOvGybDxmA0sOSssnd6cPXBPNfoOcW++Sfnk5Rj229CWl4as47Nouvyrny07yOZgQ6Q7KZiXzUFJfeUFNLX/k7u2XOkrVIoy6bR4DtxguJxJ686pRjVtfJkIkFTP8e9j2XxQNRqAj780MxAL8R77BiLJQBv/O8Nbrz8ikUD3aVjB4KnT7fZQI85m8K1U7ZFsp0/EM+VG1pc2suFRQ2JiaSvX6+wlQl9QoJZv17jwMkaI2TGN4BTXiLNO5p+hzT7N+Mft1e+Q5Wag9vM5+01dCiaYpEDEnAhUlmEMDh2e5FqveewodTf8AueHvLPR4pnNfLs7uh5SHl5xL74EsZ8hZsXYCgwsnHOSQwF8nuIk5sd7YdVp1K4W6l/XoH3vhrH34Ew0gUCQflQMtI1FRDq
biMqlYrxHaow+RHletRvPFyD5ztVrVABpJKMKKaAXpx5u68U/T8mJZsBM/ZwJs5y6Ob8YuOtcepGOvsshLavtMGISMjIU1woaB7pjZuD7e+dq4OOLx9rIPPcuzpoeaadhZBzG6nu78arXasr9mUpKF8D2GnVPNnWFPJ3VCkvPe2aqSzdbU4lnyI+R1kcSq1Ss/VMKvoMucCRpMnkZMpBggOvoirprb+9Pfo8uLwDEuRK+IL7x/XkbN5aqVzqr2WUN2Nbmz4/bar64ukk/y78fvwmefryaUgYjBLP/3yEdAs1iu20aoY0D2Xri+34amADagRYFql7rNpjBGc40PmQcqy83yTLJbrAlE/9ceuPi7y0lricdpk3d71J9xXdGbBmAOM2jWN/3H6r2wCcSznH75d/N2s7E5fOX5eSyL8tPPXr+V8VPcMlydHnMPvY7FLHWeN6+nV2xOwoddy+uH0cjj8saw9wDqC+X31Ze7BLMJ728gUBtWNM0f81TsqRXg39rBvpAFtjtgKmChSxmfJ7ezP/ZlT2rEzzgOY0D5Dnemfrs5l93Pq1i8+O5/MDn9N5WWe+OfwNybnWjc11jZRNhpQffyThiy/kZbwAj759sY+Qa8usOXaD5YdiZO0A+y4nk4+KgA/ex1PBCEenI+jLL4qEEUui0mgI+uQTxfBtKUe+KFCIe6+eBH/5pVmahDUkSbLoRdfZK3tyt/10Fl0veUk7gOQffzTt1yiRcD2DCwfji/6OzfqDeI/axPs2IN63AaerDyFXYUFYZTRQ88QPJL7/jimHfdEiIq78jkpB6Pfi4QRuXb4TOaJ2dsbnqXF35uNZnVRPubaMRp9D2DVTlIPPM89Q6bXXUKnVVG+nEHavUhPvZ75gknf6tMxjX8hfqy6ReF05VaPD8Bo4upS/JOQ/EWGkCwSC8qGk7l6Roe42MqJVBN8MaoDP7Zu3t7MdXw2sz5jWynlaFUnHGpUIdJd7XHecT+RCfCYX4jPpP2MPV5KU894K2XjqFjEp1seAdWP+jxNxpQrRbTkTr5hb1rGGbaHuxWkU5sm3gxsU5Wh7O9vx/bDG+LjcvarwiJbhNnv2ne00zBvZhFe7VqdqJReOSBbC6IrlpVsKdW8b3BYweU0L0uRKwABrL63F0V3ZAG+iCYQvasP8HvBtU/jtGcUHV8HfS6GRnJEnN5LdHXV8PqBeUbSNTqPm4boBsnHpuXq2nk2QtdvCrO2XFKsiAIx+KIJdr7Tn/d51CPUuPQLFz8mP98/WRqfwVT8R1QjHOqVrRoS6hfJdh+/wsPcodWxcVhxnkm0L0S9k2uFpFBgKMBolnl18iK5f7mDgrL9o8+kWLiSk8/PZ0vNvC1l6bqmiAryt/HT2J5vKLFqie0R3xbB/lUpFbR95CLfGIRYwvTlKoe4ADSrdubfU8KqBn5O8fvvW61sxSkYWn1EW1xtUY1DR/59r+JzimJ/P/qxo4F9Nv8rk3ZPpurwr807OI1tf+m8PwOkQuKJQaj731Ckyt26Vtavs7fF5Rh5yfyM1h9d/tRxVklNg4PC1VFRqNZVeew2/V15B7WT6buhCQgidNRO3zp2tzlUXFKRY7s0Sno8/TsBHH5VaT704l48mmhm5hTg46+j3SmPsnRRy43MN7Dpij65Y/fFCso+d5OjS/Sx+Zy9LP9jP+tkniv72XA/mRK0xRX8Jvsq/TxFX1uCWcY3MzZu5+eqr5J2/gGNeMsGxygtVe1ZeQCr2G+XZvz/akBASvOtwrupAxW1Cr2/GriATv1dfwXf8s0WOkCpNKpUsYgBAnF8TWVvynDlk7dlj1nb9TDJHNspTCAHqtg8mtNbdpRv9ExFGukAgKB9KddIrQjSuHDxSL5DtL7dj+0vt2PFKO3rVl4u63Qu0GjVDWoQp9n2w9hQDZu7hZlrpHiOjhKIQWnFSsvKtesszcvWyMPuSbLQQKt6hhsKTlw10rR3A0bc7s2lSW/a+3oFmkRXzI6pWq/isX128
nK2vmrs76lg0tjkto3xQqVQMaxFuWTyuWF66Uuk1e409zQObE5+Ry64LiRiyIzEWyMXvNl/bTLJ0VNZuzPcicv1bkFXsPTj8I5yzHMIo+HtYeuA6B67K6zYDfNinDgHu5l7l3hbuH+UJeT8ek8bnG5QXdXrXD+TNHjXxdbV9YSvv/HmcNsnDVw0qNdMiO5JjIdqkJI39G7OmzxrGNxiPl0PppRLLQmxmLMvOL2PV0RusOXZHFTouPZenli9U9gwHNKNLuDynVW/U892R78o1j+yCbFaeX1mubQtRCnUvRFE8Tl2A2t50n9U4XZH1R7hHmF1vlUpFuxB5+HNybjK/XfiNv27+JesLdA40U4av5VOLzmFyo7XktTuZdJIXtr7AIyseYfn55RRYKqNaDLPPhkpl0ZuuuO2woegqmS8AlxZVUsjO84m3D6nCe+QIKm/dQtTGDUSuXYNzixY2Hd+9x8O4PfJIqeO8xz1JpTf+p6jjYAmjwWjRi96oWxhegc5EP64cERZ3MY0brcYUvdZr7Lke3I49zd5h558ZpN6ybdGkJB6p5wm7trHoddpvd8q+hV1bj0ZBKDX2bCoxp033RoPByLlDSeyt9zLH64wjx1G+UK7LzyAkdgsB77+H94gRZn2uXg4EVvaQbZPhFka2o/wZ48arr6FPMR07N7OAzfOUxVi9Ap1p0cdyDvu/GWGkCwSC8mFQ8qTfP7EOJzstod5OONn9vd78gU1CFXOot5xNIDlLOe9KiSX7r1n1hP984Dp5CnVKi7PysHK+LUBugYEd5+WewBoBbgR7li+HHEyhupX9XNBqKvbnxM/NgY+tKMn7utqz9MkW1A/xKGrr0yCIePtQMiR5KG/+NVOoblxWnEyoCUyl0xy1jqw5evN2GT91kbp3cXL0OeQZ5XWZK2X64pp2Xj7R0wp5moK/jdwCA19uOqfY179RsKLXvFGYJ8Ge8s/QptPxZSp5mJ2vZ+LPh9ErKMMHeTjybm+5J7Y04r/8SjE6Y0NoE646+yqms1jC3d6dJ+o+wbq+64pKtNmKq86VsXXG8nqz1xX7ZxydwYqjct2GGOMmxfGDqw/m2frPolHJf0PWXlrL2eSyp4+subSGjAJ5mlH7kPaMrTMWV531ChRVPatSxVPu8SzEcl76dVTaNNR28oWh4qHuhVgKef9o30eK7Y9VfwxNid/a8Q3GK1671RdX8+v5X3liwxMMXDOQDVc32BRZUNenLl+1+4o/+//JqNqjitp31lKRaUOFSbWbG95jxsjarUWVFGfnBXMtAo2bG3YhIaitCKsp4f/Wm1ZLqPm99CJ+zz1X5rS4s3vjSImTG9MunvbUbmta5KvcyI/qLeX3F4AT11xIDmrMpfDu7G7+Lucr9yPPQZ4+YStaQw41Ti9AZeG9tSvIJPT6ZsW+PSsvcmzLdX58cw+b5p0mLdPyb3l4zAbCPvsIj379FPurNlWOyrsV2EzWpr91i7i3J2M0Gtm66AxZqXmyMRqtms6ja6FVKHn7X0AY6QKBoHwo5aTfh3D3+42Xsx296tn+cFs7yI3oavIV6tTsAlYdUTayDUaJhcUU3S3x55l40nKUjYhdFxLJVRBj6VhOL/rfQeda/gxuJheCC/Z0ZNm4FrIyb872Wvo1DuO4UZ4DKcUcBKPRqqo7mHtLC9Lr2zzXATkW6kTHWi4zdS/Jz8vj7KHtXDi6C8lofXHn38y83Ve4lW56+AtTxVFbdQkncgn1cuLtnnLdATB573rVl3+n8/VG1p2Is35AowGu7YVbp3h/zSkuJcgXdNQq+HJg/TLpQABkHzpM5mb5g3a+Wsvi6p0AylSOsRBHrSODqg9izaNr+Kj1R1T2sKwr4ePow6RGk9jQbwMTGk7gsWqPUcNLXjM8OTeZA8m/mbWp7W6hdZarNge5BNE2uC3h7uH0qSLPM5aQ+Prw12U6J0mSLCrID6w6ggkNJ7Ch3waeb/S8RbHHQdUHKbYXUttbeZFF7Xhd0YsO
5qJxhTTxb6KoEaBUItJeY8+jlR+VtVu7dm/vfps9N/fI+pRoFdiKOV3m8GP3H2kf2h6NWsPzjZ5nYsOJAOTrVGyuV7pBm9ovmq2pB9h8dXPR35xDq/hyz3K0ricU/9T2NyisM38sJtXib1lZ0Li6EjjlU1CrKdA6k+oeRYHWCVQq/N99B+/Ro8u8T32BgX2rlfUGmvSIMDMoWw+ogpuv/L2VjHCkykiuhD+MXifPnS8rLVs74Zhn/bsfEvMn9hr5c1vCtQx2/HyezGS5kVwch9wkmr02ADelMnK3iWroh1oj/3wkVOmguHyQsWEDh6f9zsXDyqlELfpE4R3kgj4xkay9+zBklK1k7T+d/94TtUAgqBjus3Dcg8TwluH8clBZBKc4TcO9+H5EY87GZSjmt87bfYX+jYNlq/qbTt9SLBtVknyDkXUnbvJYE7lha0kVvTz56H8nb/WoicEg8fMBU15q26q+fNK3Lv4KWgAAw1qEsfavyrTklFm7vSGTgoTzlvPRQ9pyOTGLozFpRW3G3CAMeb5o7K3nIqsM9ozMO6mYj0fCWchNBwfLYmAVTXzsZdLn9KWawRSOeeL3+lR+7g8cHMsfMfFPJC27gO+2mIzCd7RzGarZhFolESP5cL3V97hYqeLQq34Q326Rh7P+diSWAY1DLBwwFhb0hCTTMVsbmrCU8ehLPGo9064yTcLLFmIuSRLxUz9X7Pst8iESHT2A8hnphejUOnpE9uDhiIfZEbuD749/XySmFuoaysjaI3kk6hHsNXfC89UqNRMbTmTcpnGy/ak8tqJKalqklq/zUjYUH6t2xzP8VL2nWH1xNXkGc4Nhe8x2Dt46SKNKttVy3xe3jwup8gUBQ04IT81JYNqgRB6q4sOo2qN4vMbjrLq4inkn5nEt4xoalYbelXvTt0pfq8fwcPAg1DWUaxnmqUoah+tgVP5sNfCT5xLba+xpFdiKTdeUowyK83Dkw3g4eCj2jas7TvHalYZapaZzWGdG1R5FDW/5ggvAmDpjcNG58MHeD9jQUM0j+wyoLTjkk11ggtda8rf+LuuzK2U9W59Rg5wbAzEa7fnrUhJdavmX6VyUcGrUiKyJX7H/oAGjWodKMhIeCn6t5IJ7tnBiWyyZKfJr7OnvRPXm5vO1c9DSaVRNfp1yCEkhouZuUalVtOgTRZ1OocRdfZyURYssjtWqDDTqHMLuPywsKFvBXp9Bp4GhuLeTe8SL4+CsI6y2N5ePmkdCZObqKHioJ3Y774TgF2gdiQ1szZXjWlBwlIfW9KKKdxLXn/2EzE2mxUmVTkfglCm4dZWnxvwbEUa6QCAoH4qe9P9mSFLtIHcah3lazHsFiK7my/THG+Fop6FxmCc1A9w4ddNcdObUzXQOXE2RPcDbqv4OppD3kka60Six6bQ8X93P1Z46Qe427/t+4KDT8HHfOrzSrTpZefpSy7uFeTtjDGgICatkffv3/sHeNHk+b10fU+m0hTtLhkWr0KfXR+O7UbZNcapmOWCvsvQAJsGNwxDZ1uo+KpLLv7xBM8MdA7N23hF2zH+N1uOUFXX/rczcfpH0XD1t1UcZrr3zHgarEgn66ylosBuclI3lqpVcqRHgxukS39HdF5O4lZ5LJTeFRaK1LxQZ6ADdNPvpa9zBz4Y7Ocf1QjyY0MFyCLUlsnbsIOfAQVl7ptaBpVXbF70+dDUFvcF4V+knKpWKNsFtaBPchsScRLILsglxDbEYEtwysCVN/ZuyL26f+X40edh5byEvvgeoc9G5H5Jt66Bx4NEqdzzDfk5+PF7jceacmCMb+9Whr5jfdb5NocmLTysLruUntyQ7u4BXlh9jy4vR2GnV2Gvs6V+1P/2q9CMpNwlJkvB1sk24srZPbZmRrraPR6WW/z76OfoR7BKsuJ/okGibjPTB1ZWVwQEqOVdicI3BzD0xt9T9gGlRpnfl3oyoNYJQN4XSlSUYWH0gzjpn3tz1JgcrG2lyXvmet+whNfm68lVV
0bqexil0NtnXR7LrQmKFGOmpt7LZf1yD8XbOuaRSc/k6XH5vH2F1vGnYJUwxl1qJvBw9B/9Qjmpr3isKtcL3zj/CnSYPh1v0vivh4WakQedw0t9+EalE7Xm1owNB075F6+yIV6Azdg4mU87vpRfJ2vsX+ReUc+XdOneiavdqHN+XQkZS6Vo5AC4eOuo0dqV21xbYuVivCFFI1ab+MiMdIPWhwQSc3EF2loHrwe2IDWyNwUKVCXt7qHpgBle/M9ePkQoKuPHSSzjUroVdsPJ36d+ECHcXCATlQ4S7mzHcQjk2gIfrBDBraGMc7UyLGCqVyqbybQDnbmWw+6K83m2IlyO1AuXe2b8uJxFXQqzuWGwaCRnylf8ONSrZXEP+fqJSqfBytrO5/nrjVp0U23dc2oRe4XMbHRKNJEnKwmCZyiq6xRmcU4r6dKzcuLpXFOj1VE/ZImuvc/MXYuNtqzn9byA+PZc5u0wPxb01O2X9qoybsGq8VfX93goh75KEcq319Btwbp2suZv6juHqZKfhq8fqoyujAS0ZjcRP/UKxb1mVdmTa3fleZOUbrJZ7LCs+jj6EuoVaNYxVKlVROHRJdJ5/odKmonM/gEot1+h4OPJh3O3NFwpH1R6Fq508X/xw/GG2x2wvdc43Mm8UlTArjlHvgj7DlEcem5ojizpQqVT4OPrYbKAD1PWtK2tTqSTUdvJ7dsNKDS1ex9bBrRVV5M2292tINS95SazijK49WvHaFcdZ58zI2iNZ33c9b7V4yyYDvZBHoh5havRUNjZRjpq76Qlb6t7db4rGMQan0Flsu6hsbJaVM3tuYtQrf8+vHk9ixWeH+HXKQa4cTzRTOlfiyMZr5GbJw/D9wt2IqK+cNgHQqGsYAVGlL4i7p16g7rHvaBk7D78Lm3FPOotH+iWzv7DOjQiu449/pHuRgQ6gdnAgaMoUVDrl98ZzyBA0OjXNHpGng8nGBjjTcUQNhnzQiob96ttsoAOE1/FG5yB32Fw8mc7VPu+zp/m7XAvtbNFAB6h6YCbGfXKBVzAZ6onffGPzfP7JCCNdIBCUD0Uj/b8Z7g7QtbY/YQollB5rHMLXgxrIxOV61g9UrMe87kQcN9PuhLZb8qIPax5OnwZyFWolI2LTKeVQ9041H9x89Luhad1aJKjkSvNXtMoK+tEh0RyNSVMsldcmoobig3ghKklFp5w0i/3A32qknzu+D3eVPA/aQ5XFrhUzy7Qvo1HiRGwaJ2LT0Bss5LXnpMCVXZBd/jBr8jJM+0gtf6mtkny1+fxtDQaJh9QnlAedWQOHF1rcR8/6gSjZVL8paUccXwYKWZf11BeL2ic/UotwH+fSJ1+C9LW/k3dGXgbN4OnFyqiHZO37bBDmqmjq+talQ2gHWbtKrcfedyN2FkLdlfK+3e3dGV1bOVf4y0NfYlCqLFKMJWeXYJTkn9eClKYg3TFqLKUAlQUlhXdLRLlZFgr0cvCivm99q9sPrmHZi16Iu707Y+uMtXiMCQ1MufiTGk0q02JEcdqHtueZMTO4FCA3IRZHqzEo5CSXFY3DLRJdp3IoVp6yoITBKHH4Wgqnb6abGdqSJHFuf+nv882Laaz99hg/v7+PkztiuXw0QfZ38VA8RzYr36Na9ImyupCl1qjpOLImdgrGK4B34nEaHp5KoyNf4JN8ktyDB0n64QfFsZ6DLWslONSoge+kSbJ2x4YNcWxgWnCu0tQf72DlPPhKEW50f6oOg95sSrXmAWjKEZGjtdMQ1UD+2crJKODcJTCW8pwYeGMHvknHrI5JW7Wa3LPKgqD/JoSRLhAIyoeiuvt/15Ou06iZNqghQR6m1WE7rZrnOlbh47510Ch4qx10GsXccYNRYtHtcmxp2QX8ekju3XXUaRjQOIRH6ikbESVLtSk9jDrqNLSMsrzy/09GrVaRXeKBVw8cd5I/3Ae5BGFvDOSLjco/+L0bBNI9orvFY9XJMeJaWi30v9FIv3VC2fsAUCd2CWdL
hG9bIiUrn/4z99Djm530+GYnbadsZe6uy+Ylvk79Bp/XgHnd4bMqcHB+2Sd8ccudfXxVDza+DXcpdHc5MYsl+00P01VVMfiqrCyi/PEKJCobAgHujjSLkIfDH49N42JCplmb/qhy7W9PVSZhqlt0q+1P/8ZlD8+U8vNJ+FpZNM37qaco0MnLt91NXvrdMKHBBEVvsM7joKJn2cFQxaJneHCNwfgplG26kHqBVRflqSyF5Opz+fX8r7J2SVJTkGqeg7zp9K1SPaelEeVeBUmyLc3LRapqtd+SyjuY0gDah7a32F+cx6sPxY/WSJLpx0Fj8OaZOi+xvu96xtYdi5vd3etjNA9sQcjXXxPvY/rNN6jgpzZq9lavOLNCbZfMs1vHcDHVukc9ISOPrl9up893u+n21Q5GzdtP/u1KKHEX02wO7QZIis1i66Kz/D79uOxv3awT6PPkvyGhNb0Irla6MrubjyMPP1MXB2eTkapSq6jatBLd6sdT78QMPNLMz9OoIJLm3Lo19hHWPeFew4fh/dQdjQi7qCiCPptStIigVqvoPq4O7n53PNmhtbzoPakBfV9uREQ9X1R3GWFXtWn50hQq3dpPlQvLSh8oSSR8+WW5jvFP4r/7RC0QCO4OxTrp/82c9ELqBLuz9aVoriRm4e1iX2qd7yHNQ5m1/SIl9WR+2neNZ9tX5peD18lRKMvWu0EQ7k463NHRMsqbXRfMH4BP3kjnQnwGlf1cuZ6crRj+2rqKDw7/4rIm/jVbQfwdJeyj9vakKXgF1Dm1aPf5NgwKoj4u9lo61qhElr4rU/ZPwSDJ34vuOZZ1CIrIuGkSFXNXrr9dkdjFyusqF1JDfY1PflvGK+NGWRwDJu/T80uPcLCYxkJsag7vrD7FN39eYETLcEZGpOK6bDQU1lo26mHNcxDYAAIsRx6YkZ0Mv4yA/NufT8kAu740Xa9e34GmfI8oUzeeK3o/LXrRCynIhl/HwugNisKXvesH8dcludH72+FYJnWuRlxaLis3bGZcvOXjRDtd5flHh5a5zBNAyrJlFFyXe+90oaH4D3qMGtP/4uQN84WX/VdSkCSpXMe7GyI9IukV1YsVF1bYND7lZlOOXk+lXrEyioU4ah0ZV38c7+55V9b33l/v4WbnRocwuef+98u/k5YnX5TRp9dG0psbp9eTczh3K1NWJaIsJKQbMeYGoHG0LhwqGexJT5dH9xQnOiSaqQenKvY9Vu0xdDZGqv28P4aLpx9GpW0NKj1SgScnHAJxaGhD7bQyULdOB/K3HuTise3keTrSy8mBXrf78vVGPt9wlsPXUhW3fSo6inbVTYswefo83t79DnHZ8giVDH0SI9aNYEanGdTyVq7G8MbK45yPv7NotuVsAj/svMxT0VE2edHvlua9ba/hHVjFkxGftiLlZjZuPg7YOWgxZIZxYdYUjNml10f3GvJ4qWNUajV+EyfiPXIk+sRE7CIiZPcCNx9HHn+nOSk3s3H2sMNeIarvbgiq5omTmx3Z6baVofVMPk3YtQ14pp5T1F9VInPLFrIPHsSpkW1ikv9EhCddIBCUD6HurohOo6ZKJddSDXSAYE8nOtWUq6snZeWz+ugNFlgouza8ZVjR/3vVUzb8Cmumb7ak6q5w3H8T9mFNzV5vdVLOfzt7KVTRQAfoUssfB50Gb0dvmgcqKwFHZ5euug/8Ld70rDw9kTnHrY6pHfsTB69a97Qu2HNVsfoAQHJWPtM3HiNh/rA7BnohkhH+fM/2Ce/6EnJT5e3Hfoalw6DAdg9YISdi08zSPVqVZqQD3DgEWz9W7OpWJwA7hcWdXw/H8sqyY7T+9E+LXvRCnqyciodT2eo7Axizs0n8brpin++ECah0OkWV+MTMPMXUjb+Dp+o9ZRZSbgljgTv6jJpWRTF7V+5NmFuYrL3AWMCkbZP47YJ5iTdJkiwKxhWktFRsv9uQ90uJmRhyLKj9F8OQE865OHkaSnEi3CMIdwuXtevUulKV
5ouz7HalEUnvhlTgBajYcPKWeRRMBWGntaNGw440imhFo0qNaFSpEVXd6/LFGj0HznpiyImQ/XWp3IIX2nQtGt8yqCWLHl6IRq/sfU3NS2X0+tEciJOXs7yalMUGhXSun/ZdQ683cOGgXDDV3klLkx4RRR7tu6FyYz98Q8u2yKPRqPEJdinKJ9e4uODeR14+ryS6sFCcW7e2/ThubthHRlpcrFOpVHgFOle4gQ4mb32VxqU9Y0j4xh+i8cFPaHBsGl4KBrrWzw+/l1/G83HlxYn4z6fedTTMg4ww0gUCQfko+YAO/+lw9/JiSXDuvTWnuJYsf9BuHulFdf87HqGudfxl+e4Avx2NRZKUVd1VKmhf/d+Zj15EYH2kYj/5Ska6ZHDAkG05dHBQ0zsP3wOrDZT1N8nJJUhv/uCbLcnDj4G/pV760RPHCFTJw4qL00V9gO/X7LT4YHPuVgYf/H7a6j7e0C4iUqWQlw1wfoMpv7w00mJhr5Uc+bNrYfEAyMu0PEaBT9efLfq/Dj3N1aesjC7Gjs8V5+3uqKNddXl+ZUxKDj8fuE6BwUgv9W6ruw7MPGnbHEqQvGABhkS52J99jRq4de8GYLGU2/0KeVcbPclPLr20VUFKc0DDmmM3ScxULhmmU+uY0GCCYp9RMvLGrjdYdPpOyalD8Yc4m3JWNtaQG4ghR27sA2y0oNdhK5cSsmwz0rPDbRL061W5l2Kbt2MJL3xOCtw8Bhnm87+VnmtWRrKQfIPxb/lMJGfl8/j3ey3qIvi7OfBhnzoyw9HPyY+uXu9gyFFOCckqyGLcpnEy4cAFe64qaj9eS85m2/br5GbKn1OiGvjStEcEwz5syUP9q+DiaeGeXQpqrYpmj0SWa9uSWDJCi+M1eDAq9T/HbKv5UKBizXS1RkWNVgEMfqspzVyO45Yh14qxCwvD/713idq0Ee9RI/Ed/yxqN3maRs6hQ2Rus5zi9U/nn/NuCwSCBwshHFchtIj0plol+Up8eq7C9QWZKrybg44OCgb39eQctp5L4K9LcqOtQYgHPi7lezD5x2DvisrPVPP3sk7LFTv5Z1OfWQ2lAq0qFTzfsSqNixlA0SHRjKw1Eo3KND5K68qbSfIH0R8M3chWK4jyxMrLT1U0t45vLXWMVmWk1s1lbD0n95Tn6Q1M+OlwUT6nEh3VB3lcu9liPwCbJltVTQdg2yegL8VTfnkbLOxtsyjd7ouJbC92XvVVF3BW2VozWoIVT0JOqqynd33LaQoNVecJUStHHRQRdwz0toV9FqJPSSHpe2XhKL9Jzxc9rDcJV86F3X8fxOMAdl1IJD+pHZLB8v1FMmopSG0CmIzHn/YqCzoCdArrRL+q/Sz2f7zvY2YenWnVi56f3BIsBNEeuZ5KfEbZIzYKuZiQhSHXNiP9YkImBZYEGG8ztOZQ2oXcKdnXuFJjXmz8ovmg8xvhm0Yws7VJx2Hf7KKuP8/IF2UL2XXh3lZ3iEvL5bGZezimsEgApvSh74Y0tBhV0r5qJNnXxqDPUl44zTPkMfHPiUWGelaenqUHLItNHtmhUK2DO/nSOnsN9TqEMOS9FrQfVgNPf9uqhwBo7dR0GVMbj0q2b2MN+8gInFu1stivcnLC/dFHLfY/iHgFOtNmYFV09qbfTK29hvodQxj6fkvaD62BZ6ArQV9+gWP9+kXbONSsSdCXXxD5+1o8+/dHbWf6rGg8PPAeM0bxOAlTv0C6Sx2TBxXh9hIIBOXDIOqkVwQqlYphLcP434rSw3ID3R3oWEMeQtarfhB/nIiTtb+x4gR6hVDuf3uoexFBDSH+FNsdlUPd9Zk1zF5r1Sp61g/kqbZRVFFYOJnUeBJDag4hMzuR0B+6oy0w/w4USBp+1Hekhf1VGnPYfOMbh006DvfwO2J3Q14DXolBmj8Z+cdx2lZpb1aCb8q6s1a9fb6k8IluVukHiNkHZ/+A6hYE9xLPw+EfbZorMfthXg8YugJcLX9uJUni03XmXtSH
NBa+U10+vC1QV8LLlnYdfn8R+n5v1tyuuh+u9loy8uT3vN4aG6IGDPlw6zgE2Z47mTT7e4yZ8igCpyZNcH7ojqK7n5sDYd5OXC0R3n7gqg1aCfeAnRcSkQzO5Ce1wd5vo+IYfXpdJMOdhawf915lXHSUYmk6lUrFm83fxF5jb+Y1L860I9O4mXWTzdfki0dGvRP69HoA2GvV5CksQP15Op6BTW0vQ1acSwmZSPneSAYHVBplY18yajDkBmOQJC4lZFnNgbfX2PN1+6+JyYhBb9QT7h5uPsBQAKsnQvbtxVd9Dqx/Hap1A/dgi5U8AHacT+S1sp6gjVxLyubxH/7ierJy+o+nk44Fo5pRJ9hyGbLmUd6oJQdyro/CMWgRWld5RQO9pOf1na/z+6O/s+pwChkWFrN1Emjicim5OOPiaU9gFQ+zNo1WTY2WAVRv4U9afA5ZqdYX9jQ6Nb4hrmh0Fevn9BzyOFm7lO8n7r16onEtv3bC/aJW6yCqNKlEZkoe7n6OMrV4rZcXYT8tJv/yFdSODmj9/S2G53sNHULKwoXoE8wXRfPOnSN9zRrce/a8Z+dxvxCedIFAUD5EnfQKo0+DINwcSr92Q1qEoVV4kG1X3RdXhe1jU5UfmDopGPr/SoIaA3BewYsuSWr0mSa1ZQedmhEtw9n6UjRTB9RXNNAL8XPyI/LaAbQKgnHrjU24hRe788LlG+ZnQoI8FLeiiM/IpUqu9Xz0QrxVGVSJ38DqY3dC1necT+D7nZcVx49qFcHPY5syx2Mu3ioba3BvfldZXBJMeesKInwWiT8Jc7tCqmWP6/qTtzhyPdWsTTEfXecETcZAhzeVd3T8Fzi21KzJQaeha215vqwWPQ9rLAv1mVGGSIqCuDhSflRexPB7YZLsIVYp5P1yYtZdeYjLgyRJ7Dxv8tbmJz+EUa9c5im/RH74rfQ81p+ULzIWolapeaXJK4yrN87imOXnlysKOxakNgXJ9P3/fEA9xW3vJi/9cmIWoLYa8m7IDS6aw5k426orBLsGyw10gBtHIL2Eh9iQD2f/IDtfz04r3vJTN9NJspBacDecu5VBvxm7LRroldzsWfpkC6sGOpiiwuqFeICkIydmCAVpygKUaXlpzD0xlwV7rljcV5UCDTpJbuxVaVzJonK5SqXCo5ITQdU8rf75R7pXuIEO4NKmDboQ5c+Rlw3h8A8qdg5avAKcLZZzU6lU2EdGoAsIsF7GztERn2eeVuxL+OprpPyyRSv9ExBGukAgKB9COK7CcLLTMqCx9ZBJO62agQol2wDstRq61w6w6Vhh3k5U9lN+eP7Xcdtzma/wwy8ZnHC3d2dChyrsfrUDk3vWItjThtBFSYK9yt7kufouABwxWlD7vYd56ftPXaSqWh7eme6knIs7XLuez9efJV9vJDkrnxeWHlUcV93flZe7VqNZwnLq5CrPP0dSCF9NOC0zdgGTgN6p3+Ttah0MWgLOFrQSki/BnK6QIC+VpzcY+WyD+QKIC9nUVymUVgtrCVp7aDEeItooH2vtC5BiLtr4ePMwSj7bt1Yft33RIsb29z5h2jTFB06Xjh3MQkMLsRTyfvCKdW96Vp6eg1eTSc2umIfbC/GZxGfcNgIle/IT5errVd0aYMyV5x1bE5AD04P8M/WfkYd+W0GSVLdz36FzzUr0qBtIzQB5XuvOC4nlElXLyC0oOl9rIe/FdS9syUu3SoryQho3j7LzfKJipEBxdl+0rllRVo5eT2XAzD133vcShHk7sWxcS6sLn8V5qHJhWVAtuTcGkp9iEgB1KHDGOysIrcF0r1lw8kfOJ1nQxQBq5CtHLFVt9uAuUKs0GrxHyytvuLRrh33lyvdhRg8eHn37oguTPwcVxMaSsvSX+zCje4sw0gUCQfkQnvQKZViLcMWa54X0qhdoVTG+V4NAm47TsUalv700033DryZoHdErnK+bvT27Xm3PpE5VbVLiL+LaHlPocglOGMM5eLsO8lGLRvq9
U3i/dWKrYru+2VNIfjVl7XXVl/FJPcqS/dd4dfkxxYdse62arwc1wCH5DGx8S3H/fxia8I5+mPKktnwI+hL73SwvqQVAk9GmkN1R68DdQuhxeizM7QZJ5vWEfz0cy4V489DwZurTaFUKBktktOlftRp6zwAHD/mYvHT49QmzlJ76IR682q069rdFGl3ttbwUoLywgUYhH9vG9z7v0iXSflUoYXa7rJISlsTj9lkRCtt06haN399E3+l7aPT+JhbtVa4kURZKenELUpqTn3wnzzbKPYrvOn1GhI+zbNv9V1I4ecNKPfvbDK81nHdavoPKhkJN+oyaSHoP1Cp4qYupHrtSqk9ugbFc+domL7oJS4JnYMpHL+TsXRvpV5Tbbx61KSKgIvPSj15PZfDsv0jNVhCRBapWcuGXJ1sQ4mV73narIiMdQE1eXB9qX+7PkIPv0P/Yyzx+6G0C0qPIN+Zi5/On4j6cjBCul5s3ngHOeAc92AvUHo89hseAAab7EyaRyIAPP7jPs3pwUOl0Fu+DidOnY8yyXkHhn4Yw0gUCQflQNNJFTnp5CfV2on01y4rrllTgC2ke4Y2/W+l1cJVy2v+1aLQQWB+lrEV3R3tc7MuxqGRBkXy+oTOF+Y9JuJOoVSgnFHNvjHRJkrC7sU+xz6tGNKpmTyr2Dddu4P21pxVLGAG81q06Vb10sHwMGORGfIFTJV4rGMMvhrZcNCpEcqRdgwNz7ry+uAUubZWPs3OB1rc9pN5RJkPdp6rinMhONOXk3iYhI49P18lzVy3WRy800sFUt77n18rjrv8FO78wa3qiTRR7X+/AxufbcOjVFtRM3ynfzjUQaimUU0o6b1LkLoWUH38EBREk9169sK9SRXGbCB9nfFzkC02W1LzPxmXw9OJD5BSYvMcGo8T/VpwwE90rD3IDUEV+/CP83msLK3quYEWvFVRy8WNYC+XojtK86YU8WuVRPm37KdpSFoULy671bRhc5Mm1lOpTnpD3Swl3DAKjhXB3SVKZKcufuWlbuLtFLBjpUvxptp1SFkorzo7ziRVSsqrAYGTiksNkWYhAqBfiwc9PtMDPht+k4jQM9cRRd+c5wk+vpmVcK7S30wUc9S50PTMW11wvdB77UOnkiw7VCjSoFRZxqjZ98BeoVSoVAe++Q5VdO4n843ciV/yK1lM5Uua/imvXrtjXrCFrNyQlkTR//n2Y0b1DGOkCgaB8CHX3CseSId44zJPaQdbz+dS3Rc+s4e6oo7GF0Nh/LUGNMCg8mGnL85yaFgunV8ua01VurDKY59keMih40+NPQX7Fr/RfSsyiZoHcKM3WuIJPNagzAEnBY9xdvRd3vXL4a9uqvqbP4+Z3TPNWQNdvFpXDQjGgYYr+MeXJbZ8CeRmmNIFNk5XHtHgWXIqVOXMPgpF/gL9yTipXdsDNY0iSxMvLjpKYKQ/XbmenMGcnH/CrZd5WsxfUH6J8nK0fycLUPZzsqFLJFd25P6BAoRZ5nb4Q0kR5fzcOK7cXI/uQfIxKp8P32WcsbqNSqWgcJvemn7qRTmYJsbvcAgMTlygr+L/wy1GSs8oX+l5gMPLXJfmiQN0gd0I8fKjsWbnIQOrXKBhnO/mC7m9HbpBi4/G7hnfl63ZfY68UtQAYcithyI7ETqvmuU53FnxqB7lRyU2+zabT8RgVRDatcSnhTvSGZHDFmO8hG2PMqwTGO8KVN9JySctR9jzbhAUjXWUswCfHQih8MWJTcxRLe5aVJfuvcyVJeT8tIr1ZNKYZnmWJULqNnVZNs8g7n+W6+VqZwW1vcKT9haGoAXvfO+KEHWuYFrlr5isv3lRt8s9ZoNZ6emIfYbk86H8ZlVqN36QXFPuSf5iDPuX+iGbeC4SRLhAIyodB1EmvaFpX8aFWoDxn8sm2FsKnS9CrFCO9XTVfRQXlfzVBjRQ96RpDOYyRA3MUBc+O+vUiD/MH0r354fLtJQPctBAifRfsOXOdOir5A3qGb2NT2KSd
E6qGQ2X9OpVBsZyat7MdU/rXRXVxM/z1nfJBW46HyOiihaV1xiYcNSrUDM5Ogt3TTHnoN4/I+528oYWCAersAyPWQGgL5ePvm8mPf11ly1m599ePFMKNCqWZItsWhZGa0e1j8FR4IJYMpiiCPIUQ5eMK+fYAdQZYVnG3IZLCmCM3fJyaNUMXZLkMHECTCLmRbpTgUAmV9ynrLSv4J2Tk8cryY+XytB6LSZUtCAA8VMVH1ubqoKNfI3l4eJ7eyJL9lktqlaR1cGtmdJyBs04ePp+f3AZQMax5GEEed4xklUqlGE2UmJnH0ZhUm48NcDHRfMGtIL2+bIxeoe2uQt4thbsDtdSlG+kgT0soK9n5er7efF6xr2MNP+aObFK+KKXbFOalqyWTV1yJgIxIGsR2ROd+FLV9LFq1ig/61KGuuxOBBvl33CfcDTcf5SofhRiNEteSsmWpMxVNdkE2Z5LPkJhzb0vi/ZtxbtUSp2bNZO3GrCySZtpQgeQfwn/saU0gEFQYSsrNwki/K1QqFTOGNCLY0/QwoVGrGN++Mp1sLJlWM8CNKlZE4Tr8l0LdCwlurJiTri2ZK10aBTlwcJ68XaUmu+5wWfMRowWhn3uQl37z1C50Kvn30bVq6zsvmoxBUggBHaz5E12JZYwpfarjd+5n+FU5TB7/OtDepI7etbb/bc+kik/0A5XH75lm2Yve+kVwkC9MAeDgDoN/NoXDl8B47Be+Xatccm5ciAUV+Mh2yu32rqayayoFgyDlMqx71bwtM8EUul8S3+qma1Opdvnz0vXy91Flb7nmeCGWxOMOFAt5334ugR8sKPgXsvHUrTIZyoXsOK9scJjnGN9hmIWoobm7LrOzDCHZVdzq0c7tbQx5pkgMSVKRn9IMfVpDXO21PN1O/j20lPJT1pD34uHugKnsXEG1otf1vVua5eQXctZGhXcZBbmQblksrZbqitlrBwsK5DstvFe2MnfXFRIUNCyq+7syfUgjHHR3l/ZW+JkJ16txUlBoL6Tx9a74ZYRh77f+9n3IgU4uygJ1mf7Wvfpp2QWMmLefNlO20HHqNobP2UduQdnFBEtj49WNdP+1O/1X96fr8q78dOanCj/GfwGVSoXfpOcV+1IWLaLghuXvyT8JYaQLBILyoajuLoz0uyXEy4kdL7dj3XOtOfRmJ17oXK30jW6jUqksetO1ahVtq/kq9v2rcQ9Br1B1QJOTanuOeH4WLBlsyocuSfWHqVpNLsx2UgrHiMLDahlUvm1BbzDiaKE+ulOVYka6Zziqat1kY/xUqXRTm7Z3JofpkTtpv64jrJ6gfL5aB3j0e5NCOqDTqHm8mSnndrexNjsMteXb5Gcqq1K7h5gE46zh4A71B8ua1YY8+khy4ShfV3se97VgiBbPRy9JcGOIflW57/CPcHLlndcnVyiXkKvTH1QqU5WLAIVyX7EHTGH/VpD08vuqSlv6fbVmgBtOCiHkheJxyVn5vPCLbVEc764+xcWEsnkTlQTJHHRqGoYqLx5E+brQWsHLHp+Rx5Af9tLr2138cfwmBgsh6LfSc/lg7SlafryZn3Yayb70HFmXJpB18SXy4voAKp5oE6koCtkiytss77mQTafiSznLOxiNEpcTS1wjoxPNHF9jVe9VrO69mm87TisqvVac0+X1pKddByx/fmqrr5i9jq7qRw0FNfvdF5MsXtfSSM3OZ8a2i4p9r3StXiGRWtUqueLjYkdNCwrthajR0OH8UBwdr9K8ZgqSJOF6Sx4hZUBiU4blay5JEhOWHDbTZNh2LoFZ2y+V/yQU+OXcL7yw9QWSck0pRnmGPD7d9ynX08u+KCYAx3r1cO3UUdYuFRSQMO3b+zCjikcY6QKBoHwYRbj7vUKlUlHd3w13x7Ln+PeqrxwW2zzSGzeH/6BmgEqF3l7uidUZ9LCgJ1zebn37nFRY2AcuKisJ0/RJwr2dZO9VLvZc1YbLx5ehXrYtHItNo67xtKy9QGUPAfXNG5s+
obiP0do/eEG7lL8cJtDtxneQablmNZ3fB7/qZk2DmoZid/vh/FNL3nQl2r1eZOxbxcK8h2g3osHcWP6sX13srym8p15R4GG9zCEPTYKQ5sp9qyeaNAnASqh7vzv/D24s789KuG1oWUbRSNeU7pnUapQN4iPXU8nXG3ll+TFF76cSOQUGnltyRDFvXYnMPD2Hr6XK2puEe1n1qo6wIoZ5LCaNpxYdotPUbfy8/xp5tyMMLiVk8uryY7T+ZAuzd1wuJlymwZgXiFRgCvv3cbFj1EPKOb0OOg1tqsoXCM7eyuC6jfnaN9NzyS2QX58oP1ci3CMIdw/HzUFnFmpfdJzyGulWQt0BaqiuoebOnDrWrMRDlb1l49JyCmxS0ldi+taLZOTKP6NNw72IrqBFYLVaRaswbypbCHUvjnueL60uP8ofsXNIuJZBVmKubMxlrZE9sakW39sFe66yTUE0cfmhmLJP3gJzT8zl3T3vIpVYZNFLejZd21Rhx/mv4fvcc4opTGkrV5J3QaEE5z8MYaQLBIKyI0lCOO4BJcTLiTZV5Q9LSjmg/xUM9vIQSA2SycP7Yz84+4fyhpkJML8HXFf2VBNQD8IfQqVSUS/EQ9a9r0DBSEi7BpnWPXaSJHEmLp34dPkDZ0l2n4ujoVqeH5rtWx+0JbyIkdEmIbkS1FNfYrx2Ja6UImpXpQs0GSNr9nW15+G6JnX341IkawzyXEH5RjWgrgWxuZL4VIGo9rLmIFUSHdV3oiFGtYqgrWey8iKDNS96IRotPDoL7BXC73NTYcWTpvJvMfvl/SHNwDO82OQs5aWXEkmhZKTrbFv8LFmKLUwVh3fBLd5acYyNFhT8e9ZTjrw5HpvGF5vkNemV2Hc5Cb2CZ/YhC6HuhURX8yPSV55PXpxLiVm8svw4bT/dyuh5++kwdRtL9l8n32B9AWFChyo4W8mLvtuQ90sWIg2iSpxPdX/5vedsXEb5FNZLMdKdVHlEqG4CoFaZNEgspRuUJy/9ZloO8ywo8L/SrVqFKqc30tqjs6HMHkD1hOZknlWzabPC9xI4bWdayFl1VB4Cfe5WBh/8Ll/kBLialC0z7LMKsjiecJyMfNsWWiRJ4utDXzP14FSLYw7dqtiF24okJTeF00mn0Ss972E6v4sJmcRnlP5bdS+wj4rCvU9veYfRSNrKlX/3dCocYaQLBIKyI1l4QBIl2B4IPuhdm8rFctP7NwouVVTu34ze0UPWVqTubsiDJY/DsV/MB6TFwNyuECeviQ6YwrB7T6ewuH19BSP9gF5BSA2s5ibHZ+TS/euddP1yB00/3MzrK45TYMUgiTmzHxeV/AHJucpD8sEqFTRT9kqXSkQbeHRm0fmWpHhlgqn6/uilUh4vOrxVtvtFU+X8+BGaDYDJGHq5azXlEm9gm5EO4BkGD3+u3HdlB/xkIVKgTn/z15aM9FLy0pU86dgQ7g7QJMLkSbcnn9m6z9lmP4ldDhPpcmwCbsiNynbVfPlqYH2LJdFmbLvIX5eU1f+Ls/O88hhLBmIhGrWKLwbUx8el9GiKuPRcNp+JLy1bADAJaBamYFiifXU/xY+y7Ua68oJWpI951E41BSM9M09PTEqOTccxoxQjHe7kpTcK88TbxZ6mEV7oNPITLU+99K83nydPIbqiY41KNFKoLnA3ON5UjvpIVivfC9tefIy4I/Jrmo/ERZ3JSF95ONZscSRPb2DCT8qVDgopvE5X0q4wefdkWi9pzeDfB9NmSRv+t/N/XEix7K01SkY+3Pshs4/PtjgG4HDCYYyWnqnuI7OPzabtz20ZsGYAXZd35WzyWbP+xMw8un21gw6fb6PZh5t57dfyiU7eLb7PPovK7s6CtF1kJEFff4XvC8oK8P8khJEuEAjKjpKyO4hw9weEEC8n1j/XhjXjH2LbS9FM6V/vga8Pey8xKDwAaYuHHUoG+HUs7P/B9DrxAszpCkkWHsCcfWHEWqh0p5xX/RB5iTyL4nFWvKlvrDjB
6WK1lBfvvcZnG84qjs3O1+NyS9l7pI1oqdhO3YFgb72cnxn+daDfHBiyAhwtl++rH+JRFE1wSQpkqaGt5X0GNwWF/HhrSJU7kqCV12JvoTlFbW0MXw1sYAqtVjLSVWqIaC1vt0TdAVC7n3JfooJ3Wa2FWo+at3mGm5TrS1KakW5QEI6zUeujQYgnWrWKEZr1dNLcOU47zVG+1n1L8Xxmb2c7Pu1nui+83r2G2aJe0VwkmPTzEdKyrZcM23lBHirs6aSjpkI+dEnqhXjw54tteaVrdZuMdWu0rerLT2Ob8+Vj9dGord/vvF3saaSQHrD3UrJNJdIsedIjSnjSlYx0KGfIuw1GemFeemGkgJOdVjENYv+VlDIJo11MyGTpAXn4t0oFL3WxXTfFFrLT80m4IA/HT1AbWeacT55CXr69wQn7fCdZ+3mdgYLbH4Xz8Zmcvnnnuk9ZZ7nSQSF/nN/PpK2T6LmyJ8vPL6fgdpqfXtKz6uIq+qzqw/g/x3Mk/ojZdnqjnjd2vsGSs0tKO13S8tK4lFqx+e93y9nks3x9+Oui8Pxb2bd4cduLRR51SZJ4etGhousnSfDTvuv8tO/vz6/XBQTg+fjjaP39CfjgfSJX/YZb587/imceYaQLBIKyYyH0CQWBLsH9QaNWUTvInTBv6+Gk/wWUQvU0suc8CdZOgnWvmzzolnKH3YJh5DqT8VqMesEesqEXpUByVPIHR2KVjfS4tFw2KnjyZm2/xO6Lcs/XvsvJNOSMrN2I2mQIK2HvAg0eV+4rTnhrGLIcntwBtfvaJAo5ouUd7+VX+r7kKohmAdBxskWPvCWWHrrBjBx5yDvA52F7TcaQoQCu7JQPCGxgdYFBkYc/Nwnb2UJUB3D+P3v3HR5VtbUB/D1T0kNCElKAkNB7700FEUQFO4h+gtgVyxW9tisWLNg7FrB7LdiuXRSCIAoCiiBILyG0JEASSkLKzJzvjyEhydl7MifMJPuE9/c8PPfmTNvgTOass9Zeq1pArmnibPqeVYBb8vsTtW8cBwDhIXZ0aRaDC+yLDbedZl+NiceqDgDgqYu7oUm0NygOc9rxwiU9hBnXPQeLce+Xa6QZstxDxdiUYwxYB7VJgK2GQLlcozAnbjitNX69axgeOa8LWsQJPjMSNg0Y070pvr15CN69sh8Gto73++R8hGBqhsujC/cnV7dtvzGTnhgdahg9JmrcBgAbatPh3Z8g/dgoxsp/N9G2g1KXB39k+j9P+pmfNgqbzV3Qs7n0QkRtbfkzB7rgtdaHuHHQrmN+hP9z5teFVL0Q8dUqb1+JxZv34Q3ppAMd9ogtCE99EytdD2LejnmGveSVLdy5EJf/cDkmz52MX3f/ihJ3CW5feDu+2faN3+tcmatWyfvPO43TKzIPZWLRzkUAgAUbcrF8e57hPrN+2QpPLZsSnoiEKVPQeu4PiL3wQr9/X1oBg3QiMk/UNA5gJp2UVCZ4vzpkZXm/z/Q2+BKJbwNcORdIMGbI46NC0S6pajbSAxtWudONz7P7L8BjzO5/vXo3dB3Q4EFbbRcS4T2J9mY0V6OgqGrn4l837UNfmzHLXhzfST7WDAD6XSMeEQYA7c8GrprvnVHeZoSpYPqsrilIiPKWHeYgDm+7zzTeqe0oIN04lsqX7fsL8eDX6/Cp+zQU6cZ1t8v9ATia723KJ9or6m+pe2Xhsd796Zofp0ndxomPi4J011Egd53w7rquC/ekw+H/toChzW1obxM3vPqP40O01XZh4sA0DO9QNUDt3DQGd47qIHzcd3/vxRcrdwtv+01w8QgAhtZQ6i4S5rTj/wakYcHtp+LFCT2lAS4AhNhtuLR/Cyy4/TS8NKEnujQzUR1yjHRfumT/fmWicnfR/vqWCZHCix81ZXANdN2/cndbJlrFR6B1k+O/iwYLuugDwGJBBYTI6p0F+H6Nsc9DiN2Gf41o69dzmLFpufjfv3xv+TqnG+ucNQfqRZqOHY6qv2e/Xr0H+4+U
4PZPRJMOPHBEr0VE+iuISHsDjijxLHiZP3L+wA3zb8CwT4ZhwU5xo1G7aMwj1AvSdx8Rf94/3PAh3B4dT84VV3dlHijCos3+va/MOFR6CBvyNqDYJd77bo+KhC0sLOCvW98YpBOReaIZ6QD3pJOS3IJxWY54SSm6TFJXYPIPPjuEi5pw/SUqeS85KCyl//KvPWiK/fg+5B7MC70Ty8Om4AnHLDjgQvahYvznf2urZDS3bVqDJpqxLDSsdQ1BcFwrYMwLgONY52mbA+h+KXDjMmDCh0BqX9+Plwh12HFpvxYVPz/ruhg/uSsFqsldgfNf8/v5dF1HxvocXPPeHzha5sYhROJ/buNee62syDsmbZtgdjlQuyAdANIGAUPEs3grOCPlpfvNBB3eAXnJu6DUHQA0h/8VSsMj5HPQQ7UyvBb+Cu4dKe6VcNWQlhgs6AYOAPd/tRbvLsnE0dKqa6ztfnRfHHYbxnZviu9vGYJ3JvdF/5bH9ztHhTpw/anerPtj53dFekLtK4VaN4lEerwxa//zxlyffSCKy9zYc9C4/7lVE+OWAafdhjaJ4uZxphTu9za6rEGMVoQL21Rde7dmMYgOM15A93df+lM/igOyywa0QKqJqgd/FOQWIWe7scpgl92NQ7Zjv/s0YH64C4j0vY+7pGkYqo9Z33uwGBNm/Y7capMONMchRKS9hvDm/4U9/MRKtmVN5Zw2J5497Vm0jmltuE215nF7jojnjC/PXo5Zv/+GjTny9++7kuaCtfXN1m8w4tMRuPibizH8k+H4ZVcNE1kaEAbpRGSerNyd3d1JQaJyd0dyN+D0B/x7gub9gCu+AaISfd5NNP5utcd4QgbAEKhtzjmMLXsPYHbIM+hoO36SON6xELc5PgMAfLdmLz4/ltHcd7gE8XniYM+WJtmPXlmPCcAdm4CrM4A7twPnv2oYrVYblw1Ig+NYmXMZHLi27HYMKXkeF+JpHJ28EIioucGUy+3Bl3/txpnPL8ZV7/6BLbnHg5N33SPFD1o+G9iSYTzuCJeX/vvjtHu85fIyHc8BQiSBYrNe4uOS7Q7CpnHwv9wdADqUibP05Vp7MhH2y6PC22w2Dc9c3AOxEcbf44Wlbjzw9T8Y/MQCvJSxGQeLyqDrujDQS4uPCEjwpmkaTmufiDnXDcSvdw3D5zcMxB/3jcDdozsgsdGJZ800TRNm0w8Xu7BCUMpbbvv+QmEDu1aSCwaiDu/b9hdWjJXzix9Z9HIjG1fNRDvsNgxsZbz48s+eQ8grNM4Vr+zXzfuFneAjQ+yYMszkhU4/bF4hzqJXL1tv2TQa517by2cZevsB4kqMzblVL3ZozjxEpL0Oe0RWjetz2pw4pfkpCLObe/+FO8LxyohXMLzFcPRMMv4+2Vu4F3uP7DX1nMEky6QDwBur3/f52IUb92G7YDtIbew/uh8P//4wjrq8F8UOlx3GHYvuQE6hfw0erY5BOhGZx8ZxZCFuQeWHw+YAhk491snbR0l3q2HAxC/92tOcGheB3mlV7yfMpAOGQO2rVXtwu+MTdLbtMNz1Bvs36K95xwQ98NVa7DhQiCVb96OvJs5wocXAGtcKwFsS37yP79J4k5IaheHMLslVju3SE/FncVN8JRiBVFlxmRvvLc3EaU8vxL/mrBJmazbpqVji7mR8cMEOYNdy4/G0gYDzBAI6uxO48E3AKQk6q3d1rywizlu1UN1ucdZMHqT7X6EUvkcyLrCypS8DW8XluMkxYXj8gm7Sh+YVluKZeZsw6PEM3PnZ38gWjAk8kSy6TPPGEeid5nvuem2I9qUDwPz18jGJss7urQWZdEAcpLs9epWLTzUyEaS3dhmrdIYISt51HVi6Vd69X9d1PDHX2PMCAK45pdUJN/oTvZ6o1N0NHRudVX+HTxqUjubt45AyJMRwfwA4GLoP7x+5D40b+Z57bwvJRUTaa7CF+J5iEOmIxJVdrsSPF/6ImafPxI8X/Yjrul2H6JCa9+NHh0Rj
1hmzMCBlAACgV6L44p0qJe8uj8tnEFwStgKw+f53fW9pZkDWsiBrQUWAXu6o6yhe+1tckaXruqmGiKpjkE5E5kkz6QzSST3CxnHlewP7Xn1s77Hg5L/DOcClc+SZUoHzqo26y0Vj7NUF2eNKmXRd17Fr5fe4zvGd8Dltmo5nQ15BIxxBYakbt368Cos27kNfm/EEuiymJRAtDjzqyhWVxrFV9vZvmVi27YDwz8yft2Dw4wtw/1f/1Dieam7kWP8X02qYiZVLxLcGznzceLxxes2l9KJ96bnrgRJBuagkSPd3BBvKjgJ7/vLvvv+7ASgUByZndknG+D6+m+YVlrrx6Z/ive+GRmWFB4D8HfBrflod65PWGDHhxsqBeeuzpc3yZJ3dZTPfZY3VNuw1UfIuCdLd1eu5AdhyjGMjazMv/Ye12Viz27idJi4yBFcPlYyXPAH7sg6jIMcY/G13elBcKVpJiArFeceqls6bMBhFjY1VD+uSlmD7oW0Iaf4aNKf4fW4L243wtNdhc8qb+HlcUSjJHYVH+nyM23rfhiYRTQAAcWFxuKnnTZh30Tzc0ecOJIaLq6ziw+Lx9qi30SOxR8WxXkniIP2vXD8/u0GWW5QLly5vbqnZyuCM9T2l4rM/dqGwRP4clem6jm37jmDvwaOGz9zve38XPuZ/m/+H7QePb+3xeHTMX5eDi15bijs/+9uv17UCBulEZJ5sT7qfo4KI6pLohMNR+YJSt3HAJR8CUceCW80G9L8euPhdwGEuW3R2t6YV5d7lVolK3rPXAmXeLOTqTdtwd/ELPp+3mXYAjzrfAqBj1c4CLP7rH7S0GbMdjpZ+ZtGDqHdaY3RuaszOb8w5jPGzfhf+eerHjThQQ+kt4C0pnnzljf53Xq/tfvTqek0Ehv3n+M+RicAFb9Q80UK4L133dnmvflSaSfdzG9HuP+VNPas7kg18c4s0cH5wbGec2TlZeJsvmoaqpdU//gd4pj3wQjfgtSHAkcA3lToRDrsNwzsYA6ydeUcNZdHlRJ3dQ+w2NG8srrbokCyuVPG1r9dAEKSX6Xb8pQsat+01NkVrlRCJlBhjRYlsX3qpy4OnJXvRbxrWxtDFPhBkDePa9EmsGKkXE+7Ei5f0QHiI96Kq3W7DBVP6IT/6eKn49sZr8HfThQCAIv1Ypjy0auM7e/h2RLSYBZtDXBXhKYtF8d7zULjlLpQeGIa/MsUNyyKdkZjUeRJ+uPAHPDToIaQ1Oj7hok1sG7w7+l20j6s6oq5pZFMkRRgvpP6Z4zvwrSu+St3LhTReCkDeE+BwiQtfrBRfxKvswJESXPTaUgx/ZhEGzliAsS//hu/X7IXbo8PtcWPZXnFlkFt34+W/XkaZ24P//bULo19YjKvf+wN/7sjHt3/vQdYB35l+q2CQTkTmsbs7WYhwT3r192r7M4GbV3r3aN+yChj9RK0uOsVFhuCUdk2qHBPuS/eUAdlrAF2H8/upSNZqHoc0xv47zrd5R4z1EXR1BwCthR/70YNM0zRMkmTTayslJgz3n9MJ394yBC0TY4C+V9X8oIh4IKlLYBagacCpdwJT1wNX/gTctta/BnuiTDog3JcumpEOAJrdzxLvHUuFhz2yme8bvgVWvie8KTzEjlcu64VZl/dGj9RY/14fQJemMWgceawE+Z8vvaX15d8XOWuBOZcpl1GXdXn/VrI9Q5RJT4uPkM5mT2oUKtznv36viTFsgiB9l56ANZ6WxvseyQEOVw1KNU0TZtOz8ooMAc3RUjeuee8P4cWIZrHhuGxAC8PxE+Xx6Nj8hzFId4bace2ELlh8p7cnwdJ7hmNQtb9H6+YtMPHeU7Bo0NuY0/0x/NjhDeja8QDS5jzsDcjDvL0+7JEbEd7iTWj2qs3jyunFzVG0/WaUFQwAjo2R/HWz7yZ7IfYQXND2Anx93tf45JxP8OFZH+KTMZ9UCdrLaZomLHnfUrAFB0uMlQt1TdY0rjJbSB7sUd5Krn+Pai+8
z7tLd0irUQDvlo8pH67EnzuOf/et2X0QN36wEmc8uwgv/roQh0rln5GfdvyEoc+/h9vmrK5ywcujA7MWb63x72AFDNKJyDyWu5OF+BWkA94Z4s37AI2NJ1ZmnFut5H2VLt+X7lr5PjofXOj3c093voPmWq5w9BoAb0dyBYzt3hSNBYGJWa2bROKpi7ph0b+H4cohLRERcuy/W69JgKOGveYtTwVsAT7NadQUaNHf/wqL5K7ihpqCDu96mSST7vTz92qWIEi3h8J27kygzRnix8y9G9hv3MMMeBvJjeycjP/dOAgfXTPAcPFJpEoguHy28Q47l3kvDijklHYJwjFpb/+Wifxq1R3e0lz/xq+V0zQN7ZNOsMO7IEjfqSdirShIB4C9xpJf0bx0oGrJ+6HiMkx8a5l0VvzUM9oh1ESPBH/t3piPooPGSppWPZrAGWJH09hw9E6LO/75r6ZF41TMvOhZxDcVby3QHEWIaDEbIQnzEJ76HjSb+LPWO6k3+oTeA91d9b/n37sP4mBRzVUqNs2GjvEd0bVJVzh9NNKVlbyvyl1V42sEmz9BOuDNpg9qHY8bT2uNni1iDbdvyT2C37bI9/rPXrwNv28TN2jctr8Qry6bW+MaDoZ9LTz+yR+7sO+w+CKMlTBIJyLz2N2dLMKje4QdgGXzagPhjE5JiAg5/vxrPC3hEewdxT9fAj/cJXyObZHdhc3qorWjeME5EwNs6w23eSISxI3K6kGY035C2fTuqbF4/fLemHfbqbi4TypCHNVOVyLigK6SDHG5QJW6nwhnGJAsyObvEpS2uiRBgD+ZdI8b2ClonNe8j3cN584EIgRBWlkR8MXV8mag8AaZA1vH470r++Hbm4fgnG4pECWNbVqlMYQHdwE7fhU/YcbDgNu//ap1ITrMiVPaGi9AHC5x4ZWFVS9g7DtSgsOCvbai8WuViWa+5x4uqbG7OgDAVQIcMpYgZ+mJ+EdPFz9GUPI+SDJer7zk/cCRElw6+3esyBRX9bRPisZ5PY0TLAJh03LjHHYAaNfP//4aTSKa4O1Rb6NrQlfh7Zq9FKFNMqBp4oqVIc2G4NURr+LUtsatNLoOLN3m38g6f8iC9D9zvb8X9h8pwbZ9R3xmomuSX1iKzP2F8HjMPYc/5e4A4IjajP8bGg5N06R9SN6RjGNbu/sgnvlJcqH5GHuk+OJh9TXYI4z3K3V58P7vxiasVsMgnYjMk51gcU46KUaURQckmfQAiQhxYGSlrtGFCMcmvbnxjjt/h8Nl3Dt3QI9G3pmve2eZC/S2bUYnQRd4W9ogb1m2Im4a1ganC/b7+jK0bQI+vKY/vrxxEEZ1ToZNUkIMAOh3ne8nUyFIB8T70g/vAQ5VzVhJy9392ZOesxYQzWdu4e0ojegkb6AusucvIGN6za8BoEuzGLx8aS8suP00TOjXAiF272lkiN2G20e2R6fyXgRrPpM/yf6NwN8f+/V6dWXKcHG1y7tLd2BPwfFGhrLO7rLxa+WkzeOy/Sh5L9gJCC40ZumJ2KI3RYkueH9kG4P0xOgwYUb/t637sbvgKMa9vhRrd4vXEx3qwLPju0tL+k+Eq9SNrX8ZM/fh0U4071DzVI3KYsNiMXvkbPRN9mMrSiWj0kfhxWEvItwR7lfFwYlqE9tG2Bl+YeZyjHt9Kfo8Mh/Dn1mEMS//ihzBBAVfdF3Hsz9tRO9H5uG0pxdizMu/YscB/0ei7Sn0L5MOAH8VeKtiRndJQZNoY3VRxoYc4XaKWz7+C2VuHxcPtDLYwzP9WkNo4lxU/nw0jQnDA2M64fpT1bhgfSJYm0pE5sky6TU1USKqY/URpAPAuT2b4ctVx092Vntao0Ol+ee+PO6cgic6dwBsHYGelwN/+Z5LW8Hf0Wt1xGG34c0r+mLHgUJsqKG012HT0CGlEZrFhvv/AindvH9nUZl3XKsT3rYQMM16AysEpd+7//SWzx9zQnPSJfvRUblHQfszgT5XAX+8abzfkhe9
JfzD/uPXhZ70hEjMuKAr7j2rA7bvL0RSozAkVZ5dvuZT30/w8wygy0UnNh4vgHq1aIyRnZLw07qq+6JLXR68MH8znrjIO5ZOGqTXkEkXjWEDvCXvg1rXMLJO0tk9S0+CCw6s11PRQ9tW9UZBJh3wbkeo3rCuoKgM57y4GPmScu64yBC8d2U/dG4a43udtZS55gDKio0XqNr2SYLNbj6XGOmMxCunv4J/L/o3Fu5aWOP9L2x7IaYNmAb7sSRDm8QoJEaHIrdaubSv0m2zbJoNPRN74pddv1Q5vu3wBhzJzAHgPZdau/sQHv9hA54b38Pv516RmY8XFxzPLv+z5xAe+PofvDO5n1+PF5W7u482gy00x7BN4OutX+PWXrciKiQKl/ZrgRcyNle5XdeB93/PxH/OPj4285Hv1kk/R+XsEZnSLQmG+4bvgiN6LVqGD8T1p7bG2B5N4azF+0ZFDeNvQUR1i43jyCJko2QcWnDfq0PbJCA+8vgM31W6oHmcwAeu0xHX67zjGeQzH/e/hD1NrSC9XFp8JEZ1Tvb55/SOSeYC9HL9rhUfVyWLDvhoHle15P2E5qRnLRE80AakVjsxH/kIkNBO/By/POXdfuGRd22uLjrMiW7NY6sG6DnrvJl9Xw7tAla84ffr1IU7RrUXlvF/+udObMn1Bray8WutfexJB4B2ggw2UPMYtr93FeDjn34R3rZT91aprPOkG28syAKOGsvWhwrmpQOQBugpMWH45LqB6NIsOAE64KvU3fx0gXJhjjA8O+xZjG452uf9JnWahAcGPlARoAPe7R2ibPr2/YXYlR+4ruFd43sYjmmaG/awqhdz56/LMVX2/v2avYZjCzfuQ+7hmjPyLo8L2YXG/x7u4mYoO2Rcb5GrCF9t/QoAcFn/FobJJgAwZ8VOFJV6f7fNX5eDD5ZlCV/7htNa46UJPdExpZGwhB0Ayg52Fx5v0eYXfHfLIFzYu3mDCdABBulEVBtsHEcWUV+ZdIfdhnO6pVT8LOzwXs1WTwoecV2Gc3tU2vcZGuUd9VXDHno9JApIEu/FbNA6jgFiBRnzLhfW/Vpk4tsAoYIgZ1e1Du+1nZOu60CWYJ5wUmcgrNpe6JAI4MI35P1Dlr8OfHXjie0ZX/OJf/db/AxQXIfdrI/mA3nbvXu8BdolReOCXsZtKR4dePrHTQDE49fiIkMQGxFiOF5ZZKgDLeKMI9pW7yrAnzvyDX/mrcvB/72xDGNf/g2H9ooDlqxjQfo/uv/N4/q1jBMGUiLp8RH49PqBaJPou0rgRBQXlmHHP8YMdUyTcCSmiy9s+Mtpc2LGkBkY126c8PabetyE2/vcDk1QOSKbK78kANn0Q8VleHXhVsz6SRx42yMyq/x8uMRlGE/p0T3YdnCbsPu56D0KAEu31rz23KJcuHVjVYNe1hhleeKmpB9t+Age3YPERmE4q2uK4fZDxS58+dce5B4uxp2fi2eYd28eg6lntMOY7k3x/S1D0CbNmM3X3WEo3nshPGWxhtv2Fe/EN9vETeSsjEE6EZknm5POIJ0U45a8V+110D/h3EpNljbqqTiqy0/kS3U7bim7CalJCeiYUu3ktHlvYNg9Pl9La963ViPjLM/uBC56CwiPO35swBQgfUj9rak6mw1o1tN4fM+qKr9Laz0nPW+bd+xWdbJxfCndgXOelT/f6o+ATydJg1mfPB7f+9ErO5oHLHnZ/GuYpevAr88Bz3YGXuwBzOwv7K4PAP8a0bZin31lc//JxqqdBdguCIBq2o9eTlTyviH7MC58dYnhzzXv/VGxB7qFlmt4XL4ehcOIgKYBvfqfKn5BQcl7ZKgDvVrUvM+7Q3I0Prl+oHT2e6BsXZkLj8sYrLbtlyQMns2y2+y4b8B9uLHHjRXd1iMcEZg2YBqu636d9DVkQfriE9iXvu9wCZ6cuwGDH1+AJ+ZuQF5+EnSP8Xd29SAdQJU95Vvyt+CsL87CuV+ei1PnnIp3/3lXet/Kahoj
B8ibxnnKGsNT0hSuonTj2g7twJI93koeWbPQd5dk4t+f/i1slBgRYsfzl/SsyIAXlBRgV6HxwpSrqBVsCEHHsIuFr/HK6ldQ7DK3f191DNKJyDxZJ2AG6aQYaSY9yOXuANAzNRZp8d6TXDfsWCPLeAF41nUx/tFb4twezcQnjkOm+t5zrsjotXrRvA9w2z/ApG+9s+7PfKy+V2Qkah5XehjYv6niR90laxxXwwUlURYdON40TqTXROD81+UVGhu+BT4cD5T633AKgHfE2kFB74X4tuL7L50JHDEGoQHj8QDf3Q7MfxAoO/Z3yd8OfHWT8GJz88YRuHyguJfBY9+tR1aesdy55QkE6f4QBelZeiLGdG+Kb28eggtHjxT/d8wWZy1lAWi5ni1i8fG1A5AYHfx+AZtXCC4uAWh/AqXu1Wmahhu634DvL/ge7575Ln648AeMay/OrpdLjgkTVhAs2bLfdLf0rANFuO/LNRj8xAK8snArDhcf+07SHXAXGys37OE7AFTdcpK53/u+K3WX4uYFN1cE0y6PC0//8TTWH/BO+yhze7Ar/yhEft2yv8ayedn4NU+Z98KO47D44ueH6z8EAPRqEYuugq0RG3MOS8f6PTCmU5XP0PLs5cKJLP/XbQSW3TsCcy6dgtYxxsq03KJcfLxBrYaUJ4pBOhGZx3J3sgjpnvQ6eK9qmoZzux9vDLbKI+4gvdTdCbPc5wCoNMKqOpvdG1SFGkc5AfAdkJ0MQiKAlkOBeP/2/tc5P/al65IRbDU2jhPtRwdqbiTY/RJg3HuAXVLhse1n4L3zgKMFvp+nMlmp+xnTgdbDjcfLCr174YPBXQb87zpxo7zcdcDeVcKH3Xhaa0SFGv/Nl2fmwS0I0GpqGleug2AMW810pAqC9Dbtu+ClCT29zdyc4UCT9saHSprHDWkrHsUGAINax+O/V/WvsXw/EEqPurBni3G7Q2JaNGKTAp/BT45MRq+kXogLi6v5zhDPlT9QWFpjE8xy6/Ycwi0f/YXTnv4Z//09C6UuY68HtyAzrdlLYAutuq8881h2/JONn2DXkV2Gx3y37TsAwK78o8L3KADsPVgsLYUvJwvS9WNB+tW9xyIxwjix49fdvyLrUBY0TTM1evPMzskY16fqyLule8RNMP+v+xloEh0Ku82OW3rdIrzP7DWzhVsArIpBOhGZx+7uZBH1tSe93NhK+8u/c/c33H5Qj8DUshvggQ190hojVbBvtULjNOBsQZlyo+ZAc/8691I9kQXplfely/ak17SNQZRJb5wONDLuDzXoeA5w6SeAU/K+27UceOcc/7LdrlLgn/8Zj4fFAm1GAKc/IH7cH29794oHUlkx8MlE3/vjty0UHo6PCsU1Q/0f39SqhqZx5Qa1jkeY09xpd2McRrRmzIxGJlW74JciaKi1f7OwEqJ781jh/vgzOiXhrSv6IlJwgSIYdm8ugC4IKFv3Nje2MVhko9h+81Hyrus6lm/Pw+S3l+OsFxfj69V74Cvx7i4SV1dVL3nPPFCEwrJCzPp7lvD+v+/9/dj9fAfhvtYOiMvddY8DuisKjSOcuGZoW+Eefx06PtrwEQDgnG4piIus+SJPUqNQzLigq6FyrPzvUllyZDLSGh2vcBmWOgzdmnQz3O9Q6SG8s/adGl/bKhikE5F50kw656STWuo7SG+TGIUuzbwZtNV6G7zqGgO37j0p2afH4KrSO7AX3sxW5T3sUt0uBkY85O3cDQDhjYFznlNmlBVJRCcBManG45Uz6dI56T7eq0dygQOCxmKy/egirYcBE78CwiQdvHPWAG+PBg6Lu3BX2Joh7CiOzucBjhCgaQ+g8wXG2z1lwM8B3KJQchj48GJg4/e+7ycJ0gHgqqEtq0xn8KWmzu7lYiNCMH1sF78btwFAt8g88Q2N06v+nGwMWAAdyDZ22XfYbZhxQVc0jvBeVLdpwOUD0vDKZb0Q5qy77/Bd68V/txad5Jn+utS/VZxwLrxsXvqR
Eheue/9PjHt9KX7eKC7trq5vSg9oML5G9SB9x4FCvPvPu8gvEXy+AGzM34gDRw9gRw2Z8pr2pYtmpOtlsQBsOKNTEiJCHLio3UUV+/sr+3LLlygqK0KY044J/QS/66p5dlwPNK72Gdt5eKfwQsGAlAFVgnlN0/CvXv8SPu9/1/8X+4r8+/dXHWtTicg8lruTRYg61QKAvYZu6YF0Xo9mWLvbW4L3hGsCPnYPQyyOYLuejEPwlso6bBrOFnTGFRryL2/38oM7gaQuxg7epKZmvY37tXP+AUqLgJAI6GWSxnFOH79XRTPiAfPbH1L7AVd8B7x/PlAoOME9sAX47Cpg0tfyi7F/S7LWXStl3obfB6z7Cqj+uVzzKTD4FiD5BCcUFOUBH1wM7P6j5vtm/V7xb19dVKgDNw9vgwe/WefzKew2DS3i/AvSAWBc31Sc1qEJVu88KCx/riypUSi6FxQCXwpurB6kizLpgHdfegtjBc/gNglYeMcwbMw5jMToUKT7ua8+kHZuMAac4dFOxDet+7WIRIc50SM1Fn/uqLrO5dvzUOJyI7RSr4j8wlJc8fZyrN5V87QCTfOWeV9/amt0T43FRV+3w8b8jVXuYw/PBKADxwL47XnZhgZx1S3PXo7MAy183mfptgNwuT1wSMaU7T5sDJDL96OP6JgEAIgPj8eZ6Wfim23fVLnfkbIj+GLzF/i/Tv+H/xuQhtcWbZOW3l97SithbwRRFh3wBunV9U3uiyHNhuDX3b9WOX7UdRSv//067htwn/C5rISZdCIyTxqks9yd1FLfmXQAGNO9KSpX9O3Qk7Fab1MRoAPAqe2a+FUiWCE21dssjgG6dYhK3nW3t6QckI490+w+LijJmsbVppFgclfgyh/FGX8A2PErsORF8W0lh4GNPxiPN2pedW98fGtv0zoDHch42PSSqzic4y3N9ydABwB3KbBT8u8HYEL/FmjeONznU6Q2DkeIw9ypdGJ0GM7olISzu6X4/NMnPQ7OQzvET2LIpEsubkj23QNATIQT/VrG1UuAfiS/BPl7jVnf5h3ioJmoNAg2USB5tMyNv7IKKn7OPVSM8bOW1higO+0axvVpjvlTT8Wr/9cb3VNjAQC9knoZ7mtzHobmPF5pUBo9H0Uu3zPaf9/7e43l7oeLXVizW7xOl8eFnCJjIz9PWWOEOmwY0vb4v8WlHS8VPsfsNbNRWFaIlJhwjOqcJLxPp5RGuH1kO/HfYY/489g/xXihCQBu7XWr8Pjnmz5H1iHxPHYrYZBOROaxuztZhCxIr4sRbOWSGoVhUGvfJZxje0gaxlHDIctuHyu7lo1gg68RbDsETeMiEryz2WsjvjUw+Qf54xc8Auz5y3h8/beAS9BVuutF3hF0lZ16F+AQBL+bfxT/ffyRvwN4axSQ+4/4dodkO4iPkvdQhx1TzxAHE+X8bRpXa/mZxmM2B9Co2taYsEZAnGAfvaR5XH3btVFc6t68Q83j4aSK8rx/Aki2L728bHxnXhEuem0pNuUckT5HRIgdVw9piV/uHIYnL+qO1tXeM70SjUE6cLzkXXPmwRkrv5hUbumepTUG6ZXXXl1OUY50RvrgNgmICDl+ftcloYtwT3hecR7eX/c+AOCqIcb99qEOG164pEeVKoRyHt2DZdnLDMfbNm6LhHDxf4cOcR0wuuVow3GX7sKcjXOEj7ESBulEZJ5sTvrJOKeZlCYL0p1a3VZ9nNtDvt88IsSOMzqJsw7UgDTtBYQIxnBt/RkA5OXushFsJYfFY7ZaDABOZMZ0bCoweS4QIyid9biAz68xNiSTNWjrJhh11SgFGHC9+P7zH/TONTfj4C7vnvl8SfO5qCTgqnlAqGDPvY8gHfB+btsnyUen+TsjvdbyBZn0mFTxd62o5D13Q+3m3QfZrvXivdWpHf3rvF5FaSHw/gXAky2Bp9sB3041/x6S6JEai4gQ4+fv1y37sTnnMC56bYlwLB8AxIQ7cduIdlhy93Dcd04n
pMSIqzJ6JvYUHrdHeN/PoQnzodkk51yV7C3ci92HBeMPBWsX8TV+rbzUvbIbu98ovP87/7yDvOI89E6Lw+1ntEN5YURkiB2vXNYLbSWfp/V563GwxJjlF5W6V3Zzj5urjFRNikjC9EHTcVvv23w+zgoYpBOReR5m0skaZHvS67LcHQDO7JIsLYsd1Tm5SpaCGii7wzsmrrq9q4GiPOiycndZ47hdKwBdsK+5NqXu1UU1AS6YdbxBYWUHNgM//uf4z4dzxMFuYicgqbP4+QffKm5Ut3MZsHWBubV+dztwyLiXFoD3QsPkH4CUbpJ/+7+BwgPSp7bbNNx5pmC82TH1kkmvXupeTtQ8zlMG5K4P5IpOmK7r2LnBmPGOTYpAdFwtGmB+dZO3aSHg/fv+8Saw0vf+bX+FOGzo39J44eDvXQUY9/pS5BwSXwBJjQvHNzcNwa0j2tY4zi4pMgnNoowXce3hO2ALzYYjRlC5IhO+uca7rMzKR1Gp8XeNqGEb4A3ST+9o7Lg/qOkg9Es2ThUpLCvEG2veAADcfHpb/HrXcHxwdX/8cucwnC4I9svJSt0HpvgeJZnaKBUXtrsQMaExuKPPHfjugu9wftvz6/w7PhgYpBOReWwcRxahQrk7ADQKc2KE4EQHAM5lqfvJo9VpgoM6sP0Xabm7NEiX7Uc32zROJm0gMPQO8W1/vg1s8M5mxj//E18s6Hqx/LnDGwNDpopvW/2R/2ssyAI2zRXfltAOuHKut4Qf8PFvv8jnSwzvkIg+aeIybH/Hr9WKq9RbJVCdLEj31TxOIfl7i1B0sNRwvFal7rv/BP75wnh82eu1WJmYaF+6Rwfyi8TJiraJUfj0ukFoEe//rPfeScZ+FfbQfQhN/hKaZqwKGJ46XPg89siag/Qyt3dMXHWZBYL3GoCOCWlIamS8eKJpmnRP+McbPsbeI95Z701jwzG4TQLio0J9rkvUNM5hcwj/baq7uefN+OGCHzCp8ySE2n2/jpUwSCci89g4jixChcZx5S7paywfbhYbLt33SA2QMFCENxPtkpS0yoJ00f5tZySQLAnWauPUO+Uz3r+6yTuWTVbq3vUi38/d71pvKXp1G74DSuR7fKtY86n4eEp3bwY9plKGstUw8X1rKHnXNA13je5gOB4b4e3+bVpZMXBwt/d/fTm4E94O39WYDdIV25cuyqIDQGpr3036DHTduz1CJHedcPxcbQxt28Tv+3ZrHoM51w1Ecoy5igDZvnRHtVFsABBqD8W9/e9F+8bGCg9H5FYAvqcGAOJ56auytxmO6R4HRrWX97fo1qQbTm9xuuF4macMM1fNrHEd5YpdxViZs9JwvHuT7ohw1nyxIyY0BtGirUQWxyCdiMyTlGVyTjqpxqVLMul1OIKt3CntmuCKQekVPzcKc+Cpi7tJx+FQA5TQDogWVE5sW2guk+4qBXYJupg37xPY3iB2J3DBbG/wX93RPOCjS6rMeq/QYhAQ63scFEIigG7jjcfLimqecQ54g7S/BUG6Zgcu/RSIrHbxK761t9t8dTUE6QDQNz0Od1cK1EMdNkw/t4v5ueKb5wMvdAee6+T9383z5PeV7bGXBemRCcaGcoC3pF8huwSj1zS40fT7IcBP0wBPzUEmAGDbz8D2X+S3yy4emdQuKQoJNWSBAaB/yzh8cHV/c1M6jumZJN6XLnJpx0uRFJkk3Kut2YthC6tath4Vavx98OsW4xaPbfnG/ex6WSzO6Ox7NOgtPW+BTbAt5ptt32BL/hafjy23at8qlHqM1RU17Udv6HhmQETmsdydLELaOK6eqj4eHNsZP912Ct6/qh8W3zkcg1ozi35S0TRxNj1/O/TDgvnkkATp2X+Lu6kHYj96dfGtgbOeFN8m6vQOAN18lLpXuZ+gsRwgn7leWc5aYJ9gv3XrYUC0IEMv+7cv2AHkSQLiSq4/tTWW3D0cb0/ui8V3DsPY7ia3qRwtAP53LXAk2/vzkWzgi2vke+JF+9EB
eZAOiLPpOWvlzV7rmNvtwW5BZ/cmzq0Iw0HviL+lL9X8RB4PMP8h3/dZ87n/Ab8PmqZhSBvf0zmGd0jEu1f2Q3RY7b5XWjZqicahNZf7RzmjcFWXqwAAA5qKA1hH5PHAOCEqFEPbGr9j1u89hP1Hju+nd3t05Jcax685kYAOyb4z1K1iW+Hc1ucajnt0D176y4//lpDvR2eQTkRklixIt7PcndTilpyc1kcmvVy7pGgMbdsEMRH8vJyUZCXv+yRZJ9GcdNmosha+myzVWo/LgE7GE3EhmxPodJ5/903qAjQxlpJj6wLgiPiiRQVZIO9rL7yv7QZ+aBobjmHtE5Eo2KNbow3fAUXVAvKj+cDaz8T3r02QLmoeV1YEHPAvoxlUhfuR+9mLKCsxBs6pIZWy/RkPA3tW+X6u9V/5nAEPADi0C8iq5Ui/akT70sud0y0Fr/1fb/NVFZVomibt8l7Z6NRLEXNsSkGvxF7Ci832SkF6enyEdO2VS97/2LEPut3YWb1FdDNofkyKuLHHjQixGSsIFuxcgNX7at5uIdqPHumMRJeELjU+tiFjkE5E5rG7O1mErNy9IXR+JYtqdarwsL7fuCcUADSn4GKOqGmczeEtdw8GTQPOeV5cql9dmxFAhJ+jtDRNHFTrbm9DOhmPB1j7ufG4IxzocLb8cZJ/e3+D9BMiK+GX7asXBelhsUB4rPw1VNyXnr8D+P7fwHNdsHOFeI5985BK6/OUAZ9fDZSKR5vBXeYN5P3hT0WGH4Z1SBSOYpvQLxUvXNJTOrnDjF5J4n3p5TyuaHSIOKvi5whnBHok9jDczx6eCWje0vH0hEhhJh2oGqR//c96aJrx4knXZOOsc5HkyGRM6DBBeNvzfz4P3cdIvILiAqw7sM5wvG9y35P+e5pBOhGZJyudO8l/oZJ6VGocRwQAiE4GmnQ0HNbzBDOxAWjVM+keD5C11HjHlO5ASBC7jUfEAee/BqCGzJq/pe7lZJlvX3uKd/wmHrvW4Swg1Ed5blSiN3tf3fZFASmNliorlo+W27UCyBNcoDEzfq1ciiCTDgBbMrzBbV3SdWDRk8CLPYHlswDXUewqMV5EcKAEKSEbqh48sBn46T7x8/71XyBvq39rWPdVQObEJ0SF4oExnSqCcaddwy3D2+Cx87vCbqs50+wPWfO4cqX7Tsee/KrnXsJ96TY37BHe3yXp8RFoEReB5o2NTfl+3by/Inj+Zdsm4Wv2T5U3javu6q5XI8ppHEn4R84f+G3Pb9LHLc9eDl3QILGm0WsnAwbpRGSeKPDR7N6sCJFCVBnBRlSFoOxaLxV3+zbsST+w2du0rbpglbpX1upUYNDN8ttDooB2o809Z+M0IFWw93TXCvlecVn2uatkj3tlopL3o/nBHVWWudhbdi6zplrJu657M9DVxdWQ2WzUDIgQ7J/++2PgpV7A8tnyDHUgedzAt/8Cfn7UWxUBoNQThpyydoa7Ng35B3ZN8Hv6jzeBjT9UPVZaBCx6QvyasWnGY8UFwJb55tYuMb5vCyy4/VS8d2U/LLj9NEwd2d6vUnB/dYjvgHCHuMO9pzQeZQV9kbm/sMpx2Z5tx7FRbGnxkcf21Buz6XsOFiPzQBG27TuCvYV7hM/TQtSIUCI2LBZXdL5CeNsLK1+ARzSmEeJSd0C+5/5kwiCdiMwTXZFnZpIUJG0cp3E/ONWj1oJxYLJEbvUgva73o1c3/D7x3mcA6DjG27XdLFn2vXrwCngzo+u+NB4PjwPaGMdBGUj3pf9c82Nrq6Zu9X9/4g3Myx3NB0oOGe9XUyZd0+T/bQqygO/vAJ7vCvzylPc1gsFd5m2I9+c7VQ7vKe0MD4znCc0HdPcx5m8KcLhSQ7Pls4DDe433S+wEnPeq+DkCVPIOAM0bR+CUdk2QGleL93gNnDYnujUR/7cr2TcSgB2ZB6peYOkU3wnRTmPlSPm+9JYJ3soa2b70X7fsR8b6XNic4vdCsyj/g3QAuLzT5YgP
M14k2pC3AXO3zxU+RhSkJ0YkomUj/0rtGzIG6URknijwYdM4UpBblzSOYyad6lPaIMOFTeG2TbvdmK0TlboDQIs6yjw5QoEL3/Du/66u18TaPWen88UXetd8YvyH2TwPKDY2uULn8/37Hmox0Nvcrrpg7UvXdWNGuLoDm6s2QpNVENQUpANAu1G+by/aDyx4BHiui7ekfO9qIGed/M8hQVAsU3YU+PgyYb+AnaXi/fLNhw6Qj/krOgB8daP33/BoPvDrs+LXPf1+73/XGMHYv01zgWLBBQ8FiWaOu4ubwnWoKwBgx4GqmXSHzYF+Kf0Mj7GH7YFmL0SLeO/FhEGtxd3pf9u8H/PW5wiD9BBbCOLDfXe1ry7CGYHrul8nvO2lv17CpvxN2Jy/ueLPsr3LsPOwcfTbgJQBAa1SsCqmvojIPNGedAY9pCDuSSclhUYDzftWCbh1j/GkVDh+TRSkJ7QzzgUPpibtgYvfAT6/Cig94j02+Nbaj4CLjPc2nNtULdu2f5M3iGza4/gx2V512Ti36kKjgNR+3n3tle1Y6g0yneKS41rbu0qc/a3u70+Bpsc6fJudkV5Zz8u9neQzF/u+X+kRYMlL3j81SWjn3ebQbbz3Io1I8SHgownAjl+FN+8SBOlhUU4kNIsCbNHA6MeBrwVbKbbMP55BF12cSR0AtDvzWBPCi4yBvKsYWP8N0POymv6W9e7CthciIysDy/YuAwB4XFE4uutSlOdU84vKcLCorMpkkAEpA5CRlWF4rpi4HWh0bCRcfFQoOqU0wrq9VS9W/LplP4pKXQhNNQbpTaOaCuef1+Sithfh3X/exe4jVXtG7DqyCxd+faFfz3Gyj14rx0w6EZkn6u7OoIcUJMukM0inelet7Fq0ZdMQpOdnesuWq6urLHpl7c8Ebv4T+L8vgJv+AM6YfmLPJ20gV2n/efFBYKOgbDa2BZDa3//XEpW8u0uAncv8fw5/1ZRFL7f28+MXwGszfq1caBRw+Zfe8u+E9v69dk32b/IG0C/0AJa8DJQcrnp74QHgvbHSAL1QS0aey5jlbt6hMbTyxms9L/dulxD5aRrw+2vi20Y8eLwfjuxCja8mhAoJsYdg1hmz8P7o93FRswdQuOUu6GVVL75lHvBvX3pUbNVmhEMEXd6PlLjg0QFbiDFIN1vqXs5pd+KmnjfV6rHlGKR7MUgnIvNE2UlR+SBRPSuTjAuszznpRACMgaIuKO+sHqQvfkb8XC1qmcE+UdHJ3n3gCW1P/LnajxaXPK/57Hjwuv4bbzBdXdeLzTUubSXoCQAEp+S9pv3o5Y5kH89+i4J0zQ40au7fc9kdQI9LgRt/B8Z/IN/zbdbhPcBP//GWyi94FCjc7y2Hf+csYM9f4seERGNX71nCm1I7VBrVp2nAmBeB6BTjHd0lgOuo8XjbUUBapV4MiR0l3ft/AQ5n+/iLqcOm2dAjsQdOTzsN0I3nVdWD9OZRLaCXxRjuV+rcWOVnUfM4Lzc0h7FCoWmUH+MWJc5qeRbaNTY2CfRHm9g2aBLRpNav3ZAwSCci89g4jixC2jiOF5WovjXr7e2GfkyNmfR9m7zjpwx3ssnnf1tJSCTQ8Rzj8crBq6wJmCwLL9O0JxDayHg80EF6wU4ge43xeGSi+P5/H6saEAXpsane4NsMm837b3p1BjDpW6C1H431/FFcAPzypDdYn3UasG+D+H7hjYFJX2PXfnHQ1bxj46oHIuLkDeAMNO9e9OpE7wXdA6z9ouanLD4oLqmvB+kJ4uZ0mfurNo/be7AYrkLjqLSj+r4q+737pschxG4M+zTnQWiasSHGiQTpNs2GW3vdWqvHjkgbUevXbWgYpBORedyTThbhFr1XwcZxpAC7E0gfUvGjLsikV5mR/vMj4ki+2yVAo9qfUCtFNkJtzafeTOj2X4y3JXX1ZlDNsDuA9KHG43tWAUWC8Xa1VX2PfbnT7gIiBYHr+q+9M9VF49f8KXWX0TSg5VDg8i+A
637xNtmrxX5jA9dR70UUkahkYPIP0Jv2xM4NxnLqmCbhaBQv2P/fehgw0I9y6W7jgGRB1rzrRQAEVRW+St6LDwJfXAs83gJ4PA348sa6nytfTUpMuDCort48LvNAIVyF4kqWyp3Tw0Ps6J3W2HCfQHV2r+6U5qfg2m7XmnpM5/jOuLzT5Sf0ug0JU19EZB67u5NFSBvHafz6IwW0GlYRyPnMpO/+E1j3lfEO9hBg2D1BXGAda3WaN3gt3Ff1+LqvgbhWAAQt8GXj2/x5rY3fVTuoey8EdD6vds9ZnWw/eodzvJURy1+verzkELDhW+DQLuNjTiRIryylu7fp38Hd3j345Y3/REqLvNUbOYJqAF8ap3v3xce1REF2IQoLjFsUmneMMxyrcPr9wLZF8te1OYFh94pvi2kOpA027o/f8xewfwuQUC3rXLgfeP98IPvvYwd0YNUH3i0cQ26TrzHI7DYNqXHh2LrPGJRX/bkI7sLWwudYumcpLm53/PMxpG0Clm47UOU+miRIP5FMermbe96Mc1ufi1X7Vkm/i8s1i2qGnok9EWIPOeHXbSh4lkJE5rFxHFmErHFcbbrWEgVc5X3poj3pzmO/V+c/JH5836u9TdMaCrsD6HyBOHhd9JTgARrQ5aLavZZ0XvrCwATpxYfEmf9mvb17+buNM/49AWDxs+IrNoEK0svFNANiLqj5fv2v83ZY//U5Y0d8kSYdvAF6I+/e8p3rxZUJqR2MWd0K5WP+Zp3q7c5eXZ8rff97dLtY3MRuzSdVg/uDu4H3z/M2xqtu+Wxg0K3eLQP1pGVCpCBIr1ruvmN/IXR3NNzFybCHVa1qWJ69HG6Pu6JybHCbBDz1Y9W96sHKpJdr0agFWjRqQL+j6hDPUojIPGG5O4N0Uo+ocZzD5uAMVlJDk/besmBIMul2B7D1Z2D7IuONIVHA0NuDvMB6INtfLmoclj7EG2zWRkJbIFqQLQzUvvStC8QXtNuP9v5vs95A45bG23P/ET9foIN0f2ka0PYMYPL3wJU/Ae3Pkt+3aU9g8g8VAToA7FwvCAI1oFl7H0E6ACR2AEY+YjzujAROucP3Yzud660yqe7vT7wz1wHgwFbgrTPFAToAHNrt30WJIEqLNzZSzCssxcGjx99X5Zl1t2Bf+sGSg9iQf7xfQNdmMWgUVvVcTRSkh9pDER9mbkY6BR6DdCIyT9jdnUE6qUeUSWfTOFKGplVkdKVz0uc/KH7soJvrdjZ6XWneRxy8iphtGFdZpX/7KvK3y0egmSErdS8PcjXN/9nuQP0F6ZW16A9M+MjbNb7bJd6O8+XajAAmfu1t/naMx+3B7k3GIDCxRTTCIv34Pdz3amDAjcd/1uzARW8CUZLGe+XCGwNtRxqP52/3bh3JXusN0A8KxhlWVnn8Xz1ITxBMOwCQVSmbXp5Zl+5L33N8X7rdpmFQ66q/M0Tl7imRKbyQrQAG6URkHru7k0WI9sFx/BoppTxIF2y31lxHgL2rjDdEJAADpwR1WfVG0/wLvu0h3ozpiWgtG8UmqFwww+0CNv9oPB7TAkjsdPxnMxcZVAjSyyV2BC54HbjtH+Cit73Z88s+A8KqdszP3XEYZcXGC6XNO/jYj16ZpgFnzgCu/w24+F3g9g3HKxFqIvu3/flR78i4wtyan2Pdl4BLMPKvjqTHizu8by/Pnnv0ioDdXdQSum78bqvcPA4ABleblx7IGekUWAzSicg8No4jixAF6Q5eUCKVlI9PE2TSpZ2zT/k3EBodvDXVN38yzG1HAuGxJ/Y6LSWj60605H3nMuCooMy7/eiq89wT2gIpPWp+vrAYb3ZYNY1SgC4XAGmDhHPqpfvRq49eq0lyF2+fgJoy6JW1GyUes7d1gf9j1ooPApvn+f+aAZYuKHcHvPvQAWDvwaModR/bJ6OHwF1k3Pu9Mmcliivt6686Lz3wM9IpcBikE5F53JNOFiEawcZMOimlUVOgSQdxJt0j2Icd2wLoMzn4
66pP/gSvJ1LqXi46qWpmu9z2RYBH0CTAXxu/Fx8XZYH9uSChUhbdhF2C0Wt2pw3JrWOC/+LOcKDjGP/vL9ti4Wt0W5A1jQ2H0268+FFe4r6jWhM50b70Uk8pVu1bVfFzenwEOqZ4L14EY0Y6BQ6DdCIyT9jdnYEPqcelM5NOFtDqNPGedNG20GH/8Xa/buh8Ba+hjYB2ZwbmdUT70osOADlra/d8ui4O0kMbeUeDVdflwppnlvu7R18hpcUuZG8TZGnbxMDhrKPzBX8v5LQ8Fbh+sXc7QnUb5/qfeQ8w7xg2Y8l7ebO47furdn73Z1+6pmmYfm5nxIQ7g97ZnU4Mg3QiMk/YOI7l7qQelruTJbQ6TTgC3BC7JXYKTAbZCjpfAEDSvKrjWMAZFpjXkY1i2/xT7Z5v/2Ygb5vxeJsRgEPQcTw6GWh5iu/ntGAmfc/mAnjcxje13/vRA6HlKRXTE6Tanw1c+ol3+0hXwTg/dwmw/pvgrM8PopL3HceC9B3VZqZ7iptBdxsv4C3Zs6TKz33T47DwjtNwxamC7QBgJl0VDNKJyDw2jiOLYJBOlpA2WJhJh61akHP6/SdP1VKjFHnw2i2AFyrSBom/v35/FSg5bP75NtXQ1V2kaw0l7xYM0nO2HxIeT+1Yh0G6ze6tVJDpdgkw7r3jF3xk1Rt/11/JuyhI33+kFIeLywwz0wE7tBJjyfv6vPXYkLehyrHGkSFo3OiI8DWZSVcDg3QiMk+4J/0kOXEkSxEG6RqDdFJMWCPoNmMGrEq5e+qAwJV4W0WvicZjsWlA+tDAvUZotLgMvWi/N1A3SzR6TbMDbUfIH9NxDGD3sYXBgkF6/t5CwzFHqB0JzaPqdiE9LoWwIqPvNcB5rwL2St8HiR2BpK7G+27/BTgsaeIYZOkJ4g7vOw4UIXO/8d84wdZdeP8P139oOLbnyB7DMc5IVweDdCIyj93dySJEc9LtvKBEChIG6ZUz6SMelGxSb8C6XFi1+VdIFDD2xcBfFB58q/j4by8Chfv9f57C/d7O7tWlDfLdnT2sEdDexwUYCwbpeYIgPS45Apqtjt/DyV2AU+88/rNmB067BzjrKcAmCIOEVRo6sPbzoC3RlzRJh/dt+wuxI696Jh3o3OgUhAu2H36//XsUFBdUObb7yG7D/TgjXR31HqTPnDkT6enpCAsLQ//+/bF8+XKf93/++efRvn17hIeHIzU1FbfddhuKi4t9PoaIAkzYOI7ZSVIPy93JKnRNcKGz/Fy57SggbWCdrkcJmgaMex+44nvg4neAW1bJ95CfiNbDxdn50sPA4mf9f57NPwG6oCu8r1L3crKSd80OxDT3fw0KcLs8OJhrnEzQOEUccAbdsHuPzVp/B5iyHDjtbvkFry4XQZh5r6eS95aSIH3ZtgModRnfa+d4lmNsgXH0XYm7BJ9vrnqhYU+hMZPOUnd11GuQPmfOHEydOhUPPPAAVq5cie7du2PUqFHIzc0V3v/DDz/E3XffjQceeADr16/Hm2++iTlz5uDee++t45UTneSEjeMY+JB6hN3dWe5OKtKNp2TeTLrm3Yt+stI0IH0w0Pl8IKpJ8F5jxEPi21bMBgp2+vc80tFrfmxTaHuGdx56dTHNLVepdjD3KDweY9O4uPoK0oFjs9bPBxKMe7ariGkGpA8xHt+7ytsUsI41jQ2DQ1B9sHDjPsOxVC0Hp29/CpceEvdSmLNxTsWF6zJ3GXKLjPEWm8apo16D9GeffRbXXHMNJk+ejE6dOuG1115DREQE3nrrLeH9lyxZgsGDB+PSSy9Feno6Ro4ciQkTJtSYfSeiAOOcdLIIZtLJKnSX8b2q2eDt5p7cpe4XdLJp3ls8V9tdCiycUfPjy4qBLQuMx5t0AOJa1fx4R6i3kVl1rYfV/FjFiErdgXrMpJsl6vIO1Es23WG3Ccew7S6oWqlghxvPO1+B01WIVmUuDDxqrGTYW7gXi3YuAgBkF2XDI6j6YCZdHfUWpJeW
luLPP//EiBHHG2nYbDaMGDECS5cuFT5m0KBB+PPPPyuC8m3btuH777/HWWfJy4hKSkpw6NChKn+I6ASxuztZhFtwQYlBOqlIdxvfq1p4I2D0E/WwmpPU8GnimeWrPwJy1/t+bOZioEwQnLYfbeL1/wMktDv+c1wrYPC//H+8IvKzxUF6XIq4CZpyOp0L2AXj8tZ8AuiCWYlBlh5f87/bTfYv0dt2PNN/6UFx5/YPN3gbyImaxgEM0lVSb0H6/v374Xa7kZSUVOV4UlISsrPFHRQvvfRSTJ8+HUOGDIHT6UTr1q1x2mmn+Sx3nzFjBmJiYir+pKamBvTvQXRSYuM4sghRJt2usXEcKahMcPGz6/lARB2OrDrZNWl/rBt4NboHWPCI/HFFefLb/dmPXi4sBrhhKTDhY2DCHODaRUBcS/8frwhRJt3utCE6PrweVlML4Y2BtiONx/Mzgd1/1vlyZM3jyvXSNuFmx/+qHBt69CialRm//5ZnL8fm/M3SIJ3l7uqo98ZxZixcuBCPPfYYXnnlFaxcuRJffPEFvvvuOzz88MPSx9xzzz04ePBgxZ+dO/3cV0REYroOCDpmM5NOKhLuSed7lRQkLHcPCauHlZzkTrtHPA5tw7fATsH2ykN7gbdHe/csVxfZBGjW29zr2x3e7Hv7M71d3y1INH6tcXIEbHXd2f1EdBV1eUe9lLz7yqRHoQjPO2fCoVUtXbcDmCDZm/7Rho+End0BBukqqbcgPSEhAXa7HTk5OVWO5+TkIDk5WfiYadOm4fLLL8fVV1+Nrl274vzzz8djjz2GGTNmwOMRdNMEEBoaikaNGlX5Q0QnQJRFBzgnnZQkzKTzvUqK0T0eYRmtZud7tc7FNAf6XSO+bf6DVf875WcCb58J7Nsgvn/nC06670aP24P8HONosMbJFtmPXq7dmUCoIGb45wvALTkPCpL0BPm/3YPO99DCZmwiBwDnHTmCcEF89O22b7Ehz/ie5Yx0tdRbkB4SEoLevXsjIyOj4pjH40FGRgYGDhSPGSkqKoKt2kxD+7EvML0e9ogQnZSkQTrL3Uk9oiDdyfcqKUaURQcAzcmqj3ox9HZxgLbjN2DLsfPW3A3AW2d6A3WR6KbeUV8nmUP7i+FxiTq7W2Q/ejlnGNBxrPF44T5g28I6XUq6pNz9bNvvuMj+i/hBrU5DjEfH2UeMVQ1HXUexaNciw/GmUU05I10h9VruPnXqVMyePRvvvvsu1q9fjxtuuAGFhYWYPHkyAGDixIm45557Ku4/ZswYvPrqq/j444+xfft2zJs3D9OmTcOYMWMqgnUiCjJpkM6TSVKPW7A1g3vSSTmi/egAwHOb+hERBwy+RXzb/Ae9+5LfHg0c3iu+T2waMPn7k7KfgOU7u1fWTVLyvqZuS96bNQ6HvdpWgRQcwGPON8QPaDEQuPRTILYFJhwSN5ATYam7Wur1rHr8+PHYt28f7r//fmRnZ6NHjx6YO3duRTO5rKysKpnz++67D5qm4b777sPu3bvRpEkTjBkzBo8++mh9/RWITj6izu4Ag3RSEkewkRWIOrsDgOZg1Ue96X8DsGwWUFhtlnTOGuDNkfIL1k06AJd/CTRKCfoSVSQL0ut1RnptpQ8FopKBI9UaWq//FigtBELq5u/ktNuQ2jgcmQe82whs8OC5kFcQoxm3FSC0EXDBLMARAvS9Bu3mTUPfo8VYEV5zf4tmkezsrpJ6bxx30003YceOHSgpKcGyZcvQv3//itsWLlyId955p+Jnh8OBBx54AFu2bMHRo0eRlZWFmTNnIjY2tu4XTnSyEs1IB7zNbogUwyCdrEBa7u7ge7XehEYBp94pvk0WoDftCVzx/UkboAPipnE2u4ZGTSzS2b0ymx3ocqHxeFkhsPGHOl1K5Q7v19q/xQCbZCTg2c8CsS28/7/n/wGOcFwqaSBXnSGT7nHXy8g58qr3IJ2ILMbDTDpZB8vdyQrkQTrfq/Wq1ySgcbp/900bAkz8
Gog8uRtviTLpsUkRsNstGnJIS94/rdNllHd476Jtw1SH+LX1ruOqrjciDug2DqcVHUWy5HdMZVVmpK/+GHiuCzA9Dnh3DHBw1wmtn8yz6CeGiOoN96SThZQJLioxk07KkZ1AM5NevxwhwLD7ar5f21HA/31m2ZFpgaJ7dBRkN4DO7pWl9ADi2xqPb5kPFOXV2TLSEyJhgwdPOV9HiGa8+JxjS4R29tPGB/a/Dg4A4/3Ym940qqk3c/7zDOB/1wGH9wC6B9j+C/DtbQH4W5AZDNKJyBx2dycLcQu2ZzBIJ9XIM+n8vVrvulwIJHWR3975AmD8fwGnBcu5A+xwXjFcZcaRX5br7F6ZpgHdxhmPe1xA5q91toz0+EicZ/sVHW07Dbe5dQ0fNLsPCIsxPjCpM5A2BBcePoIQj+/S9aYRKcDce4BFjxtv3PwTUHigtsunWmCQTkTmyOaDnmSzYEl9bo8bOownJQ6NQTqpRXfJGsfx92q9s9mA0x8Q39ZrEnDhG96MOzWszu6VdTpXfDz77zpbQlqsHVOdnwlvm+k+F57mA+QP7n8tGns8OKtQ/N8HODYj/acHgGWvyp/nwBZ/l0sBwCCdiMyRZdLtzPiQWkT70QFm0kk9ukvc64ON4xTR9gyg37WVDmjAkNuAMS/wAnUlDaqze2XxbYGQaOPxvavrbAkttn2C5tp+w/FtnmS86LoA6Qk+/o3bnw00au6zgVxTD6Ct/sD3IvK2+btcCgD+9icic7gnnSxC1NkdAOw8qSbVSEawcWqGIjQNGP0k0OUiYP9Gbxf35K71vSrliDq7azYNsYkWLncHvNUUyV2BrCVVj9dVkF5yGI5fBfvNATztGgcXHBWN5YTsDqDvleiYMR09i4vxV5hxHFvTwoKa18EgvU4xk05E5rC7O1mEqGkcwEw6qYcj2CxA04AW/YFeExmgS+TtNTaNi2kSDruzAYQbKd2Mx47kAIezjccDbelMoMiYRf/b0xI/ePoBqDqiTajXFYA9FJdKGsg186P7O/K21nwfCpgG8Kkhojolm5POwIcUIy135550UoxeJgnSnXyvkjXoui7MpFu+1L1cSnfx8b1B3pdeuB9Y8pLwpidcl0CHDVGhDiRE1dAXITIe6HoRTi8sQpIgIO9bXFLzWphJr1MM0onIHJa7k0XIyt2ZSSflyBpy2rk1g6zhSH4JykqMF0YbJ1u81L1csiCTDgDZQS55X/wMUGrMfv/q7ozfPN6KjrT4CGiaVvNz9bsWTgAz9h2AQz/eVHXA0aM4vbBSFURoDJDYyfj4A9u8I9qoTvBMhYjMcbPcnaxBNH4NAOwaAx9SC8vdyepEWXSgAXR2L9ekPWAPBdzVMs7B3JdekAWseEN405OuSyr+f3pNpe7lmvYAUvuj785l+GnnbvwSHo4mbjcGHy1GxbdiRAJw+RfA2s+B3HVVH19y0DsbPjLe9F+FzGMmnYjMYXd3sghm0skqGKST1TXYzu7l7E4gSZBdDma5+88zAHep4fB37n74W29d8XN6golqhWNTCpq4PbjwSCFOqRygN2oGXDnXW9of11r8eJa81xkG6URkjrTcndlJUkuZzsZxZA0M0snqhJl0DYhtKOXugHhfesEO4Gh+4F8rZx2w+iPDYRdseMY1rsqxIW2a+P+8nc4Fopsaj8e18gboCW2P/yzCIL3OMEgnInO4J50sQlbuziCdVMMgnaxO1Nm9UXwYnCEN6AK+rHlc9prAv9aChwEY93/vSb8Qu+3NK34+q2syBrSK8/957U7gvJmArVL1Y9OewOS5QGyL48ekQTo7vNcV/vYnInOkQTrL3UktLHcny+CcdLIwXdeRn92AO7uXS5Z1eF8NtDwlcK+TtQzY+L3xuCMMLc5/CD+WxeLPHflIiA7FKW0T/GsaV1nr4cCtq4DN84DIJkD70cZqyOgUwBEOuI5WPc5Mep3hb38iMoeN48giZCPY2DiOVMMRbGRlRYdKUVJkfA83mKZx5ZI6
AZodqP7dEsjmcboOzH9QfFu/a4GYZkgHkJ5wgv+2Mc2BPpPlt9tsQFxLY/M4Bul1huXuRGSObE46Mz6kGGbSySpY7k5WJuvs3uAy6c5wb5f36gLZPG7LfCBrifF4aAww5LbAvY4/RCXvB1juXlcYpBOROdyTThYhDdI1vldJMZyTThYm2o8ONMBMOiDel75/E1AqvlBhiscDzH9IfNuQW4EIE3vPA0EUpBcXeMewUdAxSCciczwsdydrcOnMpJM1yDPp7PVB6pPOSG9Ind3LJXcTHNSBnH9O/LnXfg7kCJrQRSUB/a8/8ec3S9o8bnvdruMkxSCdiMxh4ziyCJa7k1VI96Q7mEkn9YlmpEfFhSIkrAH+rpV1eD/RfemuUuDnR8S3nXonEFIPVQns8F6vGKQTkTmyPemck06KkY1gs/O9SorRJeXu3JNOVnBSdHYvl9xVfPxEg/SV7wL5mcbjjVsCvSad2HPXVnxr8XE2j6sTDNKJyBx2dyeL4J50sgxJuTsYpJPijh4uxdHDxvOCBrkfHQDCGokzzCcSpJccARY9Kb5t+H3e2eb1IbopYA81HmeQXicYpBORObJy9/r6EiGS4J50sgrdJa76YCadVCfKogNAXHIDDdIBccl77npvyXptLHsVKMw1Hk/uCnS+oHbPGQjlY9iqY4f3OsEgnYjMYXd3sgjuSSer4Ag2sqqTqrN7OVHzOE8ZsG+9+ecqygN+e1F82+kPegPl+hQnKHlnJr1OMEgnInOkQTr3+ZJaGKSTVegu8TYiBumkOlHTOKCBdnYvJ20eV4t56YufAUoOGY+nDwXanG7++QJNlEk/mgccza/7tcj8+B9g4ePA4Zz6XklAMUgnInPY3Z0swq1LGsdpvKBEinFLGnIySCfFicavRcSEICyyAZ8TBKrD+8FdwPLZ4ttOfwDQNHPPFwzSDu+KZNOP5ALLXgcWzgCe6wx8fg2w64/6XlVAMEgnInPYOI4sgpl0sgr5CDa+V0ltokx6g+3sXi4yAWjUzHjcbJC+8HHAXWI83uEcILVv7dYWaNIO74rMSv/zHe9WA8D7v2s+Ad44HVjyUr0uKxAYpBOROdyTThbB7u5kFbook65p0Op7PyqRD8WFZSg6aGyW1qD3o5cT7UvPWSsfU1vdvo3Aqg+MxzUbMHzaia0tkFTOpLvLgD/eEtygAR3OrvPlBBp/+xOROZyTThbBTDpZhWhPOrPopLr8bHHTuAafSQfEJe9lRcCBLf49fsHDgO4xHu9+KZDY4cTWFkiNmgH2EOPxQHd413Xzj1n/NXB4r/F425HyiwsWwiCdiMzxCMrdbQ419k4RVSLdk84LSqQaUXd3ZwPe00sNgmg/OgDEpTTgpnHlUgSZdMC/5nG7/gTWf2M8bg8BTrv7xNYVaDY70FjQPC4QmXRXKfDTfcCTrYFnOgBLXjb3+GWzxMf7X3via1MAg3QiMkeUnWTTOFIQM+lkFaI56ZqdF5NIbdLO7idrJh0A9q7y/ThdB+Y/IL6t7zVAbOoJLSsoRFnpQATpv8/07h0v2g8cyQZ++g+w6kP/Hrt3NbDzd+Px+DZAq+EnvjYFMEgnInOEQTqDHlIPg3SyCtGcdJa7k+rys41Beni0E+FRgvLohqZRMyAi3ng8u4ZM+tYFQOZi4/GQaGDo7YFZW6CJgvSi/UDxwRN7XtF+8vkPAqXibRRVyLLo/a6r/9nyAdIw/hZEVHfcoiCdGR9Sj0tn4ziyBuGcdAd/r5LaRJn0xsknQRYd8G7xEzWP27tavr/a4wEyHhLfNvgWIFIQ9KsgPgjN4w7uBgqyjMeP5ADLXvP92MIDwJpPjcdDooEeE2q/JsUwSCcic0TZSTvL3Uk9zKSTZYjK3R38vUrqKi124UiecXzYSdE0rpyo5L34oDj4BIB1/xOPaYtIAAbcGNi1BVIwOrxnLZXf9tvzwNF8+e0r3xWPrutxKRAaXfs1KYZBOhGZw3J3sgi3ZBKBXWOGktTCcney
Glln95NiP3o5afM4QSBechiYL8min3onEBoVuHUFmixIPxCkIL34IPDrc+Lb3C5gxZvi2/o1jIZx5RikE5E5DNLJIkTl7ho0dncn5eiCbURsHEcqk3V2b3wydHYvl9JDfFy0L/2Hu4GCHcbjsS2A3lcEclWBF5MqbhB8Ipn0HT6CdABY9jpwaI/x+MbvgUO7jMdbnw4ktKn9ehTEIJ2IzGGQThYhKndnqTspSZRJd/K9SuqSdXY/qcrdG7f07oOurnom/Z8vgVX/FT/HsPsAR2jAlxZQNjvQON14vLZB+tF8IHed7/u4ioFFTxiPL5eNXbuudmtRGIN0IjLHLZmTTqQYBulkFXqZ4OIn96STwkSZ9NAIByIanQSd3cvZbEByV+PxykH6wd3AN7eKH586AOh6UXDWFmjCMWxba/dcO5cDkDTXq2zl+8D+zcd/zvlH3Bm/cUugzRm1W4vCGKQTkTnMpJNFCIN0dnYnBeluzkkna5F1dtc0rR5WU49E+9KP5ACHs73d3L+8HiguMN4nJBq44HXrTMeJb208VrgPKD5k/rl87UevTHcDCx45/rMsi97vmgYzdq2yhvc3IqLgEjXjsjPwIfW4deN7lfvRSUVsHEdW4nF7cOhAseF43Mm0H72cqMM7AOz9G1j6MrD9F/HtZz8jLiFXlax5XP52888l2o/ujBTve1/3JbB7pbdE/u9PBI+LAHpcZn4NFsAgnYjM8bDcnayB5e5kGYIgnXPSSVWuMo+wWjkiRvG91cEgC9JX/RfImC6+rctFQLdxwVtTMMS1FB8/YLLkvawY2LPSeDxtEND3KvFjMh4C/vovUCaYKND9EiA81twaLIJBOhGZIyx3595JUo8wk87xa6QgcSadv1dJTR6XeD+x3XGSlboDQEI7wC64OLHuK3FSIybVm0W32raAOEG5O2C+edyelYC71Hg8bSAw9A5vRr26bQuBRU+Kn6+BjV2rjEE6EZnDPelkEWWCEyRm0klFLHcnK3G7PcLjNvtJGFbYnUBSZz/vrAHnv27NzG9Mqvhcz2yQvmOJ+HiLgUBUE2DQTeLbSwR731ueAiR2NPf6FmL6G6CkpATLli3Djh07UFRUhCZNmqBnz55o2VJSBkFEDYtgnq9lGp/QSUVU7u5k1QcpiHPSyUrcLnGQbnechEE64G0eJyrhrm7oVCB9cPDXEwx2BxCbZuzobjZIz/pd8NwhQNNe3v8/8CZgxRtA0YGan6tfwxu7VpnfQfpvv/2GF154Ad988w3KysoQExOD8PBw5OXloaSkBK1atcK1116L66+/HtHRgpmBRNQwiDLpdgY+pB63oMkhy91JSaIRbJyTToqSlbvb7BYr4Q4U2b70ypr2BE67J/hrCab41icWpHvcwM5lxuNNewHOMO//D2vkLXv/sYZ/q5gWQPvR/r+2Bfl1yWvs2LEYP3480tPT8dNPP+Hw4cM4cOAAdu3ahaKiImzevBn33XcfMjIy0K5dO8ybNy/Y6yai+sJyd7IIl87GcWQN3JNOViIrdz9pM+nJNQTpzgjggjesn9AQdXg/kgOUHPbv8bnrxGXraQOr/tznSm95vS99r2rwVZx+na2cffbZ+Pzzz+F0it9crVq1QqtWrTBp0iSsW7cOe/fuDegiiUgh7O5OFiHMpDfwL3WyJs5JJyuRZtJPxsZxAJDUCdDs3rneImc+DiS0qds1BYNsDFvedvG8+OpEo9cAoMWgqj87w7xVB1/dKL6/IwzoNbHm17M4vy55XXfdddIAvbpOnTrh9NNPP6FFEZHCRHPSGaSTgjiCjayCjePISqSZ9JOxcRwAOMOBJu3Ft3U4p+EElCfa4T1L1DROA1L7GQ93vwRo0kH8PF0vBiLi/HtNCzP9aWrVqhUOHDBu5i8oKECrVpIrLETUcLiZSSdrEJa7a3yvkoI4J50shJl0gVbDjMeikoGxL1lv3JqMbFZ69X3qIroubhqX1Fnc7d5mB4ZPEz9X/4bdMK6c6SA9
MzMTbkFZVklJCXbv3h2QRRGRwrgnnSyCmXSyCu5JJythJl1gyL+AiITjP4c3Bi5+p2FlfGNbeMv6q/Mnk56fCRwWbIduMUD+mA5nA70mVT029A4guWvNr9cA+H228vXXX1f8/x9//BExMTEVP7vdbmRkZCA9PT2giyMiBYnK3a3eDIUaJAbpZBUsdycrYSZdICoRuGUl8NcHgGbzdh5vnFbfqwosu9P7d6oelOdtr/mxoiw64J2PLqNpwDnPA+3OBPau9jaYSz/F7+Vand/fAOeddx4AQNM0TJpU9aqG0+lEeno6nnnmmYAujogUJGwcx7JMUo9b0MSHI9hINbquA6LGcSx3J0VJ56SfzJl0AAiLAQZKmp01FHGtjEH6AT/K3YX70eE7SAcAmw3ocJb3z0nG7yDd4/F+IFu2bIkVK1YgISGhhkcQUYMkLHdnJp3Uw0w6WYJoPzoAMJNOivK4ZZn0kzxIPxkIx7BlA6WFQEik/HGiTHpsCyCmWeDW1sCY/jRt27ZNGqAXFRWd8IKISHHck04WwSCdrEBU6g4Amp3vVVKTNJN+Mpe7nyykHd59lLwX7gf2bzIerz56jaowHaSPGDFC2CBu2bJl6NGjRyDWRESq8ngAXfDlzMCHFCQM0tndnRQjmpEOcE86qcsjaRxnO9nL3U8G0lnpPkres2Tz0X00jSPzQXpYWBi6deuGOXPmAPCWwT/44IMYOnQozjrr5NsvQHRSEWXRAYAZH1KQcAQbLyiRYvQyQZ8PAJqT71VSk1vSOI6Z9JOANEj30eFd1jQujZl0X0x/A3z33XeYOXMmrrzySnz11VfIzMzEjh078O2332LkyJHBWCMRqUIWpDPwIQW5BZMI7GxySKqRZNJ58ZNUxUz6Sax8DFv1xqy+gvQdgqZx4XFAQrvArq2BqdU3wJQpU7Br1y488cQTcDgcWLhwIQYN4tUQogZP1NkdYJBOSuKedLIC6Z50lruTophJP4k5QoDYVO/c88oOSIL00kLv+LTqWgz0jlgjKdOXvPLz83HhhRfi1Vdfxeuvv45x48Zh5MiReOWVV4KxPiJSiWhGOsDu7qQkUbk7R7CRahikk9Uwk36SE5W8yzLpu1YYs+4A96P7wfSnqUuXLsjJycFff/2Fa665Bv/973/x5ptvYtq0aTj77LODsUYiUoW03J2BD6lF13VhJt3JC0qkGukINv5eJTUxk36SE3V4P7wHKBVM+dohaRrH/eg1Mh2kX3/99fjll1/QsmXLimPjx4/H6tWrUVpaGtDFEZFi3Cx3J2vwiKYQgJl0Ug8z6WQ1HskINs5JP0nImsflC8awiTq7O8KB5G6BXVMDZPrTNG3aNNhs3ocVFxdXHG/evDnmzZsXuJURkXqk3d2ZnSS1iErdAe5JJ/VIR7CxcRwpyu2WZNLtzKSfFGRB+oFqY9jcZd5y9+qa9/HubSefTAfpHo8HDz/8MJo1a4aoqChs2+bdgzBt2jS8+eabAV8gESmE3d3JIkSl7gCDdFKPXibJpHMEGylKmEnXAM3GIP2kEC8odweAr28CMh4Gjuzz/pz9N1AmKIFnqbtfTAfpjzzyCN555x08+eSTCAk5fhWkS5cueOONNwK6OCJSDIN0sghZkM4RbKQcl2ROOsvdSVGiTLrdboPGbt0nh9gWgCYIIYsPAoufBp7vAnx3B7D2C/Hj2TTOL6aD9Pfeew+zZs3CZZddBrv9+MlO9+7dsWHDhoAujogUwyCdLEIWpLNxHKlGVu7OOemkKlEm3camcScPRyiQ0l1+u6sYWDEbWPqy8TbNDjTvG7y1NSCmg/Tdu3ejTZs2huMejwdlZZKmUkTUMLBxHFmEWzTyBWwcR+phuTtZjVswgs3O8Wsnl1PurN25X3JXIDQ68OtpgEx/ojp16oTFixcbjn/22Wfo2bNnQBZFRIqSzUln4zhSDPekk1WwuztZjUcwgo2Z9JNMh7OAyT8A6UPNPY770f1m+hvg/vvvx6RJk7B7
9254PB588cUX2LhxI9577z18++23wVgjEamCc9LJIqR70plJJ9W4ZVMz+F4lNbkF5e7MpJ+EUvsBV3wL7FwB/PY8sMGPOJD70f1m+hN17rnn4ptvvsH8+fMRGRmJ+++/H+vXr8c333yDM844IxhrJCJVeFjuTtbAEWxkFfJMOiuUSE0eQeM4ZtJPYql9gUs+AKYsB3pcJj8ntDmBtMF1uzYLq9XZytChQzkTnehkJM2k82SS1MLGcWQVuksyJ93BTDqpSZhJdzCTftJr0h447xXgtHuApTOBle9WHcE26CYgMqH+1mcxpoP0Vq1aYcWKFYiPj69yvKCgAL169aqYm05EDRC7u5NFuCX9EziCjVSjcwQbWYwwk25nJp2OiU0FRj8OnPJvYO3nQN5W7170jmPre2WWYvobIDMzE27BuJCSkhLs3r07IIsiIkXJ9k4y8CHFSBvHaQx8SDGScncwSCdFMZNOfomMB/pfW9+rsCy/vwG+/vrriv//448/IiYmpuJnt9uNjIwMpKenB3RxRKQYWSad3d1JMdIRbLygRIqRl7szSCc1MZNOFHx+fwOcd955AABN0zBp0qQqtzmdTqSnp+OZZ54J6OKISDFsHEcWUSZ5r7JxHKmGI9jIaphJJwo+v78BPB7vB7Jly5ZYsWIFEhK48Z/opCObk85mXKQYNo4jq5DuSecINlKUOJPOIJ0okExfpt2+fXsw1kFEVsA56WQR0nJ3zkkn1Qj6/AAAOIKNFCXOpLPcnSiQalVLlZGRgYyMDOTm5lZk2Mu99dZbAVkYESnIzXJ3sgZp4zi+V0kxepmk3N3J9yqpySMI0plJJwos098ADz30EKZPn44+ffogJSUFmsYrZ0QnDTaOI4uQjmBjJp0Uo0sy6Sx3J1W5BeXuzKQTBZbpIP21117DO++8g8svvzwY6yEilXFOOllEmc7GcWQNnJNOViPMpLNxHFFAmf5ElZaWYtCgQcFYCxGpjnvSySJkmXQG6aQc6Zx0ViiRmoSZdI5gIwoo00H61VdfjQ8//DAYayEi1UmDdJ5Mklq4J52sQj4nnRc/SU3MpBMFn+mzleLiYsyaNQvz589Ht27d4HRWPTl/9tlnA7Y4IlIMy93JIqRBusb3KqmFc9LJSjweHboxkc5MOlGAmf4G+Pvvv9GjRw8AwNq1a6vcxiZyRA2cm0E6WYN0BBu3ZpBqZL9X2TiOFCTKogPMpBMFmukz659//jkY6yAiK5B2d2eQTmop87BxHFmDcASbw8HEBylJtB8dYCadKNBO6LLXrl27sGvXrkCthYhUx3J3sghp4ziWu5NiROXuLHUnVTGTTlQ3TH+iPB4Ppk+fjpiYGKSlpSEtLQ2xsbF4+OGH4fGIP7hE1EBIspNsHEeqcelsHEfWoAvK3TkjnVTldsky6QzSiQLJ9NnKf/7zH7z55pt4/PHHMXjwYADAr7/+igcffBDFxcV49NFHA75IIlKEJDvJTDqpht3dyTKYSScL8bhlmXSWuxMFkulvgXfffRdvvPEGxo4dW3GsW7duaNasGW688UYG6UQNmVuUSdcAG6+gk1pk5e5sHEeqEe5Jd7I6idTklpS721nuThRQpj9ReXl56NChg+F4hw4dkJeXF5BFEZGiRNlJO08mST3SxnHck06K0d3GC0osdydVeSSN42xsHEcUUKaD9O7du+Pll182HH/55ZfRvXv3gCyKiBQlCtJZPkwKko1gY7k7qYaN48hKmEknqhumvwWefPJJnH322Zg/fz4GDhwIAFi6dCl27tyJ77//PuALJCKFMEgni+CedLIMQZAOBzPppCZm0onqhunLXqeeeio2btyI888/HwUFBSgoKMAFF1yAjRs3YujQocFYIxGpgkE6WYQok65Bg01jtofUIs6kcxsRqYmZdKK6Uauz62bNmrFBHNHJiEE6WYQok84sOqmI5e5kJR7JCDZm0okCy/Rlr7fffhuffvqp4finn36Kd9991/QCZs6cifT0
dISFhaF///5Yvny5z/sXFBRgypQpSElJQWhoKNq1a8cye6K6IuruzsCHFMQgnayCc9LJStySEWzMpBMFlulP1IwZM5CQkGA4npiYiMcee8zUc82ZMwdTp07FAw88gJUrV6J79+4YNWoUcnNzhfcvLS3FGWecgczMTHz22WfYuHEjZs+ejWbNmpn9axBRbYjGWtkZ+JB6hEE6O7uTioQj2PheJTXJMul2zkknCijT3wJZWVlo2bKl4XhaWhqysrJMPdezzz6La665BpMnTwYAvPbaa/juu+/w1ltv4e677zbc/6233kJeXh6WLFkC57EZounp6Wb/CkRUWyx3J4tw6cykkzVwTzpZiSyTbrMzk04USKY/UYmJifj7778Nx1evXo34+Hi/n6e0tBR//vknRowYcXwxNhtGjBiBpUuXCh/z9ddfY+DAgZgyZQqSkpLQpUsXPPbYY3ALZoyWKykpwaFDh6r8IaJaEs2etvFkktTjFlR92G0sISb1cE46WYmHjeOI6oTpT9SECRNwyy234Oeff4bb7Ybb7caCBQtw66234pJLLvH7efbv3w+3242kpKQqx5OSkpCdnS18zLZt2/DZZ5/B7Xbj+++/x7Rp0/DMM8/gkUcekb7OjBkzEBMTU/EnNTXV7zUSUTXMpJNFcE86WYXuMl78ZOM4UpWbI9iI6oTpb4GHH34YmZmZOP300+E49iXi8XgwceJE03vSzfJ4PEhMTMSsWbNgt9vRu3dv7N69G0899RQeeOAB4WPuueceTJ06teLnQ4cOMVAnqi1BgyMwO0kKEgXpdo3vVVKQS1ANyDnppChm0onqhukgPSQkBHPmzMEjjzyCVatWITw8HF27dkVaWpqp50lISIDdbkdOTk6V4zk5OUhOThY+JiUlBU6nE/ZKZWAdO3ZEdnY2SktLERISYnhMaGgoQkNDTa2NiCREmXQ7y91JPaI96U5uzSAFcU86WYmbI9iI6kStL3u1bdsWF198Mc455xzTATrgDfZ79+6NjIyMimMejwcZGRkYOHCg8DGDBw/Gli1b4PEcv4q3adMmpKSkCAN0IgowlruTRbDcnayCc9LJSjiCjahu1OsnaurUqZg9ezbeffddrF+/HjfccAMKCwsrur1PnDgR99xzT8X9b7jhBuTl5eHWW2/Fpk2b8N133+Gxxx7DlClT6uuvQHRyETaO48kkqcetCxrHsdydFCSck85yd1KUbASbjSPYiAKqXs+ux48fj3379uH+++9HdnY2evTogblz51Y0k8vKyoLNdvw6QmpqKn788Ufcdttt6NatG5o1a4Zbb70Vd911V339FYhOLqI56QzSSUHMpJNliOakM5NOipJm0jmCjSig6v1b4KabbsJNN90kvG3hwoWGYwMHDsTvv/8e5FURkRDL3ckiOIKNrEI8go2/V0lNzKQT1Q1Tl71cLhemT5+OXbt2BWs9RKQyN8vdyRrKBFszHBrfq6Qe7kknK5Fl0m3MpBMFlKlPlMPhwFNPPQWX4AuFiE4ConJ3dncnBbG7O1mFMEh3MkgnNYky6ZpNg83GTDpRIJm+7DV8+HAsWrQoGGshItUJy91ZQkzqYbk7WYYo8cFyd1KUKJNu5/g1ooAz/S0wevRo3H333VizZg169+6NyMjIKrePHTs2YIsjIsWwuztZBBvHkRXobjegCzKTLHcnRXlcxiDdxvFrRAFn+lvgxhtvBAA8++yzhts0TYNb0ACFiBoIYSadJcSkHo5gIysQlboDDNJJXW5BubudTeOIAs70t4DHI24YQUQnAY5gI4sQNo7je5VUI+vxwznppCiPoNydTeOIAu+EPlXFxcWBWgcRWYGouzv3TpKChOXu7O5OimEmnayGmXSiumE6SHe73Xj44YfRrFkzREVFYdu2bQCAadOm4c033wz4AolIIZyTThYhKndnJp1UI5qRDnBOOqmLmXSiumH6U/Xoo4/inXfewZNPPomQkJCK4126dMEbb7wR0MURkUJ0
nY3jyDLYOI6sQC+TZNI5go0UxUw6Ud0wHaS/9957mDVrFi677DLY7cf3THXv3h0bNmwI6OKISCG6pB8FAx9SEEewkSW4BBc+wXJ3Uhcz6UR1w/Snavfu3WjTpo3huMfjQVmZ+MuGiBoAUak7wCCdlMQ96WQFsnJ39vogVTGTTlQ3TAfpnTp1wuLFiw3HP/vsM/Ts2TMgiyIiBYmaxgEM0kk5uq7DpbPcndTHxnFkNcykE9UN098C999/PyZNmoTdu3fD4/Hgiy++wMaNG/Hee+/h22+/DcYaiUgFsky6nXPSSS2ipnEAg3RSjzRI5550UhQz6UR1w/Slr3PPPRfffPMN5s+fj8jISNx///1Yv349vvnmG5xxxhnBWCMRqUA0Ix0AuM+XFCMqdQcAu8b3KilGNifdzvcqqcnjEmTSHcykEwVarS7VDh06FPPmzQv0WohIZaLO7gDL3Uk5zKSTVcjL3VmhRGryuAWZdJa7EwVcrc9Y/vjjD6xfvx6Ad5967969A7YoIlKQtHEcTyZJLbJMOoN0Uo3uksxJdzCTTmpyi/aks9ydKOBMn7Hs2rULEyZMwG+//YbY2FgAQEFBAQYNGoSPP/4YzZs3D/QaiUgF7O5OFsEgnaxC5wg2shiPaE86M+lEAWf6U3X11VejrKwM69evR15eHvLy8rB+/Xp4PB5cffXVwVgjEanALds7yZNJUos0SOcINlKNbE86g3RSFDPpRHXD9LfAokWLsGTJErRv377iWPv27fHSSy9h6NChAV0cESmEmXSyCNmedDubHJJiZHPSNV78JEUxk05UN0x/qlJTU1FWZizPcrvdaNq0aUAWRUQKYuM4sgiWu5NV6GUcwUbWoeu6OJNuZyadKNBMB+lPPfUUbr75Zvzxxx8Vx/744w/ceuutePrppwO6OCJSCBvHkUUwSCer0CXbiDSOYCMF6R4dMCbSOYKNKAhMn7FcccUVKCoqQv/+/eE4tmfK5XLB4XDgyiuvxJVXXllx37y8vMCtlIjqF+ekk0W4dO5JJ4vgnnSyELdg/BoA2JlJJwo4098Czz//fBCWQUTKc7PcnayBmXSyCs5JJyvxuIyl7gAz6UTBYPqMZdKkScFYBxGpTlbubufJJKnFLan6sGus+iC1cE46WYlb0DQOAOzs7k4UcH5d+iosLDT1pGbvT0QWwO7uZBHScne+V0kxnJNOVuIRNI0DABu7uxMFnF+fqjZt2uDxxx/H3r17pffRdR3z5s3D6NGj8eKLLwZsgUSkCGmQzowPqUVW7s4RbKQc7kknC2Emnaju+PUtsHDhQtx777148MEH0b17d/Tp0wdNmzZFWFgY8vPzsW7dOixduhQOhwP33HMPrrvuumCvm4jqGru7k0XIgnQn36ukGHm5O4N0Ug8z6UR1x69vgfbt2+Pzzz9HVlYWPv30UyxevBhLlizB0aNHkZCQgJ49e2L27NkYPXo07BwbQtQwsdydLELaOI7d3Ukx8sZxfK+SephJJ6o7pr4FWrRogdtvvx233357sNZDRKqSdXdn4zhSjFuXNI5juTupRjInHUx4kIKYSSeqIHBS/AAAW4xJREFUO/xUEZF/OCedLIIj2MgqpJl0Jy9+knrkmXSGE0SBxk8VEfnHwznpZA2y7u4cwUaq0ctY7k7WIZ2Tbme5O1GgMUgnIv+wcRxZBBvHkVXoknJ3jeXupCC3pNydmXSiwOOnioj8w8ZxZBEsdyfLYOM4shCPpNzdxsZxRAHHIJ2I/CNrcMQ96aQYt6R/AsvdSTWyEWyck04qkmbS2TiOKOBMf6rS09Mxffp0ZGVlBWM9RKQqWSad3d1JMcykk1UIG8fZbNBsDHpIPcykE9Ud098C//rXv/DFF1+gVatWOOOMM/Dxxx+jpKQkGGsjIpWw3J0sQto4jlUfpBjdZWzIyVJ3UhUz6UR1p1ZB+qpVq7B8+XJ07NgRN998M1JSUnDTTTdh5cqVwVgjEamA3d3JItg4jizDLSh3
Z5BOimImnaju1PrSV69evfDiiy9iz549eOCBB/DGG2+gb9++6NGjB9566y3ouviDTEQWJZ2TzhNKUossSOeedFKNaAQbM+mkKrdkBBsz6USBV+tvgrKyMvzvf//D22+/jXnz5mHAgAG46qqrsGvXLtx7772YP38+Pvzww0CulYjqE8vdySLcuviCEvekk2pEe9IZpJOqPG5m0onqiulvgpUrV+Ltt9/GRx99BJvNhokTJ+K5555Dhw4dKu5z/vnno2/fvgFdKBHVM7ek3J2N40gxbBxHViGak84Z6aQqaSadc9KJAs70GUvfvn1xxhln4NVXX8V5550Hp9N4gt6yZUtccsklAVkgESmCmXSyCJa7k2WIurs7+TuV1ORh4ziiOmP6m2Dbtm1IS0vzeZ/IyEi8/fbbtV4UESmIe9LJImTd3dk4jlQjmpOu2fk7ldTkZuM4ojpj+tJXbm4uli1bZji+bNky/PHHHwFZFBEpSNTdXbMDGr+cSS1uyQUljmAj1XBPOlkJM+lEdcf0p2rKlCnYuXOn4fju3bsxZcqUgCyKiBQkKiFmFp0UJCp3t2k22DSeSJJaOCedrESaSbfzYj1RoJk+Y1m3bh169eplON6zZ0+sW7cuIIsiIgWJGscxSCcFiYJ07kcnJQnK3TknnVTlETSOs9k0aDYG6USBZjpIDw0NRU5OjuH43r174eAXC1HDJSoh5t5JUpBoBBs7u5OKWO5OVuIWjGDjfnSi4DAdpI8cORL33HMPDh48WHGsoKAA9957L84444yALo6IFMJyd7KIMkH/BAbppCIG6WQlokw6x68RBYfpb4Knn34ap5xyCtLS0tCzZ08AwKpVq5CUlIT3338/4AskIkWIGscx8CEFCTPpGt+rpB7OSScrEWbSuR+dKChMn7U0a9YMf//9Nz744AOsXr0a4eHhmDx5MiZMmCCcmU5EDYQwk87PPKlHtCedmXRSUhnnpJN1MJNOVHdq9U0QGRmJa6+9NtBrISKVifakc6QVKUjYOI7vVVKQ7uacdLIOZtKJ6k6tvwnWrVuHrKwslJaWVjk+duzYE14UESlI1N3dzkw6qUc0J53l7qQi7kknK2EmnajumP4m2LZtG84//3ysWbMGmqZB171X1TTNeyXNLbgqTEQNABvHkUWU6WwcR9YgnJPOcndSlNstGMFmZ5BOFAymP1m33norWrZsidzcXEREROCff/7BL7/8gj59+mDhwoVBWCIRKYFBOlmEMJPO9yqpSDQnneXupCiPy1jubucINqKgMP1NsHTpUixYsAAJCQmw2Wyw2WwYMmQIZsyYgVtuuQV//fVXMNZJRPWNQTpZBBvHkVWw3J2sxC0od2cmnSg4TH+y3G43oqOjAQAJCQnYs2cPACAtLQ0bN24M7OqISB0M0skiRCPY7Bobx5F6hI3jGKSTokSN45hJJwoO098EXbp0werVq9GyZUv0798fTz75JEJCQjBr1iy0atUqGGskIhWIGscxSCcFMZNOllEm+L3q4AUlUpOocZyNjeOIgsL0Wct9992HwsJCAMD06dNxzjnnYOjQoYiPj8ecOXMCvkAiUoRoBBu7u5OChCPYmEknBbHcnaxEmEnnCDaioDD9TTBq1KiK/9+mTRts2LABeXl5aNy4cUWHdyJqgITl7gx8SD0u3fheddp4QYnUwznpZCXMpBPVHVOfrLKyMjgcDqxdu7bK8bi4OAboRA2dh+XuZA0sdycr0HUd4J50shBm0onqjqkg3el0okWLFpyFTnQyEmbSmZ0k9YhGsNlZ9UGqEe1HB+ekk7qYSSeqO6Y/Wf/5z39w7733Ii8vLxjrISJVifakM/AhBQkz6RoDH1KLqNQdAOekk7KYSSeqO6a/CV5++WVs2bIFTZs2RVpaGiIjI6vcvnLlyoAtjogUIuruzsZxpCBh4zheUCLFiJrGASx3J3Uxk05Ud0x/E5x33nlBWAYRKY9z0skiRI3juCedVCMP0nlBidSj6zo8
wkw6g3SiYDB91vLAAw8EYx1EpDoG6WQRLHcnS5AE6WAmnRQkCtABwOZguTtRMPDyFxH5h0E6WYRbN+71ZSadVCPPpHMbEanHLSh1BwA7y92JgsL0WYvNZvM5bo2d34kaKAbpZBEcwUZWIGscp9lZ7k7qkWbS2TiOKChMn7X873//q/JzWVkZ/vrrL7z77rt46KGHArYwIlIMg3SyCOEINo2BD6lF5wg2shBm0onqlulvgnPPPddw7KKLLkLnzp0xZ84cXHXVVQFZGBEpRNfFQTq7u5NidF1n4ziyBu5JJwthJp2obgXs8teAAQOQkZERqKcjIpWIZqQDnJNOyhEF6ACDdFKPvNyd71VSDzPpRHUrIJ+so0eP4sUXX0SzZs0C8XREpBqPuCyT5e6kGlGpO8AgndSjl0kax7HcnRTkcTGTTlSXTH8TNG7cuErjOF3XcfjwYUREROC///1vQBdHRIoQlboDgI3l7qQWUdM4gEE6KcgtCdLZOI4U5HYzk05Ul0yftTz33HNVgnSbzYYmTZqgf//+aNy4cUAXR0SKkAbpDHxILaLxawAbx5F6ZCPYuCedVCTNpHNOOlFQmP4muOKKK4KwDCJSmiTjwz3ppJoyydYMZtJJNZyTTlYizaTbmUknCgbTn6y3334bn376qeH4p59+infffTcgiyIixcgy6ezuToqR7knXGKSTWnSXpHGcgxc/ST0eSeM4G8vdiYLC9CdrxowZSEhIMBxPTEzEY489FpBFEZFiWO5OFsHu7mQVuksyJ53l7qQgt2QEm52N44iCwnSQnpWVhZYtWxqOp6WlISsrKyCLIiLFSLu7M5NOapE1jrNzawapRjKCjXvSSUXMpBPVLdOfrMTERPz999+G46tXr0Z8fHxAFkVEiuGcdLIIjmAjq5COYGOQTgpySxrHMZNOFBymg/QJEybglltuwc8//wy32w23240FCxbg1ltvxSWXXBKMNRJRfWO5O1mEtHEc96STYuSN4/heJfV4JI3jmEknCg7T3wQPP/wwMjMzcfrpp8Nx7IvE4/Fg4sSJ3JNO1FC5JeXubBxHipGNYGMmnZQjm5rBOemkIGkmnSPYiILC9FlLSEgI5syZg0ceeQSrVq1CeHg4unbtirS0tGCsj4hUwEw6WYRsTzqDdFINR7CRlUgz6RzBRhQUtT5radu2Ldq2bRvItRCRqqRBOjM+pBZp4ziN71VSi3RPupMXlEg9zKQT1S3Tl78uvPBCPPHEE4bjTz75JC6++OKALIqIFCMN0pnxIbWw3J2sQpeUu2ssdycFMZNOVLdMf7J++eUXnHXWWYbjo0ePxi+//BKQRRGRYljuThYhbRzH9yqpho3jyELckhFsdjaOIwoK05+sI0eOICQkxHDc6XTi0KFDAVkUESlG1jiOgQ8pRjqCjd3dSTG6i3PSyTo8bnG5u40j2IiCwnSQ3rVrV8yZM8dw/OOPP0anTp0CsigiUoxsTrqdJ5OkFjaOI6vgCDayEmbSieqW6W+CadOm4YILLsDWrVsxfPhwAEBGRgY++ugjfPrppwFfIBEpgOXuZBGyPel2NjkkxegucYUSg3RSkUfSOM7GxnFEQWH6m2DMmDH48ssv8dhjj+Gzzz5DeHg4unXrhvnz5+PUU08NxhqJqL5J9vmycRyphpl0sgw3K5TIOtyyxnE2BulEwVCrb4Kzzz4bZ599dqDXQkSqYiadLELaOI570kkxHMFGViLKpNscGjSNQTpRMHAjCRHVTLYnnSXEpBiOYCOr0CWZdJa7k4pEmXQ7x68RBY3pbwK3243nnnsOn3zyCbKyslBaWlrl9ry8vIAtjogUIevubme5O6mF5e5kFbI96bAx8CH1yDLpRBQcpr8JHnroITz77LMYP348Dh48iKlTp+KCCy6AzWbDgw8+GIQlElG9Y7k7WYRsBJtdY9UHKUbU3d3pZPkwKYmZdKK6ZfrT9cEHH2D27Nm4/fbb4XA4MGHCBLzxxhu4//778fvv
vwdjjURU3xikk0W4dGbSyRpEc9I1Oy8mkZqYSSeqW6aD9OzsbHTt2hUAEBUVhYMHDwIAzjnnHHz33Xe1WsTMmTORnp6OsLAw9O/fH8uXL/frcR9//DE0TcN5551Xq9clIj8xSCeLYLk7WYVoTjr3o5OqmEknqlumP13NmzfH3r17AQCtW7fGTz/9BABYsWIFQkNDTS9gzpw5mDp1Kh544AGsXLkS3bt3x6hRo5Cbm+vzcZmZmbjjjjswdOhQ069JRCYxSCeLYJBOViHak84gnVTlcRmDdJuDQTpRsJj+dJ1//vnIyMgAANx8882YNm0a2rZti4kTJ+LKK680vYBnn30W11xzDSZPnoxOnTrhtddeQ0REBN566y3pY9xuNy677DI89NBDaNWqlc/nLykpwaFDh6r8ISKTZI3jGPiQYmRBOvekk3IE5e5gkE6KcgvK3e0sdycKGtPfBo8//njF/x8/fjzS0tKwZMkStG3bFmPGjDH1XKWlpfjzzz9xzz33VByz2WwYMWIEli5dKn3c9OnTkZiYiKuuugqLFy/2+RozZszAQw89ZGpdRFSNLJPO7u6kGI5gI6tguTtZiUdQ7m5juTtR0Jzwt8GAAQMwYMCAWj12//79cLvdSEpKqnI8KSkJGzZsED7m119/xZtvvolVq1b59Rr33HMPpk6dWvHzoUOHkJqaWqv1Ep20OCedLILl7mQVojnpDNJJVcykE9UtS30bHD58GJdffjlmz56NhIQEvx4TGhpaq73yRFSJR1buzkw6qUXa3V2z1NcdnQSEe9LZ3Z0UxUw6Ud2q17OWhIQE2O125OTkVDmek5OD5ORkw/23bt2KzMzMKmX1Ho/3l4bD4cDGjRvRunXr4C6a6GTExnFkEdI96az6INWUieak83cqqYmZdKK6Va+XwEJCQtC7d++KRnSAN+jOyMjAwIEDDffv0KED1qxZg1WrVlX8GTt2LIYNG4ZVq1axjJ0oWBikk0WIgnSbZoNNY8aH1CIsd7fzdyqpiZl0orpV798GU6dOxaRJk9CnTx/069cPzz//PAoLCzF58mQAwMSJE9GsWTPMmDEDYWFh6NKlS5XHx8bGAoDhOBEFkFsWpDM7SWpxC/onsNSdVMTGcWQlzKQT1S3T3watWrXCihUrEB8fX+V4QUEBevXqhW3btpl6vvHjx2Pfvn24//77kZ2djR49emDu3LkVzeSysrJgs/FKHVG9EmXSbQ5A4xc0qUW0J51N40hFnJNOVsJMOlHdMv1tkJmZCbegRKukpAS7d++u1SJuuukm3HTTTcLbFi5c6POx77zzTq1ek4hMkAXpRIoRlbtzPzopiXPSyUKYSSeqW35/G3z99dcV///HH39ETExMxc9utxsZGRlIT08P6OKISBGi7u7s7E4KEgXpTr5XSUEsdycr8bgEmXQHM+lEweL3t8F5550HANA0DZMmTapym9PpRHp6Op555pmALo6IFCGak87sJClImEnX+F4l9eiCXh8M0klVbrcgk85yd6Kg8fvboHzUWcuWLbFixQq/55QTUQPgFmXSeTJJ6nHrgsZxfK+SikQj2By8oETq8Xh06B5jkG5juTtR0Jg+c9m+fbvhWEFBQUWXdSJqgER70u0sISb1MJNOViEud+fvVVKPqGkcwEw6UTCZ/nQ98cQTmDNnTsXPF198MeLi4tCsWTOsXr06oIsjIkWwcRxZBLu7k1WI56TzghKpxyNoGgcwk04UTKaD9Ndeew2pqakAgHnz5mH+/PmYO3cuRo8ejX//+98BXyARKYBBOlmEKJPOIJ1UxMZxZBVuZtKJ6pzpb4Ps7OyKIP3bb7/FuHHjMHLkSKSnp6N///4BXyARKYBBOlmEW9DkkEE6KUkUpDv5XiX1MJNOVPdMXwJr3Lgxdu7cCQCYO3cuRowYAQDQdV04P52IGgAG6WQRwky6xvcqqUeUSYed71VSj1swfg1gJp0omEx/G1xwwQW49NJL0bZtWxw4cACjR48G
APz1119o06ZNwBdIRAoQjAriySSpSNg4juMCSUEsdyer8AjGrwGAnZl0oqAx/W3w3HPPIT09HTt37sSTTz6JqKgoAMDevXtx4403BnyBRKQAZtLJIjiCjaxC2DiOI9hIQbJMus3BTDpRsJg+c3E6nbjjjjsMx2+77baALIiIFMQgnSyizFNmOMZyd1KNrutAmfG9CmbSSUHSTLqdmXSiYKnVJbD3338fQ4YMQdOmTbFjxw4AwPPPP4+vvvoqoIsjIkUIAh/YOM+X1MNMOlmCpIcP56STiphJJ6p7pj9dr776KqZOnYrRo0ejoKCgollcbGwsnn/++UCvj4hUIOiYDe7zJQVxBBtZgajUHeCcdFKThyPYiOqc6U/XSy+9hNmzZ+M///kP7JW+TPr06YM1a9YEdHFEpAiWu5NFiEaw2TUGPqQWvUzwOxUcwUZqcnMEG1GdMx2kb9++HT179jQcDw0NRWFhYUAWRUSKcQvK3e0syyT1MJNOliCamAFwTzopiSPYiOqe6U9Xy5YtsWrVKsPxuXPnomPHjoFYExGphpl0sgiXzhFspD7hjHQAGkdbkoJkjeOYSScKHr+/DaZPn4477rgDU6dOxZQpU1BcXAxd17F8+XJ89NFHmDFjBt54441grpWI6guDdLIIUSbdySaHpBhpkM5MOilImkln4ziioPH72+Chhx7C9ddfj6uvvhrh4eG47777UFRUhEsvvRRNmzbFCy+8gEsuuSSYayWi+sIgnSyC5e5kCdIgnVUfpB5pJp0j2IiCxu8zF10//gG97LLLcNlll6GoqAhHjhxBYmJiUBZHRIpgkE4WIRrBxsZxpBpZJp170klF0u7uzKQTBY2pbwNNq3rFLCIiAhEREQFdEBEpSNTkiI3jSEHMpJMVyMvd+XuV1CPt7s5MOlHQmDpzadeunSFQry4vL++EFkREChJm0pmdJLXous5MOlmC7pLMSWe5OylIlkm3sbs7UdCYCtIfeughxMTEBGstRKQqlruTBYg6uwNsHEfq0V2CsZZg4zhSkyyTbmd3d6KgMfVtcMkll3D/OdHJyCM4oWTgQ4oRlboDHMFGCnKLM+nck04qYiadqO75/emqqcydiBoojwfQBV/QzKSTYtweceDDPemkGs5JJythJp2o7vkdpFfu7k5EJxHBHl8A3JNOypFl0h0aAx9Si14mCdKdfK+SejySOek2dncnChq/vw08HvEHlIgaOLd47yS7u5NqZHvSWe5OyhFNzAAAO9+rpB63ZE66nd3diYKGl8CIyDdJdpLl7qQaWSadjeNINRzBRlYizKRrgGZjkE4ULAzSicg3aZDOk0lSi7RxHEewkWLkI9h48ZPUI8qk2+029qsiCiIG6UTkmzRIZ+BDahHNSAfYOI7UIx/Bxt+rpB5RJt3GpnFEQcUgnYh8Y7k7WYS0cRzfq6Qaabk736ukHrdgBJud49eIgoqfMCLyjY3jyCJY7k5WocvmpHMEGynIIxjBxkw6UXAxSCci35hJJ4uQdXdnJp1UwxFsZCVuQbk7M+lEwcVPGBH55uGcdLIGlruTVcj3pPO9SurxCBrHMZNOFFwM0onIN4+k3J3d3UkxbskFJQbppBxpuTsvfpJ6hJl0B0MIomDiJ4yIfGO5O1kE96STVcjL3Xnxk9QjzKTbmUknCiYG6UTkG4N0sgjuSSerkDWOY7k7qYiZdKK6x08YEfnmlgTp7EJMiuGedLIK6Z50lruTgphJJ6p7DNKJyDdm0skipHvSNb5XSTGSOelgJp0UxEw6Ud3jJ4yIfGPjOLIIlruTVegulruTdYgz6QwhiIKJnzAi8o2ZdLIIaeM4jgskxeiSTDqDdFKROJPOcneiYGKQTkS+cU46WQT3pJNVcE46WYlHEKQzk04UXPyEEZFvbkm5u53l7qQWaZDOPemkGkm5O/ekk4rcgnJ3ZtKJgotBOhH5xnJ3sgi3Lmkcx/cqKUZY7m63Q9MY+JB6hJl0No4jCip+wojI
NwbpZBEsdyer0AWjLVnqTqoSZtI5go0oqBikE5FvDNLJIqSN4zT2TyC1iDLpnJFOqmImnaju8RNGRL4xSCeLYLk7WUaZ4Peqk30+SD0ejw7dmEhnJp0oyBikE5FvDNLJIso84iaHDNJJNbrbeEGJmXRSkSiLDjCTThRs/IQRkW/s7k4W4ZaMC2R3d1KNsNyde9JJQaL96AAz6UTBxiCdiHzjnHSyCJfOxnFkEWwcRxbBTDpR/eAnjIh8Y7k7WYQsk27nBSVSjC7ak84gnRTkdsky6QwhiIKJnzAi8k2yzxc2lruTWjiCjayC5e5kFR63LJPOcneiYGKQTkS+MZNOFiFtHMc96aQY4Zx0No4jBbkl5e52lrsTBRU/YUTkm+BkEgBgZ+BDahGNYLNrdmgaMz6kGOEINv5OJfV4JI3jbGwcRxRUDNKJyDdm0skiROXudo3ZSVKPuNydW4hIPcykE9UPfsKIyDcG6WQRokw696OTijgnnayCmXSi+sEgnYh8kwbpzPqQWkSZdAbppCI2jiOrYCadqH7wE0ZEvgmDdA2w8dcHqYVBOlmGKEjnnnRSkEcygo2ZdKLg4lk2EfkmCtIZ+JCChEE6O7uTgkSZdDbjJBW5JSPYmEknCi5+wojIN7dgrJWdpe6kHmHjOBv3+ZJ6WO5OViHLpNs5J50oqBikE5FvHmODI2bSSUVsHEdWIZyT7uAFJVKPLJNuszOEIAomfsKIyDdhuTtPJkk9HMFGliGak85MOinIw8ZxRPWCnzAi8s0jKHdnZ3dSkEtn4ziyBuEINs5JJwW5OYKNqF4wSCci39g4jixClEl38oISKUi4J51z0klBzKQT1Q9+wojIN8HeSXYhJhWx3J2sQhikcwQbKcjNEWxE9YJBOhH5xkw6WYRb0OSQ5e6kJNEINu5JJwVxBBtR/eAnjIh8Y5BOFiHak84RbKQicbk7f6+SemQj2GwcwUYUVAzSicg3No4jixCVuzOTTqrRPR7AY8xOck46qUiaSecINqKg4ieMiHwTzklndpLUwyCdLEFU6g7OSSc1MZNOVD8YpBORbyx3J4tw64I96Rrfq6QWUak7AO5JJyXJMuk2ZtKJgoqfMCLyzS0od7ez3J3Uw0w6WYFoRjrAOemkJlEmXbNpsNmYSScKJgbpROQbM+lkERzBRlYgy6Sz3J1UJMqk2zl+jSjoGKQTkW/ck04WISx35wUlUoxeJqhOAhvHkZo8LmOQbuP4NaKg46eMiHxjd3eyiDLBe5VBOilHUu4OjmAjBbkF5e52No0jCjoG6UTkG8vdySLcgqoPBumkGnm5O9+rpB6PoNydTeOIgo+fMiLyTRSks3EcKUjYOI7d3UkxepkkSHfyvUrqYSadqH4wSCci39yiTDr3pJN6XLqgcRzfq6Qa0e9UALDzvUrqYSadqH7wU0ZEvrHcnSzAo3vg0Y0nkyx3J9XIy91ZoUTqYSadqH4wSCci39g4jixAtB8dYLk7qUd3yeak871K6mEmnah+8FNGRL4JR7DxZJLUIip1B5hJJ/XoLtkINpa7k3qYSSeqHwzSicg3Ybk7TyZJLaKmcQCDdFKQpNwdzKSTgphJJ6of/JQRkW9uQdaH3d1JMbJyd7vGC0qkFl0yJ13jnHRSEDPpRPWD3wgSbrcbZWXikjSyDqfTCTs75p4YNo4jC2C5O1kFR7CRlXhcgky6gzk+omDjN0I1uq4jOzsbBQUF9b0UCpDY2FgkJydD03jl1zSPB4DxKjqDdFINy93JKnTJCDY2jiMVuQXl7naWuxMFHb8RqikP0BMTExEREcHAzsJ0XUdRURFyc3MBACkpKfW8IgsSdXYHGKSTchikk2XI9qSz6osU5BGUu9tY7k4UdDx7qcTtdlcE6PHx8fW9HAqA8PBwAEBubi4SExNZ+m6WJPBhkE6qkQXp3JNOquGcdLISZtKJ6gc/ZZWU70GPiIio55VQIJX/92SPgVqQBelsHEeKceuSOem8oESKkc5J
5550UhAz6UT1g0G6AEvcGxb+9zwBkr2THMFGqmG5O1mFdE46K71IQcykE9UPfsqISI7l7mQRLHcny+CcdLIIXdeZSSeqJwzSiUhO2jiO5e6kFtkINiffq6QYabk7g3RSjMcjmO4CZtKJ6gI/ZQ1IdnY2br75ZrRq1QqhoaFITU3FmDFjkJGREdDXOe200/Cvf/0roM9p5nm/+OILjBw5EvHx8dA0DatWrQr4WugYZtLJIqSZdG7NIMXIG8fx9yqpRZRFB5hJJ6oLDNIbiMzMTPTu3RsLFizAU089hTVr1mDu3LkYNmwYpkyZUt/LC6jCwkIMGTIETzzxRH0vpeHziDM+3JNOqnFL3qsOjYEPKYZz0ski3C7jfnQAsDsYPhAFmxLfCDNnzsRTTz2F7OxsdO/eHS+99BL69esnvO/s2bPx3nvvYe3atQCA3r1747HHHpPe/0Tc/fnf2JhzOODPa0b7pGg8fmG3Gu934403QtM0LF++HJGRkRXHO3fujCuvvLLi56ysLNx8883IyMiAzWbDmWeeiZdeeglJSUkAgAcffBBffvklbr/9dkybNg35+fkYPXo0Zs+ejejoaFxxxRVYtGgRFi1ahBdeeAEAsH37dqSnp2Pt2rX497//jcWLFyMyMhIjR47Ec889h4SEBCxcuBAjR45ERkYGhg4dCgB48skn8fTTT2PNmjW46667pM9b3eWXXw7Ae2GCgswtKXdnd3dSDDPpZBWyTDrsSpySEVXwuCWZdDsz6UTBVu/fCHPmzMHUqVPx2muvoX///nj++ecxatQobNy4EYmJiYb7L1y4EBMmTMCgQYMQFhaGJ554AiNHjsQ///yDZs2aBXRtG3MO46+sgoA+ZzDk5eVh7ty5ePTRR6sE6OViY2MBAB6PB+eeey6ioqKwaNEiuFwuTJkyBePHj8fChQsr7r9161Z8+eWX+Pbbb5Gfn49x48bh8ccfx6OPPooXXngBmzZtQpcuXTB9+nQAQJMmTVBQUIDhw4fj6quvxnPPPYejR4/irrvuwrhx47BgwYKKUvbLL78cq1evxrZt2zBt2jR8+umnSEpKkj4v1TOWu5NFyPaks7s7qUYvk2TSOYKNFMNMOlH9qfdvhGeffRbXXHMNJk+eDAB47bXX8N133+Gtt97C3Xffbbj/Bx98UOXnN954A59//jkyMjIwceLEOlmzarZs2QJd19GhQwef98vIyMCaNWuwfft2pKamAgDee+89dO7cGStWrEDfvn0BeIP5d955B9HR0QC8meuMjAw8+uijiImJQUhICCIiIpCcnFzx3C+//DJ69uyJxx57rOLYW2+9hdTUVGzatAnt2rXDI488gnnz5uHaa6/F2rVrMWnSJIwdOxYApM9L9YxBOlmELJPOxnGkGl1W7s4RbKQYj2D8GgDY2DiOKOjq9VNWWlqKP//8EyNGjKg4ZrPZMGLECCxdutSv5ygqKkJZWRni4uKEt5eUlODQoUNV/jQ0ui4uR6pu/fr1SE1NrQjQAaBTp06IjY3F+vXrK46lp6dXBOgAkJKSgtzcXJ/PvXr1avz888+Iioqq+FN+0WDr1q0AgJCQEHzwwQf4/PPPUVxcjOeee87vvyPVEwbpZBEcwUZWwcZxZBVuSeM4OxvHEQVdvX4j7N+/H263u2I/dLmkpCRs2LDBr+e466670LRp0yqBfmUzZszAQw89dMJrVVnbtm2haZrf/2Y1cTqrZp40TYPHI76aWu7IkSMYM2aMsJlbSkpKxf9fsmQJAG+Jfl5enrA8nxTCIJ0swq1LGsfxvUqqke1Jd7Lqg9TCTDpR/bH02cvjjz+Ojz/+GAsXLkRYWJjwPvfccw+mTp1a8fOhQ4eqZJJ9aZ8UXfOdgsyfNcTFxWHUqFGYOXMmbrnlFkPgW1BQgNjYWHTs2BE7d+7Ezp07K/4N1q1bh4KCAnTq1MnvNYWEhMDtrnpC3KtXL3z++edIT0+HQ5IN2Lp1K2677TbMnj0bc+bMwaRJ
kzB//nzYbDbp81I9kwXpbBxHimHjOLIK6Zx0lruTYphJJ6o/9RqkJyQkwG63Iycnp8rxnJycGvclP/3003j88ccxf/58dOsm734eGhqK0NDQWq3Pn67qqpg5cyYGDx6Mfv36Yfr06ejWrRtcLhfmzZuHV199FevXr8eIESPQtWtXXHbZZXj++efhcrlw44034tRTT0WfPn38fq309HQsW7YMmZmZiIqKQlxcHKZMmYLZs2djwoQJuPPOOxEXF4ctW7bg448/xhtvvAEA+L//+z+MGjUKkydPxplnnomuXbvimWeewb///W/p85YH8JXl5eUhKysLe/bsAQBs3LgRAJCcnMz97IEm6+7O7CQpRhakcwQbqUZY7q5pDNJJOR5J4zhm0omCr14/ZSEhIejduzcyMjIqjnk8HmRkZGDgwIHSxz355JN4+OGHMXfuXFPBZUPWqlUrrFy5EsOGDcPtt9+OLl264IwzzkBGRgZeffVVAN6y9a+++gqNGzfGKaecghEjRqBVq1aYM2eOqde64447YLfb0alTJzRp0gRZWVlo2rQpfvvtN7jdbowcORJdu3bFv/71L8TGxsJms+HRRx/Fjh078PrrrwPwlsDPmjUL9913H1avXi19XpGvv/4aPXv2xNlnnw0AuOSSS9CzZ0+89tprtf3nIxnOSSeLkHV3Z+M4Uo6gcRz3o5OK3JIRbMykEwWfpvvbdSxIysueX3/9dfTr1w/PP/88PvnkE2zYsAFJSUmYOHEimjVrhhkzZgAAnnjiCdx///348MMPMXjw4IrnKW9WVpNDhw4hJiYGBw8eRKNGjarcVlxcjO3bt6Nly5bS8nmyHv53PQEb5wIfjTcen/g10OrUul8PkcRHGz7CY8seMxz/9vxvkdYorR5WRCS2d9r9KPj00yrHtPBwdPhrZT2tiEgs658D+Oal1Ybj59zcHWmd4+thRUTW5isOra7eL92OHz8e+/btw/3334/s7Gz06NEDc+fOrWgml5WVVaXk+dVXX0VpaSkuuuiiKs/zwAMP4MEHH6zLpRM1fB6Wu5M1uCVVH2wcR6oRlbszk04qkmbS7cykEwWbEt8KN910E2666SbhbQsXLqzyc2ZmZvAXRERe7O5OFsERbGQVojnp3I9OKpLuSXdwTzpRsPFTRkRygpNJAICdQTqpRbYnnZl0Uo6ocZyT71NSj1sygs3OxnFEQcdPGRHJMZNOFsHu7mQVohFsmoMNDkk9HskINhsbxxEFHYN0IpJjkE4W4da5J52sQbgnneXupCC3pNydmXSi4OOnjIjkpI3jmPUhtUgz6QzSSTG6y/h7lY3jSEUeSeM4ZtKJgo9BOhHJcU46WYS0cRzfq6QaUbk796STgqSZdDaOIwo6fsqISE5W7m5nJp3Uwj3pZBWicnc24yQVSTPpHMFGFHQM0olIzs056WQNoiDdrtmhaTyZJLVwTjpZBTPpRPWHnzIikmPjOLIIUeM47kcnFXFOOlkFM+lE9YdBegOSnZ2Nm2++Ga1atUJoaChSU1MxZswYZGRkBPR1TjvtNPzrX/8K6HP6+7xlZWW466670LVrV0RGRqJp06aYOHEi9uzZE/D1EHzsSWfwQ2qRZdKJlFPGOelkDcykE9Uffis0EJmZmRg8eDBiY2Px1FNPoWvXrigrK8OPP/6IKVOmYMOGDfW9xIAoKirCypUrMW3aNHTv3h35+fm49dZbMXbsWPzxxx/1vbyGR9rdnb86SC3MpJNV6G7OSSdrkM5JZyadKOh4BuPL1zcDuevrdw2JHYGxL9V4txtvvBGapmH58uWIjIysON65c2dceeWVFT9nZWXh5ptvRkZGBmw2G84880y89NJLSEpKAgA8+OCD+PLLL3H77bdj2rRpyM/Px+jRozF79mxER0fjiiuuwKJFi7Bo0SK88MILAIDt27cjPT0da9euxb///W8sXrwYkZGRGDlyJJ577jkkJCRg4cKFGDlyJDIyMjB06FAAwJNPPomn
n34aa9aswV133SV93spiYmIwb968Ksdefvll9OvXD1lZWWjRooX5f2OSY7k7WUSZ4IISg3RSEeekk1W43cZMus2usdcHUR3gGYwvueuBXSvqexU1ysvLw9y5c/Hoo49WCdDLxcbGAgA8Hg/OPfdcREVFYdGiRXC5XJgyZQrGjx+PhQsXVtx/69at+PLLL/Htt98iPz8f48aNw+OPP45HH30UL7zwAjZt2oQuXbpg+vTpAIAmTZqgoKAAw4cPx9VXX43nnnsOR48exV133YVx48ZhwYIFFaXsl19+OVavXo1t27Zh2rRp+PTTT5GUlCR9Xn8cPHgQmqZV/D0pgGSN49jdnRQjKndnZ3dSEeekk1V4BOXuNpa6E9UJfis0AFu2bIGu6+jQoYPP+2VkZGDNmjXYvn07UlNTAQDvvfceOnfujBUrVqBv374AvMH8O++8g+joaADA5ZdfjoyMDDz66KOIiYlBSEgIIiIikJycXPHcL7/8Mnr27InHHnus4thbb72F1NRUbNq0Ce3atcMjjzyCefPm4dprr8XatWsxadIkjB07FgCkz1uT4uJi3HXXXZgwYQIaNWrk9+PIT7I96dzrS4pxC96rzKSTkgRz0rknnVTkFjSOs7PUnahO8HJYA6Dr4j1D1a1fvx6pqakVAToAdOrUCbGxsVi//nhZf3p6ekWADgApKSnIzc31+dyrV6/Gzz//jKioqIo/5RcNtm7dCgAICQnBBx98gM8//xzFxcV47rnn/P47ipSVlWHcuHHQdR2vvvrqCT0XSYjK3TUbYOOvDlKLSxc0jrPxYhKpR1zuziCd1MNMOlH94bdCA9C2bVtomhaw5nBOZ9VSZk3T4PGIO3yWO3LkCMaMGYMnnnjCcFtKSkrF/1+yZAkAb4l+Xl6esDzfH+UB+o4dO7BgwQJm0YNF1DjOxlJ3Ug8z6WQVnJNOVsFMOlH94beCL4kd63sFfq0hLi4Oo0aNwsyZM3HLLbcYAt+CggLExsaiY8eO2LlzJ3bu3FmRTV+3bh0KCgrQqVMnv5cUEhICd7XutL169cLnn3+O9PR0OCQnG1u3bsVtt92G2bNnY86cOZg0aRLmz58P27GsrOh5RcoD9M2bN+Pnn39GfHy832snk0SZdAY+pCDhnnS+V0lFwiCdVR+kHmbSieoPz2B88aOruipmzpyJwYMHo1+/fpg+fTq6desGl8uFefPm4dVXX8X69esxYsQIdO3aFZdddhmef/55uFwu3HjjjTj11FPRp0+f/2/vzqOiutK1gT9VBYUBhMKADLllSq6gMqg4NubGIRLA2Gj3TbdoG0SSpddITEjUTK2JvTrEqR1waOMQtZOVtHTaFrO43XhJRWjjiKIgEScEIQlDEAFxxKr9/cFnJQXnEDRYdQqf31qspedsztlV7Bre8+7z7g6fy2Aw4MiRIygrK4O7uzt69OiB5ORkbNmyBVOnTsXrr7+OHj164MKFC9i5cye2bt0KAHjuuecQExODpKQkxMbGIjw8HCtXrsSCBQtkj6tuNa26ubkZv/nNb5Cfn4/MzEyYTCZUVVUBaLlYodVqO+kZJQDS96RzWiYpkGR1dxaOIwWSyqSDmXRSIGbSieyHl8O6iMDAQOTn52Ps2LGYN28ewsLC8PTTT8NoNFru11apVNizZw+8vLwwatQoREVFITAwEOnp6fd0rvnz50Oj0SAkJAQ+Pj4oLy9HQEAADhw4AJPJhOjoaISHhyMlJQU6nQ5qtRqpqam4dOkSNm3aBKBlCvzmzZuxcOFCFBQUyB63tW+//Raff/45vvnmGwwaNAj+/v6Wn7tT6akTSVV3Z3aSFIjrpJOj4Drp5CiYSSeyH5XoaNWxLqKxsRGenp5oaGhocx/zzZs3UVpait69e6Nbt2526iF1Nv5df4bPkoCv/2G9zd0XmH/OPv0hkjElcwq+vvy11bZBPoPw8TMf26lHRNKKw8LbTHn3+t3v4PfOIjv1iEjaruXHUXWxwWpbz8e7
47dvDbNTj4gcW3txaGu8HEZE8iTvSWfGh5SHmXRyBEII6XvSuQQbKZDZ1DaTrmEmncgm+EojInmSQToLHJHySBWO4xJspDhyxVFZ64MUyHSn7WRbNe9JJ7IJBulEJI/V3clBsLo7OQLJonHgEmykTFKZdN6TTmQbfKURkTypwnEaTncn5ZEK0p1VHKukLOKOdCadQTopEau7E9kPg3QiksdMOjmIO4LT3ckB3JG48AkAXCedFIjV3Ynsh680IpIntU46Ax9SIJPEWOV0d1Ia+enunPVBysNMOpH9MEgnInlmqXXS+WWSlEeycJyKF5RIWaTWSAc43Z2UiZl0IvvhK42I5HG6OzkIqenuzKST0ohmuUw6LyiR8jCTTmQ/DNKJSB6DdHIQkoXjOOuDlEb2nnS+r5LyMJNOZD98pRGRPJNEkM71fEmBON2dHIHsdHe+r5LCCCFglsykM3QgsgW+0rqQqqoqzJ07F4GBgXBxcYFer0dcXByMRmOnnmfMmDFISUnp1GPey3EXL16Mfv36wc3NDV5eXoiKisKRI0c6vT8EZtLJYZgEC8eR8nGddHIUUgE6AKidON2dyBb4qdCOxQcX43z9ebv2IUgXhMUjF/9ku7KyMjzxxBPQ6XRYsWIFwsPD0dzcjL179yI5ORlnzpx58J21keDgYKxfvx6BgYG4ceMGVq9ejejoaFy4cAE+Pj727l7XIhmkcwoxKYtZmGEWbadlcgk2UhrZIN2ZX8dIWUwSU90BQMPp7kQ2wU+FdpyvP4/C7wvt3Y0OmTNnDlQqFY4ePQo3NzfL9tDQUDz//POW/5eXl2Pu3LkwGo1Qq9WIjY3FunXr4OvrC6AlS52RkYF58+Zh0aJFuHLlCsaPH48tW7age/fumDFjBnJzc5Gbm4u0tDQAQGlpKQwGA4qKirBgwQLs378fbm5uiI6OxurVq+Ht7Y2cnBxER0fDaDTiySefBAAsX74cf/rTn3Dq1Cm88cYbssdt7Xe/+53V/1etWoUPP/wQhYWFGDduXKc+rw89ySCdgQ8pi9TyawAz6aRAMkE6NHxfJWWRzaSzcByRTfByWBdQV1eHrKwsJCcnWwXod+l0OgCA2WzGpEmTUFdXh9zcXGRnZ+PixYuIj4+3al9SUoKMjAxkZmYiMzMTubm5WLp0KQAgLS0NkZGRmDlzJiorK1FZWQm9Xo/6+no89dRTiIiIwLFjx5CVlYXq6mpMnjwZwA9T2RMSEtDQ0IATJ05g0aJF2Lp1K3x9fWWP+1Nu376NzZs3w9PTEwMHDvyZzyS1wenu5ACapZYKBOCk4lglZeE66eQomEknsi9+g+kCLly4ACEE+vXr1247o9GIU6dOobS01BIAf/TRRwgNDUVeXh6GDRsGoCWY37FjB7p37w4ASEhIgNFoRGpqKjw9PaHVauHq6go/Pz/LsdevX4+IiAi8//77lm3btm2DXq/HuXPnEBwcjPfeew/Z2dmYNWsWioqKkJiYiIkTJwKA7HHlZGZmYsqUKbh+/Tr8/f2RnZ0Nb2/ve3vi6KdJBekafpkkZZG6Hx1gJp2UR9yRKRzH6e6kMMykE9kXL4d1AUJIv5G2VlxcDL1eb5WhDgkJgU6nQ3FxsWWbwWCwBOgA4O/vj5qamnaPXVBQgH379sHd3d3yc/eiQUlJCQBAq9Xik08+wa5du3Dz5k2sXr26w4+xtbFjx+LkyZM4ePAgYmNjMXny5J/sI90Hk0SGkoEPKYxUZXeAQTopj5BZgk3F6e6kMMykE9kXv8G0I0gXZO8udKgPQUFBUKlUnVYcztnZOlOqUqlgNku/Wd/V1NSEuLg4LFu2rM0+f39/y78PHjwIoGWKfl1dneT0/I5wc3NDnz590KdPH/ziF79AUFAQPvzwQ7z11lv3dTySIXWvL+9JJ4WRC9K5BBspjtw96azuTgpjvsNMOpE98VOhHR2pqq4EPXr0QExMDDZs2ICXX365TeBbX18PnU6H/v37o6KiAhUVFZZs
+unTp1FfX4+QkJAOn0+r1cLUaq3XwYMHY9euXTAYDHCS+bJRUlKCV199FVu2bEF6ejoSExPxxRdfQK1Wyx63o8xmM27dunVfv0vtkLrXl9XdSWE43Z0cBddJJ0dhMjGTTmRPfKV1ERs2bIDJZMLw4cOxa9cunD9/HsXFxVi7di0iIyMBAFFRUQgPD8e0adOQn5+Po0ePYvr06Rg9ejSGDh3a4XMZDAYcOXIEZWVlqK2thdlsRnJyMurq6jB16lTk5eWhpKQEe/fuRVJSEkwmE0wmE5577jnExMQgKSkJ27dvR2FhIVauXNnucVu7du0a3n77bRw+fBiXLl3C8ePH8fzzz+Pbb7/Fb3/725//RJI1Fo4jByBbOI5jlRRGNHMJNnIMspl0rpNOZBMM0ruIwMBA5OfnY+zYsZg3bx7CwsLw9NNPw2g0YuPGjQBapq3v2bMHXl5eGDVqFKKiohAYGIj09PR7Otf8+fOh0WgQEhICHx8flJeXIyAgAAcOHIDJZEJ0dDTCw8ORkpICnU4HtVqN1NRUXLp0CZs2bQLQMgV+8+bNWLhwIQoKCmSP25pGo8GZM2fw7LPPIjg4GHFxcbh8+TL279+P0NDQn/kskhUhGKSTQ5Bdgo3V3UlpTHLV3TlWSVlkM+kahg5EtqASHa061kU0NjbC09MTDQ0N8PDwsNp38+ZNlJaWonfv3ujWrZudekidjX/X+2S6A/zx0bbbR84Fot+zfX+IZFy4cgG//vzXbba/G/kufhP8Gzv0iEhaw+ef47vX32iz3fD3v+ORMF5oJuX45kwd9qw52Wb7M3MGoPcArqZDdD/ai0Nb4+UwIpImU4yLmXRSmjuC1d3JMXC6OzkKk8wSbBoWjiOyCQbpRCRNNkhn4ThSFtnp7gzSSWGE3HR3LsFGCmOWWYJNzcJxRDbBVxoRSZMpxsVMOimNbOE43pNOCiNklmDjPemkNCaZwnHMpBPZBoN0IpImk53kOumkNFyCjRzGHZn3VSfOUCJlMcsUjmMmncg2+EojImly0901/DJJynJHZqxqVLygRMoin0nnWCVlkc2kcwk2IptgkE5E0kyc7k6Ogfekk6PgdHdyFLKZdC7BRmQTfKURkTRWdycHweru5DC4Tjo5CGbSieyLQToRSWOQTg5CtnAcxyopjNwSbGCQTgrDTDqRffGVRkTSGKSTg+B0d3IUnO5OjsIkswSbhoXjiGyCrzQiksYgnRwEC8eRo+A66eQozCbp6e5qLsFGZBMM0ruQqqoqzJ07F4GBgXBxcYFer0dcXByMRmOnnmfMmDFISUnp1GPe73Fnz54NlUqFNWvWdHp/HnpyheNY3Z0UhkuwkcOQyaRzujspDTPpRPbFT4V2VC5ahFvnztu1Dy7BQfD/4x9/sl1ZWRmeeOIJ6HQ6rFixAuHh4WhubsbevXuRnJyMM2fO2KC3trV7924cPnwYAQEB9u5K18R10slBMJNOjkJIrZPu5ASVitlJUhazTOE4NQvHEdkEg/R23Dp3HjcKCuzdjQ6ZM2cOVCoVjh49Cjc3N8v20NBQPP/885b/l5eXY+7cuTAajVCr1YiNjcW6devg6+sLAFi8eDEyMjIwb948LFq0CFeuXMH48eOxZcsWdO/eHTNmzEBubi5yc3ORlpYGACgtLYXBYEBRUREWLFiA/fv3w83NDdHR0Vi9ejW8vb2Rk5OD6OhoGI1GPPnkkwCA5cuX409/+hNOnTqFN954Q/a4Ur799lvMnTsXe/fuxYQJEx7EU0qy092ZSSdlkavu7syxSgojdU86p7qTEpnkCsepGaQT2QLnrHQBdXV1yMrKQnJyslWAfpdOpwMAmM1mTJo0CXV1dcjNzUV2djYuXryI+Ph4q/YlJSXIyMhAZmYmMjMzkZubi6VLlwIA0tLSEBkZiZkzZ6KyshKVlZXQ6/Wor6/HU089hYiICBw7dgxZWVmorq7G5MmTAfwwlT0hIQENDQ04ceIEFi1ahK1bt8LX11f2
uFLMZjMSEhKwYMEChIaGduIzSVZkKmbznnRSGrlMOqe7k9KIO23fV1k0jpRIKpOudlJx1geRjfCToQu4cOEChBDo169fu+2MRiNOnTqF0tJSSwD80UcfITQ0FHl5eRg2bBiAliB4x44d6N69OwAgISEBRqMRqamp8PT0hFarhaurK/z8/CzHXr9+PSIiIvD+++9btm3btg16vR7nzp1DcHAw3nvvPWRnZ2PWrFkoKipCYmIiJk6cCACyx5WybNkyODk54eWXX773J4s6joXjyEHITnfnrRmkNBLT3RmkkxJJZdI1XH6NyGb4ydAFCCF931BrxcXF0Ov1VhnqkJAQ6HQ6FBcXW4J0g8FgCdABwN/fHzU1Ne0eu6CgAPv27YO7u3ubfSUlJQgODoZWq8Unn3yCAQMG4PHHH8fq1as71O8fO378ONLS0pCfn8+ruQ+a3D3pGr5tkLLILsGm4lglZZFcgo1BOimQXCadiGyDnwztcAkOsncXOtSHoKAgqFSqTisO5+xsfR+nSqWC2Sx9b9JdTU1NiIuLw7Jly9rs8/f3t/z74MGDAFqm6NfV1UlOz2/P/v37UVNTg169elm2mUwmzJs3D2vWrEFZWdk9HY/aIVfdnZl0Uhi5e9KZSSelESZm0skxMJNOZF/8ZGhHR6qqK0GPHj0QExODDRs24OWXX24T+NbX10On06F///6oqKhARUWFJZt++vRp1NfXIyQkpMPn02q1MLX6ojF48GDs2rULBoMBTjJfOEpKSvDqq69iy5YtSE9PR2JiIr744guo1WrZ47aWkJCAqKgoq20xMTFISEhAUlJShx8DdQCnu5ODkJvuzsJxpDSS96SzcBwpEDPpRPbFS2JdxIYNG2AymTB8+HDs2rUL58+fR3FxMdauXYvIyEgAQFRUFMLDwzFt2jTk5+fj6NGjmD59OkaPHo2hQ4d2+FwGgwFHjhxBWVkZamtrYTabkZycjLq6OkydOhV5eXkoKSnB3r17kZSUBJPJBJPJhOeeew4xMTFISkrC9u3bUVhYiJUrV7Z73NYeffRRhIWFWf04OzvDz88Pffv2/flPJP2A1d3JQXAJNnIYzRJj1ZkXPkl5mEknsi++2rqIwMBA5OfnY+zYsZg3bx7CwsLw9NNPw2g0YuPGjQBapq3v2bMHXl5eGDVqFKKiohAYGIj09PR7Otf8+fOh0WgQEhICHx8flJeXIyAgAAcOHIDJZEJ0dDTCw8ORkpICnU4HtVqN1NRUXLp0CZs2bQLQMgV+8+bNWLhwIQr+/zJ3UsclO5IN0hn4kLKwujs5Cunp7rzwScpjvtM2SFc7MWwgshWV6GjVsS6isbERnp6eaGhogIeHh9W+mzdvorS0FL1790a3bt3s1EPqbPy73qeTnwIZL7bd/j/7Af8Btu8PkYyVx1Zix9c72mw/kXCCgTopyqUZSbh++LDVNpfgYAR+vsdOPSKStntlPr47X2+1zVvvjvjfD7dPh4i6gPbi0NZ4SYyIpMkVjtMw60PKwunu5Ci4Tjo5CrPEdHc1p7sT2QxfbUQkjYXjyEFIBelOKicu00jKI7FOOpdgIyUySRSO07BwHJHNMEgnImly66QzSCeFkVqCjcuvkRJJrZPOTDopETPpRPbFVxsRSTNznXRyDCaJC0q8F52UiOukk6NgJp3IvhikE5E0TncnByE53Z3jlJRI8p50zvog5WEmnci++GojImlyQToLx5HCSAXpLBpHSiSk1klnJp0UiJl0IvtikE5E0kxcJ50cg9Q96cykkxJxnXRyFMykE9kXX21EJI3T3clByFV3J1IaycJxGl74JOVhJp3IvhikE5E02SCdWR9SFpNg4ThyDJLrpDtzrJLymO9IZNKdGDYQ2QpfbUQkjdXdyUFI3pPO2zJIiaTWSdfwPZWUx2SSyKRzujuRzfDV1oVUVVVh7ty5CAwMhIuLC/R6PeLi4mA0Gjv1PGPGjEFKSkqnHvNejjtjxgyoVCqrn9jY2E7v
z0NPdp10Bj+kLKzuTo6C66STIzCbBYS5bZCu5nR3IpvhJ0M79n1cjMvfXbNrHx4NcMPYhP4/2a6srAxPPPEEdDodVqxYgfDwcDQ3N2Pv3r1ITk7GmTNnbNBb24mNjcX27dst/3dxcbFjb7ook0QmXe0EqPghTcrCe9LJUXCddHIEUkXjAGbSiWyJr7Z2XP7uGqpLG+3609GLBHPmzIFKpcLRo0fx7LPPIjg4GKGhoXjttddw+PBhS7vy8nJMmjQJ7u7u8PDwwOTJk1FdXW3Zv3jxYgwaNAgff/wxDAYDPD09MWXKFFy9ehVASxY7NzcXaWlplix2WVkZAKCoqAjjx4+Hu7s7fH19kZCQgNraWgBATk4OtFot9u/fbznX8uXL0bNnT1RXV7d7XCkuLi7w8/Oz/Hh5eXX0z0odJXVPOrOTpECs7k4Oo1ni4ifXSSeFMUsUjQOYSSeyJQbpXUBdXR2ysrKQnJwMNze3Nvt1Oh0AwGw2Y9KkSairq0Nubi6ys7Nx8eJFxMfHW7UvKSlBRkYGMjMzkZmZidzcXCxduhQAkJaWhsjISMycOROVlZWorKyEXq9HfX09nnrqKURERODYsWPIyspCdXU1Jk+eDOCHqewJCQloaGjAiRMnsGjRImzduhW+vr6yx5WTk5ODnj17om/fvnjxxRdx+fLlTno2yUIySGfROFIek8StGQzSSYmkp7vzfZWUxcRMOpHd8VtMF3DhwgUIIdCvX7922xmNRpw6dQqlpaWWAPijjz5CaGgo8vLyMGzYMAAtwfyOHTvQvXt3AEBCQgKMRiNSU1Ph6ekJrVYLV1dX+Pn5WY69fv16RERE4P3337ds27ZtG/R6Pc6dO4fg4GC89957yM7OxqxZs1BUVITExERMnDgRAGSPKyU2Nhb//d//jd69e6OkpARvv/02xo8fj0OHDkHDpWw6j2SQzueXlEeycJyKY5WURZjNgGiboeR0d1IaZtKJ7I+fDF2AkPjQl1JcXAy9Xm+VoQ4JCYFOp0NxcbElSDcYDJYAHQD8/f1RU1PT7rELCgqwb98+uLu7t9lXUlKC4OBgaLVafPLJJxgwYAAef/xxrF69ukP9bm3KlCmWf4eHh2PAgAH4z//8T+Tk5GDcuHH3dUySwOnu5CC4BBs5AqksOgCoON2dFMYksfwawEw6kS3xW0w7Hg1oO3VciX0ICgqCSqXqtOJwzs7WU+9UKhXMZuk37LuampoQFxeHZcuWtdnn7+9v+ffBgwcBtEzRr6urk5yef68CAwPh7e2NCxcuMEjvTFJBuobTMkl5WN2dHIJMkA5m0klhzBLLrwGAhpl0IpvhJ0M7OlJVXQl69OiBmJgYbNiwAS+//HKbwLe+vh46nQ79+/dHRUUFKioqLNn006dPo76+HiEhIR0+n1arhalVhdrBgwdj165dMBgMcJL5wlFSUoJXX30VW7ZsQXp6OhITE/HFF19ArVbLHrcjvvnmG1y+fNnqYgB1Arnq7kQK02xuO1ZZ3Z2URjaTznXSSWHkMulqJ2bSiWyFr7YuYsOGDTCZTBg+fDh27dqF8+fPo7i4GGvXrkVkZCQAICoqCuHh4Zg2bRry8/Nx9OhRTJ8+HaNHj8bQoUM7fC6DwYAjR46grKwMtbW1MJvNSE5ORl1dHaZOnYq8vDyUlJRg7969SEpKgslkgslkwnPPPYeYmBgkJSVh+/btKCwsxMqVK9s9bmtNTU1YsGABDh8+jLKyMhiNRkyaNAl9+vRBTEzMz38i6QdS66TznnRSIE53J0cgP92dY5WURTaTrmEmnchWGKR3EYGBgcjPz8fYsWMxb948hIWF4emnn4bRaMTGjRsBtExb37NnD7y8vDBq1ChERUUhMDAQ6enp93Su+fPnQ6PRICQkBD4+PigvL0dAQAAOHDgAk8mE6OhohIeHIyUlBTqdDmq1Gqmpqbh06RI2bdoEoGUK/ObNm7Fw4UIUFBTIHrc1jUaDwsJCTJw4EcHBwXjhhRcw
ZMgQ7N+/n2uldzZWdycHIVk4jheUSGFkg3RnBumkLMykE9mfSnS06lgX0djYCE9PTzQ0NMDDw8Nq382bN1FaWorevXujW7duduohdTb+Xe/TR5OAiznW23z6A8mH7dIdIjmjdo7ClVtXrLZNCJyApU8utVOPiNpq/u47XHiqbd2Unm++gUdnzLB9h4hkfHf+CnavPNFm+/j/CUdghI8dekTUNbQXh7bGS2JEJE1yujszPqQ8XIKNHIH8dHfOUCJlMXEJNiK7Y5BORNIkq7szSCfluSPajlVn3ppBCiPuSBdG5RJspDRcgo3I/vhqIyJprO5ODoJLsJEjEHck3lPBwnGkPHKF45hJJ7IdBulEJI2F48hBcLo7OQS5JUYZpJPCyGbSWTiOyGb4apPwkNXS6/L497xPkkE6Ax9SFrMwQ6Dta5yZdFIarpNOjkI2k84l2IhshkH6jzg7t2QJr1+/bueeUGe6+/e8+/elDpIM0vllkpRFKosOcAk2Uh7RzCXYyDEwk05kf/xk+BGNRgOdToeamhoAgKurK1QqXjV0VEIIXL9+HTU1NdDpdNBo+KX9nkgWjuOFDlIWuSDdScWPN1IYk1x1d45VUhZm0onsj58Mrfj5+QGAJVAnx6fT6Sx/V7oHLBxHDkCqsjvA6e6kPHLT3cELyKQwzKQT2R+/xbSiUqng7++Pnj17orlZuhIrOQ5nZ2dm0O+X5DrpfC5JWWQz6QzSSWG4Tjo5CrPcOulcgo3IZvgtRoZGo2FwRw83VncnB2CSupgEBumkPPJBOr9rkLKYTHKZdE53J7IVRXyL2bBhA1asWIGqqioMHDgQ69atw/Dhw2Xbf/bZZ1i0aBHKysoQFBSEZcuW4ZlnnrFhjx+cW01XcbW20t7dIAKuq4HbbtbbrqmA7761T3+IJFy+WQuP6y5ttjs1mHCjtsEOPSKSdvPKNdzRdGuzvdmswe0bMlPhieyg+ab0xU81p7sT2YxK2Hl9qvT0dEyfPh0ffPABRowYgTVr1uCzzz7D2bNn0bNnzzbtDx48iFGjRmHJkiX45S9/iU8//RTLli1Dfn4+wsLCfvJ8jY2N8PT0RENDAzw8PB7EQ/pZ9m9dj8JjIfbuBhERERGRxf+sHQ0nLWd+EN2ve4lD7X5JbNWqVZg5cyaSkpIQEhKCDz74AK6urti2bZtk+7S0NMTGxmLBggXo378//vjHP2Lw4MFYv369jXtORERERPRwYCadyHbsOt399u3bOH78ON566y3LNrVajaioKBw6dEjydw4dOoTXXnvNaltMTAwyMjIk29+6dQu3bt2y/L+hoWX6Y2Nj48/s/YNx7cYN3Lh9zd7dICIiIiICAKjUKjQ1XbV3N4gc2t34syMT2e0apNfW1sJkMsHX19dqu6+vL86cOSP5O1VVVZLtq6qqJNsvWbIEf/jDH9ps1+v199lrIiIiIqKHy/wP7d0Doq7h6tWr8PT0bLeNIgrHPUhvvfWWVebdbDajrq4Ojz76KFQq+1WpbGxshF6vR0VFhSLvjSe6i2OVHAXHKjkCjlNyFByr5CgcZawKIXD16lUEBAT8ZFu7Bune3t7QaDSorq622l5dXQ0/Pz/J3/Hz87un9i4uLnBxsa78q9Pp7r/TnczDw0PRg4noLo5VchQcq+QIOE7JUXCskqNwhLH6Uxn0u+xaAUKr1WLIkCEwGo2WbWazGUajEZGRkZK/ExkZadUeALKzs2XbExERERERETkKu093f+2115CYmIihQ4di+PDhWLNmDa5du4akpCQAwPTp0/HYY49hyZIlAIBXXnkFo0ePxsqVKzFhwgTs3LkTx44dw+bNm+35MIiIiIiIiIh+NrsH6fHx8fj+++/xzjvvoKqqCoMGDUJWVpalOFx5eTnU6h8S/iNHjsSnn36KhQsX4u2330ZQUBAyMjI6tEa6kri4uODdd99tMxWfSGk4VslRcKySI+A4JUfBsUqOoiuOVZXoSA14IiIiIiIiInrg7HpP
OhERERERERH9gEE6ERERERERkUIwSCciIiIiIiJSCAbpRERERERERArBIN1ONmzYAIPBgG7dumHEiBE4evSovbtEXdi///1vxMXFISAgACqVChkZGVb7hRB455134O/vj0ceeQRRUVE4f/68VZu6ujpMmzYNHh4e0Ol0eOGFF9DU1GTVprCwEE8++SS6desGvV6P5cuXP+iHRl3IkiVLMGzYMHTv3h09e/bEr371K5w9e9aqzc2bN5GcnIxHH30U7u7uePbZZ1FdXW3Vpry8HBMmTICrqyt69uyJBQsW4M6dO1ZtcnJyMHjwYLi4uKBPnz7YsWPHg3541IVs3LgRAwYMgIeHBzw8PBAZGYl//etflv0cp6RES5cuhUqlQkpKimUbxyopxeLFi6FSqax++vXrZ9n/0I1VQTa3c+dOodVqxbZt28TXX38tZs6cKXQ6naiurrZ316iL+uc//yl+//vfi3/84x8CgNi9e7fV/qVLlwpPT0+RkZEhCgoKxMSJE0Xv3r3FjRs3LG1iY2PFwIEDxeHDh8X+/ftFnz59xNSpUy37GxoahK+vr5g2bZooKioSf/3rX8UjjzwiNm3aZKuHSQ4uJiZGbN++XRQVFYmTJ0+KZ555RvTq1Us0NTVZ2syePVvo9XphNBrFsWPHxC9+8QsxcuRIy/47d+6IsLAwERUVJU6cOCH++c9/Cm9vb/HWW29Z2ly8eFG4urqK1157TZw+fVqsW7dOaDQakZWVZdPHS47r888/F//7v/8rzp07J86ePSvefvtt4ezsLIqKioQQHKekPEePHhUGg0EMGDBAvPLKK5btHKukFO+++64IDQ0VlZWVlp/vv//esv9hG6sM0u1g+PDhIjk52fJ/k8kkAgICxJIlS+zYK3pYtA7SzWaz8PPzEytWrLBsq6+vFy4uLuKvf/2rEEKI06dPCwAiLy/P0uZf//qXUKlU4ttvvxVCCPHnP/9ZeHl5iVu3blnavPHGG6Jv374P+BFRV1VTUyMAiNzcXCFEy7h0dnYWn332maVNcXGxACAOHTokhGi5IKVWq0VVVZWlzcaNG4WHh4dlbL7++usiNDTU6lzx8fEiJibmQT8k6sK8vLzE1q1bOU5Jca5evSqCgoJEdna2GD16tCVI51glJXn33XfFwIEDJfc9jGOV091t7Pbt2zh+/DiioqIs29RqNaKionDo0CE79oweVqWlpaiqqrIak56enhgxYoRlTB46dAg6nQ5Dhw61tImKioJarcaRI0csbUaNGgWtVmtpExMTg7Nnz+LKlSs2ejTUlTQ0NAAAevToAQA4fvw4mpubrcZqv3790KtXL6uxGh4eDl9fX0ubmJgYNDY24uuvv7a0+fEx7rbhezDdD5PJhJ07d+LatWuIjIzkOCXFSU5OxoQJE9qMJ45VUprz588jICAAgYGBmDZtGsrLywE8nGOVQbqN1dbWwmQyWQ0gAPD19UVVVZWdekUPs7vjrr0xWVVVhZ49e1rtd3JyQo8ePazaSB3jx+cg6iiz2YyUlBQ88cQTCAsLA9AyjrRaLXQ6nVXb1mP1p8ahXJvGxkbcuHHjQTwc6oJOnToFd3d3uLi4YPbs2di9ezdCQkI4TklRdu7cifz8fCxZsqTNPo5VUpIRI0Zgx44dyMrKwsaNG1FaWoonn3wSV69efSjHqpO9O0BERNRacnIyioqK8NVXX9m7K0SS+vbti5MnT6KhoQF///vfkZiYiNzcXHt3i8iioqICr7zyCrKzs9GtWzd7d4eoXePHj7f8e8CAARgxYgQef/xx/O1vf8Mjjzxix57ZBzPpNubt7Q2NRtOmGmF1dTX8/Pzs1Ct6mN0dd+2NST8/P9TU1Fjtv3PnDurq6qzaSB3jx+cg6oiXXnoJmZmZ2LdvH/7jP/7Dst3Pzw+3b99GfX29VfvWY/WnxqFcGw8Pj4fyiwDdH61Wiz59+mDIkCFYsmQJBg4ciLS0NI5TUozjx4+jpqYGgwcPhpOTE5yc
nJCbm4u1a9fCyckJvr6+HKukWDqdDsHBwbhw4cJD+b7KIN3GtFothgwZAqPRaNlmNpthNBoRGRlpx57Rw6p3797w8/OzGpONjY04cuSIZUxGRkaivr4ex48ft7T58ssvYTabMWLECEubf//732hubra0yc7ORt++feHl5WWjR0OOTAiBl156Cbt378aXX36J3r17W+0fMmQInJ2drcbq2bNnUV5ebjVWT506ZXVRKTs7Gx4eHggJCbG0+fEx7rbhezD9HGazGbdu3eI4JcUYN24cTp06hZMnT1p+hg4dimnTpln+zbFKStXU1ISSkhL4+/s/nO+r9q5c9zDauXOncHFxETt27BCnT58Ws2bNEjqdzqoaIVFnunr1qjhx4oQ4ceKEACBWrVolTpw4IS5duiSEaFmCTafTiT179ojCwkIxadIkySXYIiIixJEjR8RXX30lgoKCrJZgq6+vF76+viIhIUEUFRWJnTt3CldXVy7BRh324osvCk9PT5GTk2O1BMv169ctbWbPni169eolvvzyS3Hs2DERGRkpIiMjLfvvLsESHR0tTp48KbKysoSPj4/kEiwLFiwQxcXFYsOGDYpdgoWU6c033xS5ubmitLRUFBYWijfffFOoVCrxf//3f0IIjlNSrh9XdxeCY5WUY968eSInJ0eUlpaKAwcOiKioKOHt7S1qamqEEA/fWGWQbifr1q0TvXr1ElqtVgwfPlwcPnzY3l2iLmzfvn0CQJufxMREIUTLMmyLFi0Svr6+wsXFRYwbN06cPXvW6hiXL18WU6dOFe7u7sLDw0MkJSWJq1evWrUpKCgQ//Vf/yVcXFzEY489JpYuXWqrh0hdgNQYBSC2b99uaXPjxg0xZ84c4eXlJVxdXcWvf/1rUVlZaXWcsrIyMX78ePHII48Ib29vMW/ePNHc3GzVZt++fWLQoEFCq9WKwMBAq3MQ/ZTnn39ePP7440Kr1QofHx8xbtw4S4AuBMcpKVfrIJ1jlZQiPj5e+Pv7C61WKx577DERHx8vLly4YNn/sI1VlRBC2CeHT0REREREREQ/xnvSiYiIiIiIiBSCQToRERERERGRQjBIJyIiIiIiIlIIBulERERERERECsEgnYiIiIiIiEghGKQTERERERERKQSDdCIiIiIiIiKFYJBOREREREREpBAM0omIiKhTGQwGrFmzxt7dICIickgM0omIiBzYjBkz8Ktf/QoAMGbMGKSkpNjs3Dt27IBOp2uzPS8vD7NmzbJZP4iIiLoSJ3t3gIiIiJTl9u3b0Gq19/37Pj4+ndgbIiKihwsz6URERF3AjBkzkJubi7S0NKhUKqhUKpSVlQEAioqKMH78eLi7u8PX1xcJCQmora21/O6YMWPw0ksvISUlBd7e3oiJiQEArFq1CuHh4XBzc4Ner8ecOXPQ1NQEAMjJyUFSUhIaGhos51u8eDGAttPdy8vLMWnSJLi7u8PDwwOTJ09GdXW1Zf/ixYsxaNAgfPzxxzAYDPD09MSUKVNw9erVB/ukERERKRCDdCIioi4gLS0NkZGRmDlzJiorK1FZWQm9Xo/6+no89dRTiIiIwLFjx5CVlYXq6mpMnjzZ6vf/8pe/QKvV4sCBA/jggw8AAGq1GmvXrsXXX3+Nv/zlL/jyyy/x+uuvAwBGjhyJNWvWwMPDw3K++fPnt+mX2WzGpEmTUFdXh9zcXGRnZ+PixYuIj4+3aldSUoKMjAxkZmYiMzMTubm5WLp06QN6toiIiJSL092JiIi6AE9PT2i1Wri6usLPz8+yff369YiIiMD7779v2bZt2zbo9XqcO3cOwcHBAICgoCAsX77c6pg/vr/dYDDgvffew+zZs/HnP/8ZWq0Wnp6eUKlUVudrzWg04tSpUygtLYVerwcAfPTRRwgNDUVeXh6GDRsGoCWY37FjB7p37w4ASEhIgNFoRGpq6s97YoiIiBwMM+lERERdWEFBAfbt2wd3d3fLT79+/QC0ZK/vGjJkSJvf
/eKLLzBu3Dg89thj6N69OxISEnD58mVcv369w+cvLi6GXq+3BOgAEBISAp1Oh+LiYss2g8FgCdABwN/fHzU1Nff0WImIiLoCZtKJiIi6sKamJsTFxWHZsmVt9vn7+1v+7ebmZrWvrKwMv/zlL/Hiiy8iNTUVPXr0wFdffYUXXngBt2/fhqura6f209nZ2er/KpUKZrO5U89BRETkCBikExERdRFarRYmk8lq2+DBg7Fr1y4YDAY4OXX8Y//48eMwm81YuXIl1OqWiXd/+9vffvJ8rfXv3x8VFRWoqKiwZNNPnz6N+vp6hISEdLg/REREDwtOdyciIuoiDAYDjhw5grKyMtTW1sJsNiM5ORl1dXWYOnUq8vLyUFJSgr179yIpKandALtPnz5obm7GunXrcPHiRXz88ceWgnI/Pl9TUxOMRiNqa2slp8FHRUUhPDwc06ZNQ35+Po4ePYrp06dj9OjRGDp0aKc/B0RERI6OQToREVEXMX/+fGg0GoSEhMDHxwfl5eUICAjAgQMHYDKZEB0djfDwcKSkpECn01ky5FIGDhyIVatWYdmyZQgLC8Mnn3yCJUuWWLUZOXIkZs+ejfj4ePj4+LQpPAe0TFvfs2cPvLy8MGrUKERFRSEwMBDp6emd/viJiIi6ApUQQti7E0RERERERETETDoRERERERGRYjBIJyIiIiIiIlIIBulERERERERECsEgnYiIiIiIiEghGKQTERERERERKQSDdCIiIiIiIiKFYJBOREREREREpBAM0omIiIiIiIgUgkE6ERERERERkUIwSCciIiIiIiJSCAbpRERERERERArx/wDQUxExd9xR6QAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "plot_list = []\n",
+ "for i in range(contexts):\n",
+ " plot_list.append(plotting_dict[\"acc per context\"][\"context {}\".format(i + 1)])\n",
+ "figure = visual_plt.plot_lines(\n",
+ " plot_list, x_axes=plotting_dict[\"x_iteration\"],\n",
+ " line_names=['Context {}'.format(i + 1) for i in range(contexts)],\n",
+ " ylabel=\"Test accuracy (per context)\", ylim=(0,1.05) if scenario==\"class\" else None,\n",
+ " xlabel=\"Iteration\", title=\"{} -- {}-incremental learning\".format(experiment, scenario)\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "00d3deb7",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/PyTorch/build-in/other/continual-learning/example_comparison.ipynb b/PyTorch/build-in/other/continual-learning/example_comparison.ipynb
new file mode 100644
index 000000000..c342b7817
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/example_comparison.ipynb
@@ -0,0 +1,728 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a00a09c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load required libraries\n",
+ "import torch\n",
+ "from torch import optim\n",
+ "import copy\n",
+ "import numpy as np\n",
+ "# -custom-written libraries\n",
+ "import utils\n",
+ "from data.load import get_context_set\n",
+ "from models.classifier import Classifier\n",
+ "from train.train_task_based import train_cl\n",
+ "from eval import evaluate, callbacks as cb\n",
+ "from visual import visual_plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "cb0539a1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Enable plotting in the notebook\n",
+ "%matplotlib inline "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "57e3b84a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CUDA is used\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Is cuda available?\n",
+ "cuda = torch.cuda.is_available()\n",
+ "device = torch.device(\"cuda\" if cuda else \"cpu\")\n",
+ "print(\"CUDA is {}used\".format(\"\" if cuda else \"not \"))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "320f9e43",
+ "metadata": {},
+ "source": [
+ "## DATA: Prepare the data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "c6ead555",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify what kind of continual learning experiment we should run\n",
+ "experiment = \"splitMNIST\" #--> create context set by splitting up the MNIST dataset\n",
+ "contexts = 5 #--> split the dataset up into how many contexts?\n",
+ "iters = 500 #--> number of iterations per context\n",
+ "batch = 128 #--> number of samples per iteration (i.e., the mini-batch size)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "ee01ec88",
+ "metadata": {},
+ "outputs": [],
+ "source": [
 + "# Specify according to which scenario the continual learning experiment should be performed\n",
+ "scenario = \"class\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "2746c66a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
 + "# Where is the data stored, or where should it be stored?\n",
+ "d_dir = './store/datasets'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "d9b6f329",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " --> MNIST: 'train'-dataset consisting of 60000 samples\n",
+ " --> MNIST: 'test'-dataset consisting of 10000 samples\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load the context set (both train- and test-data) for the specified continual learning experiment\n",
+ "(train_datasets, test_datasets), config = get_context_set(\n",
+ " name=experiment, scenario=scenario, contexts=contexts, data_dir=d_dir, verbose=True, exception=True,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "51fc95ea",
+ "metadata": {},
+ "source": [
+ "## CLASSIFIER: Specify the classifier network"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "06a212fa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify the architectural layout of the network to use\n",
+ "fc_lay = 3 #--> number of fully-connected layers\n",
+ "fc_units = 400 #--> number of units in each hidden layer\n",
+ "fc_bn = False #--> use batch-norm\n",
+ "fc_nl = \"relu\" #--> what non-linearity to use?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "a9749091",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define the model\n",
+ "model = Classifier(\n",
+ " image_size=config['size'], image_channels=config['channels'], classes=config['output_units'],\n",
+ " # -conv-layers are not used\n",
+ " depth=0,\n",
+ " # -fc-layers\n",
+ " fc_layers=fc_lay, fc_units=fc_units, fc_bn=fc_bn, fc_nl=fc_nl, excit_buffer=True,\n",
+ ").to(device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "d6b15c63",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Indicate to model what scenario it will be trained on and how many classes there are in each context\n",
+ "model.scenario = scenario\n",
+ "model.classes_per_context = config['classes_per_context']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "749dc0cd",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-------------------------------------------------------\n",
+ "Classifier(\n",
+ " (convE): ConvLayers(\n",
+ " (pooling): Identity()\n",
+ " )\n",
+ " (flatten): Flatten()\n",
+ " (fcE): MLP(\n",
+ " (fcLayer1): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=784, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " (fcLayer2): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=400)\n",
+ " (nl): ReLU()\n",
+ " )\n",
+ " )\n",
+ " (classifier): fc_layer(\n",
+ " (linear): LinearExcitability(in_features=400, out_features=10)\n",
+ " )\n",
+ ")\n",
+ "-------------------------------------------------------\n",
+ "--> this network has 478410 parameters (~0.5 million)\n",
+ " of which: - learnable: 478410 (~0.5 million)\n",
+ " - fixed: 0 (~0.0 million)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Print layout of the model to the screen\n",
+ "utils.print_model_info(model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "91b36dc3",
+ "metadata": {},
+ "source": [
 + "#### Create several copies of the network, each of which will be trained with a different continual learning method "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "6b421901",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# -baseline\n",
+ "model_naive = copy.deepcopy(model)\n",
+ "# -different continual learning methods (each method illustrates a different strategy)\n",
+ "model_si = copy.deepcopy(model)\n",
+ "model_lwf = copy.deepcopy(model)\n",
+ "model_er = copy.deepcopy(model)\n",
+ "if scenario==\"task\":\n",
+ " model_xdg = copy.deepcopy(model)\n",
+ "if scenario==\"class\":\n",
+ " model_icarl = copy.deepcopy(model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "10f44613",
+ "metadata": {},
+ "source": [
+ "## CL-STRATEGY: Specify the continual learning strategy to use"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "50eb0638",
+ "metadata": {},
+ "source": [
+ "### Parameter Regularization"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "456d55be",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Synaptic Intelligence (SI)\n",
+ "model_si.weight_penalty = True\n",
+ "model_si.importance_weighting = 'si'\n",
+ "model_si.reg_strength = 100."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a1b72b02",
+ "metadata": {},
+ "source": [
+ "### Functional Regularization"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "c59bc386",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Learning without Forgetting (LwF)\n",
+ "model_lwf.replay_mode = 'current'\n",
+ "model_lwf.replay_targets = 'soft'\n",
+ "model_lwf.KD_temp = 2.\n",
+ "model_lwf.lwf_weighting = True"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "02317fbe",
+ "metadata": {},
+ "source": [
+ "### Replay"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "82668e74",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Experience Replay (ER)\n",
+ "model_er.replay_mode = 'buffer'\n",
+ "model_er.use_memory_buffer = True\n",
+ "model_er.budget_per_class = 100\n",
+ "model_er.sample_selection = 'random'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "975a3268",
+ "metadata": {},
+ "source": [
+ "### Context-specific components"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "5a8ce734",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Context-specific Gating (XdG)\n",
+ "gating_prop = 0.9\n",
+ "if scenario==\"task\":\n",
+ " model_xdg.mask_dict = {}\n",
+ " model_xdg.excit_buffer_list = []\n",
+ " for context_id in range(contexts):\n",
+ " model_xdg.mask_dict[context_id + 1] = {}\n",
+ " for i in range(model_xdg.fcE.layers):\n",
+ " # For each fully-connected hidden layer, define for every context a random mask of units to set to zero\n",
+ " layer = getattr(model_xdg.fcE, \"fcLayer{}\".format(i + 1)).linear\n",
+ " if context_id == 0:\n",
+ " model_xdg.excit_buffer_list.append(layer.excit_buffer)\n",
+ " n_units = len(layer.excit_buffer)\n",
+ " gated_units = np.random.choice(n_units, size=int(gating_prop * n_units), replace=False)\n",
+ " model_xdg.mask_dict[context_id + 1][i] = gated_units"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "04975f9b",
+ "metadata": {},
+ "source": [
+ "### Template-based Classification"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "d9e0a292",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Incremental Classification and Representation Learning (iCaRL)\n",
+ "if scenario==\"class\":\n",
+ " model_icarl.use_memory_buffer = True\n",
+ " model_icarl.budget_per_class = 100\n",
+ " model_icarl.sample_selection = 'herding'\n",
+ " model_icarl.norm_exemplars = True\n",
+ " model_icarl.add_buffer = True\n",
+ " model_icarl.prototypes = True\n",
+ " model_icarl.binaryCE = True\n",
+ " model_icarl.binaryCE_distill = True"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6448262e",
+ "metadata": {},
+ "source": [
+ "## CALLBACKS: Specify callback-functions to get insight into training progress"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "198c0fbf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Callback-function that provides a tqdm progress bar for during training, reporting training loss and accuracy\n",
+ "loss_cbs = [cb._classifier_loss_cb(contexts=contexts, iters_per_context=iters)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "7e7cdc33",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Prepare for keeping track of performance (on test set) during training, for later plotting\n",
+ "plotting_dict_naive = evaluate.initiate_plotting_dict(contexts)\n",
+ "plotting_dict_si = evaluate.initiate_plotting_dict(contexts)\n",
+ "plotting_dict_lwf = evaluate.initiate_plotting_dict(contexts)\n",
+ "plotting_dict_er = evaluate.initiate_plotting_dict(contexts)\n",
+ "if scenario==\"task\":\n",
+ " plotting_dict_xdg = evaluate.initiate_plotting_dict(contexts)\n",
+ "if scenario==\"class\":\n",
+ " plotting_dict_icarl = evaluate.initiate_plotting_dict(contexts)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "6b7b35c6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Callback-function to track test accuracy during training, for later plotting\n",
+ "context_cbs_naive = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_naive, iters_per_context=iters)\n",
+ "]\n",
+ "context_cbs_si = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_si, iters_per_context=iters)\n",
+ "]\n",
+ "context_cbs_lwf = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_lwf, iters_per_context=iters)\n",
+ "]\n",
+ "context_cbs_er = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_er, iters_per_context=iters)\n",
+ "]\n",
+ "if scenario==\"task\":\n",
+ " context_cbs_xdg = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_xdg, iters_per_context=iters)\n",
+ " ]\n",
+ "if scenario==\"class\":\n",
+ " context_cbs_icarl = [\n",
+ " cb._eval_cb(log=iters, test_datasets=test_datasets, plotting_dict=plotting_dict_icarl, iters_per_context=iters)\n",
+ " ]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1c1c8ab1",
+ "metadata": {},
+ "source": [
+ "## TRAIN: Train the model on the continual learning experiment"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "680da4f9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Settings for the optimizer to use\n",
+ "lr = 0.001 #--> learning rate to use"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "8e3602ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For each model, set up the optimizer to use\n",
+ "model_list = [model_naive, model_si, model_lwf, model_er]\n",
+ "if scenario==\"task\":\n",
+ " model_list.append(model_xdg)\n",
+ "if scenario==\"class\":\n",
+ " model_list.append(model_icarl)\n",
+ "for model in model_list:\n",
+ " model.optim_list = [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': lr}]\n",
+ " model.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cc1dd1bb",
+ "metadata": {},
+ "source": [
+ "#### Train all the models using different continual learning methods"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "609facda",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 1.05e-05 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 47.20it/s]\n",
+ " | Context: 2/5 | training loss: 0.0128 | training accuracy: 0.992 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 49.12it/s]\n",
+ " | Context: 3/5 | training loss: 0.000898 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 48.89it/s]\n",
+ " | Context: 4/5 | training loss: 0.000352 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 49.36it/s]\n",
+ " | Context: 5/5 | training loss: 0.00319 | training accuracy: 1.0 |: 100%|███████████████████████████████| 500/500 [00:09<00:00, 50.34it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Naive baseline\n",
+ "train_cl(\n",
+ " model_naive, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_naive,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "b585d66d",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 6.23e-05 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 47.72it/s]\n",
+ " | Context: 2/5 | training loss: 0.0373 | training accuracy: 1.0 |: 100%|████████████████████████████████| 500/500 [00:11<00:00, 43.94it/s]\n",
+ " | Context: 3/5 | training loss: 0.0161 | training accuracy: 1.0 |: 100%|████████████████████████████████| 500/500 [00:11<00:00, 44.69it/s]\n",
+ " | Context: 4/5 | training loss: 0.00592 | training accuracy: 1.0 |: 100%|███████████████████████████████| 500/500 [00:11<00:00, 44.42it/s]\n",
+ " | Context: 5/5 | training loss: 0.0707 | training accuracy: 0.984 |: 100%|██████████████████████████████| 500/500 [00:10<00:00, 47.00it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Synaptic Intelligence\n",
+ "train_cl(\n",
+ " model_si, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_si,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "fc974df9",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 3.24e-06 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:08<00:00, 56.95it/s]\n",
+ " | Context: 2/5 | training loss: 1.46 | training accuracy: 0.992 |: 100%|████████████████████████████████| 500/500 [00:09<00:00, 50.88it/s]\n",
+ " | Context: 3/5 | training loss: 1.97 | training accuracy: 1.0 |: 100%|██████████████████████████████████| 500/500 [00:09<00:00, 52.16it/s]\n",
+ " | Context: 4/5 | training loss: 2.3 | training accuracy: 1.0 |: 100%|███████████████████████████████████| 500/500 [00:09<00:00, 52.21it/s]\n",
+ " | Context: 5/5 | training loss: 2.49 | training accuracy: 1.0 |: 100%|██████████████████████████████████| 500/500 [00:09<00:00, 51.95it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Learning without Forgetting\n",
+ "train_cl(\n",
+ " model_lwf, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_lwf,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "f7824f59",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 9.88e-06 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:09<00:00, 50.40it/s]\n",
+ " | Context: 2/5 | training loss: 0.00253 | training accuracy: 1.0 |: 100%|███████████████████████████████| 500/500 [00:11<00:00, 44.84it/s]\n",
+ " | Context: 3/5 | training loss: 0.00109 | training accuracy: 1.0 |: 100%|███████████████████████████████| 500/500 [00:10<00:00, 47.07it/s]\n",
+ " | Context: 4/5 | training loss: 0.000389 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:09<00:00, 50.39it/s]\n",
+ " | Context: 5/5 | training loss: 0.00527 | training accuracy: 0.984 |: 100%|█████████████████████████████| 500/500 [00:09<00:00, 50.81it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Experience Replay\n",
+ "train_cl(\n",
+ " model_er, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_er,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "id": "76f05e7a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# XdG\n",
+ "if scenario==\"task\":\n",
+ " train_cl(\n",
+ " model_xdg, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_xdg,\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "id": "54e9c8de",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " | Context: 1/5 | training loss: 0.000369 | training accuracy: 1.0 |: 100%|██████████████████████████████| 500/500 [00:08<00:00, 55.57it/s]\n",
+ " | Context: 2/5 | training loss: 0.397 | training accuracy: 0.859 |: 100%|███████████████████████████████| 500/500 [00:09<00:00, 50.54it/s]\n",
+ " | Context: 3/5 | training loss: 1.37 | training accuracy: 0.977 |: 100%|████████████████████████████████| 500/500 [00:09<00:00, 51.36it/s]\n",
+ " | Context: 4/5 | training loss: 2.95 | training accuracy: 0.953 |: 100%|████████████████████████████████| 500/500 [00:08<00:00, 55.88it/s]\n",
+ " | Context: 5/5 | training loss: 4.05 | training accuracy: 0.945 |: 100%|████████████████████████████████| 500/500 [00:10<00:00, 49.02it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# iCaRL\n",
+ "if scenario==\"class\":\n",
+ " train_cl(\n",
+ " model_icarl, train_datasets, iters=iters, batch_size=batch, loss_cbs=loss_cbs, context_cbs=context_cbs_icarl,\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "41bcf17f",
+ "metadata": {},
+ "source": [
+ "## EVALUATION: Plot average accuracy curves throughout training for compared methods"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "5b128331",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Collect data for the lines to plot\n",
+ "lines_to_plot = [\n",
+ " plotting_dict_naive[\"average\"],\n",
+ " plotting_dict_si[\"average\"],\n",
+ " plotting_dict_lwf[\"average\"],\n",
+ " plotting_dict_er[\"average\"]\n",
+ "]\n",
+ "if scenario==\"task\":\n",
+ " lines_to_plot.append(plotting_dict_xdg[\"average\"])\n",
+ "if scenario==\"class\":\n",
+ " lines_to_plot.append(plotting_dict_icarl[\"average\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "id": "ae1533dc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify labels and layout for the plot\n",
+ "line_names = ['Naive', 'SI', 'LwF', 'ER']\n",
+ "line_colors = ['grey', 'yellowgreen', 'gold', 'red']\n",
+ "if scenario==\"task\":\n",
+ " line_names.append('XdG')\n",
+ " line_colors.append('deepskyblue')\n",
+ "if scenario==\"class\":\n",
+ " line_names.append('iCaRL')\n",
+ " line_colors.append('purple')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "id": "9e5b4617",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAJwCAYAAAD1IyBAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAADWJklEQVR4nOzdd3hUVf7H8fedSTLpBRJCSSA0EURpAgKiCCiiIthAsCBYsAGCrmXX7ior/lRUVCy7igUbuuouithQmqD03nuHlEkvM/f3xyyRkJswE2ZSP6/nmcfknnvOfCdNPnPPPccwTdNERERERERERKqcraoLEBEREREREREPhXQRERERERGRakIhXURERERERKSaUEgXERERERERqSYU0kVERERERESqCYV0ERERERERkWpCIV1ERERERESkmlBIFxEREREREakmFNJFREREREREqgmFdBGRWmrHjh0YhsG7775bfOzxxx/HMIyqK0q8YhgGjz/+eFWXUUpKSgo33XRTVZdR58ydOxfDMJg7d26557377rsYhsGOHTsqpa6K8vb1iIjUVQrpIiJ13DPPPMOXX35Z6vixf/AbhsH8+fNLtZumSXJyMoZhcNlll5VoO9bv+eefL3PcP/74o/jYsTcPjhw5UuLc//znP5x//vk0aNCA8PBwWrRowdChQ5k9ezYAffr0KX6u8h7VMfBK3VHW75iIiIiVoKouQEREKs/DDz/Mgw8+WOLYM888w9VXX82QIUMs+4SGhjJjxgzOPffcEsd/+eUX9uzZg8PhKPP5nnvuOe644w7Cw8N9rvX//u//+Mtf/sL555/PQw89RHh4OFu2bOGHH37g448/5uKLL+Zvf/sbt9xyS3Gf33//nZdffpm//vWvtG3btvj4WWed5fPzS2kbN27EZtP7+7462e9YXXPeeeeRm5tLSEhIVZciIlItKaSLiNQhQUFBBAX59qf/kksu4bPPPuPll18u0XfGjBl06dKl1NXvYzp27MiKFSuYNm0aEydO9Ok5i4qKeOqpp7jwwguZM2dOqfZDhw4BcOGFF5Y4Hhoayssvv8yFF15Inz59fHpOObny3pAJlJycnAq9ySOVJzs7m4iICK/Pt9lshIaGBrAiEZGaTW+Hi4hUI5mZmdxzzz2kpKTgcDho0KABF154IcuWLSs+p0+fPrRv356lS5fSs2dPwsLCaN68OdOmTTvp+Cfek24YBtnZ2UyfPr14aviJ9xwPHz6co0eP8v333xcfKygoYObMmYwYMaLM5+rVqxd9+/Zl8uTJ5Obm+vBVgCNHjuB0OunVq5dle4MGDXwarzrJy8vj8ccf57TTTiM0NJRGjRpx5ZVXsnXr1jL77Ny5kzvvvJM2bdoQFhZG/fr1ueaaa0rde1xYWMgTTzxB69atCQ0NpX79+px77rklvncHDhxg1KhRJCUl4XA4aNSoEYMHD/bqPuYT70k/duvCggULmDhxIgkJCURERHDFFVdw+PDhUv2//fZbzj//fKKiooiOjqZr167MmDGjuP34n+3zzjuP8PBw/vrXvwKQn5/PY489RqtWrXA4HCQnJ3P//feTn59f4jkMw+Duu+/ms88+o127doSFhdGjRw9Wr14NwBtvvEGrVq0IDQ2lT58+lq978eLFXHzxxcTExBAeHs7555/PggULSpxz7Hdpy5Yt3HTTTcTGxhITE8OoUaPIyckpUU9Zv2Pefl9P1bfffkvv3r2JiIggKiqKSy+9lLVr15Y4Z9WqVdx00020aNGC0NBQGjZsyOjRozl69Kjl6163bh0jRowgLi6ueJZNSkoKl112GfPnz6dbt26EhobSokUL3nvvvRJjWN2Tfux7v27dOi644ALCw8Np0qQJkydPLvV6du7cyeWXX05ERAQNGjRgwoQJfPfdd7rPXURqDV1JFxGpRm6//XZmzpzJ3XffTbt27Th69Cjz589n/fr1dO7cufi8tLQ0LrnkEoYOHcrw
4cP59NNPueOOOwgJCWH06NFeP9/777/PLbfcQrdu3bjtttsAaNmyZYlzUlJS6NGjBx999BEDBw4EPP/oz8jI4Nprr+Xll18uc/zHH3+c8847j9dff92nq+kNGjQgLCyM//znP4wdO5Z69ep53bc6c7lcXHbZZfz4449ce+21jB8/nszMTL7//nvWrFlT6mt/zO+//87ChQu59tprSUpKYseOHbz++uv06dOHdevWFV9pfvzxx5k0aVLx99TpdPLHH3+wbNmy4lkHV111FWvXrmXs2LGkpKRw6NAhvv/+e3bt2kVKSkqFXtfYsWOJi4vjscceY8eOHUyZMoW7776bTz75pPicd999l9GjR3PGGWfw0EMPERsby/Lly5k9e3aJN3uOHj3KwIEDufbaa7n++utJTEzE7XZz+eWXM3/+fG677Tbatm3L6tWrefHFF9m0aVOp+73nzZvH119/zV133QXApEmTuOyyy7j//vt57bXXuPPOO0lLS2Py5MmMHj2an376qbjvTz/9xMCBA+nSpQuPPfYYNpuNd955h759+zJv3jy6detW4rmGDh1K8+bNmTRpEsuWLePtt9+mQYMGPPvss0D5v2Pefl9Pxfvvv8/IkSMZMGAAzz77LDk5Obz++uuce+65LF++vPh7/v3337Nt2zZGjRpFw4YNWbt2LW+++SZr167lt99+K7Xg5DXXXEPr1q155plnME2z+PiWLVu4+uqrufnmmxk5ciT/+te/uOmmm+jSpQtnnHFGubWmpaVx8cUXc+WVVzJ06FBmzpzJAw88wJlnnln8tyc7O5u+ffuyf/9+xo8fT8OGDZkxYwY///zzKX+tRESqDVNERKqNmJgY86677ir3nPPPP98EzOeff774WH5+vtmxY0ezQYMGZkFBgWmaprl9+3YTMN95553i8x577DHzxD/9ERER5siRI0s9zzvvvGMC5u+//25OnTrVjIqKMnNyckzTNM1rrrnGvOCCC0zTNM1mzZqZl156aYm+QPHruOCCC8yGDRsW9z1+3BPrOnz4cPGxRx991ATMiIgIc+DAgebTTz9tLl26tNyvzWeffWYC5s8//1zueVXlX//6lwmYL7zwQqk2t9td/DFgPvbYY8WfH/vaHW/RokUmYL733nvFxzp06FDqe3G8tLQ0EzCfe+65CtXfrFmzEj8rx76X/fv3L1H/hAkTTLvdbqanp5umaZrp6elmVFSU2b17dzM3N7fEmMf3O/azPW3atBLnvP/++6bNZjPnzZtX4vi0adNMwFywYEHxMcB0OBzm9u3bi4+98cYbJmA2bNjQdDqdxccfeughEyg+1+12m61btzYHDBhQoq6cnByzefPm5oUXXlh87NjP7OjRo0vUdMUVV5j169cvcays3zFvv68///yzVz/Xx74fx15PZmamGRsba956660lzjtw4IAZExNT4rhVLR999JEJmL/++mvxsWOve/jw4aXOb9asWanzDx06ZDocDvPee+8t9/Uc+94f/7rz8/PNhg0bmldddVXxseeff94EzC+//LL4WG5urnn66adX6999ERFfaLq7iEg1Ehsby+LFi9m3b1+55wUFBTFmzJjiz0NCQhgzZgyHDh1i6dKlfq9r6NCh5Obm8t///pfMzEz++9//ljvV/XiPP/44Bw4c8Go6/vGeeOIJZsyYQadOnfjuu+/429/+RpcuXejcuTPr16+vyMuocp9//jnx8fGMHTu2VFt5W+OFhYUVf1xYWMjRo0dp1aoVsbGxJW6FiI2NZe3atWzevLnMcUJCQpg7dy5paWmn8EpKuu2220rU37t3b1wuFzt37gQ8V2kzMzN58MEHS92LfOLrdjgcjBo1qsSxzz77jLZt23L66adz5MiR4kffvn0BSl1F7devX4lZAd27dwc8swiioqJKHd+2bRsAK1asYPPmzYwYMYKjR48WP092djb9+vXj119/xe12l3iu22+/vcTnvXv35ujRozidznK+Yh7efl8r
6vvvvyc9PZ3hw4eX+LrZ7Xa6d+9e4ut2fC15eXkcOXKEc845B8CylhNf9zHt2rWjd+/exZ8nJCTQpk2b4q9xeSIjI7n++uuLPw8JCaFbt24l+s6ePZsmTZpw+eWXFx8LDQ3l1ltvPen4IiI1haa7i4hUI5MnT2bkyJEkJyfTpUsXLrnkEm688UZatGhR4rzGjRuXWqjptNNOAzz7ox/7x7W/JCQk0L9/f2bMmEFOTg4ul4urr77aq77nnXceF1xwAZMnTy7zH/ZlGT58OMOHD8fpdLJ48WLeffddZsyYwaBBg1izZk1AFp8qKCggNTW1xLGEhAQKCgrIyMgocbxhw4Y+jb1161batGnj8+J9ubm5TJo0iXfeeYe9e/eWmF58fE1PPvkkgwcP5rTTTqN9+/ZcfPHF3HDDDcWr2zscDp599lnuvfdeEhMTOeecc7jsssu48cYbi19LRkZGiTUEQkJCTnq7QdOmTUt8HhcXB1D8RsCx++3bt29/0tfapEmTUqt+b968mfXr15OQkGDZ59hCgmXVExMTA0BycrLl8WN1HntzY+TIkWXWl5GRUfz6rJ7r+NceHR1d5jjg/fe1oo69nmNvZpzo+PpSU1N54okn+Pjjj0t9Pa1qad68ueWYJ349wPM18eZNoaSkpFJv2sTFxbFq1ariz3fu3EnLli1LndeqVauTji8iUlMopIuIVCNDhw6ld+/e/Pvf/2bOnDk899xzPPvss3zxxRfF92RWlREjRnDrrbdy4MABBg4cSGxsrNd9H3vsMfr06cMbb7zhU79joqOjufDCC7nwwgsJDg5m+vTpLF68mPPPP9/nsU5m4cKFXHDBBSWObd++nblz55a6wnt8qAqksWPH8s4773DPPffQo0cPYmJiMAyDa6+9tsSV3fPOO4+tW7fy1VdfMWfOHN5++21efPFFpk2bVrxV3T333MOgQYP48ssv+e6773jkkUeYNGkSP/30E506dWL8+PFMnz69eMzzzz//pItx2e12y+MV+focf0X3GLfbzZlnnskLL7xg2efE8F1WPSer89jX8rnnnqNjx46W50ZGRvo0Znm8/b5W1LEx3n//fcs3lI5/s2jo0KEsXLiQv/zlL3Ts2JHIyEjcbjcXX3yxZS1W3yc4ta+HP3+ORERqMoV0EZFqplGjRtx5553ceeedHDp0iM6dO/P000+XCOn79u0rte3Rpk2bAHxe/Ku8adbHu+KKKxgzZgy//fZbiQXBvHH++efTp08fnn32WR599FGf+p7o7LPPZvr06ezfv/+UxilLhw4dSqyGDp4r5gMGDCh13FctW7Zk8eLFFBYWEhwc7HW/mTNnMnLkSJ5//vniY3l5eaSnp5c6t169eowaNYpRo0aRlZXFeeedx+OPP15iP/mWLVty7733cu+997J582Y6duzI888/zwcffMD9999fYsrx8VeNK+rYQmlr1qyp0BXPli1bsnLlSvr16+f1z2tFHKszOjqa/v37+23csmr25ftaEcdeT4MGDcp9PWlpafz444888cQTJX4/y7ptoio1a9aMdevWYZpmia/rli1bqrAqERH/0j3pIiLVhMvlKjWttEGDBjRu3LjUNlNFRUW88cYbxZ8XFBTwxhtvkJCQQJcuXXx63oiICK9CQWRkJK+//jqPP/44gwYN8uk54M970998882TnpuTk8OiRYss27799lsA2rRp43MN3oiLi6N///4lHse2SjvxuK+uuuoqjhw5wtSpU0u1lXe10G63l2p/5ZVXcLlcJY6duF1WZGQkrVq1Kv75ycnJIS8vr8Q5LVu2JCoqqvicdu3alXiNvv48WbnooouIiopi0qRJpZ7fm6ukQ4cOZe/evbz11lul2nJzc8nOzj7lGgG6dOlCy5Yt+b//+z+ysrJKtVttK+eNsn7HvP2+VtSAAQOIjo7mmWeeobCwsFT7sddz7Ar2ibVMmTLFL3X404ABA9i7dy9ff/118bG8vDzLnw0RkZpKV9JF
RKqJzMxMkpKSuPrqq+nQoQORkZH88MMP/P777yWutIHnnvRnn32WHTt2cNppp/HJJ5+wYsUK3nzzTZ+u0IInmPzwww+88MILNG7cmObNmxcvqHWi8u7VPZnzzz+f888/n19++eWk5+bk5NCzZ0/OOeccLr74YpKTk0lPT+fLL79k3rx5DBkyhE6dOlW4lqpy44038t577zFx4kSWLFlC7969yc7O5ocffuDOO+9k8ODBlv0uu+wy3n//fWJiYmjXrh2LFi3ihx9+oH79+iXOa9euHX369KFLly7Uq1ePP/74o3hLP/DMtujXrx9Dhw6lXbt2BAUF8e9//5uDBw9y7bXXBux1R0dH8+KLL3LLLbfQtWvX4v21V65cSU5OTonp9VZuuOEGPv30U26//XZ+/vlnevXqhcvlYsOGDXz66ad89913nH322adcp81m4+2332bgwIGcccYZjBo1iiZNmrB3715+/vlnoqOj+c9//uPzuGX9jnn7fa2o6OhoXn/9dW644QY6d+7MtddeS0JCArt27WLWrFn06tWLqVOnEh0dzXnnncfkyZMpLCykSZMmzJkzh+3bt/ulDn8aM2YMU6dOZfjw4YwfP55GjRrx4YcfFq9PEciZFiIilUUhXUSkmggPD+fOO+9kzpw5fPHFF7jdblq1asVrr73GHXfcUeLcuLg4pk+fztixY3nrrbdITExk6tSpFVrh+IUXXuC2227j4YcfJjc3l5EjR5YZ0k/V448/Xup+byuxsbG89dZbzJo1i3feeYcDBw5gt9tp06YNzz33HOPGjQtIfYFmt9v55ptvePrpp5kxYwaff/459evX59xzz+XMM88ss99LL72E3W7nww8/JC8vj169evHDDz8wYMCAEueNGzeOr7/+mjlz5pCfn0+zZs34+9//zl/+8hfAc+/28OHD+fHHH3n//fcJCgri9NNP59NPP+Wqq64K6Gu/+eabadCgAf/4xz946qmnCA4O5vTTT2fChAkn7Wuz2fjyyy958cUXee+99/j3v/9NeHg4LVq0YPz48cWLJvpDnz59WLRoEU899RRTp04lKyuLhg0b0r179xI7KviirN8xb7+vp2LEiBE0btyYf/zjHzz33HPk5+fTpEkTevfuXWKNhRkzZjB27FheffVVTNPkoosu4ttvv6Vx48Z+q8UfIiMj+emnnxg7diwvvfQSkZGR3HjjjfTs2ZOrrroqIItJiohUNsPUahwiIjVKnz59OHLkCGvWrKnqUkREqoUpU6YwYcIE9uzZQ5MmTaq6HBGRU6J70kVERESkxjh+i0Dw3JP+xhtv0Lp1awV0EakVNN1dRERERGqMK6+8kqZNm9KxY0cyMjL44IMP2LBhAx9++GFVlyYi4hcK6SIiIiJSYwwYMIC3336bDz/8EJfLRbt27fj4448ZNmxYVZcmIuIXuiddREREREREpJrQPekiIiIiIiIi1YRCuoiIiIiIiEg1UefuSXe73ezbt4+oqCgMw6jqckRERERERKSWM02TzMxMGjdujM1W/rXyOhfS9+3bR3JyclWXISIiIiIiInXM7t27SUpKKvecOhfSo6KiAM8XJzo6uoqrERERERERkdrO6XSSnJxcnEfLU+dC+rEp7tHR0QrpIiIiIiIiUmm8ueVaC8eJiIiIiIiIVBMK6SIiIiIiIiLVhEK6iIiIiIiISDWhkC4iIiIiIiJSTSiki4iIiIiIiFQTCukiIiIiIiIi1YRCuoiIiIiIiEg1oZAuIiIiIiIiUk0opIuIiIiIiIhUEwrpIiIiIiIiItWEQrqIiIiIiIhINaGQLiIiIiIiIlJNKKSLiIiIiIiIVBMK6SIiIiIiIiLVhEK6iIiIiIiISDWhkC4iIiIiIiJSTSiki4iIiIiIiFQTCukiIiIiIiIi1YRCejWW6VxLYWFeVZchIiIiIiIilaRKQ/qvv/7KoEGDaNy4MYZh8OWXX560z9y5c+ncuTMOh4NWrVrx7rvvBrzOSpWX
R9pHs9n7+RMEz+vIpu97QEYGFBVVdWUiIiIiIiISYEFV+eTZ2dl06NCB0aNHc+WVV570/O3bt3PppZdy++238+GHH/Ljjz9yyy230KhRIwYMGFAJFQeeuWcvU0csxI2dMCYSjZOV3Ek0TqKDcogOLSA6wkVMtEl0rI3gmHCIjPQ8IiL+/Njbh8MBhlHVL1tERERERESo4pA+cOBABg4c6PX506ZNo3nz5jz//PMAtG3blvnz5/Piiy+WGdLz8/PJz88v/tzpdJ5a0QHkdrtZ8cOduOkJQC7h5BLOQRp6TigCsv73OOg5FEouMWR4Qjx7//ffY587icFJMIVlP6nd7nuwP9kjPBxsupNCRERERETEV1Ua0n21aNEi+vfvX+LYgAEDuOeee8rsM2nSJJ544okAV+YfBw9+R4Mja+B/Id0beYSRR9ifQd5CySB//CODaJeT6AwnIRkZfngFx6nIVf2TPYJq1I+riIiIiIiIz2pU6jlw4ACJiYkljiUmJuJ0OsnNzSUsLKxUn4ceeoiJEycWf+50OklOTg54rRXRqNFA5u891+/j+hrko/53Bf7Eq/Ih5V2RP1F2tudx8KAfXsH/OBz+D/6a7i8iIiIiItVIjQrpFeFwOHA4HFVdhteCC/sB+yv9eb0N8p4p9Bn+CfK+ys/3PI4e9d+Ymu4vIiIiIiLVSI0K6Q0bNuTgCVdmDx48SHR0tOVV9Joo5doeXFi0gAObZ+NKD8GZHo3TGU1mdhSmWbXB71iQP0RimeeUHeT/DPMBDfK+crk8q+f7e7p/eLj/w39wsH9rFBERERGRaqdGhfQePXrwzTfflDj2/fff06NHjyqqyP8S+59JYv8zycq+kqNb29EsxHPV2F1kI+toBM79MTgPRnNg32kUFt5G5t5sMnZn4NzjJHNfJqbLrNL6fQnyxwf3E6/KV6sgXxE5OZ7HoUP+GzMkxP/BPzRU0/1FRERERKqRKg3pWVlZbNmypfjz7du3s2LFCurVq0fTpk156KGH2Lt3L++99x4At99+O1OnTuX+++9n9OjR/PTTT3z66afMmjWrql5CwERGNCCtwbccPNKXxKAsbEFuohMziU7MBKAd68gMyiGq5bdgeK6wu11usg5k4dzjxLnbiXOPk4zdGWTuyaxlQd5zrMYHeV8VFEBqqufhLzab/4N/RISm+4uIiIiIVJBhmmaVJba5c+dywQUXlDo+cuRI3n33XW666SZ27NjB3LlzS/SZMGEC69atIykpiUceeYSbbrrJ6+d0Op3ExMSQkZFBdHS0H15FYC3f8D7N3bcSa8u3bM+LHENo0jSvxysvyBd/XA2CvDdC7YVEB2UTbWQRbaYTXZRKtCu1xFX5OhfkqwtN9xcRERERKeZLDq3SkF4ValpIB/jpjwfoHv4iETbrwOmK/wf2+Af89nxWQb5UqK8pQT4qiOgEB9FxdqJjDKKj3ESHFRETmk90UA7RtkxC8rMgq5xHdnZVvwwB76b7l7f1X2wsxMd7HlFRmuYvIiIiIpVGIb0cNTGku91uvvntKi6K+5oQw219UqMPIWZE5dV0fJA/PszXxCAfG0p0UjTRydEl/huTHOP5vHEkIbai8oN8RR4uV1W/9LorONgT1hMS/gzuxz+sjoeGVnXVIiIiIlJDKaSXoyaGdID8ghy+X9qXgbFLsBulv2Vu046t6WyI6F8F1Vlzu9xkH/xzYbsTg7xzjxPnXmeNDvLHh/mQyBDvBzRNz3ZyZV25r2jwz8sL3BehrouIsA7vZQX9+vU9W/yJiIiISJ2nkF6OmhrSAQ6n72LFlovoH7HRcqauizDsKQsgtFPlF1dBZQb54z+uqUHe4qq8T0G+IoqKTi3kl/UQ3xkGxMV5H+oTEiA6WtPwRURERGohhfRy1OSQDrB+56+kpt1Ar9Bdlu0uIx5788UQ0qKSKwucUkHe6qp8DQnyjhjHn9Poy5heH/Ag7yu3G3Jz/Rv6MzM13d9KUJBvoT4+HsLCqrpq
ERERETkJhfRy1PSQDvDzyreJsz1Mx+CDlu3uoBbYUn6DoIRKrqzqHAvyx+6Hr3VB/oQwX+2CvK9M07OlnL+v+NfF6f7h4d4H+vh4qFdPK+WLiIiIVDKF9HLUhpAO8OmCe+kU8y6tg6z3zDYdXTGa/QS2yEqurPoqN8gf+7gGBvmopKhSob5WBPmKKGu6f2YmpKXBkSN/Pg4fLvn50aOeWQN1wbGV7r1dOC8mBmy2qq5aREREpMZSSC9HbQnpLlcR780fRv/4H0i2O61PihgISV+Boatm3rIM8idclc/cl4m7qPqHOUeMoziwWwX56KRoHFGOqi6z+nC7IT29dHi3CvTHHhkZVV115bDbrcN8eUE/IqKqqxYRERGpNhTSy1FbQjqAM/sony2/ksvj/iDBnmN9UvRIaPSOFqPyI7fLTfah7BLbzdW6IH9cmFeQL0dBgecKvLeh/vDhujMlPyzMt1AfH69p+CIiIlJrKaSXozaFdICdB1Yxd9dohkSuIcaWb31S/b9CwtOVW1gdd9Igv8dJ5t6aGeRL7CGvIO8b04ScHN9C/dGjdWeRvZgY70L9sWOxsZqGLyIiIjWCQno5altIB/h9w7/ZkvMIg8M2Em4UWZ+U+ArE3V25hUm5ygrymXsy//y4pgT5aEeZq9UryJ8it9szrd6bQH/s4/T0qq66cthsnv3ofVk4LyJCM4tERESk0imkl6M2hnSA/yx5hqLg97ksdBPBRulQZ2JgNP4Uoq+uguqkooqD/HFX4TN2Z9SaIH9imFeQ95PCQkhN9T7UHzniucJfFzgcvoX6+vU9fUREREROgUJ6OWprSHe73Xw4/xbqR8/jYscWbFYXiowQSJ4D4edXen0SOKbbJOtglmWQL/64Jgb5Mq7KK8gHSE6OZ2q9t6H+yBHPavp1QVTUyafeH/+Ii/MsticiIiLyPwrp5aitIR0gvyCH9367mlYx67jAsdP6JFsMNJ0HoWdWbnFSpayCfKlQX5OC/PFX408M8knROKIV5APONEtOw/fmHvtU6y0jax2bzbMfvS8L50VFaRq+iIhILaaQXo7aHNIBDqXt4N/rb6Bj5Ha6h+y1PimoMTRbBMFNK7c4qdZKBPkTVquvDUE+rF4YtiAb9mA7tiAbtmCb57/HHzuF4/ZgO4bdwFDQKltR0Z/T8L1dOC87u6qrrhwhId4H+oQEzzT80NCqrlpERES8pJBejtoe0gHW7fiF+YfvpUfobs4MPmR9UkhbaDYf7PUqtzip0Uy3SfahbMtt54rD/V4n7sLqH+QDxbAbfgv9Vuf6e7yKHq+0NyRyc0tvc3eyKfmFhYGvqzqIjPQ+1MfHe67uaxq+iIhIlVBIL0ddCOkAP614k63uafQN2UHLoDTrk8J6QvIPYAur3OKkViszyB//cR0P8rWFP0N/ieOnMl6QDVthPrbcbGw5mdiyMrFnO7FlZvz5cKZhS0/Fnp6K7dgDFzbc2HBj/99/PQ8XBia1Yn6EYXjul/dl4bzoaE3DFxER8QOF9HLUlZAO8MmCiWSF/cxAxxYa27OsT4q8HJp8DkZQ5RYndVqpIG91VV5BXqoRm+HGZpjYTBd201Ui1P8Z7ksfO9lxw+JNgbLeLPD+eMXHsHHCPwmCgkqH97g4z1R7h8PzCAkp/bHVMW8+DgrSmwIiIlIrKaSXoy6F9CJXIdPnD8cetYVBoZuob8u1PjH2Nkicpn8YSbVyLMgfv4e8grxIoJnlvrlw7FgIBYRQQDCF//tvASElPvZ8fvzHVucFUVRyloJheB/0K/pGQEXPtdmq6psiIiK1gEJ6OepSSAdwZh/ho+VDiQg/wuDQjUTZCqxPjH8c4h+r1NpETlW5Qf7YxwryItWWgbs46B8f+k/tTQBPv1KzAk6V3R74NwIq0i84WG+yi4jUAArp5ahrIR1gx/6VfLdrDHEhmVweupFQw2V9YsM3IfbWyi1OJMBMt0n24WwKcwpxF7pxF3kerkKX52OrY14c9+Xc
Ms+p4HPXhBX2RapaUHFwt34ToHTwL/0mgFWb5zaFaqa6zTo49rEWKhQRKeZLDtWNyHVASqMOdEy/j5U5k/guvxWXOjYRZFi8N3PgdrAnQtTllV+kSIAYNoPIxMiqLsOvTNPEdJkVCvf+eMPB8vgpvOFQ3psWpqtOvY8sflREMEUE+31cG66TXv0/WdC3agumEKOiV/8LCjyPzEz/vthTZbdXr1kHxz4OCdHsAxGp1hTS64juba/m4OINHDT+zY/5LbjQsRVbqf8/uWHfMEj+EcJ7VkWZIuIFwzAwggxsQTaCQmv3n3HTNP8M9gGYkVCRMcwiMyBvdphuvSFRE7ixk4+dfPy/T32w5RX/0lf/y5rmX1abnSqafeNyebZRzC1jTZyqdCysV/WsgwYNPAsxiogcR9Pd6xC3280H828mP3IVp9mPcL5jl/WJtjhotgAcbSu3QBGROsx0l3xDojLecPBmjBPbXfkuCrILKMwqoCArn4LsQgqzCynIKcTft4GLf9hwWd7rX/q+f9/eBAiisPpN/a+JGjeGFi08j+bNS/63USMtWihSS+ie9HLU5ZAOkJufxQeLr4HIQ3QK2s/ZIfutTwxqCs0WQnCTyi1QRERqJNM0KcotoiCrgILsAgqyCjzh/fiP/9dW4uOswpOer8Ufqyuz3OBf1psAJ18MsBBbVV39r24cDkhJKR3ej/03JqaqKxQRLymkl6Ouh3SAg6nb+XLjDdgcOfQK3k274CPWJzrOhKa/gj22UusTERE5nqvAVSrce/MmwMnOL8wprOqXJmWwU+Tl1X/f3gSwn7jtX00XF2cd3lu0gKZNPVPrRaRaUEgvh0K6x5rtP7PwyP3Y7C76hWyneVC69Ylh50PybLDpfikREaldTLdJYY71Ff5y3wT439X/8s7XoofVk4G73O3+fLsFoOTHFV74L1BsNkhKKvsqfGKiFtATqUQK6eVQSP/Tj8unsc18CztuLnFspqE92/rEqKuh8cdgaCsVERGRkzFN03P1v5xp/t5c/bc6vyivqKpfnpQhqIwp/2Wt+h9CARFkE42TaJxEklm5i/yFhXkCu1WIb94coqIqrxaROkAhvRwK6SV9PP8eMsPnEUIRg0I3Uc+WZ31i7N2Q+LLecRUREalCbpfbq3v9K/ImQHW7EFz3mESSVRzao8k47mPPI4pMgnBVTjnx8WVPpU9OhqDavbuIiL8ppJdDIb2kIlch0+dfiztqBxFGAZc7NhJpK+MevYRJUP/Byi1QREREAs40TYryirxe8O/4trLOP/a5q6CSQmUdEVEiyJd+ROEkmADPuLDbPfe8lzWVPj5eF3ZETqCQXg6F9NIysg7z8Yqh2MKdxBm5DArdhMMo43+ojd6FmJGVWp+IiIjUXK5CV+kgX06o9/ZNgMJsLfxXlvDjptFHlRHmQwjg1y8iwjq8H5tKHx4euOcWqaYU0suhkG5t+77lzNl9B7bgQhJtWVzi2EyQYfWjYYek/0DkwEqvUUREROQY021SmFvo1TT/Eqv6e3G+u6j2bwEXSm45V+Q9U+0dFATmyRMTy55K36SJ50q9SC2jkF4OhfSy/bbuU1blPothQIo9nX4h27BZzVQywqHpzxDWrdJrFBEREQm0Ywv/eXuvv7dvAhTl1qyF/xzklboiH3NCoHeQ599t7YKDoVmzsqfSx8VpKr3USArp5VBIL99Xvz3JoZCvAGgbdJhzQ3Zbn2iPh2YLIaR1JVYnIiIiUnO5XW4KczxBPi8jj8x9mTj3OIsfmXs8n2fsziDncE5Vl+uVYAosr8If/wgj139BPjq67Kn0KSkQqm2DpXpSSC+HQnr53G43788fTUHkagC6BO+jc/AB65ODm3uCelDDSqxQREREpPYryisqFeKLH7s9/806mFUjVuUPorDcxe6icRJOjn/2mm/c+M+p8ycG+UaNPPvHi1QBhfRyKKSfXG5+Fu8vvgYj8hBgcl7ILtoEHbU+2dEJmv4Cdu2lKSIiIlKZ
XAUuMvdbBPndf36ctT8L0139/7lvp4goMon535V4qwXvIsk+tSDvcHiutpc1lT4mxm+vR+RECunlUEj3zsHUbXy58UZsjlwMTC50bKWZ3Wl9cnh/SJ4FRkjlFikiIiIi5XIXuck6kFUc2jN2Z5SYWu/c48S514npqv6RwIaLKDLLXfAukixsFQ3ycXFlL2jXtCmE6N+6UnEK6eVQSPfe6m0/sujogxh2N3bcXOrYTKI92/rk6BHQ6H0wNIVIREREpCZxu9xkH8oudRX+xIe7sPqvem/gLhHkrRa8iyQTOz6+FpsNkpLKvgqfmKgF7aRcCunlUEj3zQ/LXmM7/wTAQRGXh24k1pZvfXK9+6DBc5VYnYiIiIhUBtNtknMkp/hK/ImL3R17FOXVhBXsTSLJKnfBuygyCcLl/ZBhYX/uA2+1N3yUbg2t6xTSy6GQ7ruP548nM3w+AJFGPoNDNxJulPEHuMHzUG9iJVYnIiIiItWBaZrkHs0t80r8sSv1hTmFVV2qVyJKBXnnCUHeSTBevikRH1/2VPrkZAgKCuyLkSqnkF4OhXTfFbkKmT5/GO6onQDUM3IYFLqJEKOMaUKNZ0D08EqsUERERERqAtM0yUvPswzwmXsyPVfqdzspyCqo6lK9EkZOuYvdReMkhJO8KWG3e+55L2sqfXy8ptLXAgrp5VBIr5iMrEN8vGIYtnDP4nGNbZlc7NiC3bD68QmG5G8hol/lFikiIiIitUK+M7/c7eece5zkpedVdZleCSX3JFvQZeCgnDclIiLK3hu+eXMID6+8FyMVppBeDoX0itu6dxk/7LkDW7BnWk8Leyr9HDusT7ZFebZmC+1UeQWKiIiISJ1RkFWAc2/ZId65x0nu0dyqLtMrDvJKLXZ34oJ3DvKwvJ6emFj2VPomTTxX6qXKKaSXQyH91Cxa+zGr854rnnFzRtAheobssT7ZngjNFkFI88orUERERETkfwpzC8nc++fidlZb0GUfKmP3omommALLq/DHfx5GbskgHxwMzZqVPZU+Lk5T6SuJQno5FNJP3ZeLHuew4z/Fn3cL3kuH4IPWJwe3hmYLICihkqoTEREREfFeUX4Rmfsyy92CLutAFhXdfr0yBVF4kqn1TsLJwTj2YqKjy55Kn5ICoaFV+npqE4X0ciiknzq32837826iIGrt/46Y9AnZSeugVOsOod2g6U9gi6i0GkVERERE/MVV6CJrf1a5W9Bl7svEdFf/aGWniCgyy13wLpJsT5Bv3Lj0FPpjHzdq5Nk/XryikF4OhXT/yM3P5P0l12BEHAbAhpsBjq0k2TOtO0RcAklfghFceUWKiIiIiFQSd5GbrANZ5W5Bl7k3E3dRGTskVSM2XESRWe5id5EhhdialzOVPiamql9GtaKQXg6FdP/Zf3QLX28aic3hWVkzGBeXOjaTYM+x7hBzEzT8l+57EREREZE6ye1yk30o23L7uWP3zGfuzcRV4KrqUk/KwF0iyJda8C7GILJlA+wtUkpfjW/aFEJCqvolVCqF9HIopPvXqq3f81vqXzHsnncEwyhkUOgmYmz51h3q/xUSnq7ECkVEREREag7TbZJzJKfc7eece5wU5RVVdaleMIkkq/SVeMNJdEIo0Sn1iDq9MUGtTrgKn5hY6y7sKaSXQyHd/+YsfYWdxrvFn0cbeVweuokwo4w/HIlTIe6uyilORERERKSWMU2T3NTcckO8c7eTwpzCqi7VKxHHBfkonMQE5RDdIJToprFEt04k6oymBJ/e8s+94aOiqrpknymkl0MhPTBmzBtLdsTC4s/jbdlc5thMsGF1z40BjT+F6Ksrr0ARERERkTrENE3yM/JLbT934oJ3+c4yZsBWM2Hk/G8qfQZRoQVEx4cQ3TiK6JbxRLdNIrpTS0LatfKsSl8NF7RTSC+HQnpgFBblM33BtZhRu4qPJdmcDHBswWY1U8VwQPIcCD+v8ooUEREREZES8p35OPeWvf2cc4+TvLS8qi7T
K6Hkct+RB7HXj63qUkrxJYcGVVJNUssFBzm4qvM0Pl05DFu4Z4X3Pe5ofiloxgWOnaU7mPmw53JoOg9Cz6zkakVEREREBMAR7SAhOoGEtgllnlOQXWC9Wn3xFfkMco7kVmLV1mwG1TKg+0ohXfwmLiqRfinP8eO+u7EFee5H3+KqT3hBId1D9pXu4M6APQOh2UIIblrJ1YqIiIiIiDdCIkKIbxNPfJv4Ms8pzC0kc2+m9dX4HWk4d6eTnVoQ0DqjwwI7fmVRSBe/apXUlYPp41mb/3zxgoyrihIJNwo5M/hw6Q5Fe2H3xdBsPtjrVW6xIiIiIiLiF8FhwdRrVY96rcr+N31RfhGZ+04I8rszyNx8iIwdqTj355CVUQRUbGX36Njqdy96RSiki9/1aj+CQ4s2cMQx639HDH4rTCLcKKRlUHrpDgXrYc8gSP4BbGGVWaqIiIiIiFSSIEcQcc3jiGseV+Y5rkIXWfuz/gzx246QsW4vmdsO49ybifNIIZlZBqZFkI9ODA9k+ZVGIV0CYnD3x3lv3nYKo9b974jB3IIUwowtNLZnle6QuxD2DYcmM8HQj6WIiIiISF1kD7YT0zSGmKYxZZ7jLnKTdTAL5+4MnOv24lyzC+emAzTt17oSKw0cre4uAZOT5+SD36/BiDhSfCwYF4NCN1HfVsbCErG3QeI0iufKi4iIiIiI1HC+5NDaMWlfqqXw0GguazsVd35o8bFC7MzOb0mmO8S6U/qbcPSpSqpQRERERESkelFIl4BqHN+acxIew3T9+aOWY4bwbX4r8ky7dacjj0H625VUoYiIiIiISPWhkC4B16HVRTQzri9xLMMM5bv8lhSZZUxrPzAGMr+uhOpERERERESqD4V0qRQDzh5PeNY5JY4dckfyY34L3JarIrhh3zDIWVgp9YmIiIiIiFQHCulSaYb2fAEjM7nEsV3uGOYXNLXuYOZ5tmbL31AJ1YmIiIiIiFQ9hXSpNMFBDq7s9Dru3KgSxze64vmjoJF1J3cq7B4AhfsqoUIREREREZGqpZAulapedCP6Np2Mu6jkXujLixqyrjDeulPRLthzMbjSA1+giIiIiIhIFVJIl0rXOrkbZ4SNxSxxL7rBwsJkdhTFWHfKXw17rwB3fmWUKCIiIiIiUiUU0qVKnHvm9dTPH1jimInBTwXNOeCKsO6UMxf23wCmO/AFioiIiIiIVAGFdKkyV5zzJEGZp5c45sLGnPyWpLlDrTtlfgaH7uGEy/AiIiIiIiK1gkK6VBmbzcaw7q9jZtcvcTyfIL7Nb0WWO9i6Y9orkDq5EioUERERERGpXArpUqXCQ6O59PRXcOc7ShzPNkOYnd+KfNNu3fHwg5DxXiVUKCIiIiIiUnkU0qXKNUloQ7f4RzDdRonjaWYYc/JbUGQa1h333wxZsyuhQhERERERkcqhkC7VQqfWA0k2ryt1/IA7ip8LUsq4Bb0I9l4Nub8HvD4REREREZHKoJAu1cbFZ99DeFa3Usd3uOJYUJhs3cnMhj2XQsGWAFcnIiIiIiISeArpUm0YhsHQnlMwMpNKta0vSmB5YUPrjq7DsHsAFB0McIUiIiIiIiKBpZAu1UpwkIMrO72OOzeyVNsfhY3YUFjfohdQuA12XwKuzABXKCIiIiIiEjgK6VLt1ItuTN+mk3EXBZ3QYjC/sCk7C2OtO+Yvg71XgVkQ6BJFREREREQCQiFdqqXWyd1pF3pXqeMmBj8VNuNQYbR1x5zvYf9oMN0BrlBERERERMT/FNKl2up91o3Uy7241PEi7MwuTCGjKMq6o/NDzz7qIiIiIiIiNYxCulRrV/R4iqDMNqWO5xPEt4VNyXNHWHdMfQ5SXwxwdSIiIiIiIv6lkC7Vms1m45pur2Fm1yvVlmk6mJWfTJEZat350ERwfhzgCkVERERERPznxJW5Tmr79u3MmzePnTt3kpOTQ0JCAp06daJHjx6EhpYRlkROQWRYLJe0eYVZ20ZjC8kv0ZZq
hjM7txmXhm/FoKh05303gj0BIvpVUrUiIiIiIiIV53VI//DDD3nppZf4448/SExMpHHjxoSFhZGamsrWrVsJDQ3luuuu44EHHqBZs2aBrFnqoKQGp9M1/W/84XwMw2aWaNtPBD9mtaR/5EaLnoWw9wpo+iuEdqyUWkVERERERCrKq+nunTp14uWXX+amm25i586d7N+/n6VLlzJ//nzWrVuH0+nkq6++wu12c/bZZ/PZZ58Fum6pgzqfdilJ7uGWbdttESzKamfd0Z0JuwdCwfYAViciIiIiInLqDNM0zZOd9N133zFgwACvBjx69Cg7duygS5cup1xcIDidTmJiYsjIyCA6uoxtvKTaMk2TD3+9g9yo3y3be1FEu/BV1p2DW0OzhRAUH8AKRURERERESvIlh3p1Jf1YQC8qKuK9997j4MGDZZ5bv379ahvQpeYzDIOhPadAZhPL9gXY2VfU2bpz4WbYcxm4swNXoIiIiIiIyCnwaXX3oKAgbr/9dvLy8gJVj8hJhQSHcmXH13DnRlq0GnyTZ+I0Olp3zlsMe4eCWRjIEkVERERERCrE5y3YunXrxooVKwJQioj36sckcX7SJNxF9lJtps1gZnowBfYy7lHP/gYOjIGT3+khIiIiIiJSqXzegu3OO+9k4sSJ7N69my5duhAREVGi/ayzzvJbcSLlOb1ZTw6m38km1yul2lzBLj45VJ/rElOwFe0o3TnjHQhqDAl/D3yhIiIiIiIiXvJq4bjj2WylL74bhoFpmhiGgcvl8ltxgaCF42qfz+Y/SHr495Zt9fIacWXCEgzXYevOia9C3J0BrE5EREREROo6X3Koz1fSt2/XNlZSvVzZ42mmz9uJK3pTqbbU0P3MzbiEC6JmgmmxYNzBuyGoIURdWQmVioiIiIiIlM/nK+k1na6k106ZOWnMWDoUW0SqZXvP0As4w/YSUFS60XBA8vcQ3juwRYqIiIiISJ3kSw6tcEhft24du3btoqCgoMTxyy+/vCLDVRqF9Npr98F1fLP9Fmwh+aXazCI7lze4mIZ5T1l3tsVCs3ngaB/YIkVEREREpM7x+z7px9u2bRsdOnSgffv2XHrppQwZMoQhQ4ZwxRVXcMUVV/hc7KuvvkpKSgqhoaF0796dJUuWlHv+lClTaNOmDWFhYSQnJzNhwgRtCScAJCe2o0vcQ5huo1SbEeTi631zyYn+q3VndzrsvhgKdwe2SBERERERkXL4HNLHjx9P8+bNOXToEOHh4axdu5Zff/2Vs88+m7lz5/o01ieffMLEiRN57LHHWLZsGR06dGDAgAEcOnTI8vwZM2bw4IMP8thjj7F+/Xr++c9/8sknn/DXv5YRvKTOObvNIJq4hlm2GaHZzNi6ClfMXdadi/bC7gHgsp4yLyIiIiIiEmg+h/RFixbx5JNPEh8fj81mw2azce655zJp0iTGjRvn01gvvPACt956K6NGjaJdu3ZMmzaN8PBw/vWvf1mev3DhQnr16sWIESNISUnhoosuYvjw4Se9+i51yyXd7iM0s4tlmxmxj0+3FUHUNdadC9bDnsvBnRvACkVERERERKz5HNJdLhdRUVEAxMfHs2/fPgCaNWvGxo0bvR6noKCApUuX0r9//z+Lsdno378/ixYtsuzTs2dPli5dWhzKt23bxjfffMMll1xS5vPk5+fjdDpLPKR2MwyDYT1fhszGlu1ZYb/zw6GzIbyP9QC5C2DfCDCr93aCIiIiIiJS+/gc0tu3b8/KlSsB6N69O5MnT2bBggU8+eSTtGjRwutxjhw5gsvlIjExscTxxMREDhw4YNlnxIgRPPnkk5x77rkEBwfTsmVL+vTpU+5090mTJhETE1P8SE5O9rpGqblCgkMZctZruHMjLNu3uT5ldeFd4DjLeoCsL+HgXVC3Nj8QEREREZEq5nNIf/jhh3G73QA8+eSTbN++nd69e/PNN9/w8ssv+73A482dO5dnnnmG1157jWXLlvHFF18wa9Ys
nnqqjBW7gYceeoiMjIzix+7dWhisrkiIS+a8Js/gLrKXajNsJgsPv8CBiFcgqKn1AOlvwNG/B7hKERERERGRP3m1BduqVato3749Npt1pk9NTSUuLg7DKL2qdlkKCgoIDw9n5syZDBkypPj4yJEjSU9P56uvvirVp3fv3pxzzjk899xzxcc++OADbrvtNrKyssqs73jagq3umbviX2x2v2rdmFOP6896mrADA8FdxoJxDd+C2FsCV6CIiIiIiNRqft+CrVOnThw5cgSAFi1acPTo0RLt9erV8ymgA4SEhNClSxd+/PHH4mNut5sff/yRHj16WPbJyckpFcTtds9V0gpu9y51QJ+Oo4nN6W/dGJ7Kpyun4G7yFRhh1uccGAOZ/wlcgSIiIiIiIv/jVUiPjY1l+/btAOzYsaN4uvupmjhxIm+99RbTp09n/fr13HHHHWRnZzNq1CgAbrzxRh566KHi8wcNGsTrr7/Oxx9/zPbt2/n+++955JFHGDRoUHFYF7FyZY+nsTtbW7YVRGzk6zWzofEnWP9KuGHfMMi1XtBQRERERETEX4K8Oemqq67i/PPPp1GjRhiGwdlnn11mKN62bZvXTz5s2DAOHz7Mo48+yoEDB+jYsSOzZ88uXkxu165dJa6cP/zwwxiGwcMPP8zevXtJSEhg0KBBPP30014/p9RNdnsQ13R7jRlLh2KLSCvVfjj4WxbuPp2ejd+AA7eWHsDMhd2XQbMF4Di9EioWEREREZG6yKt70gFmz57Nli1bGDduHE8++WTxNmwnGj9+vF8L9Dfdk1637Tywhtk7bsUWUlCqzXTZ6ddkKi1DZsORx6wHCGoGzRZCsPX2biIiIiIiIifyJYd6dSUd4OKLLwZg6dKljB8/vsyQLlKdNWvYnk5pD7Ai++8YtpLvTxl2Fz/u+Av1z/yI2Nh9ntXdT1S0E/YMhKa/gj2mkqoWEREREZG6wuct2N555x0FdKnRurUdQmPXNZZtRmgWn6+4g6L4FyFysPUA+atg7xBw5weuSBERERERqZN8DukitcGl3e4nNLOzZZs7Yg+fLb4XGn8EYT2tB8iZC/tvBNM/iyiKiIiIiIiAQrrUUYZhcM05L0FmI8v2rLDFfL/yTUj6D4S0tR4k81M4NAG0/Z+IiIiIiPiJQrrUWaGOcAaf9Rru3AjL9u2u91m963dIng1BTawHSXsZUp8LYJUiIiIiIlKXKKRLndYgrim9Gz+NWVR6S0HDZrLw4BPsz8zzBHVbGQvFHX4AMt4PcKUiIiIiIlIXVCikb926lbFjx9K/f3/69+/PuHHj2Lp1q79rE6kU7Zr3plXwbZZttpB8/rvubnKNZpD0FRgO60H2j4as7wJYpYiIiIiI1AU+h/TvvvuOdu3asWTJEs466yzOOussFi9ezBlnnMH3338fiBpFAq5vp1uIzu5r3Rh+lE+X3IE7tDc0+hAwLE4qgr1XQe4fgSxTRERERERqOcM0fVv1qlOnTgwYMIB//OMfJY4/+OCDzJkzh2XLlvm1QH/zZRN5qVtcriLenXcd7ugtlu0Nii5lcLcnIe1VOHi39SD2BGi2EEJaBbBSERERERGpSXzJoT5fSV+/fj0333xzqeOjR49m3bp1vg4nUm3Y7UFc0/U13Nmxlu2HgmaxaP0MiLsL6v/VehDXYdg9AIoOBq5QERERERGptXwO6QkJCaxYsaLU8RUrVtCgQQN/1CRSZaIj6jOg1Yu4C0Is21dnvsS2/X9A/N8h5ibrQQq3wZ5LwZUZuEJFRERERKRWCvK1w6233sptt93Gtm3b6NmzJwALFizg2WefZeLEiX4vUKSypTQ6i45p97My52kMW8m7QYygIn7Yfi9Doz4htuGbUHQIsr8pPUjeUth3tWefdcM68IuIiIiIiJzI53vSTdNkypQpPP/88+zbtw+Axo0b85e//IVx48ZhGFaLalUfuiddvPX1b5M4GDLTss2WnczInp8SZBTCrr6Qt8R6kOjr
odF0MLTboYiIiIhIXeVLDvU5pB8vM9MznTcqKqqiQ1Q6hXTxltvt5oN5t5IftcKyPSqvB9f2nApFh2FnLyjcbD1QvfuhwbOBK1RERERERKq1gC4cl5ubS05ODuAJ56mpqUyZMoU5c+ZUrFqRaspmszH0nFcwMxtatmeGLuKHla9AUAIkfwd26/NInQypUwJXqIiIiIiI1Bo+h/TBgwfz3nvvAZCenk63bt14/vnnGTx4MK+//rrfCxSpSqGOcIac+RruvHDL9m2F01mz43sIaQ7J34CtjFklhyaA8+MAVioiIiIiIrWBzyF92bJl9O7dG4CZM2fSsGFDdu7cyXvvvcfLL7/s9wJFqlqDes04t9HfMV32Um2GzWTBgcc4mLYFQjtBky+AYOuB9t0I2T8FtlgREREREanRfA7pOTk5xfegz5kzhyuvvBKbzcY555zDzp07/V6gSHVwRvPzaWm/xbLNFpLP12vvIrcgEyL6Q+PpZYxSCHuHQN6KQJUpIiIiIiI1nM8hvVWrVnz55Zfs3r2b7777josuugiAQ4cOaSE2qdX6db6N6Ow+1o3hR/hsyZ243W6IHg4NXrA+z50JuwdCwY5AlSkiIiIiIjWYzyH90Ucf5b777iMlJYXu3bvTo0cPwHNVvVOnTn4vUKQ6ubrns9icLSzb8sPX8d8/nvR8Um8C1LvXehDXAdgzAIqOBKhKERERERGpqSq0BduBAwfYv38/HTp0wGbz5PwlS5YQHR3N6aef7vci/UlbsMmpcmYd5aPlQ7FFpFu2nxVxP93bDAPTDftvAOcM64FCu0PTH8EWEbhiRURERESkylXaPuk1kUK6+MP2fSuYs+sObCEFpdrMoiAubPoazRt2AbMAdl8KOT9YDxRxKSR9CUZQYAsWEREREZEqE9B90kUEmjfuSIfo+zDdRqk2I6iI77fdS0b2ITBCPCu+O8q4FSR7FhwYA3XrvTIRERERESmDQrpIBZ3T7ioSC4dYthmhmXy+bAwudyHYozx7qAc3tx4o419w5NHAFSoiIiIiIjWGQrrIKRjU/a+EODtYtrkidjHzt/8tHhfUEJK/A3u89UBH/w5prweoShERERERqSkU0kVOgc1mY1iPVzAzEy3bnaEL+HHlq55PQlpD0jdghFsPdvAuyPwiQJWKiIiIiEhN4HNInz59OrNmzSr+/P777yc2NpaePXuyc+dOvxYnUhOEOiIY3P5V3HnW4Xtr4Tus3fmj55OwrtBkJmC3ONOEfSMgZ17AahURERERkerN55D+zDPPEBYWBsCiRYt49dVXmTx5MvHx8UyYMMHvBYrUBIn1m9Or4ZOYrtLh27CZzN//KAfTtnoORA6ERv+0HsjMhz2XQ/7aAFYrIiIiIiLVlc8hfffu3bRq1QqAL7/8kquuuorbbruNSZMmMW+ergBK3dW+xQU0t422bLOF5PH12jvJL8z2HIgZCQmTrAdyp8Pui6Fwd2AKFRERERGRasvnkB4ZGcnRo0cBmDNnDhdeeCEAoaGh5Obm+rc6kRrmwi63E5l1nnVj+BE+WXwHbrfb83m9ByD2butzi/Z4grorLTCFioiIiIhIteRzSL/wwgu55ZZbuOWWW9i0aROXXHIJAGvXriUlJcXf9YnUONf0nIzhtN5uLT98LbOW/t3ziWFA4hSIutp6oIJ1nqnvbr35JSIiIiJSV/gc0l999VV69OjB4cOH+fzzz6lfvz4AS5cuZfjw4X4vUKSmCQoK5pour+POjrFs32/7iiUbP/N8Ytih0fsQdr71YLnzYd91YLoCVK2IiIiIiFQnhmmapi8ddu3aRVJSEjZbyXxvmia7d++madOmfi3Q35xOJzExMWRkZBAdHV3V5UgttnXvcn7YfQe2kMJSbWZREAOavUGzxI6eA6502HUe5K+2Hiz2dkh8zXP1XUREREREahRfcqjPV9KbN2/OkSNHSh1PTU2leXPrKb4idVHLJp04K/JeTHfpYG0EFfHd1gk4sw97
DthjIelbCCrjTa70aXD06cAVKyIiIiIi1YLPIb2sC+9ZWVmEhoaeckEitUmP9tfQoGCwZZsR6mTm8jG43P+70h7cBJJngy3OerAjj0B6GVu3iYiIiIhIrRDk7YkTJ04EwDAMHn30UcLDw4vbXC4XixcvpmPHjn4vUKSmu/ycv/Her1spjC49ld0VvpPPF/+FoT2meA442kLSf2F3PzDzSg92YAwEJULkZYEtWkREREREqoTXV9KXL1/O8uXLMU2T1atXF3++fPlyNmzYQIcOHXj33XcDWKpIzWSz2Rh2zlTMzAaW7RmOefy06vU/D4T3hMafYP3r6YK9QyH3t4DUKiIiIiIiVcvnheNGjRrFSy+9VGMXXdPCcVJVDhzZxlebbsQWWnpLNdNt49wGz9Kuad8/D6a/6blybsVeH5ouAEebAFUrIiIiIiL+EtCF4yZPnlzmoKtXl7EytYjQML4FPROfxHSV/rUzbG7m7XuEQ+nb/jwYexvEP249mOso7B4AhfsCU6yIiIiIiFQJn0P6mWeeyaxZs0od/7//+z+6devml6JEaqszW/YlxRhl2WYLyePrNXeRX5j958H6j3rCupWinbDnEnBlBKBSERERERGpCj6H9IkTJ3LVVVdxxx13kJuby969e+nXrx+TJ09mxowZgahRpFa56Ow7icg817LNDD/Ep4vv+nMXBcOAxFch8nLrwfJXwt4rwJ0foGpFRERERKQy+RzS77//fhYtWsS8efM466yzOOuss3A4HKxatYorrrgiEDWK1DrX9HoOw5li2ZYXvppZS4/bE90IgsYfQVhP68Fyfob9N4Lp9n+hIiIiIiJSqXwO6QCtWrWiffv27NixA6fTybBhw2jYsKG/axOptYKDQriq82u4c6zXd9hn/JvfN83884AtHJL+AyFtrQfM/BQOTQTf1oEUEREREZFqxueQvmDBAs466yw2b97MqlWreP311xk7dizDhg0jLS0tEDWK1Epx0Yn0S/k/3IXBpdoMA5anP8euQyv/PGivB8mzIaix9YBpL0Hq/wWoWhERERERqQw+h/S+ffsybNgwfvvtN9q2bcstt9zC8uXL2bVrF2eeeWYgahSptVoldaF9+ATLC+BGUBGzt9xDZu6RPw8GN4Wk2WCLsR7w8P2Q8X5gihURERERkYDzOaTPmTOHf/zjHwQH/3n1r2XLlixYsIAxY8rY01lEytTrzGEk5FkvDGeEOpm5dAwud+GfB0PPhKSvwAixHnD/aMieE4BKRUREREQk0AzTrNhNrFu2bGHr1q2cd955hIWFYZomhmH4uz6/82UTeZHK4na7ee/XURRGr7Fsjy04n2vOeaHkQedM2DcUsLoMHwFN50LY2X6vVUREREREfONLDvX5SvrRo0fp168fp512Gpdccgn79+8H4Oabb+a+++6rWMUidZzNZmPYOVMxsxIs29NDfmHu6jdKHoy+GhJfth7QzPbsoV6wxc+VioiIiIhIIPkc0idMmEBwcDC7du0iPDy8+PiwYcP49ttv/VqcSF0SFhrFZW2n4s4Ls2zflP8263f/UvJg3N1Q/yHrAV2HYffFUHTIz5WKiIiIiEigVOie9GeffZakpKQSx1u3bs3OnTv9VphIXdQ4oRXdEx7DdJX+1TRsbubt/RtHMk74PYt/GqJHWg9YuNVzRd2dFYBqRURERETE33wO6dnZ2SWuoB+TmpqKw+HwS1EidVnH1hfSDOvQbYTk8uXq28kvzD7uoAGN3oKIgdYD5i2FvVeBWRCAakVERERExJ98Dum9e/fmvffeK/7cMAzcbjeTJ0/mggsu8GtxInXVRWffRURmT8s2M/wQny25mxJrPhrB0ORTCO1qPWD2HNh/C5Z7vYmIiIiISLXh8+rua9asoV+/fnTu3JmffvqJyy+/nLVr15KamsqCBQto2bJloGr1C63uLjVFYVEB0+dfixltfRtJE/MqLuny15IHiw7Dzp5QWMaCcfUegAb/8HOlIiIi
IiJSnoCu7t6+fXs2bdrEueeey+DBg8nOzubKK69k+fLl1T6gi9QkwUEhXNX5ddw51r/Ee/icPzZ/UfJgUAIkfwf2ROtBU5+F1Jf8XKmIiIiIiPiLz1fSd+3aRXJysuWe6Lt27aJp06Z+Ky4QdCVdapote5by4967sAUXlmozi4K5OOVNmjY4q2RD3nLYdV4ZC8YZ0PgjiB4WmIJFRERERKSEgF5Jb968OYcPHy51/OjRozRv3tzX4UTkJFoldeGM8PGWt5MbQYXM3nwPmblHSjaEdoImXwDBFiOasP9GyP45EOWKiIiIiMgp8Dmkm6ZpeRU9KyuL0NBQvxQlIiWde+Zw4vMus2wzwjKYufR2XO6ikg0RF0Kjd60HNAtg7xDIW+nXOkVERERE5NQEeXvixIkTAc9q7o888kiJbdhcLheLFy+mY8eOfi9QRDyG9HiM6b9uoyh6Xam2ovDt/HvJA1x9zvMlG2JGQNF+OHxf6QHdTtgzEJouhJCUwBQtIiIiIiI+8TqkL1++HPBcSV+9ejUhISHFbSEhIXTo0IH77rMIAiLiFzabjWHdX+X934diiyx9y0layFx+WfMW57e/tWRD/XuhaB+kvVB60KL9sOdiaLYA7PUDVLmIiIiIiHjL54XjRo0axUsvvVRjF13TwnFS0+09tIn/bh2FzZFXqs102Tiv4fOcnnzeCQ1u2H89OD+yHjT0HGj6I9jCrdtFRERERKTCArpw3DvvvKNwK1KFmjQ4jW71H8V0lf71Nexuft3zV45k7Dqhwea5Pz28v/Wgeb/B3mFgFlm3i4iIiIhIpfA5pGdnZ/PII4/Qs2dPWrVqRYsWLUo8RCTwOp02gKbmDZZthiOXL1ffTkFR7gkNIdDkc3B0tB40+79w4HYsl5EXEREREZFK4fU96cfccsst/PLLL9xwww00atTIcqV3EQm8AV3H8uEvG8mN/q1Umxl+kM8W382Inm+X/B21R0Pyt7CzJxRuLz1oxj8hqDEkPBnAykVEREREpCw+35MeGxvLrFmz6NWrV6BqCijdky61SUFhPtMXXAvRuyzbk7iGgZ0ftOi4CXb2AteR0m0Aia9D3O1+rFREREREpO4K6D3pcXFx1KtXr8LFiYj/hAQ7uKrTa7hzoizbd5ufsXTLlxYdT4OkWWCUsVDcwbsg89/+K1RERERERLzic0h/6qmnePTRR8nJyQlEPSLio3oxjbig6WTchaXvXjEMWHr0H+w+vLp0x7Bu0OQzwG4xqhv2DYec+X6vV0REREREyubzdPdOnTqxdetWTNMkJSWF4ODgEu3Lli3za4H+punuUlv9uvIDNhS9iNUyEWZuLCO6fEZkqMUsmPR34cAo60FtsdBsPjjO8GepIiIiIiJ1ii851OeF44YMGVLRukQkgM7rcD2H5m8kLfybUm1GWDoz/xjDDT0/wm474dc+9iZw7YfDfy09qDsddl8MzRZBcFJA6hYRERERkT/5fCW9ptOVdKnNXC4X038diStmvWV7vcK+XNX9udINpgkHx0L6q9YDh5wBzeaBPc6P1YqIiIiI1A0BXTjumKVLl/LBBx/wwQcfsHz58ooOIyJ+ZLfbGdr9VdxZ8ZbtqcE/MW/tP0s3GAYkvgRRV1kPXLAW9gwGd54fqxURERERkRP5HNIPHTpE37596dq1K+PGjWPcuHF06dKFfv36cfjw4UDUKCI+iAyP4dI2L+POD7VsX58zjU17LBaEM+zQ6AMIO8964Nx5sO86MF1+rFZERERERI7nc0gfO3YsmZmZrF27ltTUVFJTU1mzZg1Op5Nx48YFokYR8VFSYhu61nsY01X6V9ywu5m7+yGOOneX7mgLhaSvwNHeeuCsL+DgOM/0eBERERER8Tuf70mPiYnhhx9+oGvXriWOL1myhIsuuoj09HR/1ud3uidd6pJvFr/I3uAPLNuMnIbceM5MQoLCSjcW7oGdPaHIIsgDxP8d4v/mx0pFRERERGqvgN6T7na7S227
BhAcHIzb7fZ1OBEJoIHd7iHM2d2yzQw/wMwl47B8ny44CZJng62MheKOPAzp//JjpSIiIiIiAhUI6X379mX8+PHs27ev+NjevXuZMGEC/fr182txInJqDMNgaK8XwJls2Z4duozvVlis9g7gaAdJ/wHD+t52DtwGWbP8VKmIiIiIiEAFQvrUqVNxOp2kpKTQsmVLWrZsSfPmzXE6nbzyyiuBqFFETkFIcChXdnwNd06UZfsu9ycs3/q1defwXtD4Y6z/VLhg7zWQu9hvtYqIiIiI1HUV2ifdNE1++OEHNmzYAEDbtm3p37+/34sLBN2TLnXVxp2/MffAeGzBRaXazMJgLm35L5rEt7PunPYGHLzdus1eH5ouAEcbP1YrIiIiIlJ7+JJDKxTSazKFdKnLflnxPhtdUzAMi8bcWEZ0mUlEaBn3oR9+HI4+Yd0WnALNFkJQIz9VKiIiIiJSewR04bhx48bx8ssvlzo+depU7rnnHl+HE5FKdH7HG6iXc7F1Y1g6n/0xBpe79JV2AOIfg5hbrdsKd8DugeDK8EudIiIiIiJ1lc8h/fPPP6dXr16ljvfs2ZOZM2f6pSgRCZwrej6JPeN0y7bC8K189XsZW6sZBjR8DSIHWbfnr4S9V4I730+VioiIiIjUPT6H9KNHjxITE1PqeHR0NEeOHPFLUSISOHa7naHdX8OdVd+y/WjwD8xf9451ZyPIs5BcWA/r9pyfYP9IMLUdo4iIiIhIRfgc0lu1asXs2bNLHf/2229p0aKFX4oSkcCKDI/hktNewp3vsGxfl/0am/cutO5sC/dszRZifTWezE/g0L1Qt5a7EBERERHxiyBfO0ycOJG7776bw4cP07dvXwB+/PFHnn/+eaZMmeLv+kQkQJIbtqVL+t9Ylvk4hr3klW/D7ubnXQ9QP/oj6kUlle5srw/Js2FnTyjaV7o9bQoENYH69wWmeBERERGRWqpCq7u//vrrPP300+zb5/nHeUpKCo8//jg33nij3wv0N63uLlLSrMUvsC/4Q8s2W04jbjxnJsFBodad81bBrt7gdlq3N3ofYq73U6UiIiIiIjVTpW3BdvjwYcLCwoiMjKzoEJVOIV2kJNM0+eCXO8mLXmLZHpHXheE93sCw3LcNyJ4LewaAWWDRGATJsyDiIr/VKyIiIiJS0wR0C7bjJSQk1KiALiKlGYbB0J4vgLOJZXt26FLmrHi+7AEi+kCjDwCrEF8Ee6+CvKX+KFVEREREpNY7pZAuIrWDIySMIR1ew51j/abbTvdHrNj237IHiL4GGrxk3ebOgt2XQMFWP1QqIiIiIlK7KaSLCAAJcUmclzQJd2Hp9SQNA5Ycfpp9R9eXPUC9sVDvQes21yHYPQCKDvmpWhERERGR2kkhXUSKtU3pSZuQOyx3TzOCC5i1YSw5+ellD5DwDESXsYBk4VbYc6nnyrqIiIiIiFjyKaQXFhbSr18/Nm/eHKh6RKSK9el0E7HZZSz0FpbGZ3/cjstdZN1uGNDobYi42Lo97w/YezWYhf4pVkRERESklvEppAcHB7Nq1apA1SIi1cRVvf6OPaONZVtB2Ga+/uORsjsbwdDkMwg927o9+zvYfwuWl+tFREREROo4n6e7X3/99fzzn/8MRC0iUk3Y7Xau6fYq7qz6lu1HguawYP30sgewRULSLAhuZd3ufA8O/9UPlYqIiIiI1C4+h/SioiJef/11zj77bMaMGcPEiRNLPHz16quvkpKSQmhoKN27d2fJEuu9mo9JT0/nrrvuolGjRjgcDk477TS++eYbn59XRMoXFRHHxa2n4M53WLavzXqVLfsWlT1AUANI/g7sDazbU/8BqS/7oVIRERERkdqj9DLOJ7FmzRo6d+4MwKZNm0q0GYbVPsll++STT5g4cSLTpk2je/fuTJkyhQEDBrBx40YaNCj9D/uCggIuvPBCGjRowMyZM2nSpAk7d+4kNjbW15chIl5o1qgdndMfYnn2Exi2ktPTDbuL
n3Y+QP3oj4iLtN5jnZAWkPwN7OpjvWDcoXsgqCFED/V77SIiIiIiNZFhmlV3Y2j37t3p2rUrU6dOBcDtdpOcnMzYsWN58MHSWzlNmzaN5557jg0bNhAcHFyh53Q6ncTExJCRkUF0dPQp1S9SV/x30XPsd3xs2WbLacyN53xGcFBo2QNkf+/ZKx2LBeeMEEiaDREX+KdYEREREZFqxpccWuEt2LZs2cJ3331Hbm4uAL5m/YKCApYuXUr//v3/LMZmo3///ixaZD2F9uuvv6ZHjx7cddddJCYm0r59e5555hlcLleZz5Ofn4/T6SzxEBHfXHrOfTgyrBeCc4fvY+aSe8r/GxBxITR617rNLIC9QyBPi1KKiIiIiPgc0o8ePUq/fv047bTTuOSSS9i/fz8AN998M/fee6/X4xw5cgSXy0ViYmKJ44mJiRw4cMCyz7Zt25g5cyYul4tvvvmGRx55hOeff56///3vZT7PpEmTiImJKX4kJyd7XaOIeBiGwbBeUzCd1tPas0J/54eVL5Y/SMx1kPCcdZvbCXsuhsKdp1ipiIiIiEjN5nNInzBhAsHBwezatYvw8PDi48OGDWP27Nl+Le5EbrebBg0a8Oabb9KlSxeGDRvG3/72N6ZNm1Zmn4ceeoiMjIzix+7duwNao0ht5QgJ44oOr+LKjbRs3140g1Xbvy1/kHr3QtwE67ai/bB7ALiOnmKlIiIiIiI1l88hfc6cOTz77LMkJSWVON66dWt27vT+Klh8fDx2u52DBw+WOH7w4EEaNmxo2adRo0acdtpp2O324mNt27blwIEDFBQUWPZxOBxER0eXeIhIxSTEJXNe42dwF5Vec9Kwmfx26En2p24sewDDgAb/B1HXWrcXbITdl4E7x08Vi4iIiIjULD6H9Ozs7BJX0I9JTU3F4bDeqslKSEgIXbp04ccffyw+5na7+fHHH+nRo4dln169erFlyxbcbnfxsU2bNtGoUSNCQkJ8eBUiUlHtmveiddAYyzYjuID/rr+LnPz0sgcwbJ7708P7Wrfn/Qb7rgXTYpE5EREREZFazueQ3rt3b957773izw3DwO12M3nyZC64wLfVmSdOnMhbb73F9OnTWb9+PXfccQfZ2dmMGjUKgBtvvJGHHnqo+Pw77riD1NRUxo8fz6ZNm5g1axbPPPMMd911l68vQ0ROQd/Oo4nO6m/dGJbGZ3/cgdtd9oKO2BzQ5N/g6GDdnvUfOHAHVN3mEyIiIiIiVcLnfdInT55Mv379+OOPPygoKOD+++9n7dq1pKamsmDBAp/GGjZsGIcPH+bRRx/lwIEDdOzYkdmzZxcvJrdr1y5stj/fR0hOTua7775jwoQJnHXWWTRp0oTx48fzwAMP+PoyROQUXd3rGd79dSfumM2l2grCNvH10kcZ0vXpsgewR0Pyt7CzJxTuKN2e8TYENYaEJ/xXtIiIiIhINVehfdIzMjKYOnUqK1euJCsri86dO3PXXXfRqFGjQNToV9onXcR/MrNTmbF0GLbIVMv29uET6HH69eUPUrAJdvYC1xHr9sRpEGc9vV5EREREpCbwJYdWKKTXZArpIv61Y98avtt5GzZHfqk202Wnf9JUWjTqVv4guYthV18wrRaMs0GTzyFqiF/qFRERERGpbL7kUJ/vSW/VqhWPP/44mzeXnuIqInVPSuP2dIp5ANNtlGoz7C5+2HEf6dn7yx8krDs0+RSwWzS6Yd9wyJnvl3pFRERERKozn0P6XXfdxaxZs2jTpg1du3blpZde4sCBA4GoTURqiG7tBtOw4BrLNsORzefLx1Dkst4msVjkpdDwLes2Mw/2DIL8dadYqYiIiIhI9eZzSJ8wYQK///47GzZs4JJLLuHVV18lOTmZiy66qMSq7yJStwzqcT+OjM6Wbe7wvcxcMp6T3l0TOwri/27d5k6H3RdD4Z5TK1REREREpBrzyz3pv/32G3fccQerVq3C5Spn26VqQPekiwROfkEu0xcOxYje
Z9ne3H4D/TvcU/4gpgkH74b016zbHe2h6a9gjzu1YkVEREREKklA70k/3pIlS7jnnnu44oor2LRpE9dcYz3dVUTqBkdIGEPOehV3boRl+7bCD1i9Y3b5gxgGJL4MkVdat+evgT1DwJ13asWKiIiIiFRDPof0TZs28dhjj3HaaafRq1cv1q9fz7PPPsvBgwf5+OOPA1GjiNQgDeo15dxGT+MuKr0InGEzWXTwCQ6kbip/EMMOjT+EsPOs23N/hf3Xg1m9Z+6IiIiIiPjK5+nuNpuNrl27MmLECK699loSExMDVVtAaLq7SOX4cenbbDNet27MrccNXT8nNOQkv4OuNNjZGwrWWrfH3gWJr3iuvouIiIiIVFMB3Sd98+bNtG7d+pQKrEoK6SKV5+Nf/kJm1E+WbY7c07m+x3vYbFbbrh2ncA/s7AFFZSwYF/80xP/1FCsVEREREQmcgIb0Y5YuXcr69esBaNeuHZ07W6/qXN0opItUnqKiIt6ddx1mzBbL9kT3JVx+9lMnHyh/Hew8F9xp1u0N/+VZGV5EREREpBoK6MJxhw4d4oILLqBr166MGzeOcePGcfbZZ9OvXz8OHz5c4aJFpPYJCgpiaNfXcGdZr8R+0PYNv2388OQDOdpB0tdghFq3H7gVsr45hUpFRERERKoHn0P62LFjycrKYu3ataSmppKamsqaNWtwOp2MGzcuEDWKSA0WHVmfi1q+gLsgxLJ9lfMlth/4/eQDhZ8LjT/C+s+WC/ZeA7mLT6lWEREREZGq5vN095iYGH744Qe6du1a4viSJUu46KKLSE9P92d9fqfp7iJV47c1/2ZV3tMYttJ/csz8SIae9TGxEY1OPlDaNDh4h3WbvT40Wwghp51itSIiIiIi/hPQ6e5ut5vg4OBSx4ODg3G73b4OJyJ1xDntryCx4CrLNsORxRcrbqfIVXDygeJuh/qPWre5jsLuAVB04BQqFRERERGpOj6H9L59+zJ+/Hj27dtXfGzv3r1MmDCBfv36+bU4EaldLu/xICEZnSzbXGF7+HzJBO8Gin8cYm6xbivcAbsHgstZoRpFRERERKqSzyF96tSpOJ1OUlJSaNmyJS1btqR58+Y4nU5eeeWVQNQoIrWEYRgM6/kyptN6WrvT8Rs/rnrZm4Gg4esQcZl1e/4K2HsluPMrXqyIiIiISBWo0BZspmnyww8/sGHDBgDatm1L//79/V5cIOiedJGqd/DoDv694UbsYdml2ky3Qc+EZ2jf7KKTD+TOgV39IO836/aoa6Hxh2D4/H6kiIiIiIjfVMo+6TWVQrpI9bBm6y8sOPoXbEGuUm1moYPBp00nMa71yQdyHYWdvaBgo3V73ERIfP4UqxURERERqbiALhwnIuIP7VueT0ub9X3lRnA+X6+7i7yCzJMPZK8PSbMhqIyV4dNegKMK6SIiIiJSMyiki0iV6X/2bUQ6+1g3hh3l09/vwDS92DUiJAWSvgVbGe9KHr4PMj6saJkiIiIiIpVGIV1EqtQ15/4DI72lZVt+2Hr+s/Rx7wYK7QBNvgQjxLp9/yjIml2hGkVEREREKotCuohUqaCgYK7u+iqurDjL9oO2WSze9JF3g0VcAI3eBwyLxkLYM9Czj3rWt+DNFXoRERERkUrm1cJxTqf3+w1X98XYtHCcSPW0bc8Kvt9zB7aQglJtpsvORU1fJyWxi3eDpb4Mh8aXf07I6RA3HmJuBFt4BSoWEREREfGO31d3t9lsGIbVlanSXK7SKzVXJwrpItXXotWfszp/Eoat9J8lMz+Sazt8SnR4oneDHXoQUp89+Xm2ehB7G8TdDcFNfKxYREREROTkfMmhQd4M+PPPPxd/vGPHDh588EFuuukmevToAcCiRYuYPn06kyZNOoWyRaSu63HmVRxcsIHDYV+UajMcWcxcPoaRPT7Dbgs++WAJk6BoHzjfL/88dyqk/gNS/w+ir4G4CRDWtYKvQERERETk1Pi8T3q/fv245ZZbGD58
eInjM2bM4M0332Tu3Ln+rM/vdCVdpHozTZPpc2+mMGalZXtMQU+GnvOKl4MVwZGnIPU5MHO9LyKspyesRw0Bw6v3MkVEREREyhTQfdIXLVrE2WefXer42WefzZIlS3wdTkSkBMMwGNbjFUxnQ8v2jJCF/LR6qpeDBUHCE9BqNyQ8A0GNveuXuxD2XQNbW3n2WHdleFm9iIiIiMip8TmkJycn89Zbb5U6/vbbb5OcnOyXokSkbgsLjeDy9lNx5Vov6LYl/13W7vrB+wHt9aH+Q9ByBzT6EEJLv9FoqWinZ4/1rUlwcBwUbPH+OUVEREREKsDn6e7ffPMNV111Fa1ataJ79+4ALFmyhM2bN/P5559zySWXBKRQf9F0d5GaY9Xmn1mU9gC2oNILUpqFDoa0eZ8GsdZ7rJfLND1Xy9OmQOYXgLfbsRkQOcgzFT78fPByQU0RERERqdsCOt39kksuYdOmTQwaNIjU1FRSU1MZNGgQmzZtqvYBXURqlrNaX0BzY7RlmxGcz9dr7ySvIMv3gQ0DwntBk8+gxVaody/YvHnTzoSsr2H3BbCjM2RMB3e+788vIiIiIlIGn6+k13S6ki5Ss5imyUe/3Et29C+W7aF57bi+x3QMw+f3HEtyZULGu5D2EhRu9b6fPRHi7oTY2yGowanVICIiIiK1UkCvpAPMmzeP66+/np49e7J3714A3n//febPn1+R4UREymQYBkPPfRYjvYVle17oOmYte+rUn8geBfXGQouN0OQrCO/jXT/XQTjyGGxtCvtvhrzVp16LiIiIiNRZPof0zz//nAEDBhAWFsayZcvIz/dM9czIyOCZZ57xe4EiIkFBwVzd9TVcWbGW7fuNr1my6RP/PJlhh6jLoenPkLIcokeCEXLyfmY+ZPwLdpwFu/pD1n/B9PZedxERERERD59D+t///nemTZvGW2+9RXBwcPHxXr16sWzZMr8WJyJyTGxUAv2b/x/uAuvAvCL9BXYe8vPfoNCO0PhdaLkT6j8K9gTv+uX8CHsGwbbTIe1VcFfgvnkRERERqZN8DukbN27kvPPOK3U8JiaG9PR0f9QkImKpVXInzoyYiOkuvaq6EVTEd1sn4sw55P8nDmro2W+95S5o+E9wnOldv8LNcPBu2JIMh+6Hwt3+r01EREREahWfQ3rDhg3ZsqX0XsHz58+nRQvre0ZFRPyl55nXkJA32LLNcGTy+fIxuNyFgXlyWyjEjoaUlZD8A0Rc6l0/dzqkPgdbm8PeayH3t8DUJyIiIiI1ns8h/dZbb2X8+PEsXrwYwzDYt28fH374Iffddx933HFHIGoUESlhcM+/EZx+lmVbUdguvlhyX2ALMAyI6AfJ//UsNBd7JxjhXnR0QeYnsLMH7DgHnJ+AWRTYWkVERESkRvF5CzbTNHnmmWeYNGkSOTk5ADgcDu677z6eesoPKywHmLZgE6kdcnKzeH/xUGzRBy3bW4fcTJ/2d1ZeQa40SH8L0l6Boj3e9wtKhri7IfZWsMcFrj4RERERqTK+5NAK75NeUFDAli1byMrKol27dkRGRlao2MqmkC5Se+w7vJWvN92EPSynVJvpNuidOJm2yX0rtyizEDK/gNQpkOfDtHYjHGJugnrjIeS0QFUnIiIiIlWgUkJ6TaWQLlK7rNz0I4szHsKwu0q1mQWhDGn7Pg1iqmi9jNzfPGE9cyZQur4yRVwK9SZAeF/P1HoRERERqdECGtKvuOIKDIt/NBqGQWhoKK1atWLEiBG0adPGt6oriUK6SO3z3ZLX2BX0T8s2IzeBG7p9jiM4opKrOk7hLs9WbOlvehaR85bjTIi7B6JHeBatExEREZEayZcc6vPCcTExMfz0008sW7YMwzAwDIPly5fz008/UVRUxCeffEKHDh1YsGBBhV+AiIgvLup6B+EZvS3bzLDDfPb7nVTppKHgptDgWWi1GxJfheDW3vXLXw0HboatTeHwY1B0ILB1ioiIiEiVq9AWbCNGjGDb
tm18/vnnfP7552zdupXrr7+eli1bsn79ekaOHMkDDzwQiHpFREoxDIOh506GjOaW7bmha/hm+d8ruSoLtkiIuxNabICk/0J4P+/6uQ7D0SdhazPYdxPkrQhklSIiIiJShXye7p6QkMCCBQs47bSSCxtt2rSJnj17cuTIEVavXk3v3r1JT0/3Z61+oenuIrVXmvMQn6y8FntEhmV7h6j76dZ6WCVXdRJ5qyFtCjg/BDPf+37hfTxT4SMvA8MeoOJERERExB8COt29qKiIDRs2lDq+YcMGXC7PwkihoaGW962LiARSXHQD+jV7DndBiGX7yszJ/GvhIOZveJP8oqxKrq4MoWdCo39Cy10Q/wTYE73rlzMX9g6BbW0g9RVwZQayShERERGpJD6H9BtuuIGbb76ZF198kfnz5zN//nxefPFFbr75Zm688UYAfvnlF8444wy/FysicjKtm3bhjLB7MN3WbxS6QvexPucNpi/vx2dL7mTHkd+q9n71Y4IaQPyj0HInNHoXHB2861e4FQ6Ng63JcOg+KNgRyCpFREREJMB8nu7ucrn4xz/+wdSpUzl48CAAiYmJjB07lgceeAC73c6uXbuw2WwkJSUFpOhToenuInXD5/OeIDXia6/OtRXUp0X0pXRvdQPhIfUCXJmXTBNyfoG0FyHrP4C3f6ptEHWlZyp8WE9t4SYiIiJSDVTaPulOpxOgRoVdhXSRusHlcvHeLzdTFLva6z6m20a0+0w6N72OVol9sFWXe70LtkDay5D+LzCzve8X2vV/W7hdA0ZwwMoTERERkfJVWkiviRTSReqOvPxcvlj4MJmR87DZXT71NQqjaRpxId1bjSQmtEmAKvSRKx3S/wlpr0DRTu/7BTWGuLsh9jaw1w9YeSIiIiJiLeAhfebMmXz66afs2rWLgoKCEm3Lli3zdbhKpZAuUvccOLyLhevf46D7F4KiU33qa5oQ4WpDx6ThnN7wYuy2anBF2iyCzC89q8LnLvC+nxEGMTdC3HhwtA1UdSIiIiJygoCu7v7yyy8zatQoEhMTWb58Od26daN+/fps27aNgQMHVrhoEZFAaZjQlCvPe5jbes+mQ/DfsR/tiLvAu7BtGJATtJGFBx7nX39cwDerHuFozpYAV3yyooIg+mpoNh+aLYboEUDQyfuZuZD+BmxvB7sHQvYcz7sQIiIiIlJt+Hwl/fTTT+exxx5j+PDhREVFsXLlSlq0aMGjjz5KamoqU6dODVStfqEr6SICcOToARas/YB9hT8RFHfQ5/6hRSm0b3g17ZsMJtgeHoAKfVS4B9Jfg7Q3wO3DbIGQdlDvHoi+HmxhAStPREREpC4L6HT38PBw1q9fT7NmzWjQoAHff/89HTp0YPPmzZxzzjkcPXr0lIoPNIV0ETmey+Vi1cb5rNjzCfkRK7GH5fk4QDCJIefStfkNNIw6C6OqV1N350DG+56p8AUbvO9nrw+xt0PsnRDcOGDliYiIiNRFAZ3u3rBhQ1JTPVdpmjZtym+//QbA9u3bq8dewyIiPrDb7XRqdz6jLnqNoe2+plH2aAqPJJW5z3rpAQo56PqZ/24ZzfQ/LmPJjn+SV5Qe0JrLZQuHuDHQfC0kfQsRF3nXz3UUjj4NW1Ng3w2QtzSgZYqIiIiINZ+vpN9yyy0kJyfz2GOP8eqrr/KXv/yFXr168ccff3DllVfyz3/+M1C1+oWupIvIybjdbtZu+p1lOz8iJ3wZQRE+bHsG4LZTP+hszk65geSY7hiGz++H+lf+Wkh9CZzvg+nDTIGw3p6p8JGDobpsRyciIiJSAwV0urvb7cbtdhMU5Fmk6OOPP2bhwoW0bt2aMWPGEBISUvHKK4FCuoj4IiMjg4WrZ7I9czZB8Tsw7G6f+ttdcZxWbxAdk68lMiQxQFV6qegwpL8J6a9C0X7v+wWnQNw4iLkZ7Pq7KSIiIuKrgIX0oqIinnnmGUaPHk1SUtIpF1oVFNJFpCLcbjcbt67mj60fkelY
QnBMhm8DmAaxtjPp3PQ6mtc7H5tRhVu5mQXg/BRSX4R8H7bNtEV5gnrcWAhpEbj6RERERGqZgF5Jj4yMZM2aNaSkpJxKjVVGIV1ETlVWVhaLVv6HLWn/wZawFVtwkU/9be4IWsQMpFPycGJDUwJTpDdME3Lne8J61peAt/87MDxT4OtN8EyJr+rF8kRERESquYCG9MGDB3PllVcycuTIUyqyqiiki4i/mKbJlm0bWLL5Y9Ltiwip7/vuFlGcRoekYbSOH0BQVW6BVrAN0l6BjH+CO9P7fo7O/9vCbRgY1ft2JxEREZGqEtCQPm3aNJ544gmuu+46unTpQkRERIn2yy+/3PeKK5FCuogEQnZ2NktWfc+Gw//GiN+MPTTfp/6G20HTqH50Sh5BfNjpVbeVm8sJGf+CtJehcLv3/ewNIe4uzzZuQfGBq09ERESkBgpoSLfZyl6l2DAMXC6XL8NVOoV0EQkk0zTZvmMrSzZ8xhHmE9LggM+zwcNI4sxGQzk9YRCOoCr6O2W6IOtrSJ0Cub96388IhejrPVfXHWcEqjoRERGRGiWgIb2mU0gXkcqSk5PD0lW/svbAF5j11xMUkePbAGYQTcLOpWPytTSK7FJ1W7nlLfWEdecnQKH3/cIv9Ny3HjEAqnobOhEREZEqVGkhPS8vj9DQ0Ip2rxIK6SJS2UzTZNeunSxe928OuubiSNzn81ZuIWYCZyReRdsGQ4gISQhQpSdRuA/SX/c8XD7cfx9yOsSNh5gbwBZx8vNFREREapmAhnSXy8UzzzzDtGnTOHjwIJs2baJFixY88sgjpKSkcPPNN59S8YGmkC4iVSk3N5flq39j1Z7PcddbQ3C0D4u0AZgGDRxd6ZA0jKYx52IzggJTaHncueD8wHN1vWCd9/1scRA7xnPvenDN3MZTREREpCJ8yaE+zz98+umneffdd5k8eTIhIX+u5Nu+fXvefvtt36sVEalDwsLC6NntAsZc8QqDWr5PvYO3kLurOe5CL8O2YXKoYAnfb7uX6csuYuHOF8nI2x3Yok9kC4PYW6H5Gkj+DiIGetfPnQap/4CtzWHfCMhdEtg6RURERGogn6+kt2rVijfeeIN+/foRFRXFypUradGiBRs2bKBHjx6kpaUFqla/0JV0Ealu8vPzWbH6D1bu/IKCmFU46qX6PEZccHs6NB5G87i+BNmq4Dak/PWeFeEzpoOZ632/sJ4QNwGihkBVzAoQERERqQQBne4eFhbGhg0baNasWYmQvm7dOrp160ZWVtYpFR9oCukiUp3t27ePJau+Y2f2HByNd2B3FPjU32aG0areQNonXk398DYBqrIcrqOQ/hakTYWivd73C2oGcWMh9mawxwasPBEREZGqENDp7u3atWPevHmljs+cOZNOnTr5OpyIiByncePGDLl4FHcMms7Zjpdg20XkHWyAt2+nuo1cNqV9wRcbRvDxyqGsPfQZBS4f73s/Ffb6UP9BaLkdGs+A0K7e9SvaCYfvgy1JcHAcFGwJbJ0iIiIi1ZTPV9K/+uorRo4cyUMPPcSTTz7JE088wcaNG3nvvff473//y4UXXhioWv1CV9JFpKY5cOAAS1b8xHbnbEIabyMo3Ifp5IBhBpMS05czGl5Dw4iOGL5u3H4qTBNyF0Hai5D5BeDtqvYGRA6CuHsgvA8+bzYvIiIiUo0EfAu2efPm8eSTT7Jy5UqysrLo3Lkzjz76KBdddFGFi64sCukiUlMVFhayZs1qlm3+D9lhfxDaaD+Gzbc/4eG2RpzR8GpOqz+I8OD6Aaq0DIU7Ie0Vz3R4t9P7fo4OnrAePRxsjoCVJyIiIhIolbZPek2kkC4itcGhQ4f4ffk8Nqd+Q0ijrRXYys1GUlQvzki8iqToHpW7lZsrEzLehbSXoHCr9/3sDSDuToi9A4IaBKw8EREREX8LaEi/5ZZbuP766+nTp8+p1FhlFNJFpDYpKipi7dq1LN34Lc7gxYQ12YstyOXTGA6jHm0b
XEGb+MFEO5oEqFILpguyZkHaFMj52ft+RghEX+e5uh56VqCqExEREfGbgIb0wYMH891335GQkMC1117LddddR8eOHU+l3kqlkC4itdWRI0f4Y/kiNh6cRVDDLYTU831LzMSwzpyReDXNYvsQVJlTy/NWeK6sO2eA6cOK9uF9od4EiLgEDJ/XQhURERGpFAGf7p6WlsZnn33GjBkzmDdvHqeffjrXXXcdI0aMICUlpaJ1VwqFdBGp7YqKiti4cSO/r5lDmm0hYcl7sIf4tpVbEJG0ib+M0xOGUC+sdYAqtVB0ANKmQfpr4Drsfb/g1lBvPMSMBFtk4OoTERERqYBKvSd9z549fPTRR/zrX/9i8+bNFBUVncpwAaeQLiJ1SWpqKkuXLWHdvm+wNdiEI+Gwzwul13OczhmJV9EibgAh9ojAFHoidx44P/KsCp+/2vt+tliIvRXi7obgpgErT0RERMQXlRbSCwsLmTVrFh988AGzZs2iXr167N27t6LDVQqFdBGpi1wuF5s2beL3VT9xxD2f8Ka7sIfl+TSGDQct4y6ibcIVNIg4q3K2cjNNyPkJUqdA9n996GiHqKs8U+HDzglUdSIiIiJeCXhI//nnn5kxYwaff/45brebK6+8kuuuu46+fftW7v67FaCQLiJ1XXp6OkuX/cGaXbMx4jcQ2vCAz1u5RQU3pV2DK2ld7zLCguMCVOkJCjZB6suQ8Q6YOd73C+3uCetRV4IRHLj6RERERMoQ0JDepEkTUlNTufjii7nuuusYNGgQDkfN2bdWIV1ExMPtdrN582b+WDmPAwW/Epa8k+CoLJ/GMLDTLOZ8To+/gibR3bEZ9gBVexxXGqS/7dlzvWi39/2CkiBurGc6vL2S3lgQERERIcAh/a233uKaa64hNjb2VGqsMgrpIiKlZWRksGz5MlZtnQP11xPaeJ/PW7mF2RNom3AFp9W/nChHowBVehyzEDL/DakvQt5v3vczwiHmJogbB442AStPRERE5JhKXTiuplFIFxEpm9vtZuvWrSxdsYjd2XMJT95JSFy6j6MYNI7sRtuEK2gWcz52W0ggSi0p9zfPfeuZMwEf3lyIuBTq3QPh/fB5RT0RERERLwU8pP/xxx98+umn7Nq1i4KCktv6fPHFF74OV6kU0kVEvJOZmcmKFStYvuFHXHFrCE/agy2k0KcxQmwxtIm/jDb1hxAX1iJAlR6ncDekTYX0N8Gd7n0/R3uIuweirwNbaKCqExERkToqoCH9448/5sYbb2TAgAHMmTOHiy66iE2bNnHw4EGuuOIK3nnnnVMqPtAU0kVEfGOaJtu2bWPp8iXszJhLWNIOHAlHfB4nIfxM2sZfQYu4Cwm2hweg0uO4syFjOqS95Flwzlv2BIi9HeLuhKCGgatPRERE6pSAhvSzzjqLMWPGcNdddxEVFcXKlStp3rw5Y8aMoVGjRjzxxBOnVHygKaSLiFRcVlYWK1euZPnaXyiIXkV48m6ft3KzG2G0qjeA0+OHkBDePrC7gphuyP7Wc996zo8+dAyG6OGeVeFDOwaqOhEREakjAhrSIyIiWLt2LSkpKdSvX5+5c+dy5plnsn79evr27cv+/ftPqfhAU0gXETl1pmmyY8cOli77g21H5xLaZAehiQd93sot1tGCtglX0KreJYQGxQam2GPyVnuurDs/ADPf+35h53vCeuRlUBmr14uIiEit40sOtfk6eFxcHJmZmYBnO7Y1a9YAnn13c3J82LdWRERqLMMwaN68OVdfdQ13X/8cXes9TOGyYTjXtaUoK8LrcdLzt7Foz/N8uGoAP25/iL3OxZimOzBFh54Jjd6Glrsg/gmwJ3rXL/cX2DsEtrXx7NPuygxMfSIiIiJU4Er6iBEjOPvss5k4cSJPPfUUr7zyCoMHD+b777+nc+fOWjhORKSOMk2TXbt2sXTZUjbv+5XQpO2ENd6HYfctdEcGN6JN/GBOq385kSFeBumKcOdD5seeVeHzV3jf
zxYNMbd49lwPSQlQcSIiIlKbBHS6e2pqKnl5eTRu3Bi3283kyZNZuHAhrVu35uGHHyYuLu6Uig80hXQRkcDLzc1l1apVLFu5iKyQFYQ33UVIbIaPo9hIju5Bm/pDaBbbG5sRHJBaMU3I+QXSpkDW14C3/1u0QdQVEDcBwnpqCzcREREpk/ZJL4dCuohI5TFNk71797J06VI27JqHo/F2wpL2YAsu8mmcUHscp9UfRJv4wcSGpgSmWICCLZD2CmT8C9xZ3vcLPdsT1qOvBqMS9oUXERGRGsXvIT07O5uICO/vMfT1/MqkkC4iUjXy8vJYvXo1y5YvIcO2nPCmu3DEH/V5nMSIjpweP4Tmsf0JtocFoFLAlQEZ//Tcg1600/t+QY0h7m6IvQ3s9QNTm4iIiNQ4fg/pjRo1Yvz48YwcOZJGjRpZnmOaJj/88AMvvPAC5513Hg899FDFqg8whXQRkaplmib79+9n6dKlrNu2iOCGWz1buYX6sOI6EGyLoFW9i2lTfwjx4W0Ds5WbWQSZX0Hai5C7wPt+RhjE3Ahx48HR1v91iYiISI3i95C+ceNG/vrXvzJr1iw6dOjA2WefTePGjQkNDSUtLY1169axaNEigoKCeOihhxgzZgx2e/XcpkYhXUSk+sjPz2fNmjUsW/Y7qS7PveuOxIM+395dL+w0Tq8/hFb1BuIICtDf9tzfPfetOz8FfJiuH3ExxN0DERfpvnUREZE6KmD3pO/atYvPPvuMefPmsXPnTnJzc4mPj6dTp04MGDCAgQMHVttwfoxCuohI9bR//36WLVvG2k1LsDfYQnjTXQRF+La1p80IoXlsX06PH0KjyC4Yhs87jZ5c4V5IfxXS3gB3qvf9QtpBvfEQfQPYAjRNX0RERKqlGrdw3Kuvvspzzz3HgQMH6NChA6+88grdunU7ab+PP/6Y4cOHM3jwYL788kuvnkshXUSkeisoKGDt2rUsXbaUw7krCW+6k7BG+33eyi06JIk28YNpXW8QESEJ/i/UnQMZ73uurhds8L6fvT7E3g6xd0JwY//XJSIiItVOjQrpn3zyCTfeeCPTpk2je/fuTJkyhc8++4yNGzfSoEGDMvvt2LGDc889lxYtWlCvXj2FdBGRWujQoUMsXbqU1et+xxa/lfCmuwiOcfo0hoGd5JhenF5/CMkxvbAZQf4t0nRD9hzPfevZc3zoGATRw6DeBAjt4t+aREREpFqpUSG9e/fudO3alalTpwLgdrtJTk5m7NixPPjgg5Z9XC4X5513HqNHj2bevHmkp6eXGdLz8/PJz/9zMSKn00lycrJCuohIDVJYWMi6detYumwpBzJWe66uN9nr81Zu4UHxtK5/GW3qDyEmNNn/heavhbSXIeM9MPO87xfWG+rdA5GDwajet42JiIiI73wJ6QG4Wc97BQUFLF26lP79+xcfs9ls9O/fn0WLFpXZ78knn6RBgwbcfPPNJ32OSZMmERMTU/xITg7AP8pERCSggoOD6dChA6NHjebW6/5K24gxZMwbTNryjuQfref1ODlFR1h58F0+XTeE/266jS2p31Dk9iFMn4zjDGj4BrTcDfF/hyDrHVFKyZ0He6+Cba0g9UVw+TZbQERERGqPKr2Svm/fPpo0acLChQvp0aNH8fH777+fX375hcWLF5fqM3/+fK699lpWrFhBfHw8N910k66ki4jUQUVFRWzYsIGlS5ey58hqwpN3EZa8G7ujwKdxQuxRtKo3kNPrD6F+eBv/FmkWeFaDT30R8pd5388WBTGjIW4chLTwb00iIiJS6Xy5ku7nG/MCKzMzkxtuuIG33nqL+Ph4r/o4HA4cDkeAKxMRkcoWFBRE+/btad++PUePHmXZsmWsWLAUd9R2z1ZuDQ55teNZgSuTdYc/Zd3hT4kPb0ub+oNpVe9iQuxRp16kEQIx10P0dZA7H1KnQNaXwEkWwXNnQtpLnqnzkYM9962H9dYWbiIiInWAz1fSU1JSGD16NDfd
dBNNmzY9pScvKCggPDycmTNnMmTIkOLjI0eOJD09na+++qrE+StWrKBTp04ltnlzuz3/0LHZbGzcuJGWLVuW+5xaOE5EpPZyuVxs3LiRpUuXsmPfWsKTd3m2cgvP9Wkcu+GgRVx/2tQfQsPIThj+DMcF2yHtFch42xPGveXo5Anr0cM84V9ERERqjIAuHDdlyhTeffdd1qxZwwUXXMDNN9/MFVdcUeGr1d27d6dbt2688sorgCd0N23alLvvvrvUwnF5eXls2bKlxLGHH36YzMxMXnrpJU477TRCQsr/h4tCuohI3ZCWlua5ur5iOYWhnqvroQ0P+LyVW4yjGW3qD6Z1/UsJD/ZuFpdXXE7IeMdzxbxwu/f97A0h7i6IHQNBAdhaTkRERPyuUlZ3X7ZsGe+++y4fffQRLpeLESNGMHr0aDp37uzTOJ988gkjR47kjTfeoFu3bkyZMoVPP/2UDRs2kJiYyI033kiTJk2YNGmSZf+T3ZN+IoV0EZG6xeVysXnzZpYtW8bWnWsIa7LHs5VbtA9XsfFs5dY0pjenxw8hKbqH/7ZyM12Q9R/Pfeu5v/pQUChEXw/1xoOjvX9qERERkYCo1C3YCgsLee2113jggQcoLCzkzDPPZNy4cYwaNcrr6YFTp07lueee48CBA3Ts2JGXX36Z7t27A/D/7d15eFT1vT/w95l9JpNMVrKQDRJI2BdZihahikWvVeNWilyEXkSrUkCpglZB7FXQqlBbf0rFivWCoq2g4oIUBRVQISFssoYYtmyE7GGWzHx/f0wyyWSyTZIzM0ner+eZx+TMWT4zp+cp73y3yZMnIzk5GevWrWv2WIZ0IiJqr/Lycuzfvx/792fhsuJMw1JuKrtX5wlS98GAiJuQFnEzQrTxXVegOcs5br3iXQC29h9nuM65hFvQ9YDk14VbiIiIqBk+Cek2mw2bNm3Cm2++iW3btuFnP/sZ5syZg3PnzuGVV17BNddcgw0bNnToA8iJIZ2IiBwOB06dOoWsrCyczDkCXex5GBLPQBNe6vW54oLHIT0iA0mhk6FSdNFEpbX5QOn/A8peA+wX23+cJg0IWwCY7gYUQV1TCxEREXWarCE9KysLb775Jt555x0oFArcfffduOeee5Cenu7a5/Dhwxg7diwuX/Zuoh5fYEgnIqLGKioqkJ2djaysLFTbz8KQeAb6hHNQarxbyk2rDEFq+H8hPTID4foBXVOc4zJQsd7Zum490v7jFGFA6L1A2DxA3YUt/URERNQhsoZ0pVKJ6667DnPmzEFGRgbUarXHPtXV1Zg3bx7efPNN7yr3AYZ0IiJqjsPhwOnTp5GVlYXjJ3+EJiofhsQ8aKOKvV75LMowBGmRGUgJ+yU0SmPnixMCqPmPM6xXf+rFgUog+M66JdzGdb4OIiIi6hBZQ3peXh6SkpI6VaA/MaQTEVFbqqqqXK3rFebzMCScgT7xLFR673qIqRQ69A/7JdIjMtAnaHjXLOVmOeacEb78LUB4UY/+SiBsIRB8K9BVk94RERFRu8ga0vfu3QuHw+Ga2K3e999/D6VSiTFjxnhfsQ8xpBMRUXsJIZCbm4usrCwcPfoj1BGFMCTmOZdyU3g3pUuoNhlpkRkYEP4r6NVhnS/Ofgko+ztQ+jeg9nz7j1MlAmG/B0LvAZShna+DiIiI2iRrSB83bhweffRR3HHHHW7bP/jgAzz33HP4/vvvva/YhxjSiYioI6qrq3HgwAFkZWWhtPIC9PHO8evq4CqvzqOQVEgyTUJaRAb6hoyHQlJ2rjBhAyr/5VzCzby3/cdJQYDpt84l3DSpnauBiIiIWiVrSDcajTh48CD69+/vtj03NxfDhw9HZaV36876GkM6ERF1hhACeXl5yMrKwo8/HoEi5CKCEvOgi7vQgaXcopEWcQsGRtyMYG1sZwsDLu8BSlcDlf8G4GjngRJg/BUQ9hBgmAyvB+ATERFRm2QN6REREdiyZQsmTJjgtn337t248cYb
UVrq/fI1vsSQTkREXaWmpgYHDx5EVlYWLpZegD6ubim3sDIvzyShb/B4pEdmIMk0CUqFpnOF2fKc3eDLXgcc5e0/TjvCOW49ZDrQVcvJERERkbwhffr06cjPz8eHH34Ik8kEACgrK0NGRgb69OmD9957r+OV+wBDOhERdTUhBM6dO4fMzEwcOXIE0JfAkHgGhvhzUGhsXp1LpwpFaviNSIu4BeH6lM4VZq8EKt4CLv0FsJ1q/3HKPkDYA0Do/YCqT+dqICIiInlD+vnz53H11VejpKQEo0aNAgBkZ2cjOjoa27ZtQ0JCQscr9wGGdCIikpPZbHa1rhcWX4AupgBBiXnQRl30+lx9goYjPSID/cOug1pp6HhRwg5UfQqUrgJqvmr/cZIGCJnhbF3XDe/49YmIiHo5WUM64Jw8Z/369Thw4AD0ej2GDx+O6dOnN7tmeqBhSCciIl8QQuDChQvIzMzE4cOH4VCXwZBwBoaEs1DqzV6dS60wOJdyi8xAlGFo55ZyMx9wjluv2AAIa/uPM1zjXG896L8ASdHx6xMREfVCsof07owhnYiIfM1iseDQoUPIyspCfsF5aKOKnUu5RRd6vZRbmC6lbim3G6BTdWIpt9oCoPQ1oOz/Afbi9h+nHgCE/BrQXeF8qRI42RwREVEbfBLSf/zxR5w5cwZWq/tf4W+++eaOnM5nGNKJiMifLly4gKysLBw6dAi1UgUMdUu5qYzVXp1HIamRHPoLpEdkIC54LKSOtm47zEDFO86u8JZD3h+vjAR0o52BXVv3X3UygzsREVEjsob006dP49Zbb8WhQ4cgSRLqD6/veme3e7f8jK8xpBMRUSCwWq04fPgwsrKycP78OWjCLzlb1+PyoVB69/+lRk0c0iJuxsCIm2HURHesICGc49VLVwNVWwB0oqOdIqwhuNf/V53C4E5ERL2WrCH9pptuglKpxNq1a9GvXz/88MMPKCkpwaJFi/DCCy9g4sSJnSpebgzpREQUaAoLC5GZmYmDBw/Caq+Cvu8551JuoV4snwZAggLxIROQFpGBpNCJUEgdnCvGetI5I3z5m4Co6dg5mlKYAN0o9xZ3zQCObyciol5B1pAeGRmJL7/8EsOHD4fJZMIPP/yAtLQ0fPnll1i0aBH279/fqeLlxpBORESBymaz4ccff0RmZibOnj0LVUg5ghLzoI8/B4W61qtz6VXhGBD+K6RF3oJQXXLHCrKXAmVrgdK/ArVnO3aO1iiMgHaUe4u7Jg2QlF1/LSIiIj+SNaSHhYUhKysL/fr1Q0pKCtauXYtf/OIXyMnJwbBhw1BT00V/cZcJQzoREXUHxcXFyMzMxIEDB2C2VkMfewGGxDPQRpZ4fa7ooJFIj8xAv9ApUCv13hcjaoHKD5yB3bwHcFR5f472kgyAbmTDxHTa0YB2ECCp5LsmERGRzGQN6RMnTsSiRYuQkZGBu+66C6WlpXjiiSfw97//3bXMTCBjSCciou6ktrYWR48eRWZmJvLy8qAMqmpYyk1n8epcakUQUsOvR1pEBiINgzq2lJtwOLvDmzMBS5bzv+YswFHh/bnaS9IB2hENwV03GtAOATranZ+IiMjHZA3pW7duRXV1NW677TacOnUKv/rVr3DixAlERERg48aNuOaaazpVvNwY0omIqLu6ePEisrKycODAAdRcroK2TxGCEvOgjS70ek62cP1ApEdkIDX8BmhVnfz/Q+EAbKcbArsruJd27rytkTSAdniTrvJDAYVWvmsSERF1kM/XSb906RLCwsI69hd5H2NIJyKi7q62thbHjx9HVlYWTp8+DYXucsNSbkHeDTtTShokh16D9MgMxBqv6PhSbk0JAdh+atLingnYve+u335qQDvUPbhrhwMKnYzXJCIiaptsId1ms0Gv1yM7OxtDhw7tdKH+wJBOREQ9yaVLl5CVlYXs7GxUV1dBE1ECQ2Ie9LH5kJQOr84VrOmLtMgMDAy/CUGaqK4vVgjnBHRuLe6Z
gL2o66/lonR2jXcL7iMAhUHGaxIREbmTtSW9f//+2LRpE0aMGNGpIv2FIZ2IiHoiu92OEydOICsrC6dOnYKktkLf9zyCEvOgNnk3XlyCAgmmnyMt4hYkmq7q+FJu7SEEUHvBPbhbMoHafPmuCQWgGeQe3HUjnbPNExERyUDWkP7GG2/ggw8+wNtvv43w8PBOFeoPDOlERNTTlZWVYf/+/di/fz8qKyugNpU7W9f7nu/AUm4RGBhxE9IiboFJlyhTxc2ozXcf327OBGrPyXhBybn8m9vkdKMAJf+tQEREnSdrSB81ahROnToFm82GpKQkBAUFub2flZXlfcU+xJBORES9hcPhwMmTJ5GVlYWTJ08CCht09Uu5RVzy+nyxxiuQFpGBpNCJ0CiDZai4DbVFzQT3PHmvqR7gHtx1owFlqLzXJCKiHkfWkL58+fJW31+2bJk3p/M5hnQiIuqNKioqsH//fmRlZaGiogIqYyUMCWegTzgLpdbq9flCtPGI0Kcj0jAIkYZ0RBrSoVOFdn3hbbGXeAZ322l5r6nu30xwj5D3mkRE1K35fHb37oQhnYiIejOHw4GcnBxkZWXh+PHjELBDF10IQ2IetH2KvF7KrTGjJgaR+kGIqAvtkYZBMKj9EF7tpYB5f5PgflLea6qSmgT3KwCVDJPvERFRt8SQ3gqGdCIiIqfKykpkZ2cjKysLZWVlzqXcEs7AkHgWKoN3S7m1xKCOQqS+IbRHGNIRpO7j+2Vb7RWApUlwtx4HIOM/g1TxzQT3GPmuR0REAUvWkK5QKFr9P1a73e7N6XyOIZ2IiMidEAKnT59GVlYWjh07BofDDk3kRQQl5kEXU+D1Um5t0anCnKFd7+wqH2FIR7AmzvfB3VEFmLObBPejALr287pRxQLapsE9Dp3qwkBERAFP1pD+4Ycfuv1us9mwf/9+vPXWW1i+fDnmzJnjfcU+xJBORETUsqqqKhw4cABZWVm4dOkSJLUVhvhzMCTmQR1SKdt1tcoQZzf5ulb3CEM6TNoESJJCtms2y1ENWA42WRLuCAAZGyGU0Y2WgqsP7gkM7kREPYhfurtv2LABGzdu9AjxgYYhnYiIqG1CCPz000/IysrC0aNHYbfXQhVSAU3YJahN5c5XSAUkhXzdxdWKIEQY0upa3Z3d5U26JCgkpWzXbJbjMmA51CS4HwZgk++aysiGwK6t+686mcGdiKib8ktIP336NIYPH46qqqquOJ1sGNKJiIi8U1NTgwMHDuDgwYMoKChoeENhhzq4siG0m8qcwb2Lu8c3plLoEK4f2DCrvD4dYfp+UEhq2a7ZLIcFsB6uC+514d1yEBDez5TfboowzxZ3dQqDOxFRN+DzkH758mU89thj+Oyzz3D8+PHOnk5WDOlEREQdZ7FYUFBQgPz8fFy4cAH5+fm4ePFiww6SAypjlTOwm8qhDi2HOqQcCpV83cWVkgbh+gENs8rr0xGuT4VSoZHtms0SVsDyY5PgfgAQZvmuqTABulHuLe6aAYCvhwkQEVGrZA3pYWFhbhO7CCFQWVkJg8GA//u//8PNN9/csap9hCGdiIioa9UH9/rQfuHCBZSUlDTaQ9QF9/KG8G4qh0JdK1tNEpQI16ciwpCOKEM6IgyDEKFPhUqhl+2azRI2wHKsIbhbspyT1YmumT2/WQojoB3l3uKuSQN8PUyAiIhcZA3p69atcwvpCoUCUVFRGD9+PMLCwjpWsQ8xpBMREcnPbDZ7BPdLly412kNAaah2BXZNqDO8KzTyjfOWoECorl/dcnD1wX0gNMog2a7ZLGEHrMcaxrebs5zLwzlkHDIoGQDdyIYl4bSjAe0gQFLJd00iInLhOumtYEgnIiLyD7PZjPz8fLeu8h7BXX/ZvcU9tAxKrYzjvCHBpE1sFNydS8NpVcEyXrMZwgFYTzSamC7L+bOjQr5rSjpAO8J9LXftEMDX4/uJiHoBWUP6m2++CaPRiDvvvNNt+/vvv4+amhrMmjXL
+4p9iCGdiIgocNQH98Yt7qWlpY32EFDozI0mpyuHxlQGpV7Gcd4AgjV9XcG9fpI6ncrHPQaFA7DluLe4mzMBR5l815Q0gHa4e3DXDAUUWvmuSUTUC8ga0gcOHIg1a9bgF7/4hdv2nTt34t577+XEcURERNQply9f9gjuZWVlbvsoNGaoTRXOFvdQZ3hXGWQc5w0gSB3tFtojDekwqKNkvaYHIQBbbpMW90zAXtL2sR2mBrRDm7S4DwcUOhmvSUTUs8ga0nU6HY4dO4bk5GS37T/99BMGDRqEy5cve12wLzGkExERdT81NTUeXeWbBndJbXW1tNe3uquM1bLWZVBHIkKf7tbqHqSOdpu/R3ZCALVnPFvc7UUyXlTp7BrvFtxHAAqDjNckIuq+vMmhXs8W0qdPHxw8eNAjpB84cAARERHeno6IiIioTQaDASkpKUhJSXFtqw/ujVvcyy9qYL3Y0LotqWxuXeXVpjKojFVdtrR4je0iamzf4mzFt65tOlVoo+DubHUP1vSVL7hLEqBOcr6Cb3VuEwKoPd9kcrpMoDa/iy5qd64LbzkIlL9Zt00BaAa5B3fdSOds80RE1G5eh/Tp06dj/vz5CA4OxtVXXw3A2dV9wYIF+M1vftPlBRIRERE1p7ngXl1d7RHcK0rUsJZEuvaRlLVQhVS4tbqrgishKbpmLl1zbRnOV36H85XfubZplMGuNdzr13M3aRMhybWeuSQB6njnK7jR8ri1+Y2Ce114rz3XRRd1ANYjzlfFP+sLATTpDUvB6UY7l4dTsjcjEVFLvO7ubrVaMXPmTLz//vtQqZwZ3+Fw4O6778Zrr70GjUYjS6Fdhd3diYiIepfq6mq30J6fn4+KiiazpivsUAdXuMa3q01lUAdXQlI6ZKtLrTAgwpDWKLgPQqguCQpfL4tWW9RMcM+T95qagc5l4Fwt7qMBZai81yQi8iOfLMF28uRJZGdnQ6/XY9iwYUhKSupQsb7GkE5ERERVVVUeLe6VlZXuO0kOqIIr3ca4q03lsgZ3paRFhGEgIvUNXeVDdf2hVPh4WbTai8612xsHd9tpea+pTnFvcdeNBpQcSklEPQPXSW8FQzoRERE1p6qqChcuXHAL7lVVVe47SQ6ojFXua7mbyqFQ2WWrSyGpEa5PdYb2urHuYfpUqHy9LJq9FDA3De4n5b2mOrlJi/sVgMrHM+oTEXUBWUP67bffjnHjxmHx4sVu259//nns3bsX77//vvcV+xBDOhEREbVXZWWlK7S3GNwhoAyqhia0zG2COoW6Vra6JCgRpk9xjXOPNAxCuH4A1Eq9bNdslr0csGS7B3frcQAytgGpEpq0uF8BqGLkux4RUReQNaRHRUXhyy+/xLBhw9y2Hzp0CFOmTEFhYaH3FfsQQzoRERF1Rn1wb9ziXl3ddKk3AaWhpqHFvW6su1Jjla0uCQqYdMkNs8rr0xFhGAiN0sezq9sr64J7o5nlrUcByDdMAKq4Zlrc49Bl0/gTEXWSrCFdr9cjOzsbaWlpbtuPHTuGUaNGcZ10IiIi6lWEEM0G95qamqZ7Qqm/7NbarjaVQ6mzyFpfiDYRUYZBzsnp9OmIMKRBpzLJek0PjmrAfACwNJqgzvIjAPmGCUAZ3ajFvS68qxIY3InIL2QN6ePGjcOvfvUrLF261G37U089hY8//hiZmZneV+xDDOlEREQkNyEEKioqPCan8wzugEJrbhjfXtfirtLL2+gRrOmLSEPDcnCR+kHQq8NkvaYHx2XnOuuNZ5a3HAYg3zABKCMbgru2LrirkxnciUh2sob0jz/+GLfddhvuuusuXHPNNQCA7du345133sH777+PjIyMDhfuCwzpRERE5A/1wb1pi3tzvRAVGotHi7sqyDPgd6UgdXRdV/m68K4fBIM6EpIvA6zDAlgONWlxPwQI+YYJQBHm2eKuTmFwJ6IuJfvs7p988gmeffZZ1xJsw4cPx7JlyzBp0qQOF+0rDOlEREQUKIQQ
KC8v91jHvbngLqmtUIeUu1rcNaYyqIxNx8J3Lb0qwiO4GzUxvg3uwgpYjjRpcT8ACBmHCShMgG5UQ3DXjgY0AwBJId81iahH4xJsrWBIJyIiokAmhEBZWZlHV3mz2eyxr6SyQR1S4bYcnCq4UtZGYK3S5Aru9Wu5B2vifRzcbYDlaENwt2QC5mxAyDhMQGEEtKPcW9w1aYCklO+aRNRjMKS3giGdiIiIupv64N60q7zF4tmaLClroQqpcIV2janMGdwV8v2TT6M0IkKf5grtkYZ0hGgTofBlgBV2wHqsYUZ5c6ZzXXchY28DyQDoRrq3uGsHAZJKvmsSUbcka0i32+1YtWoV3nvvPZw5cwZWq/sYoUuXLnlfsQ8xpBMREVFPIIRAaWmpR1f55oI7FHaogyuhbryWe3AFJKV8y6KpFPq64J7ueoXq+kHhywAr7ID1pHtwt2QBjkr5rinpAO0I9xZ37RBAUst3TSIKeLKG9KVLl2Lt2rVYtGgRnnjiCfzxj3/ETz/9hM2bN2Pp0qWYP39+p4qXG0M6ERER9VRCCFy6dMmjq3zTRhUAgOSAKrjS1dquNpVDZaqAQinfsmhKSYsI/YCGWeUNgxCmS4FS4cMAKxyALadJi3sW4CiT75qSBtAOb9JVfiig0Mp3TSIKKLKG9JSUFLz88su48cYbERwcjOzsbNe27777Dhs2bOhU8XJjSCciIqLepD64N+4qn5+f33JwD6pyLQVXP9ZdoZIvuCskFcJ1qYisX8vdkI5wfSpUCp1s1/QgBGDLbRLcMwGHnD1E1YB2sLOVXTPE+V/tEEDdj+PciXogWUN6UFAQjh49isTERMTGxuKTTz7B6NGjcfr0aYwaNQrl5eWdKl5uDOlERETU2wkhUFJS4tbaXlBQ0Hxwh4AyqNqtxV1tKodCY5OtPglKhOn7I1LfsJZ7hD4NaqVetmt6EAKoPeMZ3O3F8l5X0gGaQQ2hXTvUGeLVSZxdnqgb8yaHej0oKD4+Hvn5+UhMTERKSgq++OILjB49Gnv37oVWyy47RERERIFOkiRERkYiMjISw4cPBwA4HA6UlJS4dZXPz8+HzWaDvdoIe7UR5gt9684goNTXeLS4K7Vds565gB2XLp/EpcsngUsf11eNUF2yK7hHGQYhwjAQGmVwl1zTgyQ5g7E6CQi+ra4wAdSebya4F3TddYUZsOx3vtzqCXJOSte41V07BFAlck13oh7G65b0JUuWICQkBI8//jg2btyI//7v/0ZycjLOnDmDhx56CCtXrpSr1i7BlnQiIiKi9qkP7o27yhcUFMBma64VXUChMze0toc6w7tSJ+N65gBCtAnOWeX19eu5p0GnCpX1mh5q8z2De+1531xbEQxoBjeE9voQr+rL8E4UQHy6BNt3332H3bt3Y8CAAbjppps6cyqfYEgnIiIi6jiHw4GLFy96dJWvra1tdn+F1uzW2q42lUNlkHE9cwBGTZxzYjp9wwR1enW4rNf0UFtYF9obB/czvru+wuQc866p6zJfH+KVMQzvRH7AddJbwZBORERE1LUcDgeKi4vdusq3Gtw1loal4OoCvCqoRtYag9R9nOPb9emu9dwN6ihIvgystRedS8CZs5xruFuOANYTAOQb3+9BEebZ6q4dCqj6+K4Gol6IIb0VDOlERERE8qsP7k27ytvtzc8UL6mtUIdUNLS4h5ZDFVQla6OvXhVeNzFdQ3d5oybWt8Fd2JxruVuO1IX2I43Cu3yz6ntQRnqOd9cMAVSRvquBqAdjSG8FQzoRERGRf9jtdldwr291LywsbDm4K2vdWtvVpnKogitlDe5apalubHtDq3uIti8kX8+s7rA4g3p9aK8P8NZTABy+q0PZx32WeVe3+TDf1UDUAzCkt4IhnYiIiChwNA7u9eG9reCuCq5whXdNaJkzuCvk+yetWhHkCu5RhnRE6AfBpEuEwh/rmTvMgPW4e6u75TBgOw3Ah/+sV8W6h/b6n5Um39VA1I0w
pLeCIZ2IiIgosNntdhQVFXkEd4ejhRZkhR3q4Er3CepCKiAp5WtxVil0iNCn1U1M5wzuYfpkKCS1bNdslaMGsB5raHWvD/G2XN/WoYpvMt59iHP2ebmWyiPqJmQN6f3798fevXsRERHhtr2srAyjR4/G6dOnva/YhxjSiYiIiLqf2tpaV3CvX8O91eAuOaAyVrrWcteYyqAKqYBCJd84b6WkRbg+1RXcIw2DEKZLgVKhke2abXJUA5ajztb2xl3nfTnTPOBcz107pEnX+UGAIsi3dRD5iawhXaFQoKCgAH36uM8AWVhYiMTERFgs8q6F2VkM6UREREQ9Q+PgXh/ei4qKWg7uEFAZq9yWg1ObyqFQNz8LfVdQSCqE6VJcoT3SkI5w/QCoFDrZrtku9krA+qPnhHW153xYhASok5uZsG4QoND7sA4i+XmTQ1XtPelHH33k+nnr1q0wmRrGm9jtdmzfvh3JycneV0tERERE1AEqlQpxcXGIi4tzbautrUVhYaFHcHe2S0morQpGbVUwLp9PqDtCQBlU3TDGvS7AKzRdsyyaQ9Si5PJxlFw+juMlHwIAJCgRqktGuD4VIdp4hGjjEaxNgEkbD70q0jezyyuDAf1456sxexlg+dFzwrrafBmKEM7u+LZcoHpLo+0SoE7xHO+uSQP8/ccNIh9od0u6QuGc0VKSJDQ9RK1WIzk5GS+++CJ+9atfdX2VXYgt6URERES9i81mQ2Fhods67g3BvTkCSv1lZ4t7aMPs8kqtVfZaVQodgjV9EaJNcAV45ysBRk0MFFK729i6lv2SM7zXT1RXH+LtRT4sQgFoUutC+9BGIX4gIPlxSAFRO8ja3b1fv37Yu3cvIiO755qJDOlEREREVB/cGy8HV1xc3GpwV+jMbq3talM5lHqzz2qWoESwNtYZ4DXxCNbGw6RNQLA2HiHavv7pQl970b3V3XIEsB4G7CU+LEIFaAY0M2HdAMBfE/kRNeHz2d3LysoQGhra2dP4BEM6ERERETXHZrOhoKDALbhfvHixleAOKLRmz7XcDZd9WHUDgzqqoQVeE+/WGq9V+fDfvUI4W9ibjne3HAEcpb6rA2pAm+a5VJwmBfBXjwTqtWQN6c899xySk5Mxbdo0AMCdd96Jf//734iNjcWnn36KESNGdLxyH2BIJyIiIqL2slqtKCgocOsqX1xc3Ooxktrq1tquNpVDZaz2UcXN0ypNjca/x8NUNw4+RBMPg9pH4+CFAOwFnq3uliOAo0L+69eTtM7x7a5Z5ute6n6ApPRdHdSryN7dff369bjyyiuxbds2/PrXv8bGjRvx3nvv4cyZM/jiiy86VbzcGNKJiIiIqDPqg3vTFvfWSCqbe4t7SAWUhhpZl4Rrr4Zx8PFNxsL7aBy8EEDtec9Wd+sRwFEl77Ubk3TOmeXdJqwbCqiTAEnhuzqoR5I1pOv1epw4cQIJCQlYsGABzGYz1qxZgxMnTmD8+PEoLfVlFxbvMaQTERERUVezWCwewb2kpK1x2QIKrQWqoGooDdVQBdXU/df56qoZ5jujfhx8sKbx+Pf6MC/zOHghgNqzzonq3LrO/wiIGvmu25RkALSDPZeKUyUCvuiBQD2CLEuw1QsLC8PZs2eRkJCAzz//HP/7v/8LABBCwG73/18CiYiIiIh8TavVIikpCUlJSa5tFosF+fn5bl3l3YO7BIdFB6tFB1yK8DinpLI1CvDVUAbVQFX/s48mrBOwo8JyDhWWczhf+Z3H+57j4Bta4zs9Dl6SAHWi82X8r0ZFOQBbXpMJ6w4D1qOAkOF7ETWAeZ/z1ZjCCGgG14X2Rl3nVX0Z3qlTvA7pt912G+666y4MGDAAJSUluOGGGwAA+/fvR2pqapcXSERERETUHWm1WiQnJyM5Odm1zWw2o6CgAIWFhSgrK0N5eTnKyspQVlaGy5fdJ5wTtWrYykNhKw/1PLnCDpWhBsq6VneVoSHEKw01kBSdnhu6XWpsxaixFaOg
KsvjPa0yxBXYu3QcvKQANP2cL2Oj5Z+F3bnmetMJ66zHAGHp4CdshaMKMP/gfDWmMHm2vGuGAKpYhndqF6+7u9tsNvzlL3/B2bNnMXv2bIwaNQoAsGrVKgQHB+Oee+6RpdCuwu7uRERERBSILBaLW2hvGuJratrZxVtyQKm77Nb6Xh/mA2UcvFLSeqwDX/+zURPbtePgRS1gO93Q4u4K8McB+HBIgSKsyXj3upeyD8N7L+DzJdi6E4Z0IiIiIuqOrFZrqyG+uro9M8i3PA5eGVQDpcYq++doS+Nx8J6T2fWFSqHvmgsJG2A91WTCusOA9SSA2q65RnsoIxomqXNreY/0XQ0kO9lD+ttvv401a9bg9OnT2LNnD5KSkrB69Wr069cPt9xyS4cL9wWGdCIiIiLqiWw2m0dwb/x7VVXbM6W7j4Ov607v43HwbXGOg68L7xr31vguWQ9eWAHriSZLxR1xBnr4sBeCso9nq7t2CKAM910N1GVknTju1VdfxdKlS7Fw4UI888wzrsniQkNDsXr16oAP6UREREREPZFarUZUVBSioqKafd9ms6G8vNwjxNcH+crKyvaPg28ymZ1/xsHv93hPqwypG//eZCZ6b8bBS5q6Vu2h7tsdFsB63L3V3XIEsOUAkOGz24uAmiKg5iv37apYz/Hu2iGA0tT1NZBfeN2SPnjwYDz77LPIyMhAcHAwDhw4gP79++Pw4cOYPHlym2tE+htb0omIiIiIPNXW1roF+KZhvrKysuWD68bBK4NqGk1kF+jj4Bu60ndqHLzjsnNyuqYT1tlOd+0HaIuqb11ob9xtfjCgDPZtHdQsWVvSc3NzXZPFNabVats5DoaIiIiIiAKNSqVCREQEIiI8l4MDALvd3uqY+MpKJeyXg2C92LQlv24cvCu4+2ccvF1YUGrOQak5x+O9To2DV+gB3SjnqzFHNWA52mSpuCNAbV4Xf7I6teedr+ov3LerEpu0ug8FtIMARZA8dVCneR3S+/Xrh+zsbLc1IAHg888/x6BBg7qsMCIiIiIiChxKpRLh4eEID29+TLTdbkdFRUWLIb6irALW0rbWg28I8P5bD97zfdc4eE3TAN/KOHhFEKAf43w1Zq90runuanWv6zZfe67rPxgA1J5xvqo/c9+u7uc53l0zyPlHB/Krdof0p59+Gn/4wx/w8MMP48EHH4TZbIYQAj/88APeeecdrFixAmvXrpWzViIiIiIiClBKpRJhYWEICwtr9n273Y7KysoWQ3x5QTnMTUfiNhkH79ad3h/j4NHyOHi3Fvi6MN/sOHhlMKAf53w1Zi8HrD96TlhXe0GeD2XLdb6qtzTaKAHq/nWhfWhDiNekAQqdPHWQh3aPSVcqlcjPz0efPn2wfv16PPXUU8jJcXYViYuLw/LlyzFnzhxZi+0K7R0LYLfbYbP5cN3EXkKtVkOpVPq7DCIiIiIKMA6HwyPENw7y5eXlcDgcDQe0NA6+LtQH/jj4GCgkddsnsZd6jne3HAHshfJ/ABcFoEltZsK6NOdEe9QmWZZgUygUKCgoQJ8+fVzbampqUFVV5bYt0LX15QghUFBQgLKyMt8X10uEhoYiJiamfbNrEhERERHBGeKrqqpaDPFlZWWNQnzjcfA1HsvKBcZ68AoYNbEe3efbvR68vcSz1d1yGLD7ciJvFaAZ4LlUnGYA0J4/QPQisoX0wsLCFpd06C7a+nLy8/NRVlaGPn36wGAwMEh2ISEEampqUFRUhNDQUMTGxvq7JCIiIiLqIYQQbYb4+uWjG8bBNw7wgbUevFYRDpMuEaG6BNeyciHaeARr46FTtbLcWm2RZ6u75QjguOS74qEGNAMbus3Xh3hNCtDRWfS7OdlCuslkajO0Xrrky5vvvda+HLvdjhMnTqBPnz4tzmpJnVdSUoKioiIMHDiQXd+JiIiIyCfqQ3xL68SXlZWhtra2+XHwhobl5Hw1Dr41KgTBqI5DqD4RofokhOgS6sbB168Hr3A/
QAhn9/jG67vXh3hHue8KlzSAJr1Jy/tQ5yR2Us/OBbItwbZ8+XKYTK381aabqx+DbjAY/FxJz1b//dpsNoZ0IiIiIvIJSZIQHByM4OBgxMfHe7wvhEB1dXXz68QXlKG0vBw2mxVK/WXXGvD+Ggdfi2qU2U6izHYSqGjyOYUaekU0QrTxCDMkIzwoydUSbzRcDUXQtY0/tHNiuqat7tYjgKOZae47S1gBy0Hny61onXNmebfx7kMAdTLQ9A8OvYBXIf03v/lNtxp/3lHs4i4vfr9EREREFGgkSYLRaITRaGwxxNfU1DTflT6vDCVlpbBLVa7135uuC6/U+mYcvJBsqBHnUGM+hwLzd0Djjs5CAQ3CYVDGwKRLRGRwf4QHJSNE2x8hoRMbxsELAdSebWbCuh8BUS1D0WbAst/5akwyONd01wx1XypOlQj04EzR7pDOYEVERERERL2VJEkICgpCUFAQ+vbt6/G+EAKXL1/2DPFFzp9LqorhUJd6jINXBtVAqbvsm8wpOWDFRVgdF1FWcxh5Ne5vK+zB0El9YFTHIUyfhMiQFEQar0eIaa5zHLxwALYz7uu7W444130Xl7u+XlEDmDOdL7dCjYBmsGe3eVXfHhHe2x3S2zl0nbq5yZMnY+TIkVi9erW/SyEiIiIi6jYkSYLBYIDBYEBcXJzH+0IImM1mz/HwZ8tRVn4RldYLsKtK3VrifT0O3qGsRA0qUWPPQVHVNzhe1ehNuxZqR6SzK70mHuFB/dAndCKiIlIQpA6HVHvGc8I661FAWGQotAow/+B8NaYIAfodAtSJXX9NH2p3SHdbk5ACzuzZs/HWW29hxYoVWLJkiWv75s2bceutt7b7jywffPAB1Goul0BERERE1JUkSYJer4der29xlaPmQnxZfinKa86jqvYCHOpSt8nsfLoevNICm/I8bDiPCnsWzlXANR5e2JVQ1IZCK6JgUMXApBuLSOM0xESlIMogoLSdcO86bz3uHJ/e1YQZUHn+gaS76Z3z3/dQOp0Ozz33HO677z6EhYV16Bzh4eFdXBUREREREbWHTqdDTEwMYmJimn3fbDa7TWhXWlqKsqp8VJjPosaRD4eqzC/j4CWlHUJZAjNKYMYxXKoFcssAlAHCIQFWI5S14XVd6e9AmCEefY1axARZYVCcgVTf+m49AaC244VoBvWIJd4C4hO88sor+POf/4yCggKMGDECf/3rXzFu3Lhm93399dfxz3/+E4cPHwYAXHHFFXj22Wdb3L8zPvroIxQXF3f5eb0RFRWFm2++uV37TpkyBadOncKKFSvw/PPPe7xfUlKCefPm4euvv0ZpaSlSUlLw+OOPY/r06a59Gnd3f/zxx7F9+3Z8//33bucZMWIEbr/9dixduhQAsHbtWrz44ovIzc1FcnIy5s+fjwceeKATn5qIiIiIiJrS6XTQ6XSIjo5u9n2LxeIW4i+VFKC0Jg9V1vO4LArhUJf5fBy8pBCArhIOVKIGeagBUFQLHC8DUAbYzTpIVhPUjtEIUkxFjF6DWIMdkfpqBKsuQOU4Dsl6EkA7egxoh8j6WXzF7yF948aNePjhh/Haa69h/PjxWL16NaZOnYrjx483O5P8jh07MH36dFx55ZWuluNf/vKXOHLkSLMTOHRGcXExzp0716XnlJNSqcSzzz6Lu+66C/Pnz/eYldJsNuOKK67A4sWLERISgk8++QQzZ85ESkpKs3/kmDFjBlasWIGcnBykpKQAAI4cOYKDBw/i3//+NwBg/fr1WLp0Kf72t79h1KhR2L9/P+bOnYugoCDMmjVL/g9NREREREQAAK1Wiz59+rS4IpfVanWF+EtlxbhYmYty8xlU2/JhQRGEptzn4+CVOjOgM8OOQtT3oD9hA2ADHFY17JfjoLKmIUqtRoxOoI/OgnBdGYzKc1CJM5DQaFi2hiG9S7z00kuYO3cufvvb3wIAXnvtNXzyySf4xz/+4Ta2ut769evdfl+7di3+/e9/Y/v2
7bj77rt9UnMgu/XWWzFy5EgsW7YMb7zxhtt7ffv2xR/+8AfX77///e+xdetWvPfee82G9CFDhmDEiBHYsGEDnnzySQDO73/8+PFITU0FACxbtgwvvvgibrvtNgBAv3798OOPP2LNmjUM6UREREREAUSj0SAqKgpRUVEABgC40u19m81W1wpfiqLy07hU9RMqLGdR4yiATSoBtOU+HQev0Nig0JQBKMNFABfr3zADwh4JUR2PEDsQpRSI0thwMb8QV06ogl5n9El9cvFrSLdarcjMzMRjjz3m2qZQKDBlyhTs2bOnXeeoqamBzWZrcSy1xWKBxdIwo2BFRUXniu4GnnvuOVxzzTVugRwA7HY7nn32Wbz33ns4f/48rFYrLBYLDAZDi+eaMWMG/vGPf+DJJ5+EEALvvPMOHn74YQBAdXU1cnJyMGfOHMydO9d1TG1tLUwmkzwfjoiIiIiIZKFWq10hfgAGerxfH+KLS/NQXHkapTU/odJ6HmZRiFrlJUi6Sh+Og3dACqlBFYAqALkAoPsWE1W9aAk2OVy8eBF2u91jTEV0dDSOHTvWrnMsXrwYcXFxmDJlSrPvr1ixAsuXL+90rd3J1VdfjalTp+Kxxx7D7NmzXdv//Oc/4y9/+QtWr16NYcOGISgoCAsXLoTV2vKDNH36dCxevBhZWVm4fPkyzp49i2nTpgEAqqqcazK8/vrrGD9+vNtxSqWy6z8YERERERH5TeMQD4zxeL+2thYXSy+goPQELlaednalr82HBc414iVtjazj4IVVD40qSL4L+Ijfu7t3xsqVK/Huu+9ix44d0Ol0ze7z2GOPuVp+AWdLekJCQrvO7/wfn391tIaVK1di5MiRSEtLc23btWsXbrnlFvz3f/83AOeyeidOnMDgwYNbPE98fDwmTZqE9evX4/Lly7juuutcY1yio6MRFxeH06dPY8aMGR2qk4iIiIiIegaVSoWYqETERDW/TrnFVoP8khMoKj+JkupcVFrOubrSC00FJEXnlv1W2Dq2wlWg8WtIj4yMhFKpRGFhodv2wsLCFpcdqPfCCy9g5cqV+M9//oPhw4e3uJ9Wq4VWq+1Qfe2dVT0QDRs2DDNmzMDLL7/s2jZgwAD861//wu7duxEWFoaXXnoJhYWFrYZ0wNnlfdmyZbBarVi1apXbe8uXL8f8+fNhMplw/fXXw2KxYN++fSgtLXX74wgREREREfVuWrUByTEjkRwz0uM9h7Cj0pyP/NLjKK7IQVlNHipt52EWRbArLwFKW9vnl/zfyNoV/BrSNRoNrrjiCmzfvh0ZGRkAnK2727dvx7x581o87vnnn8czzzyDrVu3YswYz24W5PT0009j48aNrt+feOIJnD59GlOnToXBYMC9996LjIwMlJeXt3qeO+64A/PmzYNSqXTdp3r33HMPDAYD/vznP+ORRx5BUFAQhg0bhoULF8rwiYiIiIiIqCdSSEqY9PEw6eORHnet23tCCJhrS1F6OQ/F5adwsfI0ysxnUFObD4tUDKGsAQAEq+P8UXqXk4QQvplbvwUbN27ErFmzsGbNGowbNw6rV6/Ge++9h2PHjiE6Ohp33303+vbtixUrVgBwToq2dOlSbNiwAVdddZXrPEajEUZj27P4VVRUwGQyoby8HCEhIW7vmc1m5Obmol+/fi12n6fO4/dMRERERERdxWqvQoXlHHQqE4yaWH+X06zWcmhTfh+TPm3aNBQXF2Pp0qUoKCjAyJEj8fnnn7smkztz5gwUCoVr/1dffRVWqxV33HGH23mWLVuGp556ypelExERERERkZ9plEZEGtL9XUaX8XtLuq+xJd3/+D0TEREREVFv4k1LuqLVd4mIiIiIiIjIZxjSiYiIiIiIiAIEQzoRERERERFRgGBIJyIiIiIiIgoQDOlEREREREREAYIhnYiIiIiIiChAMKQTERERERERBQiGdCIiIiIiIqIAwZDegxQXF+P+++9HYmIitFotYmJiMHXqVOza
tQsAkJycjNWrV/u3SCIiIiIiImqRyt8FUNe5/fbbYbVa8dZbb6F///4oLCzE9u3bUVJS4u/SiIiIiIiIqB0Y0lvxdd6fUGrO8WsNYboUXJ30ZJv7lZWV4ZtvvsGOHTswadIkAEBSUhLGjRsnd4lERERERETURRjSW1FqzkFR9SF/l9EuRqMRRqMRmzdvxs9+9jNotVp/l0RERERERERe4pj0HkKlUmHdunV46623EBoaiquuugqPP/44Dh486O/SiIiIiIiIqJ0Y0nuQ22+/HRcuXMBHH32E66+/Hjt27MDo0aOxbt06f5dGRERERERE7cCQ3sPodDpcd911ePLJJ7F7927Mnj0by5Yt83dZRERERERE1A4ck96KMF2Kv0vodA2DBw/G5s2bu6YYIiIiIiIikhVDeivaM6t6oCgpKcGdd96J//mf/8Hw4cMRHByMffv24fnnn8ctt9zi7/KIiIiIiIioHRjSewij0Yjx48dj1apVyMnJgc1mQ0JCAubOnYvHH3/c3+URERERERFROzCk9xBarRYrVqzAihUrWtznp59+8l1BRERERERE5DVOHEdEREREREQUIBjSiYiIiIiIiAIEQzoRERERERFRgGBIJyIiIiIiIgoQDOlEREREREREAYIhnYiIiIiIiChAMKQTERERERERBQiGdCIiIiIiIqIAwZBOREREREREFCAY0omIiIiIiIgCBEN6DzF79mxkZGR0+jxPPfUUJEnyeP3nP//pfJFERERERETUKpW/Cwho+XMBy2H/1qAdCsS+7tNLDhkyxCOUh4eH+7QGIiIiIiKi3oghvTWWw4D5O39X0Sl/+MMfcOzYMWzZsgUAsHr1ajz00EP47LPPcP311wMAUlNTsWTJEtxzzz0AAJVKhZiYGL/VTERERERE1Fuxu3sPN2nSJHz77bew2+0AgJ07dyIyMhI7duwAAJw/fx45OTmYPHmy/4okIiIiIiIiAAzpPd7EiRNRWVmJ/fv3QwiBr7/+GosWLXKF9B07dqBv375ITU11HXPo0CEYjUbXa9y4cX6qnoiIiIiIqHdhd/ceLjQ0FCNGjMCOHTug0Wig0Whw7733YtmyZaiqqsLOnTsxadIkt2PS0tLw0UcfuX7XarW+LpuIiIiIiKhXYkhvjXaovyvokhomT56MHTt2QKvVYtKkSQgPD8egQYPw7bffYufOnVi0aJHb/hqNxq1lnYiIiIiIiHyDIb01Pp5VXS6TJk3CP/7xD6hUKtdkcZMnT8Y777yDEydOcDw6ERERERFRgGBI70HKy8uRnZ3tti0iIgJXX301KisrsWXLFqxcuRKAM6TfcccdiI2NxcCBA/1QLRERERERETXFkN6D7NixA6NGjXLbNmfOHKxduxbDhg1DYWEh0tPTAQBXX301HA6Hx3h0IiIiIiIi8h9JCCH8XYQvVVRUwGQyoby8HCEhIW7vmc1m5Obmol+/ftDpdH6qsOfj90xERERERL1Jazm0KS7BRkRERERERBQgGNKJiIiIiIiIAgRDOhEREREREVGAYEgnIiIiIiIiChAM6UREREREREQBgiGdiIiIiIiIKEAwpBMREREREREFCIZ0IiIiIiIiogDBkE5EREREREQUIBjSiYiIiIiIiAIEQ3oPMXv2bEiS5PG6/vrrAQDJycmubQaDAcOGDcPatWv9XDURERERERE1pvJ3AQFt7lzg8GH/1jB0KPD66+3a9frrr8ebb77ptk2r1bp+fvrppzF37lzU1NTg/fffx9y5c9G3b1/ccMMNXVoyERERERERdQxDemsOHwa++87fVbSbVqtFTExMi+8HBwe73l+8eDGef/55bNu2jSGdiIiIiIgoQDCk90IOhwObNm1CaWkpNBqNv8shIiIiIiKiOhyT3oNs2bIFRqPR7fXss8+63l+8eDGMRiO0Wi3uuOMOhIWF4Z577vFjxURERERERNQYW9J7kF/84hd49dVX3baFh4e7fn7kkUcwe/Zs5Ofn
45FHHsEDDzyA1NRUX5dJRERERERELWBIb83Qof6uwKsagoKCWg3dkZGRSE1NRWpqKt5//30MGzYMY8aMweDBg7uiUiIiIiIiIuokhvTWtHNW9e4oISEB06ZNw2OPPYYPP/zQ3+UQERERERERGNJ7FIvFgoKCArdtKpUKkZGRze6/YMECDB06FPv27cOYMWN8USIRERERERG1ghPH9SCff/45YmNj3V4///nPW9x/8ODB+OUvf4mlS5f6sEoiIiIiIiJqiSSEEP4uwpcqKipgMplQXl6OkJAQt/fMZjNyc3PRr18/6HQ6P1XY8/F7JiIiIiKi3qS1HNoUW9KJiIiIiIiIAgRDOhEREREREVGAYEgnIiIiIiIiChAM6UREREREREQBgiGdiIiIiIiIKEAwpBMREREREREFCIZ0IiIiIiIiogDBkE5EREREREQUIBjSiYiIiIiIiAIEQ3oPMXnyZCxcuNDfZRAREREREVEnMKT3EB988AH+9Kc/uX4/deoUfvvb3yI+Ph5arRb9+vXD9OnTsW/fvnafc926dZAkCZIkQaFQIDY2FtOmTcOZM2fc9uMfCIiIiIiIiLqGyt8FBLKP5n6E4sPFfq0hamgUbn795jb3Cw8Pd/28b98+XHvttRg6dCjWrFmD9PR0VFZW4sMPP8SiRYuwc+fOdl8/JCQEx48fhxACubm5eOCBB3DnnXfi+++/79DnISIiIiIiopYxpLei+HAxzn13zt9ltMvkyZMxcuRIrFq1CrNnz8aAAQPwzTffQKFo6CwxcuRILFiwwPX74sWLsWnTJpw7dw4xMTGYMWMGli5dCrVa7dpHkiTExMQAAGJjYzFnzhzMnz8fFRUVCAkJ8d0HJCIiIiIi6gUY0nuY7OxsHDlyBBs2bHAL6PVCQ0NdPwcHB2PdunWIi4vDoUOHMHfuXAQHB+PRRx9t9txFRUXYtGkTlEollEqlXB+BiIiIiIio12JI72FOnjwJAEhPT29z3yeeeML1c3JyMv7whz/g3XffdQvp5eXlMBqNEEKgpqYGADB//nwEBQV1ceVERERERETEkN7DCCHave/GjRvx8ssvIycnB1VVVaitrfXowh4cHIysrCzYbDZ89tlnWL9+PZ555pmuLpuIiIiIiIjAkN6qqKFR/i7B6xoGDhwIADh27BhGjRrV4n579uzBjBkzsHz5ckydOhUmkwnvvvsuXnzxRbf9FAoFUlNTAQCDBg1CTk4O7r//frz99ttefhIiIiIiIiJqC0N6K9ozq3qgGTlyJAYPHowXX3wR06ZN8xiXXlZWhtDQUOzevRtJSUn44x//6HovLy+vzfMvWbIEKSkpeOihhzB69Ogur5+IiIiIiKg3Y0jvYSRJwptvvokpU6Zg4sSJ+OMf/4j09HRUVVXh448/xhdffIGdO3diwIABOHPmDN59912MHTsWn3zyCTZt2tTm+RMSEnDrrbdi6dKl2LJli2t7cXExsrOz3faNjY1FdHR0V39EIiIiIiKiHstz+m/q9saNG4d9+/YhNTUVc+fOxaBBg3DzzTfjyJEjWL16NQDg5ptvxkMPPYR58+Zh5MiR2L17N5588sl2nf+hhx7CJ598gh9++MG1bcOGDRg1apTb6/XXX5fj4xEREREREfVYkvBmprEeoKKiAiaTCeXl5R6TpJnNZuTm5qJfv37Q6XR+qrDn4/dMRERERES9SWs5tCm2pBMREREREREFCIZ0IiIiIiIiogDBkE5EREREREQUIBjSiYiIiIiIiAIEQ3ozetlcej7H75eIiIiIiKh5DOmNqNVqAEBNTY2fK+nZ6r/f+u+biIiIiIiInFT+LiCQKJVKhIaGoqioCABgMBggSZKfq+o5hBCoqalBUVERQkNDoVQq/V0SERERERFRQGFIbyImJgYAXEGdul5oaKjreyYiIiIiIqIGDOlNSJKE2NhY9OnTBzabzd/l9DhqtZot6ERERERERC1gSG+BUqlkmCQiIiIiIiKfCoiJ
41555RUkJydDp9Nh/Pjx+OGHH1rd//3330d6ejp0Oh2GDRuGTz/91EeVEhEREREREcnH7yF948aNePjhh7Fs2TJkZWVhxIgRmDp1aotjwnfv3o3p06djzpw52L9/PzIyMpCRkYHDhw/7uHIiIiIiIiKiriUJPy9aPX78eIwdOxZ/+9vfAAAOhwMJCQn4/e9/jyVLlnjsP23aNFRXV2PLli2ubT/72c8wcuRIvPbaa21er6KiAiaTCeXl5QgJCem6D0JERERERETUDG9yqF/HpFutVmRmZuKxxx5zbVMoFJgyZQr27NnT7DF79uzBww8/7LZt6tSp2Lx5c7P7WywWWCwW1+/l5eUAnF8SERERERERkdzq82d72sj9GtIvXrwIu92O6Ohot+3R0dE4duxYs8cUFBQ0u39BQUGz+69YsQLLly/32J6QkNDBqomIiIiIiIi8V1lZCZPJ1Oo+PX5298cee8yt5d3hcODSpUuIiIiAJEl+rKxtFRUVSEhIwNmzZ9k1P0DxHnUPvE/dA+9T4OM96h54n7oH3qfAx3vUPXSX+ySEQGVlJeLi4trc168hPTIyEkqlEoWFhW7bCwsLERMT0+wxMTExXu2v1Wqh1WrdtoWGhna8aD8ICQkJ6P/BEe9Rd8H71D3wPgU+3qPugfepe+B9Cny8R91Dd7hPbbWg1/Pr7O4ajQZXXHEFtm/f7trmcDiwfft2TJgwodljJkyY4LY/AGzbtq3F/YmIiIiIiIi6C793d3/44Ycxa9YsjBkzBuPGjcPq1atRXV2N3/72twCAu+++G3379sWKFSsAAAsWLMCkSZPw4osv4sYbb8S7776Lffv24e9//7s/PwYRERERERFRp/k9pE+bNg3FxcVYunQpCgoKMHLkSHz++eeuyeHOnDkDhaKhwf/KK6/Ehg0b8MQTT+Dxxx/HgAEDsHnzZgwdOtRfH0E2Wq0Wy5Yt8+iuT4GD96h74H3qHnifAh/vUffA+9Q98D4FPt6j7qEn3ie/r5NORERERERERE5+HZNORERERERERA0Y0omIiIiIiIgCBEM6ERERERERUYBgSCciIiIiIiIKEAzpfvL111/jpptuQlxcHCRJwubNm9s8ZseOHRg9ejS0Wi1SU1Oxbt062evs7by9Tzt27IAkSR6vgoIC3xTcC61YsQJjx45FcHAw+vTpg4yMDBw/frzN495//32kp6dDp9Nh2LBh+PTTT31Qbe/Vkfu0bt06j2dJp9P5qOLe59VXX8Xw4cMREhKCkJAQTJgwAZ999lmrx/A58j1v7xOfI/9buXIlJEnCwoULW92Pz5N/tec+8XnyvaeeesrjO09PT2/1mJ7wLDGk+0l1dTVGjBiBV155pV375+bm4sYbb8QvfvELZGdnY+HChbjnnnuwdetWmSvt3by9T/WOHz+O/Px816tPnz4yVUg7d+7Egw8+iO+++w7btm2DzWbDL3/5S1RXV7d4zO7duzF9+nTMmTMH+/fvR0ZGBjIyMnD48GEfVt67dOQ+AUBISIjbs5SXl+ejinuf+Ph4rFy5EpmZmdi3bx+uueYa3HLLLThy5Eiz+/M58g9v7xPA58if9u7dizVr1mD48OGt7sfnyb/ae58APk/+MGTIELfv/Ntvv21x3x7zLAnyOwBi06ZNre7z6KOPiiFDhrhtmzZtmpg6daqMlVFj7blPX331lQAgSktLfVITeSoqKhIAxM6dO1vc59e//rW48cYb3baNHz9e3HfffXKXR3Xac5/efPNNYTKZfFcUeQgLCxNr165t9j0+R4GjtfvE58h/KisrxYABA8S2bdvEpEmTxIIFC1rcl8+T/3hzn/g8+d6yZcvEiBEj2r1/T3mW2JLeTezZswdTpkxx2zZ16lTs2bPHTxVRa0aOHInY2Fhcd9112LVrl7/L6VXKy8sBAOHh4S3uw+fJ/9pznwCgqqoKSUlJSEhIaLO1kLqO3W7Hu+++i+rqakyYMKHZffgc+V977hPA58hf
HnzwQdx4440ez0lz+Dz5jzf3CeDz5A8nT55EXFwc+vfvjxkzZuDMmTMt7ttTniWVvwug9ikoKEB0dLTbtujoaFRUVODy5cvQ6/V+qowai42NxWuvvYYxY8bAYrFg7dq1mDx5Mr7//nuMHj3a3+X1eA6HAwsXLsRVV12FoUOHtrhfS88T5w7wjfbep7S0NPzjH//A8OHDUV5ejhdeeAFXXnkljhw5gvj4eB9W3HscOnQIEyZMgNlshtFoxKZNmzB48OBm9+Vz5D/e3Cc+R/7x7rvvIisrC3v37m3X/nye/MPb+8TnyffGjx+PdevWIS0tDfn5+Vi+fDkmTpyIw4cPIzg42GP/nvIsMaQTdaG0tDSkpaW5fr/yyiuRk5ODVatW4e233/ZjZb3Dgw8+iMOHD7c6Von8r733acKECW6tg1deeSUGDRqENWvW4E9/+pPcZfZKaWlpyM7ORnl5Of71r39h1qxZ2LlzZ4sBkPzDm/vE58j3zp49iwULFmDbtm2cVCyAdeQ+8XnyvRtuuMH18/DhwzF+/HgkJSXhvffew5w5c/xYmbwY0ruJmJgYFBYWum0rLCxESEgIW9ED3Lhx4xgafWDevHnYsmULvv766zb/mt3S8xQTEyNniQTv7lNTarUao0aNwqlTp2SqjjQaDVJTUwEAV1xxBfbu3Yu//OUvWLNmjce+fI78x5v71BSfI/llZmaiqKjIrQed3W7H119/jb/97W+wWCxQKpVux/B58r2O3Kem+Dz5XmhoKAYOHNjid95TniWOSe8mJkyYgO3bt7tt27ZtW6tj0CgwZGdnIzY21t9l9FhCCMybNw+bNm3Cl19+iX79+rV5DJ8n3+vIfWrKbrfj0KFDfJ58yOFwwGKxNPsen6PA0dp9aorPkfyuvfZaHDp0CNnZ2a7XmDFjMGPGDGRnZzcb/Pg8+V5H7lNTfJ58r6qqCjk5OS1+5z3mWfL3zHW9VWVlpdi/f7/Yv3+/ACBeeuklsX//fpGXlyeEEGLJkiVi5syZrv1Pnz4tDAaDeOSRR8TRo0fFK6+8IpRKpfj888/99RF6BW/v06pVq8TmzZvFyZMnxaFDh8SCBQuEQqEQ//nPf/z1EXq8+++/X5hMJrFjxw6Rn5/vetXU1Lj2mTlzpliyZInr9127dgmVSiVeeOEFcfToUbFs2TKhVqvFoUOH/PEReoWO3Kfly5eLrVu3ipycHJGZmSl+85vfCJ1OJ44cOeKPj9DjLVmyROzcuVPk5uaKgwcPiiVLlghJksQXX3whhOBzFCi8vU98jgJD01nD+TwFprbuE58n31u0aJHYsWOHyM3NFbt27RJTpkwRkZGRoqioSAjRc58lhnQ/qV+qq+lr1qxZQgghZs2aJSZNmuRxzMiRI4VGoxH9+/cXb775ps/r7m28vU/PPfecSElJETqdToSHh4vJkyeLL7/80j/F9xLN3R8Abs/HpEmTXPes3nvvvScGDhwoNBqNGDJkiPjkk098W3gv05H7tHDhQpGYmCg0Go2Ijo4W//Vf/yWysrJ8X3wv8T//8z8iKSlJaDQaERUVJa699lpX8BOCz1Gg8PY+8TkKDE3DH5+nwNTWfeLz5HvTpk0TsbGxQqPRiL59+4pp06aJU6dOud7vqc+SJIQQvmu3JyIiIiIiIqKWcEw6ERERERERUYBgSCciIiIiIiIKEAzpRERERERERAGCIZ2IiIiIiIgoQDCkExEREREREQUIhnQiIiIiIiKiAMGQTkRERERERBQgGNKJiIiIiIiIAgRDOhERUQA4duwYfvazn0Gn02HkyJH+LqdXqKmpwe23346QkBBIkoSysjJ/l0RERMSQTkRE5I3i4mJoNBpUV1fDZrMhKCgIZ86c6fR5ly1bhqCgIBw/fhzbt2/vgko7Jjk5GatXr+425+2Mt956C9988w12796N/Px8mEwmf5dEREQElb8LICIi6k727NmDESNGICgoCN9//z3Cw8ORmJjY6fPm5OTgxhtv
RFJSUhdUSe2Rk5ODQYMGYejQoR0+hxACdrsdKhX/SUVERF2DLelERERe2L17N6666ioAwLfffuv6uTUOhwNPP/004uPjodVqMXLkSHz++eeu9yVJQmZmJp5++mlIkoSnnnqqxfM8//zzSE1NhVarRWJiIp555hnX+4cOHcI111wDvV6PiIgI3HvvvaiqqnK9P3v2bGRkZOCFF15AbGwsIiIi8OCDD8JmswEAJk+ejLy8PDz00EOQJAmSJLmO/fbbbzFx4kTo9XokJCRg/vz5qK6uBgD885//hNFoxMmTJ137P/DAA0hPT0dNTU2L583Ly8NNN92EsLAwBAUFYciQIfj0009b/B7/3//7fxgwYAB0Oh2io6Nxxx13uN6zWCyYP38++vTpA51Oh5///OfYu3dvi+eaPHkyXnzxRXz99deQJAmTJ08GALz99tsYM2YMgoODERMTg7vuugtFRUWu43bs2AFJkvDZZ5/hiiuugFarxbffftvidYiIiLwmiIiIqFV5eXnCZDIJk8kk1Gq10Ol0wmQyCY1GI7RarTCZTOL+++9v8fiXXnpJhISEiHfeeUccO3ZMPProo0KtVosTJ04IIYTIz88XQ4YMEYsWLRL5+fmisrKy2fM8+uijIiwsTKxbt06cOnVKfPPNN+L1118XQghRVVUlYmNjxW233SYOHToktm/fLvr16ydmzZrlOn7WrFkiJCRE/O53vxNHjx4VH3/8sTAYDOLvf/+7EEKIkpISER8fL55++mmRn58v8vPzhRBCnDp1SgQFBYlVq1aJEydOiF27dolRo0aJ2bNnu8595513irFjxwqbzSa2bNki1Gq12LdvX6vnvfHGG8V1110nDh48KHJycsTHH38sdu7c2exn37t3r1AqlWLDhg3ip59+EllZWeIvf/mL6/358+eLuLg48emnn4ojR46IWbNmibCwMFFSUtLs+UpKSsTcuXPFhAkTRH5+vmu/N954Q3z66aciJydH7NmzR0yYMEHccMMNruO++uorAUAMHz5cfPHFF+LUqVMtXoOIiKgjGNKJiIjaYLPZRG5urjhw4IBQq9XiwIED4tSpU8JoNIqdO3eK3NxcUVxc3OLxcXFx4plnnnHbNnbsWPHAAw+4fh8xYoRYtmxZi+eoqKgQWq3WFcqb+vvf/y7CwsJEVVWVa9snn3wiFAqFKCgoEEI4Q3pSUpKora117XPnnXeKadOmuX5PSkoSq1atcjv3nDlzxL333uu27ZtvvhEKhUJcvnxZCCHEpUuXRHx8vLj//vtFdHS0x+dt7rzDhg0TTz31VIufubF///vfIiQkRFRUVHi8V1VVJdRqtVi/fr1rm9VqFXFxceL5559v8ZwLFiwQkyZNavW6e/fuFQBcfzipD+mbN29uV91ERETeYnd3IiKiNqhUKiQnJ+PYsWMYO3Yshg8fjoKCAkRHR+Pqq69GcnIyIiMjmz22oqICFy5c8OgWf9VVV+Ho0aPtruHo0aOwWCy49tprW3y/fqx842s4HA4cP37ctW3IkCFQKpWu32NjY926czfnwIEDWLduHYxGo+s1depUOBwO5ObmAgDCwsLwxhtv4NVXX0VKSgqWLFnS5meaP38+/vd//xdXXXUVli1bhoMHD7a473XXXYekpCT0798fM2fOxPr161FTUwPAObbcZrO5fcdqtRrjxo3z6jsGgMzMTNx0001ITExEcHAwJk2aBAAekwOOGTPGq/MSERG1F0M6ERFRG4YMGQKj0YiZM2fihx9+gNFoxLXXXouffvoJRqMRQ4YMkb0GvV7fJedRq9Vuv0uSBIfD0eoxVVVVuO+++5Cdne16HThwACdPnkRKSoprv6+//hpKpRL5+fmu8eqtueeee3D69GnMnDkThw4dwpgxY/DXv/612X2Dg4ORlZWFd955B7GxsVi6dClGjBjRpcumVVdXY+rUqQgJCcH69euxd+9ebNq0CQBgtVrd9m38xxAiIqKuxJBORETUhk8//RTZ
2dmIiYnB//3f/yE7OxtDhw7F6tWrkZ2d3epkZyEhIYiLi8OuXbvctu/atQuDBw9udw0DBgyAXq9vcXm2QYMG4cCBA27heNeuXVAoFEhLS2v3dTQaDex2u9u20aNH48cff0RqaqrHS6PRAHBOqPfcc8/h448/htFoxLx589o8LwAkJCTgd7/7HT744AMsWrQIr7/+eou1qVQqTJkyBc8//zwOHjyIn376CV9++SVSUlKg0WjcvmObzYa9e/d69R0fO3YMJSUlWLlyJSZOnIj09PQ2exkQERF1NYZ0IiKiNiQlJcFoNKKwsBC33HILEhIScOTIEdx+++1ITU1tc9m0Rx55BM899xw2btyI48ePY8mSJcjOzsaCBQvaXYNOp8PixYvx6KOP4p///CdycnLw3Xff4Y033gAAzJgxAzqdDrNmzcLhw4fx1Vdf4fe//z1mzpyJ6Ojodl8nOTkZX3/9Nc6fP4+LFy8CABYvXozdu3dj3rx5yM7OxsmTJ/Hhhx+6gnhlZSVmzpyJ+fPn44YbbsD69euxceNG/Otf/2r1vAsXLsTWrVuRm5uLrKwsfPXVVxg0aFCzdW3ZsgUvv/wysrOzkZeXh3/+859wOBxIS0tDUFAQ7r//fjzyyCP4/PPP8eOPP2Lu3LmoqanBnDlz2v3ZExMTodFo8Ne//hWnT5/GRx99hD/96U/tPp6IiKgrcFFPIiKidtixYwfGjh0LnU6Hb775BvHx8YiNjW3XsfPnz0d5eTkWLVqEoqIiDB48GB999BEGDBjgVQ1PPvkkVCoVli5digsXLiA2Nha/+93vAAAGgwFbt27FggULMHbsWBgMBtx+++146aWXvLrG008/jfvuuw8pKSmwWCwQQmD48OHYuXMn/vjHP2LixIkQQiAlJQXTpk0DACxYsABBQUF49tlnAQDDhg3Ds88+i/vuuw8TJkxA3759mz2v3W7Hgw8+iHPnziEkJATXX389Vq1a1WxdoaGh+OCDD/DUU0/BbDZjwIABeOedd1xDDVauXAmHw4GZM2eisrISY8aMwdatWxEWFtbuzx4VFYV169bh8ccfx8svv4zRo0fjhRdewM033+zVd0hERNQZkhBC+LsIIiIiIiIiImJ3dyIiIiIiIqKAwZBOREREREREFCAY0omIiIiIiIgCBEM6ERERERERUYBgSCciIiIiIiIKEAzpRERERERERAGCIZ2IiIiIiIgoQDCkExEREREREQUIhnQiIiIiIiKiAMGQTkRERERERBQgGNKJiIiIiIiIAsT/B/yBeFCRqQkfAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "# Create the plot\n",
+ "figure = visual_plt.plot_lines(\n",
+ " lines_to_plot, x_axes=plotting_dict_naive[\"x_context\"], line_names=line_names, colors=line_colors,\n",
+ " ylabel=\"Test accuracy (averaged over contexts so far)\", ylim=(0,1.05) if scenario==\"class\" else None,\n",
+ " xlabel=\"# of contexts so far\", title=\"{} -- {}-incremental learning\".format(experiment, scenario)\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8a5620d1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/PyTorch/build-in/other/continual-learning/figures/rotatedMNIST.png b/PyTorch/build-in/other/continual-learning/figures/rotatedMNIST.png
new file mode 100644
index 000000000..afb9d51bb
Binary files /dev/null and b/PyTorch/build-in/other/continual-learning/figures/rotatedMNIST.png differ
diff --git a/PyTorch/build-in/other/continual-learning/figures/splitMNIST_results_tutorial.png b/PyTorch/build-in/other/continual-learning/figures/splitMNIST_results_tutorial.png
new file mode 100644
index 000000000..a118d1bf0
Binary files /dev/null and b/PyTorch/build-in/other/continual-learning/figures/splitMNIST_results_tutorial.png differ
diff --git a/PyTorch/build-in/other/continual-learning/figures/splitMNIST_schematic.png b/PyTorch/build-in/other/continual-learning/figures/splitMNIST_schematic.png
new file mode 100644
index 000000000..72d129f84
Binary files /dev/null and b/PyTorch/build-in/other/continual-learning/figures/splitMNIST_schematic.png differ
diff --git a/PyTorch/build-in/other/continual-learning/figures/stabilityGap.png b/PyTorch/build-in/other/continual-learning/figures/stabilityGap.png
new file mode 100644
index 000000000..e8a3a7c21
Binary files /dev/null and b/PyTorch/build-in/other/continual-learning/figures/stabilityGap.png differ
diff --git a/PyTorch/build-in/other/continual-learning/figures/strategies.png b/PyTorch/build-in/other/continual-learning/figures/strategies.png
new file mode 100644
index 000000000..f5aa4dfed
Binary files /dev/null and b/PyTorch/build-in/other/continual-learning/figures/strategies.png differ
diff --git a/PyTorch/build-in/other/continual-learning/get_loss.py b/PyTorch/build-in/other/continual-learning/get_loss.py
new file mode 100644
index 000000000..03f37fbef
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/get_loss.py
@@ -0,0 +1,87 @@
+import re
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib
+matplotlib.use('Agg')
+from scipy.signal import savgol_filter
def compare_loss(benchmark_loss_array, sdaa_loss_array):
    """Check an sdaa loss curve against a benchmark (cuda) loss curve.

    Args:
        benchmark_loss_array: 1-D numpy array of reference (cuda) losses.
        sdaa_loss_array: 1-D numpy array of sdaa losses, same length.

    Returns:
        A ``(passed, message)`` tuple: ``passed`` is True when the mean
        relative error is <= 0.05 or the mean absolute error is <= 0.0002;
        ``message`` describes the rule that was evaluated.
    """

    def MeanRelativeError(cuda_loss, sdaa_loss):
        # Use magnitudes: a signed mean lets positive and negative deviations
        # cancel, and any curve that merely undershoots the benchmark would
        # pass trivially with a negative "error".
        return (np.abs(sdaa_loss - cuda_loss) / np.abs(cuda_loss)).mean()

    def MeanAbsoluteError(cuda_loss, sdaa_loss):
        return np.abs(sdaa_loss - cuda_loss).mean()

    mean_relative_error = MeanRelativeError(benchmark_loss_array, sdaa_loss_array)
    mean_absolute_error = MeanAbsoluteError(benchmark_loss_array, sdaa_loss_array)

    print("MeanRelativeError:", mean_relative_error)
    print("MeanAbsoluteError:", mean_absolute_error)

    # Report which of the two metrics is the smaller (i.e. the easier rule).
    if mean_relative_error <= mean_absolute_error:
        print("Rule,mean_relative_error", mean_relative_error)
    else:
        print("Rule,mean_absolute_error", mean_absolute_error)

    print_str = f"{mean_relative_error=} <= 0.05 or {mean_absolute_error=} <= 0.0002"
    if mean_relative_error <= 0.05 or mean_absolute_error <= 0.0002:
        print('pass', print_str)
        return True, print_str
    else:
        print('fail', print_str)
        return False, print_str
+
def parse_string(string):
    """Extract every ``Loss: <float>`` value (case-insensitive) from *string*.

    Returns the numeric substrings in order of appearance, e.g. ``["0.123"]``.
    """
    loss_pattern = re.compile(r"Loss: ([+-]?\d+\.\d+)", re.IGNORECASE)
    return loss_pattern.findall(string)
+
def parse_loss(ret_list):
    """Convert a list of loss strings into a float32 numpy array.

    Whitespace around each entry is stripped before conversion; the first
    ten parsed values are echoed for a quick sanity check.
    """
    values = [float(item.strip()) for item in ret_list]
    loss_arr = np.asarray(values, dtype=np.float32)
    print("Parsed loss array (first 10):", loss_arr[:10])
    return loss_arr
+
+
def plot_loss(sdaa_loss: np.ndarray, a100_loss: np.ndarray) -> None:
    """Plot smoothed sdaa vs. cuda loss curves and save them to ``loss.jpg``.

    Each 1-D loss array is smoothed with a Savitzky-Golay filter
    (window 20, polynomial order 1) before plotting; the sdaa curve is
    drawn solid, the cuda curve dashed.

    NOTE(review): savgol_filter with a window of 20 assumes the loss
    arrays contain at least 20 samples -- confirm the parsed logs are
    long enough, otherwise scipy raises.
    """
    fig, ax = plt.subplots(figsize=(12, 6))

    smoothed_losses = savgol_filter(sdaa_loss, 20, 1)
    x = list(range(len(sdaa_loss)))
    ax.plot(x, smoothed_losses, label="sdaa_loss")

    # Reuse the same local names for the second curve; dashed line style
    # distinguishes the cuda reference.
    smoothed_losses = savgol_filter(a100_loss, 20, 1)
    x = list(range(len(a100_loss)))
    ax.plot(x, smoothed_losses, "--", label="cuda_loss")

    ax.set_xlabel("Iteration")
    ax.set_ylabel("Loss")
    ax.set_title("Model Training Loss Curves (Smoothed)")
    ax.legend()
    # Agg backend is selected at import time, so this writes to disk only.
    plt.savefig("loss.jpg")
+
if __name__ == "__main__":
    from argparse import ArgumentParser, ArgumentTypeError

    # CLI: paths to the two training logs to compare.
    parser = ArgumentParser(description='modelzoo')
    parser.add_argument('--sdaa-log', type=str,
                        default="/localnvme/application/huangyun/continual-learning/sdaa.log")
    parser.add_argument('--cuda-log', type=str,
                        default="/localnvme/application/huangyun/continual-learning/cuda.log")
    args = parser.parse_args()

    def read_loss_strings(log_path):
        # Read one log file and pull out every "Loss: <float>" match.
        with open(log_path, 'r') as handle:
            return parse_string(handle.read())

    sdaa_res = read_loss_strings(args.sdaa_log)
    a100_res = read_loss_strings(args.cuda_log)

    # Truncate both series to the shorter run so they compare element-wise.
    n_common = min(len(a100_res), len(sdaa_res))
    sdaa_loss = parse_loss(sdaa_res[:n_common])
    a100_loss = parse_loss(a100_res[:n_common])

    compare_loss(a100_loss, sdaa_loss)
    plot_loss(sdaa_loss, a100_loss)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/hands_on_tutorial_InvictaSpringSchool.ipynb b/PyTorch/build-in/other/continual-learning/hands_on_tutorial_InvictaSpringSchool.ipynb
new file mode 100644
index 000000000..af916f465
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/hands_on_tutorial_InvictaSpringSchool.ipynb
@@ -0,0 +1,2689 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Hands-on tutorial: Continual Learning\n",
+ "\n",
+ "**You can make your own copy of this notebook by selecting File->Save a copy in Drive from the menu bar above.**"
+ ],
+ "metadata": {
+ "id": "pB15rzv-5A-i"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "This Colab Notebook was originally used for a hands-on tutorial at the [INVICTA Spring School](https://invicta.inesctec.pt/) in March 2024. The estimated time for this tutorial is 2 hours. At the school, this hands-on tutorial was preceded by a 3-hour interactive lecture, for which the slides can be found [here](https://gmvandeven.github.io/files/slides/InvictaSpringSchool_Mar2024.pdf)."
+ ],
+ "metadata": {
+ "id": "aBduCQ3QONvx"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Things you'll learn in this session:\n",
+ "- How to set up a simple continual learning experiment\n",
+ "- How to implement EWC, replay and EWC+replay\n",
+ "- How to evaluate and visualize the results of a continual learning experiment"
+ ],
+ "metadata": {
+ "id": "DvB7ZIPdQr5c"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "This tutorial is based on code from this repository: https://github.com/GMvandeVen/continual-learning.\n",
+ "\n",
+ "Other popular libraries for continual learning are [Avalanche](https://github.com/ContinualAI/avalanche) and [Mammoth](https://github.com/aimagelab/mammoth)."
+ ],
+ "metadata": {
+ "id": "OS7kSVkOth4g"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Setup"
+ ],
+ "metadata": {
+ "id": "WojfTislQ6dg"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Load required libraries\n",
+ "First, let's load some packages that we are going to need. We use [PyTorch](https://pytorch.org/) as our main deep learning library."
+ ],
+ "metadata": {
+ "id": "anTIGQYWnm--"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Standard libraries\n",
+ "import numpy as np\n",
+ "import copy\n",
+ "import tqdm\n",
+ "# Pytorch\n",
+ "import torch\n",
+ "from torch.nn import functional as F\n",
+ "from torchvision import datasets, transforms\n",
+ "# For visualization\n",
+ "from torchvision.utils import make_grid\n",
+ "import matplotlib.pyplot as plt"
+ ],
+ "metadata": {
+ "id": "LNJi8AZPQ8HO"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Download data\n",
+ "For this tutorial we will use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/), to construct different types of continual learning experiments."
+ ],
+ "metadata": {
+ "id": "lCOLAWSkyA5A"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "MNIST_trainset = datasets.MNIST(root='data/', train=True, download=True,\n",
+ " transform=transforms.ToTensor())\n",
+ "MNIST_testset = datasets.MNIST(root='data/', train=False, download=True,\n",
+ " transform=transforms.ToTensor())\n",
+ "config = {'size': 28, 'channels': 1, 'classes': 10}"
+ ],
+ "metadata": {
+ "id": "9mngyfO-RzyP"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Visualization functions\n",
+ "def multi_context_barplot(axis, accs, title=None):\n",
+ " '''Generate barplot using the values in [accs].'''\n",
+ " contexts = len(accs)\n",
+ " axis.bar(range(contexts), accs, color='k')\n",
+ " axis.set_ylabel('Testing Accuracy (%)')\n",
+ " axis.set_xticks(range(contexts), [f'Context {i+1}' for i in range(contexts)])\n",
+ " if title is not None:\n",
+ " axis.set_title(title)\n",
+ "\n",
+ "def plot_examples(axis, dataset, context_id=None):\n",
+ " '''Plot 25 examples from [dataset].'''\n",
+ " data_loader = torch.utils.data.DataLoader(dataset, batch_size=25, shuffle=True)\n",
+ " image_tensor, _ = next(iter(data_loader))\n",
+ " image_grid = make_grid(image_tensor, nrow=5, pad_value=1) # pad_value=0 would give black borders\n",
+ " axis.imshow(np.transpose(image_grid.numpy(), (1,2,0)))\n",
+ " if context_id is not None:\n",
+ " axis.set_title(\"Context {}\".format(context_id+1))\n",
+ " axis.axis('off')"
+ ],
+ "metadata": {
+ "id": "yv-AN2xn1mZd",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Part 1: Catastrophic forgetting - Permuted MNIST\n",
+ "Let's start by trying to set up a simple continual learning experiment, to check whether there is indeed catastrophic forgetting."
+ ],
+ "metadata": {
+ "id": "sFEmAjmVQfRs"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Set up the benchmark (Permuted MNIST)\n",
+ "For this we will use \"Permuted MNIST\". In this continual learning experiment, in each context (or task), the neural network must learn to classify the ten MNIST digits. However, in each context a different permutation is applied to the pixels of all images.\n",
+ "\n",
+ "Permuted MNIST was first used in this paper: https://arxiv.org/abs/1312.6211."
+ ],
+ "metadata": {
+ "id": "wdbrA_V6jrI_"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's start by specifying a function and a dataset class that we will use to create the various contexts (or tasks) of Permuted MNIST."
+ ],
+ "metadata": {
+ "id": "tm0iox7zklFq"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Function to apply a given permutation the pixels of an image.\n",
+ "def permutate_image_pixels(image, permutation):\n",
+ " '''Permutate the pixels of [image] according to [permutation].'''\n",
+ "\n",
+ " if permutation is None:\n",
+ " return image\n",
+ " else:\n",
+ " c, h, w = image.size()\n",
+ " image = image.view(c, -1)\n",
+ " image = image[:, permutation] #--> same permutation for each channel\n",
+ " image = image.view(c, h, w)\n",
+ " return image"
+ ],
+ "metadata": {
+ "id": "DHjgX-YuyvFj"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Class to create a dataset with images that have all been transformed in the same way.\n",
+ "class TransformedDataset(torch.utils.data.Dataset):\n",
+ " '''To modify an existing dataset with a transform.\n",
+ " Useful for creating different permutations of MNIST without loading the data multiple times.'''\n",
+ "\n",
+ " def __init__(self, original_dataset, transform=None, target_transform=None):\n",
+ " super().__init__()\n",
+ " self.dataset = original_dataset\n",
+ " self.transform = transform\n",
+ " self.target_transform = target_transform\n",
+ "\n",
+ " def __len__(self):\n",
+ " return len(self.dataset)\n",
+ "\n",
+ " def __getitem__(self, index):\n",
+ " (input, target) = self.dataset[index]\n",
+ " if self.transform:\n",
+ " input = self.transform(input)\n",
+ " if self.target_transform:\n",
+ " target = self.target_transform(target)\n",
+ " return (input, target)"
+ ],
+ "metadata": {
+ "id": "PlVMxtWVyhaa"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now let's use these tools to create a Permuted MNIST benchmark with 2 contexts."
+ ],
+ "metadata": {
+ "id": "oGt1bpQkk8Kw"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "contexts = 2"
+ ],
+ "metadata": {
+ "id": "EklsnmDolDbR"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Specify for each context the permutations to use (with no permutation for the first context)\n",
+ "permutations = [None] + [np.random.permutation(config['size']**2) for _ in range(contexts-1)]"
+ ],
+ "metadata": {
+ "id": "XGx9n5ezlvzf"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Specify for each context the transformed train- and testset\n",
+ "train_datasets = []\n",
+ "test_datasets = []\n",
+ "for context_id, perm in enumerate(permutations):\n",
+ " train_datasets.append(TransformedDataset(\n",
+ " MNIST_trainset, transform=transforms.Lambda(lambda x, p=perm: permutate_image_pixels(x, p)),\n",
+ " ))\n",
+ " test_datasets.append(TransformedDataset(\n",
+ " MNIST_testset, transform=transforms.Lambda(lambda x, p=perm: permutate_image_pixels(x, p)),\n",
+ " ))"
+ ],
+ "metadata": {
+ "id": "AVAF1WGnyVlY"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize the contexts\n",
+ "figure, axis = plt.subplots(1, contexts, figsize=(3*contexts, 4))\n",
+ "\n",
+ "for context_id in range(len(train_datasets)):\n",
+ " plot_examples(axis[context_id], train_datasets[context_id], context_id=context_id)"
+ ],
+ "metadata": {
+ "id": "w0tTkEFZy-VU"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Set up the model\n",
+ "Now it is time to define the neural network model that we will sequentially train on these two contexts."
+ ],
+ "metadata": {
+ "id": "A9GSVyeAl1nh"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "We start by specifying some \"helper functions\" and \"helper code\" that make it easier to specify the model. If you are interested you can have a look at the code in the cell below, but that is not needed to follow the rest of this tutorial.\n",
+ "\n",
+ "**It is however needed to run the code in the below cell!**"
+ ],
+ "metadata": {
+ "id": "xwfRu6HTmoSS"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Helper functions\n",
+ "\n",
+ "class Identity(torch.nn.Module):\n",
+ " '''A nn-module to simply pass on the input data.'''\n",
+ " def forward(self, x):\n",
+ " return x\n",
+ "\n",
+ " def __repr__(self):\n",
+ " tmpstr = self.__class__.__name__ + '()'\n",
+ " return tmpstr\n",
+ "\n",
+ "\n",
+ "class Flatten(torch.nn.Module):\n",
+ " '''A nn-module to flatten a multi-dimensional tensor to 2-dim tensor.'''\n",
+ " def forward(self, x):\n",
+ " batch_size = x.size(0) # first dimenstion should be batch-dimension.\n",
+ " return x.view(batch_size, -1)\n",
+ "\n",
+ " def __repr__(self):\n",
+ " tmpstr = self.__class__.__name__ + '()'\n",
+ " return tmpstr\n",
+ "\n",
+ "\n",
+ "class fc_layer(torch.nn.Module):\n",
+ " '''Fully connected layer, with possibility of returning \"pre-activations\".\n",
+ "\n",
+ " Input: [batch_size] x ... x [in_size] tensor\n",
+ " Output: [batch_size] x ... x [out_size] tensor'''\n",
+ "\n",
+ " def __init__(self, in_size, out_size, nl=torch.nn.ReLU(), bias=True):\n",
+ " super().__init__()\n",
+ " self.bias = bias\n",
+ " self.linear = torch.nn.Linear(in_size, out_size, bias=bias)\n",
+ " if isinstance(nl, torch.nn.Module):\n",
+ " self.nl = nl\n",
+ " elif nl==\"relu\":\n",
+ " self.nl = torch.nn.ReLU()\n",
+ " elif nl==\"leakyrelu\":\n",
+ " self.nl = torch.nn.LeakyReLU()\n",
+ "\n",
+ " def forward(self, x):\n",
+ " pre_activ = self.linear(x)\n",
+ " output = self.nl(pre_activ) if hasattr(self, 'nl') else pre_activ\n",
+ " return output\n",
+ "\n",
+ "\n",
+ "class MLP(torch.nn.Module):\n",
+ " '''Module for a multi-layer perceptron (MLP).\n",
+ "\n",
+ " Input: [batch_size] x ... x [size_per_layer[0]] tensor\n",
+ " Output: (tuple of) [batch_size] x ... x [size_per_layer[-1]] tensor'''\n",
+ "\n",
+ " def __init__(self, input_size=1000, output_size=10, layers=2,\n",
+ " hid_size=1000, hid_smooth=None, size_per_layer=None,\n",
+ " nl=\"relu\", bias=True, output='normal'):\n",
+ " '''sizes: 0th=[input], 1st=[hid_size], ..., 1st-to-last=[hid_smooth], last=[output].\n",
+ " [input_size] # of inputs\n",
+ " [output_size] # of units in final layer\n",
+ " [layers] # of layers\n",
+ " [hid_size] # of units in each hidden layer\n",
+ " [hid_smooth] if None, all hidden layers have [hid_size] units, else # of units linearly in-/decreases s.t.\n",
+ " final hidden layer has [hid_smooth] units (if only 1 hidden layer, it has [hid_size] units)\n",
+ " [size_per_layer] None or with for each layer number of units (1st element = number of inputs)\n",
+ " --> overwrites [input_size], [output_size], [layers], [hid_size] and [hid_smooth]\n",
+ " [nl] ; type of non-linearity to be used (options: \"relu\", \"leakyrelu\", \"none\")\n",
+ " [output] ; if - \"normal\", final layer is same as all others\n",
+ " - \"none\", final layer has no non-linearity\n",
+ " - \"sigmoid\", final layer has sigmoid non-linearity'''\n",
+ "\n",
+ " super().__init__()\n",
+ " self.output = output\n",
+ "\n",
+ " # get sizes of all layers\n",
+ " if size_per_layer is None:\n",
+ " hidden_sizes = []\n",
+ " if layers > 1:\n",
+ " if (hid_smooth is not None):\n",
+ " hidden_sizes = [int(x) for x in np.linspace(hid_size, hid_smooth, num=layers-1)]\n",
+ " else:\n",
+ " hidden_sizes = [int(x) for x in np.repeat(hid_size, layers - 1)]\n",
+ " size_per_layer = [input_size] + hidden_sizes + [output_size] if layers>0 else [input_size]\n",
+ " self.layers = len(size_per_layer)-1\n",
+ "\n",
+ " # set label for this module\n",
+ " # -determine \"non-default options\"-label\n",
+ " nd_label = \"{bias}{nl}\".format(\n",
+ " bias=\"\" if bias else \"n\",\n",
+ " nl=\"l\" if nl==\"leakyrelu\" else (\"n\" if nl==\"none\" else \"\"),\n",
+ " )\n",
+ " nd_label = \"{}{}\".format(\"\" if nd_label==\"\" else \"-{}\".format(nd_label),\n",
+ " \"\" if output==\"normal\" else \"-{}\".format(output))\n",
+ " # -set label\n",
+ " size_statement = \"\"\n",
+ " for i in size_per_layer:\n",
+ " size_statement += \"{}{}\".format(\"-\" if size_statement==\"\" else \"x\", i)\n",
+ " self.label = \"F{}{}\".format(size_statement, nd_label) if self.layers>0 else \"\"\n",
+ "\n",
+ " # set layers\n",
+ " for lay_id in range(1, self.layers+1):\n",
+ " # number of units of this layer's input and output\n",
+ " in_size = size_per_layer[lay_id-1]\n",
+ " out_size = size_per_layer[lay_id]\n",
+ " # define and set the fully connected layer\n",
+ " layer = fc_layer(\n",
+ " in_size, out_size, bias=bias,\n",
+ " nl=(\"none\" if output==\"none\" else nn.Sigmoid()) if (\n",
+ " lay_id==self.layers and not output==\"normal\"\n",
+ " ) else nl,\n",
+ " )\n",
+ " setattr(self, 'fcLayer{}'.format(lay_id), layer)\n",
+ "\n",
+ " # if no layers, add \"identity\"-module to indicate in this module's representation nothing happens\n",
+ " if self.layers<1:\n",
+ " self.noLayers = Identity()\n",
+ "\n",
+ " def forward(self, x):\n",
+ " for lay_id in range(1, self.layers + 1):\n",
+ " x = getattr(self, \"fcLayer{}\".format(lay_id))(x)\n",
+ " return x\n",
+ "\n",
+ " @property\n",
+ " def name(self):\n",
+ " return self.label"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "W5zR4KSamLJT"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now let's specify a class for a basic neural network classifier model."
+ ],
+ "metadata": {
+ "id": "UOymyzR9p3ob"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "class Classifier(torch.nn.Module):\n",
+ " '''Model for classifying images.'''\n",
+ "\n",
+ " def __init__(self, image_size, image_channels, output_units,\n",
+ " fc_layers=3, fc_units=1000, fc_nl=\"relu\", bias=True):\n",
+ "\n",
+ " super().__init__()\n",
+ "\n",
+ " # Flatten image to 2D-tensor\n",
+ " self.flatten = Flatten()\n",
+ "\n",
+ " # Specify the fully connected hidden layers\n",
+ " input_size = image_channels * image_size * image_size\n",
+ " self.fcE = MLP(input_size=input_size, output_size=fc_units, layers=fc_layers-1,\n",
+ " hid_size=fc_units, nl=fc_nl, bias=bias)\n",
+ " mlp_output_size = fc_units if fc_layers>1 else self.input_size\n",
+ "\n",
+ " # Specify the final linear classifier layer\n",
+ " self.classifier = fc_layer(mlp_output_size, output_units, nl='none')\n",
+ "\n",
+ " def forward(self, x):\n",
+ " flatten_x = self.flatten(x)\n",
+ " final_features = self.fcE(flatten_x)\n",
+ " out = self.classifier(final_features)\n",
+ " return out"
+ ],
+ "metadata": {
+ "id": "s5hXbEWan7w0"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's create an instance of such a classifier model, and print some details of it to screen."
+ ],
+ "metadata": {
+ "id": "SoADM8X_qCSk"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Specify the architectural layout of the network to use\n",
+ "fc_lay = 4 #--> number of fully-connected layers\n",
+ "fc_units = 40 #--> number of units in each hidden layer\n",
+ "fc_nl = \"relu\" #--> what non-linearity to use?"
+ ],
+ "metadata": {
+ "id": "pQJISF0xqJ2B"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Define the model\n",
+ "model = Classifier(image_size=config['size'], image_channels=config['channels'],\n",
+ " output_units=config['classes'],\n",
+ " fc_layers=fc_lay, fc_units=fc_units, fc_nl=fc_nl)"
+ ],
+ "metadata": {
+ "id": "JHKjU4MUqT4f"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Print the network architecture\n",
+ "model"
+ ],
+ "metadata": {
+ "id": "LsSDP19EqZHs"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Print info regarding number of parameters\n",
+ "total_params = 0\n",
+ "for param in model.parameters():\n",
+ " n_params = index_dims = 0\n",
+ " for dim in param.size():\n",
+ " n_params = dim if index_dims==0 else n_params*dim\n",
+ " index_dims += 1\n",
+ " total_params += n_params\n",
+ "print( \"--> this network has {} parameters (~{}K)\"\n",
+ " .format(total_params, round(total_params / 1000)))"
+ ],
+ "metadata": {
+ "id": "sPe52AA8qY4U"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Train on first context\n",
+ "It's time to start training the model on the first context (i.e., the regular MNIST dataset)."
+ ],
+ "metadata": {
+ "id": "Pg_98FO7q7SI"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "To do this, let's define a function that can train a given neural network (i.e., `model`) on a particular dataset (i.e., `dataset`). We can then later re-use this function, for example to train the network on the dataset of the second context.\n",
+ "\n",
+ "We can also specify the number of iterations we want to train for, the learning rate and the mini batch-size."
+ ],
+ "metadata": {
+ "id": "jbtG3Elh5wbV"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def train(model, dataset, iters, lr, batch_size):\n",
+ " # Define the optimizer\n",
+ " optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))\n",
+ "\n",
+ " # Set model in training-mode\n",
+ " model.train()\n",
+ "\n",
+ " # Initialize # iters left on current data-loader(s)\n",
+ " iters_left = 1\n",
+ "\n",
+ " # Define tqdm progress bar(s)\n",
+ " progress_bar = tqdm.tqdm(range(1, iters+1))\n",
+ "\n",
+ " # Loop over all iterations\n",
+ " for batch_index in range(1, iters+1):\n",
+ "\n",
+ " # Update # iters left on current data-loader(s) and, if needed, create new one(s)\n",
+ " iters_left -= 1\n",
+ " if iters_left==0:\n",
+ " data_loader = iter(torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
+ " shuffle=True, drop_last=True))\n",
+ " iters_left = len(data_loader)\n",
+ "\n",
+ " # Sample training data of current context\n",
+ " x, y = next(data_loader)\n",
+ "\n",
+ " # Reset optimizer\n",
+ " optimizer.zero_grad()\n",
+ "\n",
+ " # Run model\n",
+ " y_hat = model(x)\n",
+ "\n",
+ " # Calculate prediction loss\n",
+ " loss = torch.nn.functional.cross_entropy(input=y_hat, target=y, reduction='mean')\n",
+ "\n",
+ " # Calculate training-accuracy (in %)\n",
+ " accuracy = (y == y_hat.max(1)[1]).sum().item()*100 / x.size(0)\n",
+ "\n",
+ " # Backpropagate errors\n",
+ " loss.backward()\n",
+ "\n",
+ " # Take the optimizer step\n",
+ " optimizer.step()\n",
+ "\n",
+ " # Update progress bar\n",
+ " progress_bar.set_description(\n",
+ " ' | training loss: {loss:.3} | training accuracy: {prec:.3}% |'\n",
+ " .format(loss=loss.item(), prec=accuracy)\n",
+ " )\n",
+ " progress_bar.update(1)\n",
+ "\n",
+ " # Close the progress bar\n",
+ " progress_bar.close()"
+ ],
+ "metadata": {
+ "id": "yNJOs8Vj54Ub"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now we need to choose the training hyperparameters (i.e., learning rate, batch size and number of iterations)."
+ ],
+ "metadata": {
+ "id": "DlFBimOt7Tbx"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "iters = 200 # for how many iterations to train?\n",
+ "lr = 0.01 # learning rate\n",
+ "batch_size = 128 # size of mini-batches"
+ ],
+ "metadata": {
+ "id": "ITZJbebd7cZY"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "And let's train our neural network on the first context."
+ ],
+ "metadata": {
+ "id": "XyTV4Ee_7d9a"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Train the model on the first context\n",
+ "train(model, dataset=train_datasets[0], iters=iters, lr=lr, batch_size=batch_size)"
+ ],
+ "metadata": {
+ "id": "GJrEXhHwtO69"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Evaluate\n",
+ "Did the training work? Let's find out by evaluating the performance of the trained model on the MNIST test set."
+ ],
+ "metadata": {
+ "id": "_XeVQWFWtSo9"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Again, let's start by specifying a function to evaluate any given model (i.e., `model`) on a specific dataset (i.e., `dataset`)."
+ ],
+ "metadata": {
+ "id": "gYkoS0NYyYC-"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def test_acc(model, dataset, test_size=None, batch_size=128):\n",
+ " '''Evaluate accuracy (% samples classified correctly) of a classifier ([model]) on [dataset].'''\n",
+ "\n",
+ " # Set model to eval()-mode\n",
+ " mode = model.training\n",
+ " model.eval()\n",
+ "\n",
+ " # Loop over batches in [dataset]\n",
+ " data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
+ " shuffle=True, drop_last=False)\n",
+ " total_tested = total_correct = 0\n",
+ " for x, y in data_loader:\n",
+ " # -break on [test_size] (if \"None\", full dataset is used)\n",
+ " if test_size:\n",
+ " if total_tested >= test_size:\n",
+ " break\n",
+ " # -evaluate model\n",
+ " with torch.no_grad():\n",
+ " scores = model(x)\n",
+ " _, predicted = torch.max(scores, 1)\n",
+ " # -update statistics\n",
+ " total_correct += (predicted == y).sum().item()\n",
+ " total_tested += len(x)\n",
+ " accuracy = total_correct*100 / total_tested\n",
+ "\n",
+ " # Set model back to its initial mode, print result on screen (if requested) and return it\n",
+ " model.train(mode=mode)\n",
+ "\n",
+ " return accuracy"
+ ],
+ "metadata": {
+ "id": "oeR-wzk7trnq"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now let's use this function to evaluate the performance of the model on both the test data from context 1 (i.e., regular MNIST, on which the model was just trained) and on the test data from context 2 (i.e., a permuted version of MNIST, on which the model has not yet been trained)."
+ ],
+ "metadata": {
+ "id": "oy0jxo7GzTZS"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Evaluate accuracy per context and print to screen\n",
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "context1_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " context1_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "RLbnEFYizT-T"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "The model does well on test data from context 1, but is around chance level for test data from context 2.\n",
+ "\n",
+ "This is not very surprising, as we have not yet trained the model on context 2!"
+ ],
+ "metadata": {
+ "id": "tDX1C8e80Ous"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Store model copy (for later)\n",
+ "Before continuing to train the model on the second context, let's store a copy of the model after training on the first context. We will later use this model copy to try out different ways of training the same model on the second context (i.e., we will try out different continual learning methods)."
+ ],
+ "metadata": {
+ "id": "nuSXFD-O00ZJ"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model_after_context1 = copy.deepcopy(model)"
+ ],
+ "metadata": {
+ "id": "hfDXSlr903z-"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Train on second context\n",
+ "Now let's continue to train our model on the second context, and see what happens."
+ ],
+ "metadata": {
+ "id": "KG5vmXzrtVY-"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Continue to train the model on the second context\n",
+ "train(model, dataset=train_datasets[1], iters=iters, lr=lr, batch_size=batch_size)"
+ ],
+ "metadata": {
+ "id": "Qm8nl4vZ8Qg8"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Evaluate"
+ ],
+ "metadata": {
+ "id": "gzz4tXXFtXbT"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Evaluate accuracy per context and print to screen\n",
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "context2_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " context2_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "FHVaWONJtsps"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Train on both contexts at the same time"
+ ],
+ "metadata": {
+ "id": "Rq6PsBLitcRr"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Define a new model with same architecture\n",
+ "model_joint = Classifier(image_size=config['size'], image_channels=config['channels'],\n",
+ " output_units=config['classes'],\n",
+ " fc_layers=fc_lay, fc_units=fc_units, fc_nl=fc_nl)"
+ ],
+ "metadata": {
+ "id": "ix_3i8cittOj"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Create a joint dataset with data from both contexts\n",
+ "joint_trainset = torch.utils.data.ConcatDataset(train_datasets)"
+ ],
+ "metadata": {
+ "id": "y4trFHRl9jEk"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "We will use the same training-hyperparameters, except that we double the batch size."
+ ],
+ "metadata": {
+ "id": "Ldr-Z3Xa9Vgq"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "batch_size_joint = 256"
+ ],
+ "metadata": {
+ "id": "ON_nH7Ap94R6"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Train the joint model\n",
+ "train(model_joint, dataset=joint_trainset, iters=iters, lr=lr, batch_size=batch_size_joint)"
+ ],
+ "metadata": {
+ "id": "HP73reGQ-KlI"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Evaluate the model\n",
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "joint_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model_joint, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " joint_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "ngV6LI73-zKV"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Visualize results"
+ ],
+ "metadata": {
+ "id": "jot0FyzK--uE"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize\n",
+ "figure, axis = plt.subplots(1, 4, figsize=(15, 5))\n",
+ "\n",
+ "title='After training on context 1, \\nbut not yet training on context 2'\n",
+ "multi_context_barplot(axis[0], context1_accs, title)\n",
+ "\n",
+ "title='After first training on context 1, \\nand then training on context 2'\n",
+ "multi_context_barplot(axis[1], context2_accs, title)\n",
+ "\n",
+ "axis[2].axis('off')\n",
+ "\n",
+ "title='After jointly training on both contexts'\n",
+ "multi_context_barplot(axis[3], joint_accs, title)"
+ ],
+ "metadata": {
+ "id": "OJEcM-8i_CAV"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "We have observed **catastrophic forgetting**! When our neural network model first learns context 1 and is then trained on context 2, the model's performance on data from context 1 substantially drops (left panels). When the same neural network model is instead trained on both context 1 and 2 at the same time, the model is able to learn both contexts well (right panel), demonstrating that the forgetting cannot be explained by limited model capacity."
+ ],
+ "metadata": {
+ "id": "pWWeiBRcyaCH"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Part 2: Overcoming catastrophic forgetting - EWC & replay"
+ ],
+ "metadata": {
+ "id": "HrAUoOovzzMf"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now that we found catastrophic forgetting, let's try out some methods to mitigate the forgetting.\n",
+ "\n",
+ "We will start by exploring two methods: EWC and replay. For both methods, the training on the first context is the same as before. We can therefore use as starting point the copy of the model that we stored after finishing training on the first context."
+ ],
+ "metadata": {
+ "id": "q6ox4ZT10EGB"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model_ewc = copy.deepcopy(model_after_context1)\n",
+ "model_replay = copy.deepcopy(model_after_context1)"
+ ],
+ "metadata": {
+ "id": "z9FGHT8Z161d"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Elastic Weight Consolidation (EWC)\n",
+ "EWC is a popular parameter regularization strategy for continual learning. It was introduced in the paper \"[Overcoming catastrophic forgetting in neural networks\n",
+ "](https://www.pnas.org/doi/abs/10.1073/pnas.1611835114)\" (Kirkpatrick et al., 2017; *PNAS*).\n",
+ "\n",
+ "EWC computes a diagonal approximation to the [Fisher Information matrix](https://en.wikipedia.org/wiki/Fisher_information) to estimate for each parameter of the network how important it is for the performance on the previous context. During training on the next context, these parameter importance estimates are then used to penalize changes to the parameters, with changes to the most important parameters penalized most.\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "s7dQ5itP0RJc"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "When training on context $k>1$, the EWC regularization term is given by:\n",
+ "$$\n",
+ "\\mathcal{L}^{(k)}_{\\text{regularization}_{\\text{EWC}}}\\left(\\boldsymbol{\\theta}\\right) = \\frac{1}{2} \\sum_{i=1}^{N_{\\text{params}}} \\tilde{F}_{ii}^{(k)} \\left(\\theta_i - \\hat{\\theta}_{i}^{(k)} \\right)^2\n",
+ "$$\n",
+ "whereby $\\hat{\\theta}_{i}^{(k)}$ is the $i^{\\text{th}}$ element of $\\hat{\\boldsymbol{\\theta}}^{\\left(k\\right)}$, which is the vector with parameter values at the end of training of task $k$, and $\\tilde{F}_{ii}^{(k)}$ is an approximation of $F_{ii}^{(k)}$, the $i^{\\text{th}}$ diagonal element of $\\boldsymbol{F}^{(k)}$, which is the Fisher Information matrix of task $k$ evaluated at $\\hat{\\boldsymbol{\\theta}}^{(k)}$."
+ ],
+ "metadata": {
+ "id": "OcXpF4N6kyc_"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "(Technically, the above regularization term is for [Online EWC](https://arxiv.org/abs/1805.06370). The original version of EWC did something weird, as explained in [this blog post](https://www.inference.vc/comment-on-overcoming-catastrophic-forgetting-in-nns-are-multiple-penalties-needed-2/).)"
+ ],
+ "metadata": {
+ "id": "uRJG6pGDoj_O"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "The Fisher Information matrix is defined as the covariance of the 'score', which is the partial derivative with respect to $\\boldsymbol{\\theta}$ of the natural logarithm of the likelihood function. The $i^{\\text{th}}$ diagonal element of the Fisher Information on context $k$ is therefore given by:\n",
+ "\n",
+ "$$\n",
+ "F_{ii}^{(k)} = \\mathbb{E}_{\\boldsymbol{x}\\sim Q_{\\boldsymbol{x}}^{(k)}} \\left[ \\ \\mathbb{E}_{p_{\\hat{\\boldsymbol{\\theta}}^{(k)}}} \\left[ \\left( \\left. \\frac{\\delta \\log{p_{\\boldsymbol{\\theta}}\\left(Y=y|\\boldsymbol{x}\\right)}}{\\delta \\theta_i} \\right\\rvert_{\\boldsymbol{\\theta}=\\hat{\\boldsymbol{\\theta}}^{(k)}} \\right)^2 \\right] \\right]\n",
+ "$$\n",
+ "\n",
+ "whereby $Q_{\\boldsymbol{x}}^{(k)}$ is the input distribution of context $k$, $p_{\\boldsymbol{\\theta}}$ is the conditional distribution of $y$ given $\\boldsymbol{x}$ defined by the neural network with parameters $\\boldsymbol{\\theta}$, and $\\hat{\\boldsymbol{\\theta}}^{(k)}$ is the vector with parameter values after finishing training on context $k$."
+ ],
+ "metadata": {
+ "id": "C_8xnOyI-D5_"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "The outer expectation in the above equation can be approximated using a random sample from the training set of context $k$. Because there is only a finite number of possible classes, the inner expectation in the above equation can be calculated for each sample exactly:\n",
+ "$$\n",
+ "\\tilde{F}_{ii}^{(k)} = \\frac{1}{|S^{(k)}|} \\sum_{\\boldsymbol{x}\\in S^{(k)}} \\left( \\sum_{c=1}^{N_{\\text{classes}}} \\tilde{y}_c \\left( \\left. \\frac{\\delta\\log p_{\\boldsymbol{\\theta}}\\left(Y=c|\\boldsymbol{x}\\right)}{\\delta\\theta_i} \\right\\rvert_{\\boldsymbol{\\theta}=\\hat{\\boldsymbol{\\theta}}^{(k)}} \\right)^2 \\right)\n",
+ "$$\n",
+ "whereby $S^{(k)}$ is the random sample of training data of context $k$ and $\\tilde{y}_c = p_{\\hat{\\boldsymbol{\\theta}}^{(k)}}\\left(Y=c|\\boldsymbol{x}\\right)$ (i.e., the probability that input $\\boldsymbol{x}$ belongs to class $c$ as predicted by the model after finishing training on context $k$)."
+ ],
+ "metadata": {
+ "id": "8MMh_ZZslXpd"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "**In the literature or on GitHub, often different, rather crude approximations of the Fisher Information are used ([example](https://github.com/ContinualAI/avalanche/blob/dbdc3804b11710b85b0e564b13034f487c7cf806/avalanche/training/plugins/ewc.py#L132-L186)). Be careful, as the quality of this approximation might influence the results!**"
+ ],
+ "metadata": {
+ "id": "Uitfc1Hom_cC"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def estimate_fisher(model, dataset, n_samples, ewc_gamma=1.):\n",
+ " '''Estimate diagonal of Fisher Information matrix for [model] on [dataset] using [n_samples].'''\n",
+ "\n",
+ " # Prepare to store estimated Fisher Information matrix\n",
+ " est_fisher_info = {}\n",
+ " for n, p in model.named_parameters():\n",
+ " n = n.replace('.', '__')\n",
+ " est_fisher_info[n] = p.detach().clone().zero_()\n",
+ "\n",
+ " # Set model to evaluation mode\n",
+ " mode = model.training\n",
+ " model.eval()\n",
+ "\n",
+ " # Create data-loader to give batches of size 1\n",
+ " data_loader = torch.utils.data.DataLoader(dataset, batch_size=1)\n",
+ "\n",
+ " # Estimate the FI-matrix for [n_samples] batches of size 1\n",
+ " for index,(x,y) in enumerate(data_loader):\n",
+ " # break from for-loop if max number of samples has been reached\n",
+ " if n_samples is not None:\n",
+ " if index > n_samples:\n",
+ " break\n",
+ " # run forward pass of model\n",
+ " output = model(x)\n",
+ " # calculate the FI-matrix\n",
+ " with torch.no_grad():\n",
+ " label_weights = F.softmax(output, dim=1) #--> get weights, with no gradient tracked\n",
+ " # - loop over all classes\n",
+ " for label_index in range(output.shape[1]):\n",
+ " label = torch.LongTensor([label_index])\n",
+ " negloglikelihood = F.cross_entropy(output, label)\n",
+ " # Calculate gradient of negative loglikelihood for this class\n",
+ " model.zero_grad()\n",
+ "            negloglikelihood.backward(retain_graph=True if (label_index+1)<output.shape[1] else False)\n",
+ "            # Square gradients and keep running sum, weighted by [label_weights]\n",
+ "            for n, p in model.named_parameters():\n",
+ "                n = n.replace('.', '__')\n",
+ "                if p.grad is not None:\n",
+ "                    est_fisher_info[n] += label_weights[0][label_index] * (p.grad.detach() ** 2)\n",
+ "\n",
+ "    # Normalize by the number of samples used for the estimation\n",
+ "    est_fisher_info = {n: p/index for n, p in est_fisher_info.items()}\n",
+ "\n",
+ "    # Store the new values in the network\n",
+ "    for n, p in model.named_parameters():\n",
+ "        n = n.replace('.', '__')\n",
+ "        model.register_buffer('{}_EWC_param_values'.format(n), p.detach().clone())\n",
+ "        model.register_buffer('{}_EWC_estimated_fisher'.format(n), est_fisher_info[n])\n",
+ "\n",
+ "    # Set the model back to its initial mode\n",
+ "    model.train(mode=mode)"
+ ],
+ "metadata": {
+ "id": "reconstructed-estimate-fisher"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# NOTE(review): the following cell was reconstructed after tag-stripping corruption;\n",
+ "# it mirrors the `train_ewc_replay` function defined later in this notebook.\n",
+ "# (only the steps that differ from the original `train`-function are commented)\n",
+ "def train_ewc(model, dataset, iters, lr, batch_size, current_context, ewc_lambda=100.):\n",
+ "    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))\n",
+ "    model.train()\n",
+ "    iters_left = 1\n",
+ "    progress_bar = tqdm.tqdm(range(1, iters+1))\n",
+ "\n",
+ "    for batch_index in range(1, iters+1):\n",
+ "        optimizer.zero_grad()\n",
+ "        iters_left -= 1\n",
+ "        if iters_left==0:\n",
+ "            data_loader = iter(torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
+ "                                                           shuffle=True, drop_last=True))\n",
+ "            iters_left = len(data_loader)\n",
+ "        x, y = next(data_loader)\n",
+ "        y_hat = model(x)\n",
+ "        loss = torch.nn.functional.cross_entropy(input=y_hat, target=y, reduction='mean')\n",
+ "\n",
+ "        # Compute the EWC-regularization term, and add it to the loss\n",
+ "        if current_context>1:\n",
+ " ewc_losses = []\n",
+ " for n, p in model.named_parameters():\n",
+ " # Retrieve stored mode (MAP estimate) and precision (Fisher Information matrix)\n",
+ " n = n.replace('.', '__')\n",
+ " mean = getattr(model, '{}_EWC_param_values'.format(n))\n",
+ " fisher = getattr(model, '{}_EWC_estimated_fisher'.format(n))\n",
+ " # Calculate weight regularization loss\n",
+ " ewc_losses.append((fisher * (p-mean)**2).sum())\n",
+ " ewc_loss = (1./2)*sum(ewc_losses)\n",
+ " total_loss = loss + ewc_lambda*ewc_loss\n",
+ " else:\n",
+ " total_loss = loss\n",
+ "\n",
+ " accuracy = (y == y_hat.max(1)[1]).sum().item()*100 / x.size(0)\n",
+ " total_loss.backward()\n",
+ " optimizer.step()\n",
+ " progress_bar.set_description(\n",
+ " ' | training loss: {loss:.3} | training accuracy: {prec:.3}% |'\n",
+ " .format(loss=total_loss.item(), prec=accuracy)\n",
+ " )\n",
+ " progress_bar.update(1)\n",
+ " progress_bar.close()"
+ ],
+ "metadata": {
+ "id": "JjDl0lsg0Vs_"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's train the model on the second context using EWC."
+ ],
+ "metadata": {
+ "id": "8ikYkBt-JMm6"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Estimate the FI-matrix (and store it as attribute in the network)\n",
+ "estimate_fisher(model_ewc, train_datasets[0], n_samples=200)"
+ ],
+ "metadata": {
+ "id": "mvW1FKRNJU2F"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Train on the second context using EWC parameter regularization\n",
+ "ewc_lambda = 100 #--> this is a \"continual learning hyperparameter\", setting these is a delicate\n",
+ " # business. Here we ignore that and just use one that gives good performance.\n",
+ "train_ewc(model_ewc, train_datasets[1], iters=iters, lr=lr, batch_size=batch_size,\n",
+ " current_context=2, ewc_lambda=ewc_lambda)"
+ ],
+ "metadata": {
+ "id": "9UjjXJtLJUg7"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "... and evaluate its performance."
+ ],
+ "metadata": {
+ "id": "60zj34suJVYJ"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Evaluate the model\n",
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "ewc_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model_ewc, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " ewc_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "jiNB3edeJZ-L"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "That worked well! The performance on the first context barely dropped while the network learned the second context."
+ ],
+ "metadata": {
+ "id": "vSfLqS87Vm5e"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Experience Replay\n",
+ "For comparison, now let's train another model copy on the second context using 'experience replay'.\n",
+ "\n",
+ "The typical approach is to store a relatively small amount of samples from previous contexts, and revisit those when training on a new context. We thus first need to populate a memory buffer with some samples from the first context. We select these samples using class-balanced random sampling from the training set (other approaches are possible here, how to optimally select the samples to store in the memory buffer is an active field of research)."
+ ],
+ "metadata": {
+ "id": "bLC3-drp0TIk"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Helper dataset classes for constructing memory buffer\n",
+ "class SubDataset(torch.utils.data.Dataset):\n",
+ " '''To sub-sample a dataset, taking only those samples with label in [sub_labels].\n",
+ "\n",
+ " After this selection of samples has been made, it is possible to transform the target-labels,\n",
+ " which can be useful when doing continual learning with fixed number of output units.'''\n",
+ "\n",
+ " def __init__(self, original_dataset, sub_labels, target_transform=None):\n",
+ " super().__init__()\n",
+ " self.dataset = original_dataset\n",
+ " self.sub_indeces = []\n",
+ " for index in range(len(self.dataset)):\n",
+ " if hasattr(original_dataset, \"targets\"):\n",
+ " if self.dataset.target_transform is None:\n",
+ " label = self.dataset.targets[index]\n",
+ " else:\n",
+ " label = self.dataset.target_transform(self.dataset.targets[index])\n",
+ " else:\n",
+ " label = self.dataset[index][1]\n",
+ " if label in sub_labels:\n",
+ " self.sub_indeces.append(index)\n",
+ " self.target_transform = target_transform\n",
+ "\n",
+ " def __len__(self):\n",
+ " return len(self.sub_indeces)\n",
+ "\n",
+ " def __getitem__(self, index):\n",
+ " sample = self.dataset[self.sub_indeces[index]]\n",
+ " if self.target_transform:\n",
+ " target = self.target_transform(sample[1])\n",
+ " sample = (sample[0], target)\n",
+ " return sample\n",
+ "\n",
+ "\n",
+ "class MemorySetDataset(torch.utils.data.Dataset):\n",
+ " '''Create dataset from list of <np.arrays> with shape (N, C, H, W) (i.e., with N images each).\n",
+ "\n",
+ " The images at the i-th entry of [memory_sets] belong to class [i],\n",
+ " unless a [target_transform] is specified\n",
+ " '''\n",
+ "\n",
+ " def __init__(self, memory_sets, target_transform=None):\n",
+ " super().__init__()\n",
+ " self.memory_sets = memory_sets\n",
+ " self.target_transform = target_transform\n",
+ "\n",
+ " def __len__(self):\n",
+ " total = 0\n",
+ " for class_id in range(len(self.memory_sets)):\n",
+ " total += len(self.memory_sets[class_id])\n",
+ " return total\n",
+ "\n",
+ " def __getitem__(self, index):\n",
+ " total = 0\n",
+ " for class_id in range(len(self.memory_sets)):\n",
+ " examples_in_this_class = len(self.memory_sets[class_id])\n",
+ " if index < (total + examples_in_this_class):\n",
+ " class_id_to_return = class_id if self.target_transform is None else self.target_transform(class_id)\n",
+ " example_id = index - total\n",
+ " break\n",
+ " else:\n",
+ " total += examples_in_this_class\n",
+ " image = torch.from_numpy(self.memory_sets[class_id][example_id])\n",
+ " return (image, class_id_to_return)"
+ ],
+ "metadata": {
+ "id": "06uwJLSlYfYs",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Fill the memory buffer using class-balanced random sampling\n",
+ "def fill_memory_buffer(memory_sets, dataset, buffer_size_per_class, class_indeces):\n",
+ " '''This function is rather slow and can be optimized.'''\n",
+ " for class_id in class_indeces:\n",
+ " # Create dataset with only instances of one class\n",
+ " class_dataset = SubDataset(original_dataset=dataset, sub_labels=[class_id])\n",
+ "\n",
+ " # Randomly select which indeces to store in the buffer\n",
+ " n_total = len(class_dataset)\n",
+ " indeces_selected = np.random.choice(n_total, size=min(buffer_size_per_class, n_total),\n",
+ " replace=False)\n",
+ "\n",
+ " # Select those indeces\n",
+ " memory_set = []\n",
+ " for k in indeces_selected:\n",
+ " memory_set.append(class_dataset[k][0].numpy())\n",
+ "\n",
+ " # Add this [memory_set] as a [n]x[ich]x[isz]x[isz] to the list of [memory_sets]\n",
+ " memory_sets.append(np.array(memory_set))\n",
+ "\n",
+ " return memory_sets"
+ ],
+ "metadata": {
+ "id": "Ll7YdfMo0VSl"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "buffer_size_per_class = 20\n",
+ "memory_sets = []\n",
+ "# The next command is unnecessarily slow, apologies! Bonus question: optimize this implementation :)\n",
+ "memory_sets = fill_memory_buffer(memory_sets, train_datasets[0],\n",
+ " buffer_size_per_class=buffer_size_per_class,\n",
+ " class_indeces=list(range(10)))\n",
+ "buffer_dataset = MemorySetDataset(memory_sets)"
+ ],
+ "metadata": {
+ "id": "0Bbg05iTOye4"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now we also need to define a new training-function that revisits data from the memory buffer along with training on the data from the new context."
+ ],
+ "metadata": {
+ "id": "XA5cDdKSO_Pq"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# (only the steps that differ from the original `train`-function are commented)\n",
+ "def train_replay(model, dataset, iters, lr, batch_size, current_context, buffer_dataset=None):\n",
+ " optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))\n",
+ " model.train()\n",
+ " iters_left = 1\n",
+ " iters_left_replay = 1\n",
+ " progress_bar = tqdm.tqdm(range(1, iters+1))\n",
+ "\n",
+ " for batch_index in range(1, iters+1):\n",
+ " optimizer.zero_grad()\n",
+ "\n",
+ " # Data from current context\n",
+ " iters_left -= 1\n",
+ " if iters_left==0:\n",
+ " data_loader = iter(torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
+ " shuffle=True, drop_last=True))\n",
+ " iters_left = len(data_loader)\n",
+ " x, y = next(data_loader)\n",
+ " y_hat = model(x)\n",
+ " loss = torch.nn.functional.cross_entropy(input=y_hat, target=y, reduction='mean')\n",
+ " accuracy = (y == y_hat.max(1)[1]).sum().item()*100 / x.size(0)\n",
+ "\n",
+ " # Replay data from memory buffer\n",
+ " if buffer_dataset is not None:\n",
+ " iters_left_replay -= 1\n",
+ " if iters_left_replay==0:\n",
+ " batch_size_to_use = min(batch_size, len(buffer_dataset))\n",
+ " data_loader_replay = iter(torch.utils.data.DataLoader(buffer_dataset,\n",
+ " batch_size_to_use, shuffle=True,\n",
+ " drop_last=True))\n",
+ " iters_left_replay = len(data_loader_replay)\n",
+ " x_, y_ = next(data_loader_replay)\n",
+ " y_hat_ = model(x_)\n",
+ " loss_replay = torch.nn.functional.cross_entropy(input=y_hat_, target=y_, reduction='mean')\n",
+ "\n",
+ " # Combine both losses to approximate the joint loss over both contexts\n",
+ " # (i.e., the loss on the replayed data has weight proportional to number of contexts so far)\n",
+ " if buffer_dataset is not None:\n",
+ " rnt = 1./current_context\n",
+ " total_loss = rnt*loss + (1-rnt)*loss_replay\n",
+ " else:\n",
+ " total_loss = loss\n",
+ "\n",
+ " total_loss.backward()\n",
+ " optimizer.step()\n",
+ " progress_bar.set_description(\n",
+ " ' | training loss: {loss:.3} | training accuracy: {prec:.3}% |'\n",
+ " .format(loss=total_loss.item(), prec=accuracy)\n",
+ " )\n",
+ " progress_bar.update(1)\n",
+ " progress_bar.close()"
+ ],
+ "metadata": {
+ "id": "FVfyNqsMPIf4"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Train on the second context using experience replay\n",
+ "train_replay(model_replay, train_datasets[1], iters=iters, lr=lr, batch_size=batch_size,\n",
+ " current_context=2, buffer_dataset=buffer_dataset)"
+ ],
+ "metadata": {
+ "id": "XU8LLSWFUokA"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "... and evaluate its performance"
+ ],
+ "metadata": {
+ "id": "H35hMk3zVFs_"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Evaluate the model\n",
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "replay_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model_replay, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " replay_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "hKuT_aFDVEn0"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "That also worked!"
+ ],
+ "metadata": {
+ "id": "t_ZmSg9hWJiV"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Compare\n",
+ "Let's compare the performance of naive fine-tuning, EWC and experience replay."
+ ],
+ "metadata": {
+ "id": "Rp4rvzgZVLBT"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "figure, axis = plt.subplots(1, 3, figsize=(12, 4))\n",
+ "\n",
+ "title='Fine-tuning'\n",
+ "multi_context_barplot(axis[0], context2_accs, title)\n",
+ "\n",
+ "title='EWC \\n(lambda: {})'.format(ewc_lambda)\n",
+ "multi_context_barplot(axis[1], ewc_accs, title)\n",
+ "\n",
+ "title='Replay \\n(buffer: {} samples per class)'.format(buffer_size_per_class)\n",
+ "multi_context_barplot(axis[2], replay_accs, title)"
+ ],
+ "metadata": {
+ "id": "YWWDQqiFVSc0"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### **ASSIGNMENT**: Combine EWC and replay\n",
+ "Train another model copy on the second context using *both* EWC and experience replay."
+ ],
+ "metadata": {
+ "id": "kuzoBMh8fh2i"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model_ewc_replay = copy.deepcopy(model_after_context1)"
+ ],
+ "metadata": {
+ "id": "E-vo75W6f1tU"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Start by defining the training function that can be used to train the model on the new context using both EWC and experience replay."
+ ],
+ "metadata": {
+ "id": "CxUvSooRgxiZ"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def train_ewc_replay(model, dataset, buffer_dataset, iters, lr, batch_size, ewc_lambda):\n",
+ " pass\n",
+ " # TO BE COMPLETED (tip: use the above training functions as example / starting point)"
+ ],
+ "metadata": {
+ "id": "5r96gtzkgWzZ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Possible Answer\n",
+ "def train_ewc_replay(model, dataset, iters, lr, batch_size, current_context,\n",
+ " ewc_lambda=100., buffer_dataset=None):\n",
+ " optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))\n",
+ " model.train()\n",
+ " iters_left = 1\n",
+ " iters_left_replay = 1\n",
+ " progress_bar = tqdm.tqdm(range(1, iters+1))\n",
+ "\n",
+ " for batch_index in range(1, iters+1):\n",
+ " optimizer.zero_grad()\n",
+ "\n",
+ " # Data from current context\n",
+ " iters_left -= 1\n",
+ " if iters_left==0:\n",
+ " data_loader = iter(torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
+ " shuffle=True, drop_last=True))\n",
+ " iters_left = len(data_loader)\n",
+ " x, y = next(data_loader)\n",
+ " y_hat = model(x)\n",
+ " loss = torch.nn.functional.cross_entropy(input=y_hat, target=y, reduction='mean')\n",
+ " accuracy = (y == y_hat.max(1)[1]).sum().item()*100 / x.size(0)\n",
+ "\n",
+ " # Replay data from memory buffer\n",
+ " if buffer_dataset is not None:\n",
+ " iters_left_replay -= 1\n",
+ " if iters_left_replay==0:\n",
+ " batch_size_to_use = min(batch_size, len(buffer_dataset))\n",
+ " data_loader_replay = iter(torch.utils.data.DataLoader(buffer_dataset,\n",
+ " batch_size_to_use,\n",
+ " shuffle=True,\n",
+ " drop_last=True))\n",
+ " iters_left_replay = len(data_loader_replay)\n",
+ " x_, y_ = next(data_loader_replay)\n",
+ " y_hat_ = model(x_)\n",
+ " loss_replay = torch.nn.functional.cross_entropy(input=y_hat_, target=y_,\n",
+ " reduction='mean')\n",
+ "\n",
+ " # Compute the EWC-regularization term, and add it to the loss\n",
+ " if current_context>1:\n",
+ " ewc_losses = []\n",
+ " for n, p in model.named_parameters():\n",
+ " # Retrieve stored mode (MAP estimate) and precision (Fisher Information matrix)\n",
+ " n = n.replace('.', '__')\n",
+ " mean = getattr(model, '{}_EWC_param_values'.format(n))\n",
+ " fisher = getattr(model, '{}_EWC_estimated_fisher'.format(n))\n",
+ " # Calculate weight regularization loss\n",
+ " ewc_losses.append((fisher * (p-mean)**2).sum())\n",
+ " ewc_loss = (1./2)*sum(ewc_losses)\n",
+ " else:\n",
+ " ewc_loss = 0.\n",
+ "\n",
+ " # Combine all three losses\n",
+ " if buffer_dataset is not None:\n",
+ " rnt = 1./current_context\n",
+ " total_loss = rnt*loss + (1-rnt)*loss_replay + ewc_lambda*ewc_loss\n",
+ " else:\n",
+ " total_loss = loss + ewc_lambda*ewc_loss\n",
+ "\n",
+ " total_loss.backward()\n",
+ " optimizer.step()\n",
+ " progress_bar.set_description(\n",
+ " ' | training loss: {loss:.3} | training accuracy: {prec:.3}% |'\n",
+ " .format(loss=total_loss.item(), prec=accuracy)\n",
+ " )\n",
+ " progress_bar.update(1)\n",
+ " progress_bar.close()"
+ ],
+ "metadata": {
+ "id": "qON7O-O1ozuM",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now test your function by training the model, ..."
+ ],
+ "metadata": {
+ "id": "1T2XW32xhAl3"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Select the hyperparameter for EWC\n",
+ "ewc_lambda_with_replay = 100 # YOU CAN EXPLORE OTHER VALUES\n",
+ "# (if you want to do a new try, first 'reset' [model_ewc_replay] by running the command\n",
+ "# `model_ewc_replay = copy.deepcopy(model_after_context1)` at the top of the assignment)\n",
+ "\n",
+ "# Compute the Fisher Information matrix (and store it as attribute in the network)\n",
+ "estimate_fisher(model_ewc_replay, train_datasets[0], n_samples=200)\n",
+ "\n",
+ "# Train on the second context using EWC and experience replay\n",
+ "train_ewc_replay(model_ewc_replay, train_datasets[1], iters=iters, lr=lr, batch_size=batch_size,\n",
+ " current_context=2, ewc_lambda=ewc_lambda_with_replay,\n",
+ " buffer_dataset=buffer_dataset)"
+ ],
+ "metadata": {
+ "id": "bVKKz3xdhBa9"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "... evaluating it, ..."
+ ],
+ "metadata": {
+ "id": "xCAXSuUGhu4B"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "print(\"\\n Accuracy (in %) of the model on test-set of:\")\n",
+ "ewc_replay_accs = []\n",
+ "for i in range(contexts):\n",
+ " acc = test_acc(model_ewc_replay, test_datasets[i], test_size=None)\n",
+ " print(\" - Context {}: {:.1f}\".format(i+1, acc))\n",
+ " ewc_replay_accs.append(acc)"
+ ],
+ "metadata": {
+ "id": "Damfry4kh7d7"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "... and comparing its performance with the performance of the individual methods."
+ ],
+ "metadata": {
+ "id": "E5zUtbJ3h1EG"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "figure, axis = plt.subplots(1, 4, figsize=(16, 4))\n",
+ "\n",
+ "title='Fine-tuning'\n",
+ "multi_context_barplot(axis[0], context2_accs, title)\n",
+ "\n",
+ "title='EWC \\n(lambda: {})'.format(ewc_lambda)\n",
+ "multi_context_barplot(axis[1], ewc_accs, title)\n",
+ "\n",
+ "title='Replay \\n(buffer: {} samples per class)'.format(buffer_size_per_class)\n",
+ "multi_context_barplot(axis[2], replay_accs, title)\n",
+ "\n",
+ "title='EWC + replay \\n(lambda: {} - buffer: {} per class)'.format(ewc_lambda_with_replay,\n",
+ " buffer_size_per_class)\n",
+ "multi_context_barplot(axis[3], ewc_replay_accs, title)"
+ ],
+ "metadata": {
+ "id": "QWR5Nw4bh7Im"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Part 3: Class-incremental learning - Split MNIST\n",
+ "We saw that on Permuted MNIST with two contexts, both EWC and experience replay (with a relatively small buffer of 20 samples per class) are able to successfully prevent a large part of the catastrophic forgetting.\n",
+ "\n",
+ "Now let's look at a different type of continual learning problem. As discussed in the lecture, when it comes to supervised continual learning, three fundamental types - or 'scenarios' - can be distinguished: **task-incremental learning**, **domain-incremental learning** and **class-incremental learning**."
+ ],
+ "metadata": {
+ "id": "tW8VSZ_90BDN"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "*(NOTE(review): an embedded `<img>` tag illustrating the three continual-learning scenarios appears to have been stripped here — re-insert the figure.)*"
+ ],
+ "metadata": {
+ "id": "uxpSI2BttGa1"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "*(NOTE(review): an embedded `<img>` tag appears to have been stripped here — re-insert the figure.)*"
+ ],
+ "metadata": {
+ "id": "ac1wTL2duGD2"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### ASSIGNMENT: What scenario was used for Permuted MNIST?\n",
+ "\n",
+ "What type of 'scenario' was the permuted MNIST problem that we explored above? Was it task-incremental, domain-incremental or class-incremental? Try to motivate your answer."
+ ],
+ "metadata": {
+ "id": "R_oxTtVTrM5g"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Possible Answer\n",
+ "\n",
+ "'''\n",
+ "The Permuted MNIST problem consisted of two contexts: normal MNIST (context 1) and MNIST\n",
+ "with permuted input images (context 2).\n",
+ "\n",
+ "After learning both contexts, when the model was evaluated, the model was not told to\n",
+ "which context an image belongs (i.e., the model was not told whether the image to be\n",
+ "classified was permuted or not), but the model also did not need to identify to\n",
+ "which context an image belongs (i.e., the model did not need to predict whether\n",
+ "the image to be classified had permuted pixels or not; it only needed to predict\n",
+ "the original digit displayed in the image).\n",
+ "This thus means that the above Permuted MNIST problem was an example of a domain-incremental\n",
+ "learning problem.\n",
+ "\n",
+ "Another way to motivate that this problem is an example of domain-incremental\n",
+ "learning, is to say that in both context 1 (normal MNIST) and context 2 (MNIST with\n",
+ "permuted input images), the 'type of problem' is the same (i.e., to identify the\n",
+ "digit displayed in the original image), but the 'domain' or 'context' is changing (i.e.,\n",
+ "the order/permutation in which the image pixels are presented).\n",
+ "''';"
+ ],
+ "metadata": {
+ "id": "CIvJrwERry3C",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now let's explore a **class-incremental learning** problem. For this we will no longer use Permuted MNIST (because it is a bit unintuitive to perform Permuted MNIST according to the class-incremental learning scenario), but we will use Split MNIST, which was introduced in the lecture."
+ ],
+ "metadata": {
+ "id": "Hbp9I_kpstgv"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Set up the benchmark (Split MNIST)\n",
+ "We will split the MNIST dataset up in five contexts with two different classes per context."
+ ],
+ "metadata": {
+ "id": "Ho1dVPxFuKPj"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "contexts = 5\n",
+ "classes_per_context = 2\n",
+ "# Generate labels-per-context\n",
+ "labels_per_context = [\n",
+ " list(np.array(range(classes_per_context))+classes_per_context*context_id) for context_id in range(contexts)\n",
+ "]\n",
+ "# Split the train and test datasets up into sub-datasets, one for each context\n",
+ "train_datasets = []\n",
+ "test_datasets = []\n",
+ "for labels in labels_per_context:\n",
+ " train_datasets.append(SubDataset(MNIST_trainset, labels))\n",
+ " test_datasets.append(SubDataset(MNIST_testset, labels))"
+ ],
+ "metadata": {
+ "id": "aBAcIMoG-voX"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize the contexts\n",
+ "figure, axis = plt.subplots(1, contexts, figsize=(3*contexts, 4))\n",
+ "\n",
+ "for context_id in range(len(train_datasets)):\n",
+ " plot_examples(axis[context_id], train_datasets[context_id], context_id=context_id)"
+ ],
+ "metadata": {
+ "id": "kVr-Qc3LaJAw"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Set up the model\n",
+ "We use the same network architecture as before."
+ ],
+ "metadata": {
+ "id": "VLBAv9CyFRzc"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Specify the architectural layout of the network to use\n",
+ "fc_lay = 4 #--> number of fully-connected layers\n",
+ "fc_units = 40 #--> number of units in each hidden layer\n",
+ "fc_nl = \"relu\" #--> what non-linearity to use?"
+ ],
+ "metadata": {
+ "id": "JBmaNwqAlXQ6"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Define the model\n",
+ "model = Classifier(image_size=config['size'], image_channels=config['channels'],\n",
+ " output_units=config['classes'],\n",
+ " fc_layers=fc_lay, fc_units=fc_units, fc_nl=fc_nl)"
+ ],
+ "metadata": {
+ "id": "vmCYbrRDliwp"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Prepare for evaluation throughout training\n",
+ "Below we will use the same continual learning strategies as before (fine-tuning, EWC and replay) to train models on the five contexts of Split MNIST. When we do this, we want to keep track of the performance of the model while it is sequentially trained on these different contexts. For that we will define some functions here.\n",
+ "\n",
+ "As is common in the continual learning literature, we will evaluate the performance of the model only after finishing training on each new task. (But see [this paper](https://openreview.net/forum?id=Zy350cRstc6) for an interesting phenomenon that can be observed if we would evaluate the model on previous tasks after each training iteration on the new task.)"
+ ],
+ "metadata": {
+ "id": "1zoLaBgvutQ9"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Define a function to initiate a dict-object in which performance throughout training is logged.\n",
+ "def initiate_result_dict(n_contexts):\n",
+ " '''Initiate with accuracy-measures to keep track of.'''\n",
+ " result_dict = {}\n",
+ " result_dict[\"acc per context\"] = {}\n",
+ " for i in range(n_contexts):\n",
+ " result_dict[\"acc per context\"][\"context {}\".format(i+1)] = []\n",
+ " result_dict[\"average_contexts_so_far\"] = [] # average accuracy over all contexts so far\n",
+ " result_dict[\"average_all_contexts\"] = [] # average accuracy over all contexts\n",
+ " result_dict[\"context\"] = [] # number of contexts so far\n",
+ " return result_dict"
+ ],
+ "metadata": {
+ "id": "S5KOtI2Axyn8"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def test_all(model, datasets, current_context, test_size=None, result_dict=None, verbose=False):\n",
+ " '''Evaluate accuracy of a classifier (=[model]) on all contexts in [datasets].'''\n",
+ "\n",
+ " n_contexts = len(datasets)\n",
+ "\n",
+ " # Evaluate accuracy of model on all contexts\n",
+ " precs = []\n",
+ " for i in range(n_contexts):\n",
+ " precs.append(test_acc(model, datasets[i], test_size=test_size))\n",
+ "\n",
+ " # Compute average accuracy both for all contexts seen so far, and for all contexts\n",
+ " ave_so_far = sum([precs[context_id] for context_id in range(current_context)]) / current_context\n",
+ " ave_all = sum([precs[context_id] for context_id in range(n_contexts)]) / n_contexts\n",
+ "\n",
+ " # Print results on screen\n",
+ " if verbose:\n",
+ " print(' => ave accuracy (contexts so far): {:.3f}'.format(ave_so_far))\n",
+ " print(' => ave accuracy (all contexts): {:.3f}'.format(ave_all))\n",
+ "\n",
+ " # Add results to [result_dict]\n",
+ " if result_dict is not None:\n",
+ " for i in range(n_contexts):\n",
+ " result_dict['acc per context']['context {}'.format(i+1)].append(precs[i])\n",
+ " result_dict['average_all_contexts'].append(ave_all)\n",
+ " result_dict['average_contexts_so_far'].append(ave_so_far)\n",
+ " result_dict['context'].append(current_context)"
+ ],
+ "metadata": {
+ "id": "N_j_o9RCuyvB"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Compare fine-tuning, EWC and experience replay on Split MNIST"
+ ],
+ "metadata": {
+ "id": "MooCLNx7luDs"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Fine-tuning"
+ ],
+ "metadata": {
+ "id": "qRNzQe6vt0mf"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Create a copy from the base-model\n",
+ "model_finetune = copy.deepcopy(model)"
+ ],
+ "metadata": {
+ "id": "s78p_YQQtX-M"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Initiate a `results_dict` to keep track of performance throughout the continual training.\n",
+ "result_dict_finetune = initiate_result_dict(contexts)"
+ ],
+ "metadata": {
+ "id": "L0FKJoYRy7lp"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "For fine-tuning, we can simply re-use the `train`-function we had defined above to train the model on a given dataset in \"the standard way\" (i.e., without using any specific continual learning strategy)."
+ ],
+ "metadata": {
+ "id": "WrMqMENnuHRg"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sequentially train the model on all contexts using finetuning\n",
+ "for context_id in range(contexts):\n",
+ " # train the model on this context\n",
+ " train(model_finetune, dataset=train_datasets[context_id], iters=iters, lr=lr,\n",
+ " batch_size=batch_size)\n",
+ " # evaluate the performance of the model after training on this context\n",
+ " test_all(model_finetune, test_datasets, context_id+1, test_size=None,\n",
+ " result_dict=result_dict_finetune, verbose=True)"
+ ],
+ "metadata": {
+ "id": "g0y65f07tVie"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Visualization function\n",
+ "def plot_lines(list_with_lines, x_axes=None, line_names=None, colors=None, title=None,\n",
+ " title_top=None, xlabel=None, ylabel=None, ylim=None, figsize=None, list_with_errors=None, errors=\"shaded\",\n",
+ " x_log=False, with_dots=False, linestyle='solid', h_line=None, h_label=None, h_error=None,\n",
+ " h_lines=None, h_colors=None, h_labels=None, h_errors=None):\n",
+ " '''Generates a figure containing multiple lines in one plot.\n",
+ "\n",
+        "    :param list_with_lines: list of all lines to plot (with each line being a list as well)\n",
+ " :param x_axes: containing the values for the x-axis\n",
+ " :param line_names: containing the names of each line\n",
+ " :param colors: containing the colors of each line\n",
+ " :param title: title of plot\n",
+ " :param title_top: text to appear on top of the title\n",
+        "    :return: f: the generated figure\n",
+ " '''\n",
+ "\n",
+ " # if needed, generate default x-axis\n",
+ " if x_axes == None:\n",
+ " n_obs = len(list_with_lines[0])\n",
+ " x_axes = list(range(n_obs))\n",
+ "\n",
+ " # if needed, generate default line-names\n",
+ " if line_names == None:\n",
+ " n_lines = len(list_with_lines)\n",
+ " line_names = [\"line \" + str(line_id) for line_id in range(n_lines)]\n",
+ "\n",
+ " # make plot\n",
+ " size = (12,7) if figsize is None else figsize\n",
+ " f, axarr = plt.subplots(1, 1, figsize=size)\n",
+ "\n",
+ " # add error-lines / shaded areas\n",
+ " if list_with_errors is not None:\n",
+ " for line_id, name in enumerate(line_names):\n",
+ " if errors==\"shaded\":\n",
+ " axarr.fill_between(x_axes, list(np.array(list_with_lines[line_id]) + np.array(list_with_errors[line_id])),\n",
+ " list(np.array(list_with_lines[line_id]) - np.array(list_with_errors[line_id])),\n",
+ " color=None if (colors is None) else colors[line_id], alpha=0.25)\n",
+ " else:\n",
+ " axarr.plot(x_axes, list(np.array(list_with_lines[line_id]) + np.array(list_with_errors[line_id])), label=None,\n",
+ " color=None if (colors is None) else colors[line_id], linewidth=1, linestyle='dashed')\n",
+ " axarr.plot(x_axes, list(np.array(list_with_lines[line_id]) - np.array(list_with_errors[line_id])), label=None,\n",
+ " color=None if (colors is None) else colors[line_id], linewidth=1, linestyle='dashed')\n",
+ "\n",
+ " # mean lines\n",
+ " for line_id, name in enumerate(line_names):\n",
+ " axarr.plot(x_axes, list_with_lines[line_id], label=name,\n",
+ " color=None if (colors is None) else colors[line_id],\n",
+ " linewidth=4, marker='o' if with_dots else None, linestyle=linestyle if type(linestyle)==str else linestyle[line_id])\n",
+ "\n",
+ " # add horizontal line\n",
+ " if h_line is not None:\n",
+ " axarr.axhline(y=h_line, label=h_label, color=\"grey\")\n",
+ " if h_error is not None:\n",
+ " if errors == \"shaded\":\n",
+ " axarr.fill_between([x_axes[0], x_axes[-1]],\n",
+ " [h_line + h_error, h_line + h_error], [h_line - h_error, h_line - h_error],\n",
+ " color=\"grey\", alpha=0.25)\n",
+ " else:\n",
+ " axarr.axhline(y=h_line + h_error, label=None, color=\"grey\", linewidth=1, linestyle='dashed')\n",
+ " axarr.axhline(y=h_line - h_error, label=None, color=\"grey\", linewidth=1, linestyle='dashed')\n",
+ "\n",
+ " # add horizontal lines\n",
+ " if h_lines is not None:\n",
+ " h_colors = colors if h_colors is None else h_colors\n",
+ " for line_id, new_h_line in enumerate(h_lines):\n",
+ " axarr.axhline(y=new_h_line, label=None if h_labels is None else h_labels[line_id],\n",
+ " color=None if (h_colors is None) else h_colors[line_id])\n",
+ " if h_errors is not None:\n",
+ " if errors == \"shaded\":\n",
+ " axarr.fill_between([x_axes[0], x_axes[-1]],\n",
+ " [new_h_line + h_errors[line_id], new_h_line+h_errors[line_id]],\n",
+ " [new_h_line - h_errors[line_id], new_h_line - h_errors[line_id]],\n",
+ " color=None if (h_colors is None) else h_colors[line_id], alpha=0.25)\n",
+ " else:\n",
+ " axarr.axhline(y=new_h_line+h_errors[line_id], label=None,\n",
+ " color=None if (h_colors is None) else h_colors[line_id], linewidth=1,\n",
+ " linestyle='dashed')\n",
+ " axarr.axhline(y=new_h_line-h_errors[line_id], label=None,\n",
+ " color=None if (h_colors is None) else h_colors[line_id], linewidth=1,\n",
+ " linestyle='dashed')\n",
+ "\n",
+ " # finish layout\n",
+ " # -set y-axis\n",
+ " if ylim is not None:\n",
+ " axarr.set_ylim(ylim)\n",
+ " # -add axis-labels\n",
+ " if xlabel is not None:\n",
+ " axarr.set_xlabel(xlabel)\n",
+ " if ylabel is not None:\n",
+ " axarr.set_ylabel(ylabel)\n",
+ " # -add title(s)\n",
+ " if title is not None:\n",
+ " axarr.set_title(title)\n",
+ " if title_top is not None:\n",
+ " f.suptitle(title_top)\n",
+ " # -add legend\n",
+ " if line_names is not None:\n",
+ " axarr.legend()\n",
+ " # -set x-axis to log-scale\n",
+ " if x_log:\n",
+ " axarr.set_xscale('log')\n",
+ "\n",
+ " # return the figure\n",
+ " return f"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "tK2ztl4O2SeF"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize performance on each context throughout the continual training\n",
+ "plot_list = []\n",
+ "for i in range(contexts):\n",
+ " plot_list.append(result_dict_finetune[\"acc per context\"][\"context {}\".format(i + 1)])\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_finetune[\"context\"],\n",
+ " line_names=['context {}'.format(i + 1) for i in range(contexts)],\n",
+ " title=\"Fine-tuning\", ylabel=\"Test Accuracy (%)\",\n",
+ " xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "izJVfClC0Nwe"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "That doesn't look great! The performance on data from each context drops to zero as soon as the next context has been learned.\n",
+ "\n",
+ "Can this be fixed with EWC or replay?\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "XoOvQDxU0H7V"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### EWC"
+ ],
+ "metadata": {
+ "id": "qZpWQSzct6-j"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Create a copy from the base-model\n",
+ "model_ewc = copy.deepcopy(model)"
+ ],
+ "metadata": {
+ "id": "A0zJrd324EYi"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Initiate a `results_dict` to keep track of performance throughout the continual training.\n",
+ "result_dict_ewc = initiate_result_dict(contexts)"
+ ],
+ "metadata": {
+ "id": "6b_toOZG4HK2"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Select a hyperparameter for EWC\n",
+ "ewc_lambda = 100"
+ ],
+ "metadata": {
+ "id": "KUvoqpvC462f"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sequentially train the model on all contexts using EWC\n",
+ "for context_id in range(contexts):\n",
+ " # Train the model on this context\n",
+ " train_ewc(model_ewc, train_datasets[context_id], iters=iters, lr=lr, batch_size=batch_size,\n",
+ " current_context=context_id+1, ewc_lambda=ewc_lambda)\n",
+ " # Estimate/update the FI-matrix (which is stored as attribute in the network)\n",
+ " estimate_fisher(model_ewc, train_datasets[context_id], n_samples=200)\n",
+ " # Evaluate the performance of the model after training on this context\n",
+ " test_all(model_ewc, test_datasets, context_id+1, test_size=None,\n",
+ " result_dict=result_dict_ewc, verbose=True)"
+ ],
+ "metadata": {
+ "id": "UaTlFqCU4L23"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize performance on each context throughout the continual training\n",
+ "plot_list = []\n",
+ "for i in range(contexts):\n",
+ " plot_list.append(result_dict_ewc[\"acc per context\"][\"context {}\".format(i + 1)])\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_ewc[\"context\"],\n",
+ " line_names=['context {}'.format(i + 1) for i in range(contexts)],\n",
+ " title=\"EWC\", ylabel=\"Test Accuracy (%)\",\n",
+ " xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "cnwEr6D86XXz"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "On this problem, EWC does not seem to help!"
+ ],
+ "metadata": {
+ "id": "A3veN7vFDgtg"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Experience Replay"
+ ],
+ "metadata": {
+ "id": "xKga8yfYt8qI"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Create a copy from the base-model\n",
+ "model_replay = copy.deepcopy(model)"
+ ],
+ "metadata": {
+ "id": "PNUKP7xL7Mit"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Initiate a `results_dict` to keep track of performance throughout the continual training.\n",
+ "result_dict_replay = initiate_result_dict(contexts)"
+ ],
+ "metadata": {
+ "id": "w47-mCHp7PU1"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Select how many samples per class can be stored in the memory buffer\n",
+ "buffer_size_per_class = 20"
+ ],
+ "metadata": {
+ "id": "ysrAtTYN9Xq8"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sequentially train the model on all contexts using Experience Replay\n",
+ "memory_sets = []\n",
+ "buffer_dataset = None\n",
+ "for context_id in range(contexts):\n",
+ " # Train the model on this context\n",
+ " train_replay(model_replay, train_datasets[context_id], iters=iters, lr=lr,\n",
+ " batch_size=batch_size, current_context=context_id+1, buffer_dataset=buffer_dataset)\n",
+ " # Update memory buffer\n",
+ " classes_in_this_context = list(range(classes_per_context*context_id,\n",
+ " classes_per_context*(context_id+1)))\n",
+ " memory_sets = fill_memory_buffer(memory_sets, train_datasets[context_id],\n",
+ " buffer_size_per_class=buffer_size_per_class,\n",
+ " class_indeces=classes_in_this_context)\n",
+ " buffer_dataset = MemorySetDataset(memory_sets)\n",
+ " # Evaluate the performance of the model after training on this context\n",
+ " test_all(model_replay, test_datasets, context_id+1, test_size=None,\n",
+ " result_dict=result_dict_replay, verbose=True)"
+ ],
+ "metadata": {
+ "id": "PqEQ0psH7S5_"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize performance on each context throughout the continual training\n",
+ "plot_list = []\n",
+ "for i in range(contexts):\n",
+ " plot_list.append(result_dict_replay[\"acc per context\"][\"context {}\".format(i + 1)])\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_replay[\"context\"],\n",
+ " line_names=['context {}'.format(i + 1) for i in range(contexts)],\n",
+ " title=\"Experience Replay ({} samples per class)\".format(buffer_size_per_class),\n",
+ " ylabel=\"Test Accuracy (%)\", xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "Cd6cVn4596Z2"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Experience replay does help on this type of continual learning problem!"
+ ],
+ "metadata": {
+ "id": "0h0_dqswDxIF"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "#### Visual comparison fine-tuning, EWC and replay"
+ ],
+ "metadata": {
+ "id": "PeRrxlPMD9Ih"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "key = \"average_contexts_so_far\"\n",
+ "plot_list = [result_dict_finetune[key], result_dict_ewc[key], result_dict_replay[key]]\n",
+ "line_names = ['Fine-tuning', 'EWC', 'Experience Replay']\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_replay[\"context\"], line_names=line_names,\n",
+ " title=\"Comparison (performance on all contexts so far)\",\n",
+ " ylabel=\"Test Accuracy (%)\", xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "BlMgaQw3EDNG"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "key = \"average_all_contexts\"\n",
+ "plot_list = [result_dict_finetune[key], result_dict_ewc[key], result_dict_replay[key]]\n",
+ "line_names = ['Fine-tuning', 'EWC', 'Experience Replay']\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_replay[\"context\"], line_names=line_names,\n",
+ " title=\"Comparison (performance on all contexts)\",\n",
+ " ylabel=\"Test Accuracy (%)\", xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "jDvPRbb6NunA"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "(Note that the lines of `fine-tuning` and `EWC` might well overlap almost completely.)"
+ ],
+ "metadata": {
+ "id": "BnzPs8sLNiiW"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### **ASSIGNMENT**: Combine EWC and replay on class-incremental Split MNIST\n",
+ "Train another model copy on the class-incremental version of Split MNIST, now again using *both* EWC and experience replay."
+ ],
+ "metadata": {
+ "id": "nnHbEqUclFzL"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "model_ewc_replay = copy.deepcopy(model)\n",
+ "result_dict_ewc_replay = initiate_result_dict(contexts)"
+ ],
+ "metadata": {
+ "id": "9Z4mW-SGJ2Mz"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# TO BE COMPLETED"
+ ],
+ "metadata": {
+ "id": "o8uoeyclJsol"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "#@title Possible Answer\n",
+ "\n",
+ "# Select hyperparameter for EWC\n",
+ "ewc_lambda_with_replay = 100\n",
+ "\n",
+ "# Sequentially train the model on all contexts using both EWC and Experience Replay\n",
+ "memory_sets = []\n",
+ "buffer_dataset = None\n",
+ "for context_id in range(contexts):\n",
+ "\n",
+ " # Train the model on this context\n",
+ " train_ewc_replay(model_ewc_replay, train_datasets[context_id], iters=iters, lr=lr,\n",
+ " batch_size=batch_size, current_context=context_id+1,\n",
+ " ewc_lambda=ewc_lambda_with_replay, buffer_dataset=buffer_dataset)\n",
+ " # NOTE: depending on how you had written your `train_ewc_replay` function, you might need to\n",
+ " # adjust it to make it suitable for the more general case in which the function is used\n",
+ " # for arbitrary contexts\n",
+ "\n",
+ " # Estimate/update the FI-matrix (which is stored as attribute in the network)\n",
+ " estimate_fisher(model_ewc_replay, train_datasets[context_id], n_samples=200)\n",
+ "\n",
+ " # Update memory buffer\n",
+ " classes_in_this_context = list(range(classes_per_context*context_id,\n",
+ " classes_per_context*(context_id+1)))\n",
+ " memory_sets = fill_memory_buffer(memory_sets, train_datasets[context_id],\n",
+ " buffer_size_per_class=buffer_size_per_class,\n",
+ " class_indeces=classes_in_this_context)\n",
+ " buffer_dataset = MemorySetDataset(memory_sets)\n",
+ "\n",
+ " # Evaluate the performance of the model after training on this context\n",
+ " test_all(model_ewc_replay, test_datasets, context_id+1, test_size=None,\n",
+ " result_dict=result_dict_ewc_replay, verbose=True)"
+ ],
+ "metadata": {
+ "id": "NCIl4npjKA4b"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Visualize performance on each context throughout the continual training\n",
+ "plot_list = []\n",
+ "for i in range(contexts):\n",
+ " plot_list.append(result_dict_ewc_replay[\"acc per context\"][\"context {}\".format(i + 1)])\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_ewc_replay[\"context\"],\n",
+ " line_names=['context {}'.format(i + 1) for i in range(contexts)],\n",
+ " title=\"EWC + Experience Replay ({} samples per class)\".format(buffer_size_per_class),\n",
+ " ylabel=\"Test Accuracy (%)\", xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "H8Bv45h-LAOo"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Is there a benefit of \"EWC + Experience Replay\" over only \"Experience Replay\"? Let's compare them more directly."
+ ],
+ "metadata": {
+ "id": "lYRQToDAOHCC"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "key = \"average_contexts_so_far\"\n",
+ "plot_list = [result_dict_finetune[key], result_dict_ewc[key], result_dict_replay[key],\n",
+ " result_dict_ewc_replay[key]]\n",
+ "line_names = ['Fine-tuning', 'EWC', 'Experience Replay', 'EWC + Experience Replay']\n",
+ "figure = plot_lines(\n",
+ " plot_list, x_axes=result_dict_replay[\"context\"], line_names=line_names,\n",
+ " title=\"Comparison (performance on all contexts so far)\",\n",
+ " ylabel=\"Test Accuracy (%)\", xlabel=\"Number of contexts trained so far\", figsize=(10,5),\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "Y7tYuOJqLm1F"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Bonus Part: Task- and domain-incremental learning version of Split MNIST\n",
+ "As a *bonus exercise*, let's try to think about how to adapt the code above such that we can run the same experiment, except that we perform Split MNIST according to the task- and domain-incremental learning scenarios rather than the class-incremental learning scenario."
+ ],
+ "metadata": {
+ "id": "tysftIoWOY-u"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Recall from the lecture that Split MNIST (just like any other sequence of classification tasks), can be performed according to each of the three scenarios:\n",
+ "\n",
+ "\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "qnzRpciWOnSY"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Domain-incremental Split MNIST\n",
+        "To create the domain-incremental learning version of Split MNIST, the number of possible output classes needs to be changed from ten (one output class for each digit) to two (one output class for `odd` and one output class for `even`).\n",
+ "\n",
+ "To implement this, we need to change both the way the benchmark is defined (e.g., in the second context, the digits '2' and '3' should no longer be labelled with `y=2` and `y=3`, but instead with `y=0` and `y=1`), and we need to change the way the classifier is defined (as it should now have two output units rather than ten)."
+ ],
+ "metadata": {
+ "id": "AS-70pNrv4E_"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Task-incremental Split MNIST\n",
+ "To create the task-incremental version of Split MNIST, the context label needs to be provided as input to the model. Usually, this context label will then be used to enable a \"multi-headed output layer\", meaning that there is a separate output layer per task.\n",
+ "\n",
+ "One option to implement this is to keep the number of output classes at ten and to still have the same, single output layer as in the class-incremental learning case, but to always mask out the outputs that are not in the current context.\n",
+ "\n",
+ "Another option to implement this is to set the number of output classes to two (as in the domain-incremental learning case) and to define a separate output layer per task.\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "7Ipjh-G0wjwh"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ ""
+ ],
+ "metadata": {
+ "id": "oYI2Bbr4zCDE"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "$^{\\text{a}}$ With task-incremental learning, at the computational level, there is no difference between whether the algorithm must return the within-context label or the global label, because the within-context label can be combined with the context label (which is provided as input) to get the global label."
+ ],
+ "metadata": {
+ "id": "vFDIFf_KzLu-"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/main.py b/PyTorch/build-in/other/continual-learning/main.py
new file mode 100644
index 000000000..d72979ec6
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/main.py
@@ -0,0 +1,584 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+import time
+import torch
+from torch import optim
+# -custom-written libraries
+import utils
+from utils import checkattr
+from data.load import get_context_set
+from models import define_models as define
+from models.cl.continual_learner import ContinualLearner
+from models.cl.memory_buffer import MemoryBuffer
+from models.cl import fromp_optimizer
+from train.train_task_based import train_cl, train_fromp, train_gen_classifier
+from params import options
+from params.param_stamp import get_param_stamp, get_param_stamp_from_args, visdom_name
+from params.param_values import set_method_options,check_for_errors,set_default_values
+from eval import evaluate, callbacks as cb
+from visual import visual_plt
+
+
+## Parse, post-process and validate the command-line options for an individual continual-learning experiment.
+def handle_inputs():
+    # Indicator-dict passed to the option-definers/checkers so they handle the options relevant to `main`
+    kwargs = {'main': True}
+    # Build the argument parser and register each group of input options
+    parser = options.define_args(filename="main", description='Run an individual continual learning experiment '
+                                                              'using the "academic continual learning setting".')
+    parser = options.add_general_options(parser, **kwargs)
+    parser = options.add_eval_options(parser, **kwargs)
+    parser = options.add_problem_options(parser, **kwargs)
+    parser = options.add_model_options(parser, **kwargs)
+    parser = options.add_train_options(parser, **kwargs)
+    parser = options.add_cl_options(parser, **kwargs)
+    # Parse the chosen options, then post-process and sanity-check them before returning
+    args = parser.parse_args()
+    set_method_options(args)                         # -if a method's "convenience"-option is chosen, select its components
+    set_default_values(args, also_hyper_params=True) # -set defaults; some depend on the chosen scenario / experiment
+    check_for_errors(args, **kwargs)                 # -check that no incompatible options are selected
+    return args
+
+
+def run(args, verbose=False):
+
+ # Create plots- and results-directories if needed
+ if not os.path.isdir(args.r_dir):
+ os.mkdir(args.r_dir)
+ if checkattr(args, 'pdf') and not os.path.isdir(args.p_dir):
+ os.mkdir(args.p_dir)
+
+ # If only want param-stamp, get it printed to screen and exit
+ if checkattr(args, 'get_stamp'):
+ print(get_param_stamp_from_args(args=args))
+ exit()
+
+ # Use cuda or mps (apple silicon)?
+ cuda = torch.cuda.is_available() and args.gpu
+ mps = torch.backends.mps.is_available() and args.gpu
+ if cuda:
+ device = torch.device("cuda")
+ elif mps:
+ device = torch.device("mps")
+ else:
+ device = torch.device("cpu")
+
+ # Report whether cuda or mps is used
+ if verbose:
+ if cuda:
+ print("CUDA is used")
+ elif mps:
+ print("MPS is used (apple silicon GPU)")
+ else:
+ print("NO GPU is used!")
+
+ # Set random seeds
+ np.random.seed(args.seed)
+ torch.manual_seed(args.seed)
+ if cuda:
+ torch.cuda.manual_seed(args.seed)
+ elif mps:
+ torch.mps.manual_seed(args.seed)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #----------------#
+ #----- DATA -----#
+ #----------------#
+
+ # Prepare data for chosen experiment
+ if verbose:
+ print("\n\n " +' LOAD DATA '.center(70, '*'))
+ (train_datasets, test_datasets), config = get_context_set(
+ name=args.experiment, scenario=args.scenario, contexts=args.contexts, data_dir=args.d_dir,
+ normalize=checkattr(args, "normalize"), verbose=verbose, exception=(args.seed==0),
+ singlehead=checkattr(args, 'singlehead'), train_set_per_class=checkattr(args, 'gen_classifier')
+ )
+ # The experiments in this script follow the academic continual learning setting,
+ # the above lines of code therefore load both the 'context set' and the 'data stream'
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-----------------------------#
+ #----- FEATURE EXTRACTOR -----#
+ #-----------------------------#
+
+ # Define the feature extractor
+ depth = args.depth if hasattr(args, 'depth') else 0
+ use_feature_extractor = checkattr(args, 'hidden') or (
+ checkattr(args, 'freeze_convE') and (not args.replay=="generative") and (not checkattr(args, "add_buffer"))
+ and (not checkattr(args, 'gen_classifier'))
+ )
+ #--> when the convolutional layers are frozen, it is faster to put the data through these layers only once at the
+ # beginning, but this currently does not work with iCaRL or pixel-level generative replay/classification
+ if use_feature_extractor and depth>0:
+ if verbose:
+ print("\n\n " + ' DEFINE FEATURE EXTRACTOR '.center(70, '*'))
+ feature_extractor = define.define_feature_extractor(args=args, config=config, device=device)
+ # - initialize (pre-trained) parameters
+ define.init_params(feature_extractor, args, verbose=verbose)
+ # - freeze the parameters & set model to eval()-mode
+ for param in feature_extractor.parameters():
+ param.requires_grad = False
+ feature_extractor.eval()
+ # - print characteristics of feature extractor on the screen
+ if verbose:
+ utils.print_model_info(feature_extractor)
+ # - reset size and # of channels to reflect the extracted features rather than the original images
+ config = config.copy() # -> make a copy to avoid overwriting info in the original config-file
+ config['size'] = feature_extractor.conv_out_size
+ config['channels'] = feature_extractor.conv_out_channels
+ depth = 0
+ else:
+ feature_extractor = None
+
+ # Convert original data to features (so this doesn't need to be done at run-time)
+ if (feature_extractor is not None) and args.depth>0:
+ if verbose:
+ print("\n\n " + ' PUT DATA TRHOUGH FEATURE EXTRACTOR '.center(70, '*'))
+ train_datasets = utils.preprocess(feature_extractor, train_datasets, config, batch=args.batch,
+ message='')
+ test_datasets = utils.preprocess(feature_extractor, test_datasets, config, batch=args.batch,
+ message=' ')
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #----------------------#
+ #----- CLASSIFIER -----#
+ #----------------------#
+
+ # Define the classifier
+ if verbose:
+ print("\n\n " + ' DEFINE THE CLASSIFIER '.center(70, '*'))
+ model = define.define_classifier(args=args, config=config, device=device, depth=depth)
+
+ # Some type of classifiers consist of multiple networks
+ n_networks = len(train_datasets) if (checkattr(args, 'separate_networks') or
+ checkattr(args, 'gen_classifier')) else 1
+
+ # Go through all networks to ...
+ for network_id in range(n_networks):
+ model_to_set = getattr(model, 'context{}'.format(network_id+1)) if checkattr(args, 'separate_networks') else (
+ getattr(model, 'vae{}'.format(network_id)) if checkattr(args, 'gen_classifier') else model
+ )
+ # ... initialize / use pre-trained / freeze model-parameters, and
+ define.init_params(model_to_set, args)
+ # ... define optimizer (only include parameters that "requires_grad")
+ if not checkattr(args, 'fromp'):
+ model_to_set.optim_list = [{'params': filter(lambda p: p.requires_grad, model_to_set.parameters()),
+ 'lr': args.lr}]
+ model_to_set.optim_type = args.optimizer
+ if model_to_set.optim_type in ("adam", "adam_reset"):
+ model_to_set.optimizer = optim.Adam(model_to_set.optim_list, betas=(0.9, 0.999))
+ elif model_to_set.optim_type=="sgd":
+ model_to_set.optimizer = optim.SGD(model_to_set.optim_list,
+ momentum=args.momentum if hasattr(args, 'momentum') else 0.)
+
+ # On what scenario will model be trained? If needed, indicate whether singlehead output / how to set active classes.
+ model.scenario = args.scenario
+ model.classes_per_context = config['classes_per_context']
+ model.singlehead = checkattr(args, 'singlehead')
+ model.neg_samples = args.neg_samples if hasattr(args, 'neg_samples') else "all"
+
+ # Print some model-characteristics on the screen
+ if verbose:
+ if checkattr(args, 'gen_classifier') or checkattr(args, 'separate_networks'):
+ message = '{} copies of:'.format(len(train_datasets))
+ utils.print_model_info(model.vae0 if checkattr(args, 'gen_classifier') else model.context1, message=message)
+ else:
+ utils.print_model_info(model)
+
+ # -------------------------------------------------------------------------------------------------#
+
+ # ----------------------------------------------------#
+ # ----- CL-STRATEGY: CONTEXT-SPECIFIC COMPONENTS -----#
+ # ----------------------------------------------------#
+
+ # XdG: create for every context a "mask" for each hidden fully connected layer
+ if isinstance(model, ContinualLearner) and checkattr(args, 'xdg') and args.gating_prop > 0.:
+ model.mask_dict = {}
+ for context_id in range(args.contexts):
+ model.mask_dict[context_id + 1] = {}
+ for i in range(model.fcE.layers):
+ layer = getattr(model.fcE, "fcLayer{}".format(i + 1)).linear
+ if context_id == 0:
+ model.excit_buffer_list.append(layer.excit_buffer)
+ n_units = len(layer.excit_buffer)
+ gated_units = np.random.choice(n_units, size=int(args.gating_prop * n_units), replace=False)
+ model.mask_dict[context_id + 1][i] = gated_units
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------------------------------#
+ #----- CL-STRATEGY: PARAMETER REGULARIZATION -----#
+ #-------------------------------------------------#
+
+ # Options for computing the Fisher Information matrix (e.g., EWC, Online-EWC, KFAC-EWC, NCL)
+ use_fisher = hasattr(args, 'importance_weighting') and args.importance_weighting=="fisher" and \
+ (checkattr(args, 'precondition') or checkattr(args, 'weight_penalty'))
+ if isinstance(model, ContinualLearner) and use_fisher:
+ # -how to estimate the Fisher Information
+ model.fisher_n = args.fisher_n if hasattr(args, 'fisher_n') else None
+ model.fisher_labels = args.fisher_labels if hasattr(args, 'fisher_labels') else 'all'
+ model.fisher_batch = args.fisher_batch if hasattr(args, 'fisher_batch') else 1
+ # -options relating to 'Offline EWC' (Kirkpatrick et al., 2017) and 'Online EWC' (Schwarz et al., 2018)
+ model.offline = checkattr(args, 'offline')
+ if not model.offline:
+ model.gamma = args.gamma if hasattr(args, 'gamma') else 1.
+ # -if requested, initialize Fisher with prior
+ if checkattr(args, 'fisher_init'):
+ model.data_size = args.data_size #-> sets how strong the prior is
+ model.context_count = 1 #-> makes that already on the first context regularization will happen
+ if model.fisher_kfac:
+ model.initialize_kfac_fisher()
+ else:
+ model.initialize_fisher()
+
+ # Parameter regularization by adding a weight penalty (e.g., EWC, SI, NCL, EWC-KFAC)
+ if isinstance(model, ContinualLearner) and checkattr(args, 'weight_penalty'):
+ model.weight_penalty = True
+ model.importance_weighting = args.importance_weighting
+ model.reg_strength = args.reg_strength
+ if model.importance_weighting=='si':
+ model.epsilon = args.epsilon if hasattr(args, 'epsilon') else 0.1
+
+ # Parameter regularization through pre-conditioning of the gradient (e.g., OWM, NCL)
+ if isinstance(model, ContinualLearner) and checkattr(args, 'precondition'):
+ model.precondition = True
+ model.importance_weighting = args.importance_weighting
+ model.alpha = args.alpha
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------------------------------------#
+ #----- CL-STRATEGY: FUNCTIONAL REGULARIZATION -----#
+ #--------------------------------------------------#
+
+ # Should a distillation loss (i.e., soft targets) be used? (e.g., for LwF, but also for BI-R)
+ if isinstance(model, ContinualLearner) and hasattr(args, 'replay'):
+ model.replay_targets = "soft" if checkattr(args, 'distill') else "hard"
+ model.KD_temp = args.temp if hasattr(args, 'temp') else 2.
+ if args.replay=="current" and model.replay_targets=="soft":
+ model.lwf_weighting = True
+
+ # Should the FROMP-optimizer by used?
+ if checkattr(args, 'fromp'):
+ model.optimizer = fromp_optimizer.opt_fromp(model, lr=args.lr, tau=args.tau, betas=(0.9, 0.999))
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------------#
+ #----- CL-STRATEGY: REPLAY -----#
+ #-------------------------------#
+
+ # DGR: Should a separate generative model be trained to generate the data to be replayed?
+ train_gen = True if (args.replay=="generative" and not checkattr(args, 'feedback')) else False
+ if train_gen:
+ if verbose:
+ print("\n\n " + ' SEPARATE GENERATIVE MODEL '.center(70, '*'))
+ # -specify architecture
+ generator = define.define_vae(args=args, config=config, device=device, depth=depth)
+ # -initialize parameters
+ define.init_params(generator, args, verbose=verbose)
+ # -set optimizer(s)
+ generator.optim_list = [{'params': filter(lambda p: p.requires_grad, generator.parameters()),
+ 'lr': args.lr_gen}]
+ generator.optim_type = args.optimizer
+ if generator.optim_type in ("adam", "adam_reset"):
+ generator.optimizer = optim.Adam(generator.optim_list, betas=(0.9, 0.999))
+ elif generator.optim_type == "sgd":
+ generator.optimizer = optim.SGD(generator.optim_list)
+ # -print architecture to screen
+ if verbose:
+ utils.print_model_info(generator)
+ else:
+ generator = None
+
+ # Should the model be trained with replay?
+ if isinstance(model, ContinualLearner) and hasattr(args, 'replay'):
+ model.replay_mode = args.replay
+
+ # A-GEM: How should the gradient of the loss on replayed data be used? (added, as inequality constraint or both?)
+ if isinstance(model, ContinualLearner) and hasattr(args, 'use_replay'):
+ model.use_replay = args.use_replay
+ model.eps_agem = args.eps_agem if hasattr(args, 'eps_agem') else 0.
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------#
+ #----- MEMORY BUFFER -----#
+ #-------------------------#
+
+ # Should a memory buffer be maintained? (e.g., for experience replay, FROMP or prototype-based classification)
+ use_memory_buffer = checkattr(args, 'prototypes') or checkattr(args, 'add_buffer') \
+ or args.replay=="buffer" or checkattr(args, 'fromp')
+ if isinstance(model, MemoryBuffer) and use_memory_buffer:
+ model.use_memory_buffer = True
+ model.budget_per_class = args.budget
+ model.use_full_capacity = checkattr(args, 'use_full_capacity')
+ model.sample_selection = args.sample_selection if hasattr(args, 'sample_selection') else 'random'
+ model.norm_exemplars = (model.sample_selection=="herding")
+
+ # Should the memory buffer be added to the training set of the current context?
+ model.add_buffer = checkattr(args, 'add_buffer')
+
+ # Should classification be done using prototypes as class templates?
+ model.prototypes = checkattr(args, 'prototypes')
+
+ # Relevant for iCaRL: whether to use binary distillation loss for previous classes
+ if model.label=="Classifier":
+ model.binaryCE = checkattr(args, 'bce')
+ model.binaryCE_distill = checkattr(args, 'bce_distill')
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #---------------------------#
+ #----- PARAMETER STAMP -----#
+ #---------------------------#
+
+ # Get parameter-stamp (and print on screen)
+ if verbose:
+ if verbose:
+ print('\n\n' + ' PARAMETER STAMP '.center(70, '*'))
+ param_stamp = get_param_stamp(
+ args, model.name, replay_model_name=generator.name if train_gen else None,
+ feature_extractor_name= feature_extractor.name if (feature_extractor is not None) else None, verbose=verbose,
+ )
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #---------------------#
+ #----- CALLBACKS -----#
+ #---------------------#
+
+ # Prepare for keeping track of performance during training for plotting in pdf
+ plotting_dict = evaluate.initiate_plotting_dict(args.contexts) if (
+ checkattr(args, 'pdf') or checkattr(args, 'results_dict')
+ ) else None
+
+ # Setting up Visdom environment
+ if utils.checkattr(args, 'visdom'):
+ if verbose:
+ print('\n\n'+' VISDOM '.center(70, '*'))
+ from visdom import Visdom
+ env_name = "{exp}{con}-{sce}".format(exp=args.experiment, con=args.contexts, sce=args.scenario)
+ visdom = {'env': Visdom(env=env_name), 'graph': visdom_name(args)}
+ else:
+ visdom = None
+
+ # Callbacks for reporting and visualizing loss
+ generator_loss_cbs = [
+ cb._VAE_loss_cb(log=args.loss_log, visdom=visdom, replay=False if args.replay=="none" else True,
+ model=model if checkattr(args, 'feedback') else generator, contexts=args.contexts,
+ iters_per_context=args.iters if checkattr(args, 'feedback') else args.g_iters)
+ ] if (train_gen or checkattr(args, 'feedback')) else [None]
+ # loss_cbs = [
+ # cb._gen_classifier_loss_cb(
+ # log=args.loss_log, classes=config['classes'], visdom=visdom if args.loss_log>args.iters else None,
+ # ) if checkattr(args, 'gen_classifier') else cb._classifier_loss_cb(
+ # log=args.loss_log, visdom=visdom, model=model, contexts=args.contexts, iters_per_context=args.iters,
+ # )
+ # ] if (not checkattr(args, 'feedback')) else generator_loss_cbs
+
+ class SimpleLossPrinter:
+ def __init__(self):
+ self.log_file = f"step_losses_{time.time()}.txt"
+ with open(self.log_file, 'w') as f:
+ f.write("context,step,loss\n")
+
+ def __call__(self, progress, batch_index, loss_dict, context=0, **kwargs):
+ # 提取loss值
+ loss_value = 0.0
+ if isinstance(loss_dict, dict):
+ if 'loss' in loss_dict:
+ loss_value = loss_dict['loss']
+ elif len(loss_dict) > 0:
+ loss_value = next(iter(loss_dict.values()))
+ elif hasattr(loss_dict, 'item'):
+ loss_value = loss_dict.item()
+ else:
+ loss_value = float(loss_dict) if loss_dict is not None else 0.0
+
+ # 如果是tensor,转换为数值
+ if hasattr(loss_value, 'item'):
+ loss_value = loss_value.item()
+
+ # 打印到控制台
+ print(f"Context {context:2d} | Step {batch_index:4d} | Loss: {loss_value:.6f}")
+
+ # 保存到文件
+ with open(self.log_file, 'a') as f:
+ f.write(f"{context},{batch_index},{loss_value:.6f}\n")
+
+ # 强制刷新输出
+ import sys
+ sys.stdout.flush()
+
+ simple_loss_printer = SimpleLossPrinter()
+ loss_cbs = [simple_loss_printer]
+
+ # Callbacks for evaluating and plotting generated / reconstructed samples
+ no_samples = (checkattr(args, "no_samples") or feature_extractor is not None)
+ sample_cbs = [
+ cb._sample_cb(log=args.sample_log, visdom=visdom, config=config, sample_size=args.sample_n,
+ test_datasets=None if checkattr(args, 'gen_classifier') else test_datasets)
+ ] if (train_gen or checkattr(args, 'feedback') or checkattr(args, 'gen_classifier')) and not no_samples else [None]
+
+ # Callbacks for reporting and visualizing accuracy
+ # -after each [acc_log], for visdom
+ eval_cbs = [
+ cb._eval_cb(log=args.acc_log, test_datasets=test_datasets, visdom=visdom, iters_per_context=args.iters,
+ test_size=args.acc_n)
+ ] if (not checkattr(args, 'prototypes')) and (not checkattr(args, 'gen_classifier')) else [None]
+ # -after each context, for plotting in pdf (when using prototypes / generative classifier, this is also for visdom)
+ context_cbs = [
+ cb._eval_cb(log=args.iters, test_datasets=test_datasets, plotting_dict=plotting_dict,
+ visdom=visdom if checkattr(args, 'prototypes') or checkattr(args, 'gen_classifier') else None,
+ iters_per_context=args.iters, test_size=args.acc_n, S=args.eval_s if hasattr(args, 'eval_s') else 1)
+ ]
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------#
+ #----- TRAINING -----#
+ #--------------------#
+
+ # Should a baseline be used (i.e., 'joint training' or 'cummulative training')?
+ baseline = 'joint' if checkattr(args, 'joint') else ('cummulative' if checkattr(args, 'cummulative') else 'none')
+
+ # Train model
+ if args.train:
+ if verbose:
+ print('\n\n' + ' TRAINING '.center(70, '*'))
+ # -keep track of training-time
+ if args.time:
+ start = time.time()
+ # -select correct training function
+ train_fn = train_fromp if checkattr(args, 'fromp') else (
+ train_gen_classifier if checkattr(args, 'gen_classifier') else train_cl
+ )
+ # -perform training
+ train_fn(
+ model, train_datasets, iters=args.iters, batch_size=args.batch, baseline=baseline,
+ sample_cbs=sample_cbs, eval_cbs=eval_cbs, loss_cbs=loss_cbs, context_cbs=context_cbs,
+ # -if using generative replay with a separate generative model:
+ generator=generator, gen_iters=args.g_iters if hasattr(args, 'g_iters') else args.iters,
+ gen_loss_cbs=generator_loss_cbs,
+ )
+ # -get total training-time in seconds, write to file and print to screen
+ if args.time:
+ training_time = time.time() - start
+ time_file = open("{}/time-{}.txt".format(args.r_dir, param_stamp), 'w')
+ time_file.write('{}\n'.format(training_time))
+ time_file.close()
+ if verbose and args.time:
+ print("Total training time = {:.1f} seconds\n".format(training_time))
+ # -save trained model(s), if requested
+ if args.save:
+ save_name = "mM-{}".format(param_stamp) if (
+ not hasattr(args, 'full_stag') or args.full_stag == "none"
+ ) else "{}-{}".format(model.name, args.full_stag)
+ utils.save_checkpoint(model, args.m_dir, name=save_name, verbose=verbose)
+ else:
+ # Load previously trained model(s) (if goal is to only evaluate previously trained model)
+ if verbose:
+ print("\nLoading parameters of previously trained model...")
+ load_name = "mM-{}".format(param_stamp) if (
+ not hasattr(args, 'full_ltag') or args.full_ltag == "none"
+ ) else "{}-{}".format(model.name, args.full_ltag)
+ utils.load_checkpoint(model, args.m_dir, name=load_name, verbose=verbose, strict=False)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #----------------------#
+ #----- EVALUATION -----#
+ #----------------------#
+
+ if verbose:
+ print('\n\n' + ' EVALUATION '.center(70, '*'))
+
+ # Set attributes of model that define how to do classification
+ if checkattr(args, 'gen_classifier'):
+ model.S = args.eval_s
+
+ # Evaluate accuracy of final model on full test-set
+ if verbose:
+ print("\n Accuracy of final model on test-set:")
+ accs = []
+ for i in range(args.contexts):
+ acc = evaluate.test_acc(
+ model, test_datasets[i], verbose=False, test_size=None, context_id=i, allowed_classes=list(
+ range(config['classes_per_context']*i, config['classes_per_context']*(i+1))
+ ) if (args.scenario=="task" and not checkattr(args, 'singlehead')) else None,
+ )
+ if verbose:
+ print(" - Context {}: {:.4f}".format(i + 1, acc))
+ accs.append(acc)
+ average_accs = sum(accs) / args.contexts
+ if verbose:
+ print('=> average accuracy over all {} contexts: {:.4f}\n\n'.format(args.contexts, average_accs))
+ # -write out to text file
+ file_name = "{}/acc-{}{}.txt".format(args.r_dir, param_stamp,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else "")
+ output_file = open(file_name, 'w')
+ output_file.write('{}\n'.format(average_accs))
+ output_file.close()
+ # -if requested, also save the results-dict (with accuracy after each task)
+ if checkattr(args, 'results_dict'):
+ file_name = "{}/dict-{}--n{}{}".format(args.r_dir, param_stamp, "All" if args.acc_n is None else args.acc_n,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else "")
+ utils.save_object(plotting_dict, file_name)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------#
+ #----- PLOTTING -----#
+ #--------------------#
+
+ # If requested, generate pdf
+ if checkattr(args, 'pdf'):
+ # -open pdf
+ plot_name = "{}/{}.pdf".format(args.p_dir, param_stamp)
+ pp = visual_plt.open_pdf(plot_name)
+ # -show samples and reconstructions (either from main model or from separate generator)
+ if checkattr(args, 'feedback') or args.replay=="generative" or checkattr(args, 'gen_classifier'):
+ evaluate.show_samples(
+ model if checkattr(args, 'feedback') or checkattr(args, 'gen_classifier') else generator, config,
+ size=args.sample_n, pdf=pp
+ )
+ if not checkattr(args, 'gen_classifier'):
+ for i in range(args.contexts):
+ evaluate.show_reconstruction(model if checkattr(args, 'feedback') else generator,
+ test_datasets[i], config, pdf=pp, context=i+1)
+ figure_list = [] #-> create list to store all figures to be plotted
+ # -generate all figures (and store them in [figure_list])
+ plot_list = []
+ for i in range(args.contexts):
+ plot_list.append(plotting_dict["acc per context"]["context {}".format(i + 1)])
+ figure = visual_plt.plot_lines(
+ plot_list, x_axes=plotting_dict["x_context"],
+ line_names=['context {}'.format(i + 1) for i in range(args.contexts)]
+ )
+ figure_list.append(figure)
+ figure = visual_plt.plot_lines(
+ [plotting_dict["average"]], x_axes=plotting_dict["x_context"],
+ line_names=['average all contexts so far']
+ )
+ figure_list.append(figure)
+ # -add figures to pdf
+ for figure in figure_list:
+ pp.savefig(figure)
+ # -close pdf
+ pp.close()
+ # -print name of generated plot on screen
+ if verbose:
+ print("\nGenerated plot: {}\n".format(plot_name))
+
+
+
+# Entry point: when this module is executed as a script, parse the command-line
+# options and run the experiment (with progress printed to the screen)
+if __name__ == '__main__':
+    # -load input-arguments
+    args = handle_inputs()
+    # -run experiment
+    run(args, verbose=True)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/main_pretrain.py b/PyTorch/build-in/other/continual-learning/main_pretrain.py
new file mode 100644
index 000000000..6c163e1ca
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/main_pretrain.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+import numpy as np
+import torch
+# -custom-written libraries
+import utils
+from utils import checkattr
+from data.load import get_singlecontext_datasets
+from models import define_models as define
+from train import train_standard
+from params import options
+from params.param_values import check_for_errors,set_default_values
+from eval import callbacks as cb
+from eval import evaluate
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'pretrain': True}
+ # Define input options
+ parser = options.define_args(filename="main_pretrain", description='Train classifier for pretraining conv-layers.')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ # Parse, process and check chosen options
+ args = parser.parse_args()
+ set_default_values(args, also_hyper_params=False, single_context=True) # -set defaults based on chosen experiment
+ check_for_errors(args, **kwargs) # -check for incompatible options
+ return args
+
+
+## Function for running one experiment
+def run(args, verbose=False):
+    """Run a single pre-training experiment.
+
+    Loads the dataset for the chosen experiment, defines and trains a standard
+    classifier, optionally saves the trained conv-layers and full model, and
+    finally reports accuracy on the full training- and test-set.
+
+    Args:
+        args: parsed command-line options (see handle_inputs)
+        verbose (bool): if True, progress information is printed to the screen
+    """
+
+    # Use cuda or mps (apple silicon)?
+    cuda = torch.cuda.is_available() and args.gpu
+    mps = torch.backends.mps.is_available() and args.gpu
+    if cuda:
+        device = torch.device("cuda")
+    elif mps:
+        device = torch.device("mps")
+    else:
+        device = torch.device("cpu")
+
+    # Report whether cuda or mps is used
+    if verbose:
+        if cuda:
+            print("CUDA is used")
+        elif mps:
+            print("MPS is used (apple silicon GPU)")
+        else:
+            print("NO GPU is used!")
+
+    # Set random seeds (for reproducibility; the device in use is seeded as well)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    if cuda:
+        torch.cuda.manual_seed(args.seed)
+    elif mps:
+        torch.mps.manual_seed(args.seed)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #----------------#
+    #----- DATA -----#
+    #----------------#
+
+    # Prepare data for chosen experiment
+    if verbose:
+        print("\n\n " +' LOAD DATA '.center(70, '*'))
+    (trainset, testset), config = get_singlecontext_datasets(
+        name=args.experiment, data_dir=args.d_dir, verbose=True,
+        normalize = utils.checkattr(args, "normalize"), augment = utils.checkattr(args, "augment"),
+    )
+
+    # Specify "data-loader" (among others for easy random shuffling and 'batchifying')
+    train_loader = utils.get_data_loader(trainset, batch_size=args.batch, cuda=cuda, drop_last=True)
+
+    # Determine number of iterations:
+    # -if [args.iters] is given (truthy) it takes precedence; otherwise train for [args.epochs] full data passes
+    iters = args.iters if args.iters else args.epochs*len(train_loader)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #-----------------#
+    #----- MODEL -----#
+    #-----------------#
+
+    # Specify model
+    if verbose:
+        print("\n\n " +' DEFINE MODEL '.center(70, '*'))
+    cnn = define.define_standard_classifier(args=args, config=config, device=device, depth=args.depth)
+
+    # Initialize (pre-trained) parameters
+    define.init_params(cnn, args)
+
+    # Set optimizer (Adam; only parameters that require gradients are optimized)
+    optim_list = [{'params': filter(lambda p: p.requires_grad, cnn.parameters()), 'lr': args.lr}]
+    cnn.optimizer = torch.optim.Adam(optim_list, betas=(0.9, 0.999))
+
+    # Print some model-characteristics on the screen
+    if verbose:
+        utils.print_model_info(cnn)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #---------------------#
+    #----- CALLBACKS -----#
+    #---------------------#
+
+    # Setting up Visdom environment
+    if utils.checkattr(args, 'visdom'):
+        if verbose:
+            print('\n\n'+' VISDOM '.center(70, '*'))
+        from visdom import Visdom
+        env_name = args.experiment
+        graph_name = cnn.name
+        visdom = {'env': Visdom(env=env_name), 'graph': graph_name}
+    else:
+        visdom = None
+
+    # Determine after how many iterations to evaluate the model (in visdom)
+    # -default (when not specified on the command line): once per epoch
+    loss_log = args.loss_log if (args.loss_log is not None) else len(train_loader)
+    acc_log = args.acc_log if (args.acc_log is not None) else len(train_loader)
+
+    # Define callback-functions to evaluate during training
+    # -loss
+    loss_cbs = [cb._classifier_loss_cb(log=loss_log, visdom=visdom)]
+    # -accuracy
+    eval_cbs = [cb._eval_cb(log=acc_log, test_datasets=[testset], visdom=visdom, test_size=args.acc_n)]
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #--------------------------#
+    #----- (PRE-)TRAINING -----#
+    #--------------------------#
+
+    # (Pre)train model
+    if verbose:
+        print("\n\n " +' TRAINING '.center(70, '*'))
+    train_standard.train(cnn, train_loader, iters, loss_cbs=loss_cbs, eval_cbs=eval_cbs)
+
+    # Save (pre)trained conv-layers and the full model
+    # -checkpoints are written to [args.m_dir]; the "-stag" options customize the file names
+    if checkattr(args, 'save'):
+        # -conv-layers
+        save_name = cnn.convE.name if (
+            not hasattr(args, 'convE_stag') or args.convE_stag=="none"
+        ) else "{}-{}{}".format(cnn.convE.name, args.convE_stag,
+                                "-s{}".format(args.seed) if checkattr(args, 'seed_to_stag') else "")
+        utils.save_checkpoint(cnn.convE, args.m_dir, name=save_name)
+        # -full model
+        save_name = cnn.name if (
+            not hasattr(args, 'full_stag') or args.full_stag=="none"
+        ) else "{}-{}".format(cnn.name, args.full_stag)
+        utils.save_checkpoint(cnn, args.m_dir, name=save_name)
+
+    #-------------------------------------------------------------------------------------------------#
+
+    #----------------------#
+    #----- EVALUATION -----#
+    #----------------------#
+
+    # Evaluate accuracy of final model on full test-set
+    if verbose:
+        print("\n\n " +' EVALUATION '.center(70, '*'))
+    train_acc = evaluate.test_acc(cnn, trainset, verbose=False, test_size=None)
+    test_acc = evaluate.test_acc(cnn, testset, verbose=False, test_size=None)
+    if verbose:
+        print('=> ave accuracy (on training set): {:.4f}'.format(train_acc))
+        print('=> ave accuracy (on testing set):  {:.4f}\n'.format(test_acc))
+
+
+
+# Entry point: parse the command-line options and run the pretraining
+# experiment (with progress printed to the screen)
+if __name__ == '__main__':
+    # -load input-arguments
+    args = handle_inputs()
+    # -run experiment
+    run(args, verbose=True)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/main_task_free.py b/PyTorch/build-in/other/continual-learning/main_task_free.py
new file mode 100644
index 000000000..bc35dafcf
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/main_task_free.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python3
+import os
+import numpy as np
+import time
+import torch
+from torch import optim
+# -custom-written libraries
+import utils
+from utils import checkattr
+from data.load import get_context_set
+from data.labelstream import SharpBoundaryStream, RandomStream, FuzzyBoundaryStream
+from data.datastream import DataStream
+from models import define_models as define
+from models.cl.continual_learner import ContinualLearner
+from models.cl.memory_buffer_stream import MemoryBuffer
+from train.train_stream import train_on_stream, train_gen_classifier_on_stream
+from params import options
+from params.param_stamp import get_param_stamp, get_param_stamp_from_args, visdom_name
+from params.param_values import set_method_options,check_for_errors,set_default_values
+from eval import evaluate, callbacks as cb
+
+
+## Function for specifying input-options and organizing / checking them
+def handle_inputs():
+ # Set indicator-dictionary for correctly retrieving / checking input options
+ kwargs = {'main': True, 'no_boundaries': True}
+ # Define input options
+ parser = options.define_args(filename="main_task_free",
+ description='Run a "task-free" continual learning experiment '
+ '(i.e., no [known,] sharp boundaries between contexts).')
+ parser = options.add_general_options(parser, **kwargs)
+ parser = options.add_eval_options(parser, **kwargs)
+ parser = options.add_problem_options(parser, **kwargs)
+ parser = options.add_model_options(parser, **kwargs)
+ parser = options.add_train_options(parser, **kwargs)
+ parser = options.add_cl_options(parser, **kwargs)
+ # Parse, process and check chosen options
+ args = parser.parse_args()
+ set_method_options(args) # -"convenience"-option used, select components
+ set_default_values(args, also_hyper_params=True, no_boundaries=True) # -set defaults, some based on chosen options
+ check_for_errors(args, **kwargs) # -check for incompatible options
+ return args
+
+
+def run(args, verbose=False):
+
+ # Create plots- and results-directories if needed
+ if not os.path.isdir(args.r_dir):
+ os.mkdir(args.r_dir)
+ if checkattr(args, 'pdf') and not os.path.isdir(args.p_dir):
+ os.mkdir(args.p_dir)
+
+ # If only want param-stamp, get it printed to screen and exit
+ if checkattr(args, 'get_stamp'):
+ print(get_param_stamp_from_args(args=args, no_boundaries=True))
+ exit()
+
+ # Use cuda or mps (apple silicon)?
+ cuda = torch.cuda.is_available() and args.gpu
+ mps = torch.backends.mps.is_available() and args.gpu
+ if cuda:
+ device = torch.device("cuda")
+ elif mps:
+ device = torch.device("mps")
+ else:
+ device = torch.device("cpu")
+
+ # Report whether cuda or mps is used
+ if verbose:
+ if cuda:
+ print("CUDA is used")
+ elif mps:
+ print("MPS is used (apple silicon GPU)")
+ else:
+ print("NO GPU is used!")
+
+ # Set random seeds
+ np.random.seed(args.seed)
+ torch.manual_seed(args.seed)
+ if cuda:
+ torch.cuda.manual_seed(args.seed)
+ elif mps:
+ torch.mps.manual_seed(args.seed)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-----------------------#
+ #----- CONTEXT SET -----#
+ #-----------------------#
+
+ # Prepare the context set for the chosen experiment
+ if verbose:
+ print("\n\n " +' LOAD DATA '.center(70, '*'))
+ (train_datasets, test_datasets), config = get_context_set(
+ name=args.experiment, scenario=args.scenario, contexts=args.contexts, data_dir=args.d_dir,
+ normalize=checkattr(args, "normalize"), verbose=verbose, exception=(args.seed==0),
+ singlehead=checkattr(args, 'singlehead')
+ )
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-----------------------------#
+ #----- FEATURE EXTRACTOR -----#
+ #-----------------------------#
+
+ # Define the feature extractor
+ depth = args.depth if hasattr(args, 'depth') else 0
+ use_feature_extractor = checkattr(args, 'hidden') or (
+ checkattr(args, 'freeze_convE') and (not args.replay=="generative") and (not checkattr(args, "add_buffer"))
+ and (not checkattr(args, 'gen_classifier'))
+ )
+ #--> when the convolutional layers are frozen, it is faster to put the data through these layers only once at the
+ # beginning, but this currently does not work with iCaRL or pixel-level generative replay/classification
+ if use_feature_extractor and depth>0:
+ if verbose:
+ print("\n\n " + ' DEFINE FEATURE EXTRACTOR '.center(70, '*'))
+ feature_extractor = define.define_feature_extractor(args=args, config=config, device=device)
+ # - initialize (pre-trained) parameters
+ define.init_params(feature_extractor, args, verbose=verbose)
+ # - freeze the parameters & set model to eval()-mode
+ for param in feature_extractor.parameters():
+ param.requires_grad = False
+ feature_extractor.eval()
+ # - print characteristics of feature extractor on the screen
+ if verbose:
+ utils.print_model_info(feature_extractor)
+ # - reset size and # of channels to reflect the extracted features rather than the original images
+ config = config.copy() # -> make a copy to avoid overwriting info in the original config-file
+ config['size'] = feature_extractor.conv_out_size
+ config['channels'] = feature_extractor.conv_out_channels
+ depth = 0
+ else:
+ feature_extractor = None
+
+ # Convert original data to features (so this doesn't need to be done at run-time)
+ if (feature_extractor is not None) and args.depth>0:
+ if verbose:
+ print("\n\n " + ' PUT DATA TRHOUGH FEATURE EXTRACTOR '.center(70, '*'))
+ train_datasets = utils.preprocess(feature_extractor, train_datasets, config, batch=args.batch,
+ message='')
+ test_datasets = utils.preprocess(feature_extractor, test_datasets, config, batch=args.batch,
+ message=' ')
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-----------------------#
+ #----- DATA-STREAM -----#
+ #-----------------------#
+
+ # Set up the stream of context-labels to use
+ if args.stream == "academic-setting":
+ label_stream = SharpBoundaryStream(n_contexts=args.contexts, iters_per_context=args.iters)
+ elif args.stream == "fuzzy-boundaries":
+ label_stream = FuzzyBoundaryStream(
+ n_contexts=args.contexts, iters_per_context=args.iters, fuzziness=args.fuzziness,
+ batch_size=1 if checkattr(args, 'labels_per_batch') else args.batch
+ )
+ elif args.stream == "random":
+ label_stream = RandomStream(n_contexts=args.contexts)
+ else:
+ raise NotImplementedError("Stream type '{}' not currently implemented.".format(args.stream))
+
+ # Set up the data-stream to be presented to the network
+ data_stream = DataStream(
+ train_datasets, label_stream, batch_size=args.batch, return_context=(args.scenario=="task"),
+ per_batch=True if (args.stream=="academic-setting") else checkattr(args, 'labels_per_batch'),
+ )
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #----------------------#
+ #----- CLASSIFIER -----#
+ #----------------------#
+
+ # Define the classifier
+ if verbose:
+ print("\n\n " + ' DEFINE THE CLASSIFIER '.center(70, '*'))
+ model = define.define_classifier(args=args, config=config, device=device, depth=depth, stream=True)
+
+ # Some type of classifiers consist of multiple networks
+ n_networks = len(train_datasets) if checkattr(args, 'separate_networks') else (
+ model.classes if checkattr(args, 'gen_classifier') else 1
+ )
+
+ # Go through all networks to ...
+ for network_id in range(n_networks):
+ model_to_set = getattr(model, 'context{}'.format(network_id+1)) if checkattr(args, 'separate_networks') else (
+ getattr(model, 'vae{}'.format(network_id)) if checkattr(args, 'gen_classifier') else model
+ )
+ # ... initialize / use pre-trained / freeze model-parameters, and
+ define.init_params(model_to_set, args)
+ # ... define optimizer (only include parameters that "requires_grad")
+ model_to_set.optim_list = [{'params': filter(lambda p: p.requires_grad, model_to_set.parameters()),
+ 'lr': args.lr}]
+ model_to_set.optim_type = args.optimizer
+ if model_to_set.optim_type=="adam":
+ model_to_set.optimizer = optim.Adam(model_to_set.optim_list, betas=(0.9, 0.999))
+ elif model_to_set.optim_type=="sgd":
+ model_to_set.optimizer = optim.SGD(model_to_set.optim_list,
+ momentum=args.momentum if hasattr(args, 'momentum') else 0.)
+
+ # On what scenario will model be trained?
+ model.scenario = args.scenario
+ model.classes_per_context = config['classes_per_context']
+
+ # Print some model-characteristics on the screen
+ if verbose:
+ if checkattr(args, 'gen_classifier') or checkattr(args, 'separate_networks'):
+ message = '{} copies of:'.format(len(train_datasets))
+ utils.print_model_info(model.vae0 if checkattr(args, 'gen_classifier') else model.context1, message=message)
+ else:
+ utils.print_model_info(model)
+
+ # -------------------------------------------------------------------------------------------------#
+
+ # For multiple continual learning methods: how often (after how many iters) to perform the consolidation operation?
+ # (this can be interpreted as: how many iterations together should be considered a "context")
+ model.update_every = args.update_every if hasattr(args, 'update_every') else 1
+
+ # -------------------------------------------------------------------------------------------------#
+
+ # ----------------------------------------------------#
+ # ----- CL-STRATEGY: CONTEXT-SPECIFIC COMPONENTS -----#
+ # ----------------------------------------------------#
+
+ # XdG: already indicated when defining the classifier
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------------------------------#
+ #----- CL-STRATEGY: PARAMETER REGULARIZATION -----#
+ #-------------------------------------------------#
+
+ # Parameter regularization by adding a weight penalty (e.g., SI)
+ if isinstance(model, ContinualLearner) and checkattr(args, 'weight_penalty'):
+ model.weight_penalty = True
+ model.importance_weighting = args.importance_weighting
+ model.reg_strength = args.reg_strength
+ if model.importance_weighting=='si':
+ model.epsilon = args.epsilon if hasattr(args, 'epsilon') else 0.1
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------------------------------------#
+ #----- CL-STRATEGY: FUNCTIONAL REGULARIZATION -----#
+ #--------------------------------------------------#
+
+ # Should a distillation loss (i.e., soft targets) be used? (e.g., for LwF)
+ if isinstance(model, ContinualLearner) and hasattr(args, 'replay'):
+ model.replay_targets = "soft" if checkattr(args, 'distill') else "hard"
+ model.KD_temp = args.temp if hasattr(args, 'temp') else 2.
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------------#
+ #----- CL-STRATEGY: REPLAY -----#
+ #-------------------------------#
+
+ # Should the model be trained with replay?
+ if isinstance(model, ContinualLearner) and hasattr(args, 'replay'):
+ model.replay_mode = args.replay
+
+ # A-GEM: How should the gradient of the loss on replayed data be used? (added, as inequality constraint or both?)
+ if isinstance(model, ContinualLearner) and hasattr(args, 'use_replay'):
+ model.use_replay = args.use_replay
+ model.eps_agem = args.eps_agem if hasattr(args, 'eps_agem') else 0.
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #-------------------------#
+ #----- MEMORY BUFFER -----#
+ #-------------------------#
+
+ # Should a memory buffer be maintained? (e.g., for experience replay or prototype-based classification)
+ use_memory_buffer = checkattr(args, 'prototypes') or args.replay=="buffer"
+ if isinstance(model, MemoryBuffer) and use_memory_buffer:
+ model.use_memory_buffer = True
+ model.budget = args.budget
+ model.initialize_buffer(config, return_c=(args.scenario=='task'))
+
+ # Should classification be done using prototypes as class templates?
+ model.prototypes = checkattr(args, 'prototypes')
+
+ # Relevant for "modified iCaRL": whether to use binary loss
+ if model.label=="Classifier":
+ model.binaryCE = checkattr(args, 'bce')
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #---------------------------#
+ #----- PARAMETER STAMP -----#
+ #---------------------------#
+
+ # Get parameter-stamp (and print on screen)
+ if verbose:
+ if verbose:
+ print('\n\n' + ' PARAMETER STAMP '.center(70, '*'))
+ param_stamp = get_param_stamp(
+ args, model.name, feature_extractor_name= feature_extractor.name if (feature_extractor is not None) else None,
+ verbose=verbose, no_boundaries=True,
+ )
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #---------------------#
+ #----- CALLBACKS -----#
+ #---------------------#
+
+ # Setting up Visdom environment
+ if utils.checkattr(args, 'visdom'):
+ if verbose:
+ print('\n\n'+' VISDOM '.center(70, '*'))
+ from visdom import Visdom
+ env_name = "{exp}{con}-{sce}".format(exp=args.experiment, con=args.contexts, sce=args.scenario)
+ visdom = {'env': Visdom(env=env_name), 'graph': visdom_name(args)}
+ else:
+ visdom = None
+
+ # Callbacks for reporting and visualizing loss
+ loss_cbs = [
+ cb._gen_classifier_loss_cb(
+ log=args.loss_log, classes=None, visdom=None,
+ ) if checkattr(args, 'gen_classifier') else cb._classifier_loss_cb(
+ log=args.loss_log, visdom=visdom, model=model, contexts=None,
+ )
+ ]
+
+ # Callbacks for reporting and visualizing accuracy
+ eval_cbs = [
+ cb._eval_cb(log=args.acc_log, test_datasets=test_datasets, visdom=visdom, iters_per_context=args.iters,
+ test_size=args.acc_n)
+ ]
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #--------------------#
+ #----- TRAINING -----#
+ #--------------------#
+
+ # Train model
+ if args.train:
+ if verbose:
+ print('\n\n' + ' TRAINING '.center(70, '*'))
+ # -keep track of training-time
+ if args.time:
+ start = time.time()
+ # -select training function
+ train_fn = train_gen_classifier_on_stream if checkattr(args, 'gen_classifier') else train_on_stream
+ # -perform training
+ train_fn(model, data_stream, iters=args.iters*args.contexts, eval_cbs=eval_cbs, loss_cbs=loss_cbs)
+ # -get total training-time in seconds, write to file and print to screen
+ if args.time:
+ training_time = time.time() - start
+ time_file = open("{}/time-{}.txt".format(args.r_dir, param_stamp), 'w')
+ time_file.write('{}\n'.format(training_time))
+ time_file.close()
+ if verbose and args.time:
+ print("Total training time = {:.1f} seconds\n".format(training_time))
+ # -save trained model(s), if requested
+ if args.save:
+ save_name = "mM-{}".format(param_stamp) if (
+ not hasattr(args, 'full_stag') or args.full_stag == "none"
+ ) else "{}-{}".format(model.name, args.full_stag)
+ utils.save_checkpoint(model, args.m_dir, name=save_name, verbose=verbose)
+ else:
+ # Load previously trained model(s) (if goal is to only evaluate previously trained model)
+ if verbose:
+ print("\nLoading parameters of previously trained model...")
+ load_name = "mM-{}".format(param_stamp) if (
+ not hasattr(args, 'full_ltag') or args.full_ltag == "none"
+ ) else "{}-{}".format(model.name, args.full_ltag)
+ utils.load_checkpoint(model, args.m_dir, name=load_name, verbose=verbose, strict=False)
+
+ #-------------------------------------------------------------------------------------------------#
+
+ #----------------------#
+ #----- EVALUATION -----#
+ #----------------------#
+
+ if verbose:
+ print('\n\n' + ' EVALUATION '.center(70, '*'))
+
+ # Set attributes of model that define how to do classification
+ if checkattr(args, 'gen_classifier'):
+ model.S = args.eval_s
+
+ # Evaluate accuracy of final model on full test-set
+ if verbose:
+ print("\n Accuracy of final model on test-set:")
+ accs = []
+ for context_id in range(args.contexts):
+ acc = evaluate.test_acc(
+ model, test_datasets[context_id], verbose=False, context_id=context_id, allowed_classes=list(
+ range(config['classes_per_context'] * context_id, config['classes_per_context'] * (context_id+1))
+ ) if (args.scenario == "task" and not checkattr(args, 'singlehead')) else None, test_size=None,
+ )
+ if verbose:
+ print(" - Context {}: {:.4f}".format(context_id+1, acc))
+ accs.append(acc)
+ average_accs = sum(accs) / args.contexts
+ if verbose:
+ print('=> average accuracy over all {} contexts: {:.4f}\n\n'.format(args.contexts, average_accs))
+ # -write out to text file
+ file_name = "{}/acc-{}{}.txt".format(args.r_dir, param_stamp,
+ "--S{}".format(args.eval_s) if checkattr(args, 'gen_classifier') else "")
+ output_file = open(file_name, 'w')
+ output_file.write('{}\n'.format(average_accs))
+ output_file.close()
+
+
+
+
+# Script entry point: parse the command-line arguments and run the experiment with on-screen reporting.
+if __name__ == '__main__':
+    # -load input-arguments
+    args = handle_inputs()
+    # -run experiment
+    run(args, verbose=True)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/__init__.py b/PyTorch/build-in/other/continual-learning/models/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/models/cl/__init__.py b/PyTorch/build-in/other/continual-learning/models/cl/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/models/cl/continual_learner.py b/PyTorch/build-in/other/continual-learning/models/cl/continual_learner.py
new file mode 100644
index 000000000..d7c5dc177
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/cl/continual_learner.py
@@ -0,0 +1,641 @@
+import abc
+import numpy as np
+import torch
+from torch import nn
+from torch.distributions import Categorical
+from torch.nn import functional as F
+from utils import get_data_loader
+from models import fc
+from models.utils.ncl import additive_nearest_kf
+
+
+class ContinualLearner(nn.Module, metaclass=abc.ABCMeta):
+    '''Abstract module to add continual learning capabilities to a classifier (e.g., param regularization, replay).'''
+
+    def __init__(self):
+        '''Set default values for all continual-learning related settings.
+
+        The attributes below are meant to be overwritten externally (e.g., based on command-line
+        arguments) to select the continual-learning strategy and its hyperparameters; subclasses
+        define the actual network.'''
+        super().__init__()
+
+        # List with the methods to create generators that return the parameters on which to apply param regularization
+        self.param_list = [self.named_parameters] #-> lists the parameters to regularize with SI or diagonal Fisher
+        # (default is to apply it to all parameters of the network)
+        #-> with OWM or KFAC Fisher, only parameters in [self.fcE] and [self.classifier] are regularized
+
+        # Optimizer (and whether it needs to be reset)
+        self.optimizer = None
+        self.optim_type = "adam"
+        #--> self.[optim_type]: name of optimizer, relevant if optimizer should be reset for every context
+        self.optim_list = []
+        #--> self.[optim_list]: if optimizer should be reset after each context, provide list of required settings
+
+        # Scenario, singlehead & negative samples
+        self.scenario = 'task'        # which scenario will the model be trained on
+        self.classes_per_context = 2  # number of classes per context
+        self.singlehead = False       # if Task-IL, does the model have a single-headed output layer?
+        self.neg_samples = 'all'      # if Class-IL, which output units should be set to 'active'?
+
+        # LwF / Replay
+        self.replay_mode = "none"     # should replay be used, and if so what kind? (none|current|buffer|all|generative)
+        self.replay_targets = "hard"  # should distillation loss be used? (hard|soft)
+        self.KD_temp = 2.             # temperature for distillation loss
+        self.use_replay = "normal"    # how to use the replayed data? (normal|inequality|both)
+        #   -inequality = use gradient of replayed data as inequality constraint for gradient
+        #                 of the current data (as in A-GEM; Chaudry et al., 2019; ICLR)
+        self.eps_agem = 0.            # parameter that improves numerical stability of AGEM (if set slightly above 0)
+        self.lwf_weighting = False    # LwF has different weighting of the 'stability' and 'plasticity' terms than replay
+
+        # XdG:
+        self.mask_dict = None         # -> with context-specific masks for each hidden fully-connected layer
+        self.excit_buffer_list = []   # -> with excit-buffers for all hidden fully-connected layers
+
+        # Parameter-regularization
+        self.weight_penalty = False
+        self.reg_strength = 0      #-> hyperparam: how strong to weigh the weight penalty ("regularisation strength")
+        self.precondition = False
+        self.alpha = 1e-10         #-> small constant to stabilize inversion of the Fisher Information Matrix
+        #                             (this is used as hyperparameter in OWM)
+        self.importance_weighting = 'fisher'  #-> Options for estimation of parameter importance:
+        #                                        - 'fisher': Fisher Information matrix (e.g., as in EWC, NCL)
+        #                                        - 'si':     diagonal, online importance estimation (as in SI)
+        #                                        - 'owm':    input-covariance based estimate (as in OWM)
+        self.fisher_kfac = False   #-> whether to use a block-diagonal KFAC approximation to the Fisher Information
+        #                             (alternative is a diagonal approximation)
+        self.fisher_n = None       #-> sample size for estimating FI-matrix (if "None", full pass over dataset)
+        self.fisher_labels = "all" #-> what label(s) to use for any given sample when calculating the FI matrix?
+        #                             - 'all':    use all labels, weighted according to their predicted probabilities
+        #                             - 'sample': sample one label to use, using predicted probabilities for sampling
+        #                             - 'pred':   use the predicted label (i.e., the one with highest predicted prob)
+        #                             - 'true':   use the true label (NOTE: this is also called "empirical FI")
+        self.fisher_batch = 1      #-> batch size for estimating FI-matrix (should be 1, for best results)
+        #                             (different from 1 only works if [fisher_labels]='pred' or 'true')
+        self.context_count = 0     #-> counts 'contexts' (if a prior is used, this is counted as the first context)
+        self.data_size = None      #-> inverse prior (can be set to # samples per context, or used as hyperparameter)
+        self.epsilon = 0.1         #-> dampening parameter (SI): bounds 'omega' when squared parameter-change goes to 0
+        self.offline = False       #-> use separate penalty term per context (as in original EWC paper)
+        self.gamma = 1.            #-> decay-term for old contexts' contribution to cumulative FI (as in 'Online EWC')
+        self.randomize_fisher = False  #-> if True, estimated FI values are randomly permuted within each parameter
+        #                                 tensor (control condition; see `estimate_fisher`)
+
+ def _device(self):
+ return next(self.parameters()).device
+
+ def _is_on_cuda(self):
+ return next(self.parameters()).is_cuda
+
+
+    #----------------- XdG-specific functions -----------------#
+
+    def apply_XdGmask(self, context):
+        '''Apply context-specific mask, by setting activity of pre-selected subset of nodes to zero.
+
+        [context]   <int>, starting from 1'''
+
+        assert self.mask_dict is not None
+        torchType = next(self.parameters()).detach()  # -> reference tensor, used only to create tensors of same type
+
+        # Loop over all buffers for which a context-specific mask has been specified
+        for i,excit_buffer in enumerate(self.excit_buffer_list):
+            gating_mask = np.repeat(1., len(excit_buffer))
+            gating_mask[self.mask_dict[context][i]] = 0. # -> find context-specific mask
+            excit_buffer.set_(torchType.new(gating_mask)) # -> apply this mask (in-place, so it persists)
+
+ def reset_XdGmask(self):
+ '''Remove context-specific mask, by setting all "excit-buffers" to 1.'''
+ torchType = next(self.parameters()).detach()
+ for excit_buffer in self.excit_buffer_list:
+ gating_mask = np.repeat(1., len(excit_buffer)) # -> define "unit mask" (i.e., no masking at all)
+ excit_buffer.set_(torchType.new(gating_mask)) # -> apply this unit mask
+
+
+    #------------- "Synaptic Intelligence"-specific functions -------------#
+
+ def register_starting_param_values(self):
+ '''Register the starting parameter values into the model as a buffer.'''
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ self.register_buffer('{}_SI_prev_context'.format(n), p.detach().clone())
+
+ def prepare_importance_estimates_dicts(self):
+ '''Prepare to store running importance estimates and param-values before update.'''
+ W = {}
+ p_old = {}
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ W[n] = p.data.clone().zero_()
+ p_old[n] = p.data.clone()
+ return W, p_old
+
+ def update_importance_estimates(self, W, p_old):
+ '''Update the running parameter importance estimates in W.'''
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ if p.grad is not None:
+ W[n].add_(-p.grad*(p.detach()-p_old[n]))
+ p_old[n] = p.detach().clone()
+
+    def update_omega(self, W, epsilon):
+        '''After completing training on a context, update the per-parameter regularization strength.
+
+        [W]         <dict> estimated parameter-specific contribution to changes in total loss of completed context
+        [epsilon]   <float> dampening parameter (to bound [omega] when [p_change] goes to 0)'''
+
+        # Loop over all parameters
+        for gen_params in self.param_list:
+            for n, p in gen_params():
+                if p.requires_grad:
+                    n = n.replace('.', '__')
+
+                    # Find/calculate new values for quadratic penalty on parameters
+                    p_prev = getattr(self, '{}_SI_prev_context'.format(n))
+                    p_current = p.detach().clone()
+                    p_change = p_current - p_prev
+                    omega_add = W[n]/(p_change**2 + epsilon)
+                    # -if no omega has been stored yet (i.e., after the first context), start from zero
+                    try:
+                        omega = getattr(self, '{}_SI_omega'.format(n))
+                    except AttributeError:
+                        omega = p.detach().clone().zero_()
+                    omega_new = omega + omega_add
+
+                    # Store these new values in the model (as buffers, so they move/save with the model)
+                    self.register_buffer('{}_SI_prev_context'.format(n), p_current)
+                    self.register_buffer('{}_SI_omega'.format(n), omega_new)
+
+ def surrogate_loss(self):
+ '''Calculate SI's surrogate loss.'''
+ try:
+ losses = []
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ # Retrieve previous parameter values and their normalized path integral (i.e., omega)
+ n = n.replace('.', '__')
+ prev_values = getattr(self, '{}_SI_prev_context'.format(n))
+ omega = getattr(self, '{}_SI_omega'.format(n))
+ # Calculate SI's surrogate loss, sum over all parameters
+ losses.append((omega * (p-prev_values)**2).sum())
+ return sum(losses)
+ except AttributeError:
+ # SI-loss is 0 if there is no stored omega yet
+ return torch.tensor(0., device=self._device())
+
+
+    #----------------- EWC-specific functions -----------------#
+
+ def initialize_fisher(self):
+ '''Initialize diagonal fisher matrix with the prior precision (as in NCL).'''
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ # -take initial parameters as zero for regularization purposes
+ self.register_buffer('{}_EWC_prev_context'.format(n), p.detach().clone()*0)
+ # -precision (approximated by diagonal Fisher Information matrix)
+ self.register_buffer( '{}_EWC_estimated_fisher'.format(n), torch.ones(p.shape) / self.data_size)
+
+ def estimate_fisher(self, dataset, allowed_classes=None):
+ '''After completing training on a context, estimate diagonal of Fisher Information matrix.
+
+ [dataset]: to be used to estimate FI-matrix
+ [allowed_classes]: with class-indeces of 'allowed' or 'active' classes'''
+
+ # Prepare to store estimated Fisher Information matrix
+ est_fisher_info = {}
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ est_fisher_info[n] = p.detach().clone().zero_()
+
+ # Set model to evaluation mode
+ mode = self.training
+ self.eval()
+
+ # Create data-loader to give batches of size 1 (unless specifically asked to do otherwise)
+ data_loader = get_data_loader(dataset, batch_size=1 if self.fisher_batch is None else self.fisher_batch,
+ cuda=self._is_on_cuda())
+
+ # Estimate the FI-matrix for [self.fisher_n] batches of size 1
+ for index,(x,y) in enumerate(data_loader):
+ # break from for-loop if max number of samples has been reached
+ if self.fisher_n is not None:
+ if index > self.fisher_n:
+ break
+ # run forward pass of model
+ x = x.to(self._device())
+ output = self(x) if allowed_classes is None else self(x)[:, allowed_classes]
+ # calculate FI-matrix (according to one of the four options)
+ if self.fisher_labels=='all':
+ # -use a weighted combination of all labels
+ with torch.no_grad():
+ label_weights = F.softmax(output, dim=1) # --> get weights, which shouldn't have gradient tracked
+ for label_index in range(output.shape[1]):
+ label = torch.LongTensor([label_index]).to(self._device())
+ negloglikelihood = F.cross_entropy(output, label) #--> get neg log-likelihoods for this class
+ # Calculate gradient of negative loglikelihood
+ self.zero_grad()
+ negloglikelihood.backward(retain_graph=True if (label_index+1) use provided true label to calculate loglikelihood --> "empirical Fisher":
+ label = torch.LongTensor([y]) if type(y)==int else y #-> shape: [self.fisher_batch]
+ if allowed_classes is not None:
+ label = [int(np.where(i == allowed_classes)[0][0]) for i in label.numpy()]
+ label = torch.LongTensor(label)
+ label = label.to(self._device())
+ elif self.fisher_labels=='pred':
+ # --> use predicted label to calculate loglikelihood:
+ label = output.max(1)[1]
+ elif self.fisher_labels=='sample':
+ # --> sample one label from predicted probabilities
+ with torch.no_grad():
+ label_weights = F.softmax(output, dim=1) #--> get predicted probabilities
+ weights_array = np.array(label_weights[0].cpu()) #--> change to np-array, avoiding rounding errors
+ label = np.random.choice(len(weights_array), 1, p=weights_array/weights_array.sum())
+ label = torch.LongTensor(label).to(self._device()) #--> change label to tensor on correct device
+ # calculate negative log-likelihood
+ negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
+ # calculate gradient of negative loglikelihood
+ self.zero_grad()
+ negloglikelihood.backward()
+ # square gradients and keep running sum
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ if p.grad is not None:
+ est_fisher_info[n] += p.grad.detach() ** 2
+ if self.randomize_fisher:
+ idx = torch.randperm(est_fisher_info[n].nelement())
+ est_fisher_info[n] = est_fisher_info[n].view(-1)[idx].view(est_fisher_info[n].size())
+
+ # Normalize by sample size used for estimation
+ est_fisher_info = {n: p/index for n, p in est_fisher_info.items()}
+
+ # Store new values in the network
+ for gen_params in self.param_list:
+ for n, p in gen_params():
+ if p.requires_grad:
+ n = n.replace('.', '__')
+ # -mode (=MAP parameter estimate)
+ self.register_buffer('{}_EWC_prev_context{}'.format(n, self.context_count+1 if self.offline else ""),
+ p.detach().clone())
+ # -precision (approximated by diagonal Fisher Information matrix)
+ if (not self.offline) and hasattr(self, '{}_EWC_estimated_fisher'.format(n)):
+ existing_values = getattr(self, '{}_EWC_estimated_fisher'.format(n))
+ est_fisher_info[n] += self.gamma * existing_values
+ self.register_buffer(
+ '{}_EWC_estimated_fisher{}'.format(n, self.context_count+1 if self.offline else ""), est_fisher_info[n]
+ )
+
+ # Increase context-count
+ self.context_count += 1
+
+ # Set model back to its initial mode
+ self.train(mode=mode)
+
+    def ewc_loss(self):
+        '''Calculate EWC's quadratic weight-penalty (zero until a Fisher estimate has been stored).'''
+        try:
+            losses = []
+            # If "offline EWC", loop over all previous contexts as each context has separate penalty term
+            num_penalty_terms = self.context_count if (self.offline and self.context_count>0) else 1
+            for context in range(1, num_penalty_terms+1):
+                for gen_params in self.param_list:
+                    for n, p in gen_params():
+                        if p.requires_grad:
+                            # Retrieve stored mode (MAP estimate) and precision (Fisher Information matrix)
+                            n = n.replace('.', '__')
+                            mean = getattr(self, '{}_EWC_prev_context{}'.format(n, context if self.offline else ""))
+                            fisher = getattr(self, '{}_EWC_estimated_fisher{}'.format(n, context if self.offline else ""))
+                            # If "online EWC", apply decay-term to the running sum of the Fisher Information matrices
+                            fisher = fisher if self.offline else self.gamma*fisher
+                            # Calculate weight regularization loss
+                            losses.append((fisher * (p-mean)**2).sum())
+            # Sum the regularization loss from all parameters (and from all contexts, if "offline EWC")
+            return (1./2)*sum(losses)
+        except AttributeError:
+            # Regularization loss is 0 if there are no stored mode and precision yet
+            return torch.tensor(0., device=self._device())
+
+
+    # ----------------- KFAC-specific functions -----------------#
+
+ def initialize_kfac_fisher(self):
+ '''Initialize Kronecker-factored Fisher matrix with the prior precision (as in NCL).'''
+ fcE = self.fcE
+ classifier = self.classifier
+
+ def initialize_for_fcLayer(layer):
+ if not isinstance(layer, fc.layers.fc_layer):
+ raise NotImplemented
+ linear = layer.linear
+ g_dim, a_dim = linear.weight.shape
+ abar_dim = a_dim + 1 if linear.bias is not None else a_dim
+ A = torch.eye(abar_dim) / np.sqrt(self.data_size)
+ G = torch.eye(g_dim) / np.sqrt(self.data_size)
+ return {"A": A, "G": G, "weight": linear.weight.data * 0,
+ "bias": None if linear.bias is None else linear.bias.data * 0}
+
+ def initialize():
+ est_fisher_info = {}
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ layer = getattr(fcE, label)
+ est_fisher_info[label] = initialize_for_fcLayer(layer)
+ est_fisher_info["classifier"] = initialize_for_fcLayer(classifier)
+ return est_fisher_info
+
+ self.KFAC_FISHER_INFO = initialize()
+
+ def estimate_kfac_fisher(self, dataset, allowed_classes=None):
+ """After completing training on a context, estimate KFAC Fisher Information matrix.
+
+ [dataset]: to be used to estimate FI-matrix
+ [allowed_classes]: with class-indeces of 'allowed' or 'active' classes
+ """
+
+ print('computing kfac fisher')
+
+ fcE = self.fcE
+ classifier = self.classifier
+
+ def initialize_for_fcLayer(layer):
+ if not isinstance(layer, fc.layers.fc_layer):
+ raise NotImplemented
+ linear = layer.linear
+ g_dim, a_dim = linear.weight.shape
+ abar_dim = a_dim + 1 if linear.bias is not None else a_dim
+ A = torch.zeros(abar_dim, abar_dim)
+ G = torch.zeros(g_dim, g_dim)
+ if linear.bias is None:
+ bias = None
+ else:
+ bias = linear.bias.data.clone()
+ return {"A": A, "G": G, "weight": linear.weight.data.clone(), "bias": bias}
+
+ def initialize():
+ est_fisher_info = {}
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ layer = getattr(fcE, label)
+ est_fisher_info[label] = initialize_for_fcLayer(layer)
+ est_fisher_info["classifier"] = initialize_for_fcLayer(classifier)
+ return est_fisher_info
+
+ def update_fisher_info_layer(est_fisher_info, intermediate, label, layer, n_samples, weight=1):
+ if not isinstance(layer, fc.layers.fc_layer):
+ raise NotImplemented
+ if not hasattr(layer, 'phantom'):
+ raise Exception(f"Layer {label} does not have phantom parameters")
+ g = layer.phantom.grad.detach()
+ G = g[..., None] @ g[..., None, :]
+ _a = intermediate[label].detach()
+ # Here we do one batch at a time (not ideal)
+ assert _a.shape[0] == 1
+ a = _a[0]
+
+ if classifier.bias is None:
+ abar = a
+ else:
+ o = torch.ones(*a.shape[0:-1], 1).to(self._device())
+ abar = torch.cat((a, o), -1)
+ A = abar[..., None] @ abar[..., None, :]
+ Ao = est_fisher_info[label]["A"].to(self._device())
+ Go = est_fisher_info[label]["G"].to(self._device())
+ est_fisher_info[label]["A"] = Ao + weight * A / n_samples
+ est_fisher_info[label]["G"] = Go + weight * G / n_samples
+
+ def update_fisher_info(est_fisher_info, intermediate, n_samples, weight=1):
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ layer = getattr(fcE, label)
+ update_fisher_info_layer(est_fisher_info, intermediate, label, layer, n_samples, weight=weight)
+ update_fisher_info_layer(est_fisher_info, intermediate, "classifier", self.classifier, n_samples,
+ weight=weight)
+
+ # initialize estimated fisher info
+ est_fisher_info = initialize()
+ # Set model to evaluation mode
+ mode = self.training
+ self.eval()
+
+ # Create data-loader to give batches of size 1 (unless specifically asked to do otherwise)
+ data_loader = get_data_loader(dataset, batch_size=1 if self.fisher_batch is None else self.fisher_batch,
+ cuda=self._is_on_cuda())
+
+ n_samples = len(data_loader) if self.fisher_n is None else self.fisher_n
+
+ # Estimate the FI-matrix for [self.fisher_n] batches of size 1
+ for i, (x, y) in enumerate(data_loader):
+ # break from for-loop if max number of samples has been reached
+ if i > n_samples:
+ break
+ # run forward pass of model
+ x = x.to(self._device())
+ _output, intermediate = self(x, return_intermediate=True)
+ output = _output if allowed_classes is None else _output[:, allowed_classes]
+ # calculate FI-matrix (according to one of the four options)
+ if self.fisher_labels=='all':
+ # -use a weighted combination of all labels
+ with torch.no_grad():
+ label_weights = F.softmax(output, dim=1) # --> get weights, which shouldn't have gradient tracked
+ for label_index in range(output.shape[1]):
+ label = torch.LongTensor([label_index]).to(self._device())
+ negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
+ # Calculate gradient of negative loglikelihood
+ self.zero_grad()
+ negloglikelihood.backward(retain_graph=True if (label_index+1) use provided true label to calculate loglikelihood --> "empirical Fisher":
+ label = torch.LongTensor([y]) if type(y) == int else y # -> shape: [self.fisher_batch]
+ if allowed_classes is not None:
+ label = [int(np.where(i == allowed_classes)[0][0]) for i in label.numpy()]
+ label = torch.LongTensor(label)
+ label = label.to(self._device())
+ elif self.fisher_labels == 'pred':
+ # --> use predicted label to calculate loglikelihood:
+ label = output.max(1)[1]
+ elif self.fisher_labels == 'sample':
+ # --> sample one label from predicted probabilities
+ with torch.no_grad():
+ label_weights = F.softmax(output, dim=1) # --> get predicted probabilities
+ weights_array = np.array(label_weights[0].cpu()) # --> change to np-array, avoiding rounding errors
+ label = np.random.choice(len(weights_array), 1, p=weights_array / weights_array.sum())
+ label = torch.LongTensor(label).to(self._device()) # --> change label to tensor on correct device
+
+ # calculate negative log-likelihood
+ negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
+
+ # Calculate gradient of negative loglikelihood
+ self.zero_grad()
+ negloglikelihood.backward()
+ update_fisher_info(est_fisher_info, intermediate, n_samples)
+
+
+ for label in est_fisher_info:
+ An = est_fisher_info[label]["A"].to(self._device()) # new kronecker factor
+ Gn = est_fisher_info[label]["G"].to(self._device()) # new kronecker factor
+ Ao = self.gamma * self.KFAC_FISHER_INFO[label]["A"].to(self._device()) # old kronecker factor
+ Go = self.KFAC_FISHER_INFO[label]["G"].to(self._device()) # old kronecker factor
+
+ As, Gs = additive_nearest_kf({"A": Ao, "G": Go}, {"A": An, "G": Gn}) # sum of kronecker factors
+ self.KFAC_FISHER_INFO[label]["A"] = As
+ self.KFAC_FISHER_INFO[label]["G"] = Gs
+
+ for param_name in ["weight", "bias"]:
+ p = est_fisher_info[label][param_name].to(self._device())
+ self.KFAC_FISHER_INFO[label][param_name] = p
+
+ # Set model back to its initial mode
+ self.train(mode=mode)
+
+
+ def ewc_kfac_loss(self):
+ fcE = self.fcE
+
+ def loss_for_layer(label, layer):
+ if not isinstance(layer, fc.layers.fc_layer):
+ raise NotImplemented
+ info = self.KFAC_FISHER_INFO[label]
+ A = info["A"].detach().to(self._device())
+ G = info["G"].detach().to(self._device())
+ bias0 = info["bias"]
+ weight0 = info["weight"]
+ bias = layer.linear.bias
+ weight = layer.linear.weight
+ if bias0 is not None and bias is not None:
+ p = torch.cat([weight, bias[..., None]], -1)
+ p0 = torch.cat([weight0, bias0[..., None]], -1)
+ else:
+ p = weight
+ p0 = weight0
+ assert p.shape[-1] == A.shape[1]
+ assert p0.shape[-1] == A.shape[1]
+ dp = p.to(self._device()) - p0.to(self._device())
+ return torch.sum(dp * (G @ dp @ A))
+
+ classifier = self.classifier
+ if self.context_count > 0:
+ l = loss_for_layer("classifier", classifier)
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ nl = loss_for_layer(label, getattr(fcE, label))
+ l += nl
+ return 0.5 * l
+ else:
+ return torch.tensor(0.0, device=self._device())
+
+
+    # ----------------- OWM-specific functions -----------------#
+
+ def estimate_owm_fisher(self, dataset, **kwargs):
+ '''After completing training on a context, estimate OWM Fisher Information matrix based on [dataset].'''
+
+ ## QUESTION: Should OWM not also be applied to the outputs??
+
+ fcE = self.fcE
+ classifier = self.classifier
+
+ def initialize_for_fcLayer(layer):
+ if not isinstance(layer, fc.layers.fc_layer):
+ raise NotImplemented
+ linear = layer.linear
+ g_dim, a_dim = linear.weight.shape
+ abar_dim = a_dim + 1 if linear.bias is not None else a_dim
+ A = torch.zeros(abar_dim, abar_dim)
+ return {'A': A, 'weight': linear.weight.data.clone(),
+ 'bias': None if linear.bias is None else linear.bias.data.clone()}
+
+ def initialize():
+ est_fisher_info = {}
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ layer = getattr(fcE, label)
+ est_fisher_info[label] = initialize_for_fcLayer(layer)
+ est_fisher_info['classifier'] = initialize_for_fcLayer(classifier)
+ return est_fisher_info
+
+ def update_fisher_info_layer(est_fisher_info, intermediate, label, n_samples):
+ _a = intermediate[label].detach()
+ # Here we do one batch at a time (not ideal)
+ assert (_a.shape[0] == 1)
+ a = _a[0]
+ if classifier.bias is None:
+ abar = a
+ else:
+ o = torch.ones(*a.shape[0:-1], 1).to(self._device())
+ abar = torch.cat((a, o), -1)
+ A = abar[..., None] @ abar[..., None, :]
+ Ao = est_fisher_info[label]['A'].to(self._device())
+ est_fisher_info[label]['A'] = Ao + A / n_samples
+
+ def update_fisher_info(est_fisher_info, intermediate, n_samples):
+ for i in range(1, fcE.layers + 1):
+ label = f"fcLayer{i}"
+ update_fisher_info_layer(est_fisher_info, intermediate, label, n_samples)
+ update_fisher_info_layer(est_fisher_info, intermediate, 'classifier', n_samples)
+
+ # initialize estimated fisher info
+ est_fisher_info = initialize()
+ # Set model to evaluation mode
+ mode = self.training
+ self.eval()
+
+ # Create data-loader to give batches of size 1
+ data_loader = get_data_loader(dataset, batch_size=1, cuda=self._is_on_cuda())
+
+ n_samples = len(data_loader) if self.fisher_n is None else self.fisher_n
+
+ # Estimate the FI-matrix for [self.fisher_n] batches of size 1
+ for i, (x, _) in enumerate(data_loader):
+ if i > n_samples:
+ break
+ # run forward pass of model
+ x = x.to(self._device())
+ output, intermediate = self(x, return_intermediate=True)
+ # update OWM importance matrix
+ self.zero_grad()
+ update_fisher_info(est_fisher_info, intermediate, n_samples)
+
+ if self.context_count == 0:
+ self.KFAC_FISHER_INFO = {}
+
+ for label in est_fisher_info:
+ An = est_fisher_info[label]['A'].to(self._device()) # new kronecker factor
+ if self.context_count == 0:
+ self.KFAC_FISHER_INFO[label] = {}
+ As = An
+ else:
+ Ao = self.gamma * self.KFAC_FISHER_INFO[label]['A'].to(self._device()) # old kronecker factor
+ frac = 1 / (self.context_count + 1)
+ As = (1 - frac) * Ao + frac * An
+
+ self.KFAC_FISHER_INFO[label]['A'] = As
+
+ for param_name in ['weight', 'bias']:
+ p = est_fisher_info[label][param_name].to(self._device())
+ self.KFAC_FISHER_INFO[label][param_name] = p
+
+ self.context_count += 1
+
+ # Set model back to its initial mode
+ self.train(mode=mode)
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/cl/fromp_optimizer.py b/PyTorch/build-in/other/continual-learning/models/cl/fromp_optimizer.py
new file mode 100644
index 000000000..97f8ed98e
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/cl/fromp_optimizer.py
@@ -0,0 +1,415 @@
+import math
+import numpy as np
+import random
+import torch
+from torch.optim.optimizer import Optimizer
+from torch.nn.utils import parameters_to_vector, vector_to_parameters
+import torch.nn as nn
+import torch.nn.functional as F
+from models.fc import excitability_modules as em
+
+
+## This code has been based upon: https://github.com/team-approx-bayes/fromp (accessed 8 July 2021)
+
+
+#--------------------------------------------------------------------------------------------#
+
+############################
+## COMPUTATION OF HESSIAN ##
+############################
+
+# Calculate the diagonal elements of the hessian
def softmax_hessian(f):
    """Diagonal of the per-sample softmax Hessian for logits [f] (same shape as [f])."""
    probs = F.softmax(f, dim=-1)
    return probs - probs * probs
+
+# Calculate the full softmax hessian
def full_softmax_hessian(f):
    """Full per-sample softmax Hessian, diag(s) - s s^T, of size [batch] x [classes] x [classes]."""
    probs = F.softmax(f, dim=-1)
    outer = probs[:, :, None] * probs[:, None, :]
    return torch.diag_embed(probs) - outer
+
+
+#--------------------------------------------------------------------------------------------#
+
+######################
+## HELPER FUNCTIONS ##
+######################
+
+def _update_input(self, input, output):
+ self.input = input[0].data
+ self.output = output
+
+def _check_param_device(param, old_param_device):
+ if old_param_device is None:
+ old_param_device = param.get_device() if param.is_cuda else -1
+ else:
+ warn = (param.get_device() != old_param_device) if param.is_cuda else (old_param_device != -1)
+ if warn:
+ raise TypeError('Parameters are on different devices, not currently supported.')
+ return old_param_device
+
def _parameters_to_matrix(parameters):
    """Concatenate [parameters] into one matrix, flattening each to 2D via its leading dimension.

    All parameters must live on the same device (checked via [_check_param_device]).
    """
    device_seen = None
    rows = []
    for p in parameters:
        device_seen = _check_param_device(p, device_seen)
        rows.append(p.view(p.shape[0], -1))
    return torch.cat(rows, dim=-1)
+
def _parameters_grads_to_vector(parameters):
    """Flatten and concatenate the gradients of [parameters] into a single 1D vector.

    Raises:
        ValueError: if any parameter has no gradient.
    """
    device_seen = None
    chunks = []
    for p in parameters:
        device_seen = _check_param_device(p, device_seen)
        if p.grad is None:
            raise ValueError('Gradient is not available.')
        chunks.append(p.grad.data.view(-1))
    return torch.cat(chunks, dim=-1)
+
+
+#--------------------------------------------------------------------------------------------#
+
+#####################
+## FROMP OPTIMIZER ##
+#####################
+
+class opt_fromp(Optimizer):
+ '''Implements the FROMP algorithm (Pan et al., 2020 NeurIPS) as a PyTorch-optimizer, combined with Adam.
+
+ Args:
+ model (nn.Module): model whose parameters are to be trained
+ lr (float, optional): learning rate (default: 0.001)
+ betas (tuple, optional): coefs for computing running mean of gradient and its square (default: (0.9, 0.999))
+ amsgrad (bool, optional): whether to use the AMSGrad-variant of the Adam algorithm (default: False)
+ tau (float, optional): how strongly to weight the regularization term, FROMP's main hyperparameter (default: 1.)
+ eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
+ prior_prec (float, optional): ... (default: 1e-3)
+ grad_clip_norm (float, optional): what value to clip the norm of the gradient to during training (default: 1.)
+ per_context (bool, optional): ... (default: True)
+ '''
+
+ def __init__(self, model, lr=1e-3, betas=(0.9, 0.999), amsgrad=False,
+ tau=1., eps=1e-8, prior_prec=1e-3, grad_clip_norm=1., per_context=True):
+
+ # Check for invalid arguments
+ if not 0.0 <= lr:
+ raise ValueError("invalid learning rate: {}".format(lr))
+ if not 0.0 <= eps:
+ raise ValueError("invalid epsilon value: {}".format(eps))
+ if not 0.0 <= betas[0] < 1.0:
+ raise ValueError("invalid beta parameter at index 0: {}".format(betas[0]))
+ if not 0.0 <= betas[1] < 1.0:
+ raise ValueError("invalid beta parameter at index 1: {}".format(betas[1]))
+ if not 0.0 <= prior_prec:
+ raise ValueError("invalid prior precision: {}".format(prior_prec))
+ if grad_clip_norm is not None and not 0.0 <= grad_clip_norm:
+ raise ValueError("invalid gradient clip norm: {}".format(grad_clip_norm))
+ if not 0.0 <= tau:
+ raise ValueError("invalid tau: {}".format(tau))
+
+ # Deal with arguments set per parameter group (ALTHOUGH PARAMETER GROUPS ARE NOT FUNCTIONAL WITH THIS OPTIMIZER)
+ defaults = dict(lr=lr, betas=betas, eps=eps, prior_prec=prior_prec, grad_clip_norm=grad_clip_norm,
+ tau=tau, amsgrad=amsgrad)
+ super(opt_fromp, self).__init__(model.parameters(), defaults)
+
+ # Set the model and its trainable modules
+ self.per_context = per_context
+ self.model = model
+ self.train_modules = []
+ self.set_train_modules(model)
+ for module in self.train_modules:
+ module.register_forward_hook(_update_input)
+
+ # Initialize the optimizer's state variables
+ parameters = self.param_groups[0]['params']
+ p = parameters_to_vector(parameters)
+ self.state['mu'] = p.clone().detach()
+ self.state['mu_previous'] = p.clone().detach()
+ self.state['fisher'] = torch.zeros_like(self.state['mu'])
+ self.state['step'] = 0
+ self.state['exp_avg'] = torch.zeros_like(self.state['mu'])
+ self.state['exp_avg_sq'] = torch.zeros_like(self.state['mu'])
+ if amsgrad:
+ self.state['max_exp_avg_sq'] = torch.zeros_like(self.state['mu'])
+
+ # Set all trainable modules, required for calculating Jacobians in PyTorch
+ def set_train_modules(self, module):
+ if len(list(module.children())) == 0:
+ if len(list(module.parameters())) != 0:
+ self.train_modules.append(module)
+ else:
+ for child in list(module.children()):
+ self.set_train_modules(child)
+
+ #----------------------------------------------------------------------------------------------------------#
+
    # Calculate the gradient of the parameters [lc] with respect to the loss (required for calculating the Jacobian)
    def cac_grad(self, loss, lc, retain_graph=None):
        '''Return per-sample parameter gradients of [loss].

        [lc] lists the outputs of all modules in [self.train_modules]; d[loss]/d[lc] is combined
        with the module inputs stored by the forward hooks to reconstruct, per sample, the
        gradient w.r.t. every weight and bias.

        Returns a 2D tensor: one row per sample, one column per parameter.
        '''
        linear_grad = torch.autograd.grad(loss, lc, retain_graph=retain_graph)
        grad = []
        for i, module in enumerate(self.train_modules):
            # print("--> Starting module {} of {}".format(i, len(self.train_modules)))
            g = linear_grad[i].detach()        # gradient w.r.t. this module's output
            a = module.input.clone().detach()  # this module's input (stored by the forward hook)
            m = a.shape[0]                     # batch size

            if isinstance(module, nn.Linear) or isinstance(module, em.LinearExcitability):
                with torch.no_grad():
                    # per-sample weight-gradient = outer product of output-grad and input
                    grad.append(torch.einsum('ij,ik->ijk', g, a))
                if module.bias is not None:
                    grad.append(g)

            if isinstance(module, nn.Conv2d):
                with torch.no_grad():
                    # unfold the input so the convolution becomes a matrix product
                    a = F.unfold(a, kernel_size=module.kernel_size, dilation=module.dilation, padding=module.padding,
                                 stride=module.stride)
                    _, k, hw = a.shape
                    _, c, _, _ = g.shape
                    g = g.view(m, c, -1)
                    grad.append(torch.einsum('ijl,ikl->ijk', g, a))
                    if module.bias is not None:
                        a = torch.ones((m, 1, hw), device=a.device)
                        grad.append(torch.einsum('ijl,ikl->ijk', g, a))

            if isinstance(module, nn.BatchNorm1d):
                with torch.no_grad():
                    grad.append(torch.mul(g, a))
                if module.bias is not None:
                    grad.append(g)

            if isinstance(module, nn.BatchNorm2d):
                with torch.no_grad():
                    # sum the elementwise product over the spatial dimensions
                    grad.append(torch.einsum('ijkl->ij', torch.mul(g, a)))
                if module.bias is not None:
                    grad.append(torch.einsum('ijkl->ij', g))

        grad_m = _parameters_to_matrix(grad)
        return grad_m.detach()
+
+ # Calculate the Jacobian matrix
+ def cac_jacobian(self, output, lc):
+ if output.dim() > 2:
+ raise ValueError('the dimension of output must be smaller than 3.')
+ elif output.dim() == 2:
+ num_fun = output.shape[1]
+ grad = []
+ for i in range(num_fun):
+ retain_graph = None if i == num_fun-1 else True
+ loss = output[:, i].sum()
+ g = self.cac_grad(loss, lc, retain_graph=retain_graph)
+ grad.append(g)
+ result = torch.zeros((grad[0].shape[0], grad[0].shape[1], num_fun),
+ dtype=grad[0].dtype, device=grad[0].device)
+ for i in range(num_fun):
+ result[:, :, i] = grad[i]
+ return result
+
+ #----------------------------------------------------------------------------------------------------------#
+
+ # Calculate values (memorable_logits, hkh_l) for regularisation term (all but the first context)
+ def init_context(self, context_id, eps=1e-6, reset=True, classes_per_context=2, label_sets=None):
+
+ # If requested, reset the adam-optimizer
+ if reset:
+ self.state['exp_avg'] = torch.zeros_like(self.state['mu'])
+ self.state['exp_avg_sq'] = torch.zeros_like(self.state['mu'])
+ self.state['step'] = 0
+
+ # Initiliase objects to be stored as empty lists
+ self.state['kernel_inv'] = []
+ self.state['memorable_logits'] = []
+
+ # Compute covariance (using the pre-computed Fisher matrix)
+ fisher = self.state['fisher']
+ prior_prec = self.param_groups[0]['prior_prec']
+ covariance = 1. / (fisher + prior_prec) #-> size: [n_params]
+
+ # Get and store parameter values
+ mu = self.state['mu']
+ self.state['mu_previous'] = mu.clone().detach()
+ parameters = self.param_groups[0]['params']
+ vector_to_parameters(mu, parameters)
+
+ # Loop over all contexts so far
+ self.model.eval()
+ for i in range(context_id if self.per_context else 1):
+
+ # Collect all memorable points for this context from the memory buffer
+ classes_in_context = range(classes_per_context*i, classes_per_context*(i+1)) if self.per_context else range(
+ classes_per_context*context_id
+ )
+ mem_points_np = np.concatenate([self.model.memory_sets[id] for id in classes_in_context], axis=0)
+ memorable_points_t = torch.from_numpy(mem_points_np).to(self.model._device())
+ #-> size: [n_per_context]x[ch]x[length]x[width]
+
+ # Compute and store the mean of their function values (i.e., the predicted logits)
+ self.zero_grad()
+ logits = self.model.forward(memorable_points_t)
+ preds = logits if (label_sets[i] is None) else logits[:, label_sets[i]]
+ preds = torch.softmax(preds, dim=-1) #-> size: [n_per_context]x[classes_per_context]
+ self.state['memorable_logits'].append(preds.detach())
+
+ # Compute and store the kernel of their function values
+ lc = []
+ for module in self.train_modules:
+ lc.append(module.output)
+ kernel_inv = []
+ num_classes = preds.shape[-1]
+
+ for class_id in range(num_classes):
+ loss = preds[:, class_id].sum()
+ retain_graph = True if class_id < num_classes-1 else None
+ grad = self.cac_grad(loss, lc, retain_graph=retain_graph) #-> size: [n_mem_points]x[n_params]
+ with torch.no_grad():
+ kernel = torch.einsum('ij,j,pj->ip', grad, covariance, grad) + \
+ torch.eye(grad.shape[0], dtype=grad.dtype, device=grad.device)*eps
+ # -store inverse of kernel (size: [n_mem_points]x[n_mem_points]) for this class via Cholesky decomp
+ kernel_inv.append(torch.cholesky_inverse(torch.cholesky(kernel)))
+
+ self.state['kernel_inv'].append(kernel_inv)
+
+ # After training on a new context, update the fisher matrix estimate
+ def update_fisher(self, dataloader, label_set=None):
+ fisher = self.state['fisher']
+
+ self.model.eval()
+ for data,_ in dataloader:
+ data = data.to(self.model._device())
+
+ self.zero_grad()
+ logits = self.model.forward(data)
+ preds = logits if label_set is None else logits[:, label_set]
+
+ lc = []
+ for module in self.train_modules:
+ lc.append(module.output)
+ jac = self.cac_jacobian(preds, lc).detach()
+ with torch.no_grad():
+ hes = full_softmax_hessian(preds.detach())
+ jhj = torch.einsum('ijd,idp,ijp->j', jac, hes, jac)
+ fisher.add_(jhj)
+
+ #----------------------------------------------------------------------------------------------------------#
+
    def step(self, x, y, label_sets, context_id, classes_per_context):
        '''Performs a single optimization step.

        Combines the cross-entropy gradient on the current batch ([x], [y]) with FROMP's
        functional-regularisation gradient on the memorable points of previous contexts,
        then applies an Adam-style update to the flattened parameter vector state['mu'].

        Returns a dict with the training loss split into categories (plus accuracy).
        '''

        defaults = self.defaults
        lr = self.param_groups[0]['lr']
        beta1, beta2 = self.param_groups[0]['betas']
        amsgrad = self.param_groups[0]['amsgrad']
        parameters = self.param_groups[0]['params']
        mu = self.state['mu']

        self.model.train()

        # Calculate normal loss term over current context's data, and compute its gradient
        vector_to_parameters(mu, parameters)
        self.zero_grad()
        logits = self.model.forward(x) if (
            label_sets[context_id] is None
        ) else self.model.forward(x)[:, label_sets[context_id]]
        loss_cur = F.cross_entropy(input=logits, target=y, reduction='mean')
        accuracy = (y == logits.max(1)[1]).sum().item() / x.size(0)
        loss_cur.backward(retain_graph=None)
        grad = _parameters_grads_to_vector(parameters).detach()

        # Calculate the loss term corresponding to the memorable points, and compute & add their gradients
        if context_id > 0:
            self.model.eval()
            kernel_inv = self.state['kernel_inv']
            memorable_logits = self.state['memorable_logits']
            grad_t_sum = torch.zeros_like(grad)
            for t in range(context_id if self.per_context else 1):

                # Select subset of memorable points to use in this batch
                batch_size_per_context = int(np.ceil(x.shape[0] / context_id)) if self.per_context else x.shape[0]
                if self.per_context:
                    memory_samples_per_context = (len(self.model.memory_sets[0])*classes_per_context)
                else:
                    memory_samples_per_context = (len(self.model.memory_sets[0])*classes_per_context*context_id)
                # NOTE(review): the following line is corrupted in this patch -- the block that
                # sub-samples the memorable points, recomputes their logits, and builds
                # [jac_t]/[kinvf_t]/[grad_t] (the jacobian-times-kernel-inverse-times-logit-drift
                # term) appears to have been lost; restore it from the upstream FROMP code
                # before this file is used.
                if batch_size_per_contextj', jac_t, kinvf_t)

                grad_t_sum.add_(grad_t)

            # Weight term corresponding to memorable points by [tau] and add to gradient
            with torch.no_grad():
                grad_t_sum.mul_(defaults['tau'])
                grad.add_(grad_t_sum)

        # Do gradient norm clipping
        clip_norm = self.defaults['grad_clip_norm']
        if clip_norm is not None:
            grad_norm = torch.norm(grad)
            grad_norm = 1.0 if grad_norm < clip_norm else grad_norm/clip_norm
            grad.div_(grad_norm)

        # Given the gradient computed above, prepare for the updated based on Adam algorithm
        exp_avg, exp_avg_sq = self.state['exp_avg'], self.state['exp_avg_sq']
        if amsgrad:
            max_exp_avg_sq = self.state['max_exp_avg_sq']
        self.state['step'] += 1
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        if amsgrad:
            # AMSGrad keeps the running maximum of the second-moment estimate
            torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
            denom = max_exp_avg_sq.sqrt().add_(self.param_groups[0]['eps'])
        else:
            denom = exp_avg_sq.sqrt().add_(self.param_groups[0]['eps'])
        bias_correction1 = 1 - beta1 ** self.state['step']
        bias_correction2 = 1 - beta2 ** self.state['step']
        step_size = lr * math.sqrt(bias_correction2) / bias_correction1

        # Do the parameter update
        mu.addcdiv_(exp_avg, denom, value=-step_size)
        vector_to_parameters(mu, parameters)

        # Return the dictionary with different training-loss split in categories
        return {
            'loss_total': loss_cur.item(),
            'loss_current': loss_cur.item(),
            'pred': loss_cur.item(),
            'accuracy': accuracy if accuracy is not None else 0.,
        }
+
+ #----------------------------------------------------------------------------------------------------------#
diff --git a/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer.py b/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer.py
new file mode 100644
index 000000000..fb4c3f37a
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer.py
@@ -0,0 +1,207 @@
+import abc
+import torch
+from torch import nn
+from torch.nn import functional as F
+from utils import get_data_loader
+import copy
+import numpy as np
+from models.cl.fromp_optimizer import softmax_hessian
+
+
class MemoryBuffer(nn.Module, metaclass=abc.ABCMeta):
    """Abstract module for a classifier that enables it to maintain a memory buffer.

    The buffer is kept as one numpy-array of stored samples per class (in [self.memory_sets]);
    subclasses must implement [feature_extractor].
    """

    def __init__(self):
        super().__init__()

        # List with memory-sets
        self.memory_sets = []  #-> each entry of [self.memory_sets] is an array of N images with shape (N, Ch, H, W)
        self.memory_set_means = []   # per-class feature means ("prototypes"), filled by [classify_with_prototypes]
        self.compute_means = True    # whether the prototypes must be (re)computed before classifying

        # Settings
        self.use_memory_buffer = False
        self.budget_per_class = 100
        self.use_full_capacity = False
        self.sample_selection = 'random'   # 'random', 'herding' or 'fromp'
        self.norm_exemplars = True

        # Atributes defining how to use memory-buffer
        self.prototypes = False  #-> perform classification by using prototypes as class templates
        self.add_buffer = False  #-> add the memory buffer to the training set of the current task


    def _device(self):
        # Device of the model's (first) parameter
        return next(self.parameters()).device

    def _is_on_cuda(self):
        return next(self.parameters()).is_cuda

    @abc.abstractmethod
    def feature_extractor(self, images):
        # Must map [images] to feature space; implemented by the concrete classifier
        pass


    ####----MANAGING THE MEMORY BUFFER----####

    def reduce_memory_sets(self, m):
        '''Shrink every stored memory-set to its first [m] samples.'''
        for y, P_y in enumerate(self.memory_sets):
            self.memory_sets[y] = P_y[:m]

    def construct_memory_set(self, dataset, n, label_set):
        '''Construct memory set of [n] examples from [dataset] using 'herding', 'random' or 'fromp' selection.

        Note that [dataset] should be from specific class; selected sets are added to [self.memory_sets] in order.'''

        # set model to eval()-mode
        mode = self.training
        self.eval()

        n_max = len(dataset)
        memory_set = []

        if self.sample_selection=="fromp":
            first_entry = True

            # Loop over all samples in the dataset
            dataloader = get_data_loader(dataset, 128, cuda=self._is_on_cuda())
            for i, dt in enumerate(dataloader):
                # Compute for each sample its "importance score"
                # (sum over classes of the softmax-Hessian diagonal, as used by FROMP)
                data, _ = dt
                f = self.forward(data.to(self._device()))
                lamb = softmax_hessian(f if label_set is None else f[:,label_set])
                lamb = torch.sum(lamb.cpu(), dim=-1).detach()

                # Store both the samples and their computed scores
                if first_entry:
                    memorable_points = data
                    scores = lamb
                    first_entry = False
                else:
                    memorable_points = torch.cat([memorable_points, data], dim=0)
                    scores = torch.cat([scores, lamb], dim=0)

            # Select the samples with the best (or worst) scores, and store them in the memory buffer
            if len(memorable_points) > n:
                _, indices = scores.sort(descending=True)
                memorable_points = memorable_points[indices[:n]]
            # -add this [memory_set] as a [n]x[ich]x[isz]x[isz] to the list of [memory_sets]
            self.memory_sets.append(memorable_points.numpy())

        elif self.sample_selection=="herding":
            # Compute features for each example in [dataset]
            first_entry = True
            dataloader = get_data_loader(dataset, 128, cuda=self._is_on_cuda())
            for (image_batch, _) in dataloader:
                image_batch = image_batch.to(self._device())
                with torch.no_grad():
                    feature_batch = self.feature_extractor(image_batch).cpu()
                if first_entry:
                    features = feature_batch
                    first_entry = False
                else:
                    features = torch.cat([features, feature_batch], dim=0)
            if self.norm_exemplars:
                features = F.normalize(features, p=2, dim=1)

            # Calculate mean of all features
            class_mean = torch.mean(features, dim=0, keepdim=True)
            if self.norm_exemplars:
                class_mean = F.normalize(class_mean, p=2, dim=1)

            # One by one, select samples so the mean of all selected samples is as close to [class_mean] as possible
            selected_features = torch.zeros_like(features[:min(n, n_max)])
            list_of_selected = []
            for k in range(min(n, n_max)):
                if k>0:
                    # candidate mean if this sample were added to the k already-selected ones
                    selected_samples_sum = torch.sum(selected_features[:k], dim=0).unsqueeze(0)
                    features_means = (features + selected_samples_sum)/(k+1)
                    features_dists = features_means - class_mean
                else:
                    features_dists = features - class_mean
                index_selected = np.argmin(torch.norm(features_dists, p=2, dim=1))
                if index_selected in list_of_selected:
                    raise ValueError("Samples in the memory buffer should not be repeated!!!!")
                list_of_selected.append(index_selected)

                memory_set.append(dataset[index_selected][0].numpy())
                selected_features[k] = copy.deepcopy(features[index_selected])
                # -make sure this example won't be selected again
                features[index_selected] = features[index_selected] + 10000
            # -add this [memory_set] as a [n]x[ich]x[isz]x[isz] to the list of [memory_sets]
            self.memory_sets.append(np.array(memory_set))

        else:
            # Random selection (without replacement)
            indeces_selected = np.random.choice(n_max, size=min(n, n_max), replace=False)
            for k in indeces_selected:
                memory_set.append(dataset[k][0].numpy())
            # -add this [memory_set] as a [n]x[ich]x[isz]x[isz] to the list of [memory_sets]
            self.memory_sets.append(np.array(memory_set))

        # Set mode of model back
        self.train(mode=mode)


    ####----CLASSIFICATION----####

    def classify_with_prototypes(self, x, allowed_classes=None):
        """Classify images by nearest-prototype / nearest-mean-of-exemplars rule (after transform to feature space)

        INPUT: x = of size (bsz,ich,isz,isz) with input image batch
               allowed_classes = None or containing all "active classes" between which should be chosen

        OUTPUT: scores = of size (bsz,n_classes)
        """

        # Set model to eval()-mode
        mode = self.training
        self.eval()

        batch_size = x.size(0)

        # Do the exemplar-means (=prototypes) need to be recomputed?
        if self.compute_means:
            memory_set_means = []  #--> list of 1D-tensors (of size [feature_size]), list is of length [n_classes]
            for P_y in self.memory_sets:
                exemplars = []
                # Collect all 'exemplars' in P_y and extract their features
                for ex in P_y:
                    exemplars.append(torch.from_numpy(ex))
                exemplars = torch.stack(exemplars).to(self._device())
                with torch.no_grad():
                    features = self.feature_extractor(exemplars)
                if self.norm_exemplars:
                    features = F.normalize(features, p=2, dim=1)
                # Calculate their mean and add to list
                mu_y = features.mean(dim=0, keepdim=True)
                if self.norm_exemplars:
                    mu_y = F.normalize(mu_y, p=2, dim=1)
                memory_set_means.append(mu_y.squeeze())  # -> squeeze removes all dimensions of size 1
            # Update model's attributes
            self.memory_set_means = memory_set_means
            self.compute_means = False

        # Reorganize the [memory_set_means]-
        memory_set_means = self.memory_set_means if allowed_classes is None else [
            self.memory_set_means[i] for i in allowed_classes
        ]
        means = torch.stack(memory_set_means)  # (n_classes, feature_size)
        means = torch.stack([means] * batch_size)  # (batch_size, n_classes, feature_size)
        means = means.transpose(1, 2)  # (batch_size, feature_size, n_classes)

        # Extract features for input data (and reorganize)
        with torch.no_grad():
            feature = self.feature_extractor(x)  # (batch_size, feature_size)
        if self.norm_exemplars:
            feature = F.normalize(feature, p=2, dim=1)
        feature = feature.unsqueeze(2)  # (batch_size, feature_size, 1)
        feature = feature.expand_as(means)  # (batch_size, feature_size, n_classes)

        # For each sample in [x], find the (negative) distance of its extracted features to exemplar-mean of each class
        # NOTE(review): the final .squeeze() also drops the batch dimension when batch_size == 1 -- confirm callers
        scores = -(feature-means).pow(2).sum(dim=1).squeeze()  # (batch_size, n_classes)

        # Set mode of model back
        self.train(mode=mode)

        return scores
+
diff --git a/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer_stream.py b/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer_stream.py
new file mode 100644
index 000000000..81578a000
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/cl/memory_buffer_stream.py
@@ -0,0 +1,168 @@
+import abc
+import torch
+from torch import nn
+from torch.nn import functional as F
+import numpy as np
+
+
def reservoir_sampling(samples_so_far, budget):
    '''Reservoir sampling: return the buffer index at which a new sample should be stored (-1 = discard).

    [samples_so_far] is the number of samples encountered before this one; [budget] is the buffer size.
    '''
    # While the buffer still has room, store the sample at the next free slot
    if samples_so_far < budget:
        return samples_so_far

    # Buffer full: the new sample replaces a random old one with probability budget/(samples_so_far+1)
    slot = np.random.randint(0, samples_so_far + 1)
    return slot if slot < budget else -1
+
+
class MemoryBuffer(nn.Module, metaclass=abc.ABCMeta):
    """Abstract module for classifier for maintaining a memory buffer using (global-)class-based reservoir sampling.

    Unlike the per-class buffer variant, a single buffer of [self.budget] samples is shared by all
    classes; subclasses must implement [feature_extractor].
    """

    def __init__(self):
        super().__init__()

        # Settings
        self.use_memory_buffer = False
        self.budget = 100        #-> this is the overall budget (there is no memory buffer per class)
        self.samples_so_far = 0  #-> total number of samples seen so far (drives reservoir sampling)
        self.contexts_so_far = []

        # Settings related to using the memory buffer for nearest-class-mean classification
        self.prototypes = False     #-> whether classification is performed by using prototypes as class templates
        self.compute_means = True   #-> whenever new data is added, class-means must be recomputed
        self.norm_exemplars = True


    def _device(self):
        # Device of the model's (first) parameter
        return next(self.parameters()).device

    def _is_on_cuda(self):
        return next(self.parameters()).is_cuda

    @abc.abstractmethod
    def feature_extractor(self, images, **kwargs):
        # Must map [images] to feature space; implemented by the concrete classifier
        pass

    def initialize_buffer(self, config, return_c=False):
        '''Initialize the memory buffer with tensors of correct shape filled with zeros.

        Args:
            config (dict): must provide 'channels' and 'size' of the images to be stored
            return_c (bool): whether to also allocate a per-sample context-id buffer
        '''
        self.buffer_x = torch.zeros(self.budget, config['channels'], config['size'], config['size'],
                                    dtype=torch.float32, device=self._device())
        self.buffer_y = torch.zeros(self.budget, dtype=torch.int64, device=self._device())
        if return_c:
            self.buffer_c = torch.zeros(self.budget, dtype=torch.int64, device=self._device())

    def add_new_samples(self, x, y, c):
        '''Process the data, and based on reservoir sampling algorithm potentially add to the buffer.'''

        # Whenever new training data is observed, indicate that class-means of stored data should be recomputed
        self.compute_means = True

        # Loop through all the samples contained in [x]
        for index in range(x.shape[0]):
            # -check whether this sample should be added to the memory buffer
            reservoir_index = reservoir_sampling(self.samples_so_far, self.budget)
            # -increase count of number of encountered samples
            self.samples_so_far += 1
            # -if selected, add the sample to the memory buffer
            if reservoir_index >= 0:
                self.buffer_x[reservoir_index] = x[index].to(self._device())
                self.buffer_y[reservoir_index] = y[index].to(self._device())
                if hasattr(self, 'buffer_c'):
                    self.buffer_c[reservoir_index] = c[index].to(self._device())

    def sample_from_buffer(self, size):
        '''Randomly sample [size] samples from the memory buffer (without replacement).'''

        # If more samples are requested than in the buffer, set [size] to number of samples currently in the buffer
        samples_in_buffer = min(self.samples_so_far, self.budget)
        if size > samples_in_buffer:
            size = samples_in_buffer

        # Randomly select samples from the buffer and return them
        selected_indeces = np.random.choice(samples_in_buffer, size=size, replace=False)
        x = self.buffer_x[selected_indeces]
        y = self.buffer_y[selected_indeces]
        c = self.buffer_c[selected_indeces] if hasattr(self, 'buffer_c') else None
        return (x, y, c)

    def keep_track_of_contexts_so_far(self, c):
        '''Append the context-ids in [c] to the running list of observed contexts.'''
        self.contexts_so_far += [item.item() for item in c]

    def sample_contexts(self, size):
        '''Sample [size] context-ids (with replacement) from those observed so far.'''
        if len(self.contexts_so_far) == 0:
            raise AssertionError('No contexts have been observed yet.')
        else:
            return torch.tensor(np.random.choice(self.contexts_so_far, size, replace=True))


    def classify_with_prototypes(self, x, context=None):
        """Classify images by nearest-prototype / nearest-mean-of-exemplars rule (after transform to feature space)

        INPUT: x = of size (bsz,ich,isz,isz) with input image batch
               context = optional context information, passed on to [self.feature_extractor]

        OUTPUT: scores = of size (bsz,n_classes); classes absent from the buffer score [-inf]
        """

        # Set model to eval()-mode
        mode = self.training
        self.eval()

        batch_size = x.size(0)

        # Do the exemplar-means (=prototypes) need to be recomputed?
        if self.compute_means:
            self.possible_classes = []  #--> list of classes present in the memory buffer
            memory_set_means = []  #--> list of 1D-tensors (of size [feature_size]), list is of length [n_classes]
            for y in range(self.classes):
                # NOTE(review): [self.buffer_y] is zero-initialized, so unused slots register as class 0,
                # making class 0 appear "present" until the buffer has filled up -- confirm this is intended
                if y in self.buffer_y:
                    self.possible_classes.append(y)
                    # Collect all stored samples of [y]
                    x_this_y = self.buffer_x[self.buffer_y==y]
                    c_this_y = self.buffer_c[self.buffer_y==y] if hasattr(self, 'buffer_c') else None
                    # Extract their features
                    with torch.no_grad():
                        features = self.feature_extractor(x_this_y, context=c_this_y)
                    if self.norm_exemplars:
                        features = F.normalize(features, p=2, dim=1)
                    # Calculate their mean and add to list
                    mu_y = features.mean(dim=0, keepdim=True)
                    if self.norm_exemplars:
                        mu_y = F.normalize(mu_y, p=2, dim=1)
                    memory_set_means.append(mu_y.squeeze())  # -> squeeze removes all dimensions of size 1
                else:
                    memory_set_means.append(None)  # to indicate that this class is not present in the memory buffer
            # Update model's attributes
            self.memory_set_means = memory_set_means
            self.compute_means = False

        # Reorganize the [memory_set_means]-
        memory_set_means = [self.memory_set_means[i] for i in self.possible_classes]
        means = torch.stack(memory_set_means)  # (n_possible_classes, feature_size)
        means = torch.stack([means] * batch_size)  # (batch_size, n_possible_classes, feature_size)
        means = means.transpose(1, 2)  # (batch_size, feature_size, n_possible_classes)

        # Extract features for input data (and reorganize)
        with torch.no_grad():
            feature = self.feature_extractor(x, context=context)  # (batch_size, feature_size)
        if self.norm_exemplars:
            feature = F.normalize(feature, p=2, dim=1)
        feature = feature.unsqueeze(2)  # (batch_size, feature_size, 1)
        feature = feature.expand_as(means)  # (batch_size, feature_size, n_possible_classes)

        # For each sample in [x], find the (negative) distance of its extracted features to exemplar-mean of each class
        scores = -(feature-means).pow(2).sum(dim=1).squeeze()  # (batch_size, n_possible_classes)

        # For all classes not in the memory, return a score of [-inf]
        all_scores = torch.ones(batch_size, self.classes, device=self._device())*-np.inf
        all_scores[:, self.possible_classes] = scores  # (batch_size, n_classes)

        # Set mode of model back
        self.train(mode=mode)

        # BUG FIX: return the scores over *all* classes, as documented above -- previously the
        # narrower [scores] (restricted to classes present in the buffer) was returned and the
        # [all_scores] tensor computed just above was dead code
        return all_scores
diff --git a/PyTorch/build-in/other/continual-learning/models/classifier.py b/PyTorch/build-in/other/continual-learning/models/classifier.py
new file mode 100644
index 000000000..f1aa485ae
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/classifier.py
@@ -0,0 +1,431 @@
+import torch
+from torch.nn import functional as F
+from models.fc.layers import fc_layer
+from models.fc.nets import MLP
+from models.conv.nets import ConvLayers
+from models.cl.memory_buffer import MemoryBuffer
+from models.cl.continual_learner import ContinualLearner
+from models.utils import loss_functions as lf, modules
+from models.utils.ncl import additive_nearest_kf
+
+
class Classifier(ContinualLearner, MemoryBuffer):
    '''Model for classifying images, "enriched" as ContinualLearner- and MemoryBuffer-object.

    Architecture: [convE] (optional conv layers) -> flatten -> [fcE] (MLP) -> [classifier] (linear head).
    Continual-learning machinery (parameter regularization, replay, A-GEM, XdG-masks) is inherited
    from [ContinualLearner]; exemplar storage is inherited from [MemoryBuffer].
    '''

    def __init__(self, image_size, image_channels, classes,
                 # -conv-layers
                 conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
                 num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
                 # -fc-layers
                 fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=True, fc_nl="relu", fc_gated=False,
                 bias=True, excitability=False, excit_buffer=False, phantom=False):
        '''Set up the model.

        [image_size]      <int> height/width of the (square) input images
        [image_channels]  <int> number of input channels
        [classes]         <int> total number of output units of the classifier head
        (the remaining arguments configure the convolutional and fully-connected layers)
        '''

        # configurations
        super().__init__()
        self.classes = classes
        self.label = "Classifier"
        self.depth = depth
        self.fc_layers = fc_layers
        self.fc_drop = fc_drop
        self.phantom = phantom

        # settings for training
        self.binaryCE = False #-> use binary (instead of multiclass) prediction error
        self.binaryCE_distill = False #-> for classes from previous contexts, use the by the previous model
                                      #   predicted probs as binary targets (only in Class-IL with binaryCE)

        # check whether there is at least 1 fc-layer
        if fc_layers<1:
            raise ValueError("The classifier needs to have at least 1 fully-connected layer.")


        ######------SPECIFY MODEL------######
        #--> convolutional layers
        self.convE = ConvLayers(
            conv_type=conv_type, block_type="basic", num_blocks=num_blocks, image_channels=image_channels,
            depth=depth, start_channels=start_channels, reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
            global_pooling=global_pooling, gated=conv_gated, output="none" if no_fnl else "normal",
        )
        self.flatten = modules.Flatten() # flatten image to 2D-tensor
        #------------------------------calculate input/output-sizes--------------------------------#
        self.conv_out_units = self.convE.out_units(image_size)
        self.conv_out_size = self.convE.out_size(image_size)
        self.conv_out_channels = self.convE.out_channels
        #------------------------------------------------------------------------------------------#
        #--> fully connected hidden layers
        self.fcE = MLP(input_size=self.conv_out_units, output_size=fc_units, layers=fc_layers-1,
                       hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, bias=bias,
                       excitability=excitability, excit_buffer=excit_buffer, gated=fc_gated, phantom=phantom)
        mlp_output_size = fc_units if fc_layers>1 else self.conv_out_units
        #--> classifier
        self.classifier = fc_layer(mlp_output_size, classes, excit_buffer=True, nl='none', drop=fc_drop,
                                   phantom=phantom)

        # Flags whether parts of the network are frozen (so they can be set to evaluation mode during training)
        self.convE.frozen = False
        self.fcE.frozen = False


    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        layer_list = []  # -renamed from [list] so the built-in is not shadowed
        layer_list += self.convE.list_init_layers()
        layer_list += self.fcE.list_init_layers()
        layer_list += self.classifier.list_init_layers()
        return layer_list

    @property
    def name(self):
        '''Compact name summarizing the architecture (e.g., for logging / file-names).'''
        if self.depth>0 and self.fc_layers>1:
            return "{}_{}_c{}".format(self.convE.name, self.fcE.name, self.classes)
        elif self.depth>0:
            return "{}_{}c{}".format(self.convE.name, "drop{}-".format(self.fc_drop) if self.fc_drop>0 else "",
                                     self.classes)
        elif self.fc_layers>1:
            return "{}_c{}".format(self.fcE.name, self.classes)
        else:
            return "i{}_{}c{}".format(self.conv_out_units, "drop{}-".format(self.fc_drop) if self.fc_drop>0 else "",
                                      self.classes)


    def forward(self, x, return_intermediate=False):
        '''Run [x] through the network; if requested, also return a dict with intermediate activations.'''
        hidden = self.convE(x)
        flatten_x = self.flatten(hidden)
        if not return_intermediate:
            final_features = self.fcE(flatten_x)
        else:
            final_features, intermediate = self.fcE(flatten_x, return_intermediate=True)
            intermediate["classifier"] = final_features
        out = self.classifier(final_features)
        return (out, intermediate) if return_intermediate else out


    def feature_extractor(self, images):
        '''Return the activations of the final hidden layer for [images].'''
        return self.fcE(self.flatten(self.convE(images)))

    def classify(self, x, allowed_classes=None, no_prototypes=False):
        '''For input [x] (image/"intermediate" features), return predicted "scores"/"logits" for [allowed_classes].'''
        if self.prototypes and not no_prototypes:
            return self.classify_with_prototypes(x, allowed_classes=allowed_classes)
        else:
            image_features = self.flatten(self.convE(x))
            hE = self.fcE(image_features)
            scores = self.classifier(hE)
            return scores if (allowed_classes is None) else scores[:, allowed_classes]


    def train_a_batch(self, x, y, scores=None, x_=None, y_=None, scores_=None, rnt=0.5, active_classes=None, context=1,
                      **kwargs):
        '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_/scores_]).

        [x]               batch of inputs (could be None, in which case only 'replayed' data is used)
        [y]               batch of corresponding labels
        [scores]          None or 2Dtensor:[batch]x[classes] predicted "scores"/"logits" for [x]
                            NOTE: only to be used for "BCE with distill" (only when scenario=="class")
        [x_]              None or (<list> of) batch of replayed inputs
        [y_]              None or (<list> of) batch of corresponding "replayed" labels
        [scores_]         None or (<list> of) 2Dtensor:[batch]x[classes] predicted "scores"/"logits" for [x_]
        [rnt]             <number> in [0,1], relative importance of new context
        [active_classes]  None or (<list> of) <list> with "active" classes
        [context]         <int> context-ID, with first context labelled as '1' (e.g., for setting context-specific mask)

        Returns a <dict> with the total loss and its components (plus the training-accuracy).
        '''

        # Set model to training-mode
        self.train()
        # -however, if some layers are frozen, they should be set to eval() to prevent batch-norm layers from changing
        if self.convE.frozen:
            self.convE.eval()
        if self.fcE.frozen:
            self.fcE.eval()

        # Reset optimizer
        self.optimizer.zero_grad()

        # Should gradient be computed separately for each context? (needed when a context-mask is combined with replay)
        gradient_per_context = True if ((self.mask_dict is not None) and (x_ is not None)) else False


        ##--(1)-- REPLAYED DATA --##

        if x_ is not None:
            # If there are different predictions per context, [y_] or [scores_] are lists and [x_] must be evaluated
            # separately on each of them (although [x_] could be a list as well!)
            PerContext = (type(y_)==list) if (y_ is not None) else (type(scores_)==list)
            if not PerContext:
                y_ = [y_]
                scores_ = [scores_]
                active_classes = [active_classes] if (active_classes is not None) else None
            n_replays = len(y_) if (y_ is not None) else len(scores_)

            # Prepare lists to store losses for each replay
            loss_replay = [None]*n_replays
            predL_r = [None]*n_replays
            distilL_r = [None]*n_replays

            # Run model (if [x_] is not a list with separate replay per context and there is no context-specific mask)
            if (not type(x_)==list) and (self.mask_dict is None):
                y_hat_all = self(x_)

            # Loop to evaluate predictions on replay according to each previous context
            for replay_id in range(n_replays):

                # -if [x_] is a list with separate replay per context, evaluate model on this context's replay
                if (type(x_)==list) or (self.mask_dict is not None):
                    x_temp_ = x_[replay_id] if type(x_)==list else x_
                    if self.mask_dict is not None:
                        self.apply_XdGmask(context=replay_id+1)
                    y_hat_all = self(x_temp_)

                # -if needed, remove predictions for classes not active in the replayed context
                y_hat = y_hat_all if (active_classes is None) else y_hat_all[:, active_classes[replay_id]]

                # Calculate losses
                if (y_ is not None) and (y_[replay_id] is not None):
                    if self.binaryCE:
                        binary_targets_ = lf.to_one_hot(y_[replay_id].cpu(), y_hat.size(1)).to(y_[replay_id].device)
                        predL_r[replay_id] = F.binary_cross_entropy_with_logits(
                            input=y_hat, target=binary_targets_, reduction='none'
                        ).sum(dim=1).mean() #--> sum over classes, then average over batch
                    else:
                        predL_r[replay_id] = F.cross_entropy(y_hat, y_[replay_id], reduction='mean')
                if (scores_ is not None) and (scores_[replay_id] is not None):
                    # n_classes_to_consider = scores.size(1) #--> with this version, no zeroes are added to [scores]!
                    n_classes_to_consider = y_hat.size(1) #--> zeros will be added to [scores] to make it this size!
                    kd_fn = lf.loss_fn_kd_binary if self.binaryCE else lf.loss_fn_kd
                    distilL_r[replay_id] = kd_fn(scores=y_hat[:, :n_classes_to_consider],
                                                 target_scores=scores_[replay_id], T=self.KD_temp)

                # Weigh losses
                if self.replay_targets=="hard":
                    loss_replay[replay_id] = predL_r[replay_id]
                elif self.replay_targets=="soft":
                    loss_replay[replay_id] = distilL_r[replay_id]

                # If needed, perform backward pass before next context-mask (gradients of all contexts will be accumulated)
                if gradient_per_context:
                    weight = 1. if self.use_replay=='inequality' else (1.-rnt)
                    weighted_replay_loss_this_context = weight * loss_replay[replay_id] / n_replays
                    weighted_replay_loss_this_context.backward()

        # Calculate total replay loss
        loss_replay = None if (x_ is None) else sum(loss_replay)/n_replays
        if (x_ is not None) and self.lwf_weighting and (not self.scenario=='class'):
            loss_replay *= (context-1)

        # If using the replayed loss as an inequality constraint, calculate and store averaged gradient of replayed data
        if self.use_replay in ('inequality', 'both') and x_ is not None:
            # Perform backward pass to calculate gradient of replayed batch (if not yet done)
            if not gradient_per_context:
                if self.use_replay == 'both':
                    loss_replay = (1-rnt) * loss_replay
                loss_replay.backward()
            # Reorganize the gradient of the replayed batch as a single vector
            grad_rep = []
            for p in self.parameters():
                if p.requires_grad:
                    grad_rep.append(p.grad.data.view(-1))
            grad_rep = torch.cat(grad_rep)
            # If gradients are only used as inequality constraint, reset them
            if self.use_replay=='inequality':
                self.optimizer.zero_grad()


        ##--(2)-- CURRENT DATA --##

        if x is not None:
            # If requested, apply correct context-specific mask
            if self.mask_dict is not None:
                self.apply_XdGmask(context=context)

            # Run model
            y_hat = self(x)
            # -if needed, remove predictions for classes not active in the current context
            if active_classes is not None:
                class_entries = active_classes[-1] if type(active_classes[0])==list else active_classes
                y_hat = y_hat[:, class_entries]

            # Calculate prediction loss
            if self.binaryCE:
                # -binary prediction loss
                binary_targets = lf.to_one_hot(y.cpu(), y_hat.size(1)).to(y.device)
                if self.binaryCE_distill and (scores is not None):
                    # -replace targets for previously seen classes with predictions of previous model
                    binary_targets[:,:scores.size(1)] = torch.sigmoid(scores / self.KD_temp)
                predL = None if y is None else F.binary_cross_entropy_with_logits(
                    input=y_hat, target=binary_targets, reduction='none'
                ).sum(dim=1).mean() #--> sum over classes, then average over batch
            else:
                # -multiclass prediction loss
                predL = None if y is None else F.cross_entropy(input=y_hat, target=y, reduction='mean')

            # Weigh losses
            loss_cur = predL

            # Calculate training-accuracy
            accuracy = None if y is None else (y == y_hat.max(1)[1]).sum().item() / x.size(0)
        else:
            accuracy = predL = None
            # -> it's possible there is only "replay" [i.e., for offline with incremental context learning]


        # Combine loss from current and replayed batch
        if x_ is None or self.use_replay=='inequality':
            loss_total = loss_cur
        elif gradient_per_context or self.use_replay=='both':
            # -if backward passes are performed per context (i.e., XdG combined with replay), or when the replayed loss
            #  is both added to the current loss and used as inequality constraint, the gradients of the replayed loss
            #  are already backpropagated and accumulated
            loss_total = rnt*loss_cur
        else:
            if self.lwf_weighting:
                loss_total = loss_replay if (x is None) else loss_cur+loss_replay
            else:
                loss_total = loss_replay if (x is None) else rnt*loss_cur+(1-rnt)*loss_replay


        ##--(3)-- PARAMETER REGULARIZATION LOSSES --##

        # Add a parameter regularization penalty to the loss function
        weight_penalty_loss = None
        if self.weight_penalty:
            if self.importance_weighting=='si':
                weight_penalty_loss = self.surrogate_loss()
            elif self.importance_weighting=='fisher':
                if self.fisher_kfac:
                    weight_penalty_loss = self.ewc_kfac_loss()
                else:
                    weight_penalty_loss = self.ewc_loss()
            loss_total += self.reg_strength * weight_penalty_loss


        ##--(4)-- COMPUTE (AND MANIPULATE) GRADIENTS --##

        # Backpropagate errors (for the part of the loss that has not yet been backpropagated)
        loss_total.backward()

        # A-GEM: check whether gradients to be used align with gradients of replayed data, project them if needed
        if self.use_replay in ('inequality', 'both') and x_ is not None:
            # -reorganize the gradients to be used for the optimization step as single vector
            grad_cur = []
            for p in self.parameters():
                if p.requires_grad:
                    grad_cur.append(p.grad.view(-1))
            grad_cur = torch.cat(grad_cur)
            # -check inequality constraint
            angle = (grad_cur * grad_rep).sum()
            if angle < 0:
                # -if violated, project the current gradient onto the gradient of the replayed batch ...
                length_rep = (grad_rep * grad_rep).sum()
                grad_proj = grad_cur - (angle / (length_rep + self.eps_agem)) * grad_rep
                # -...and replace all the gradients within the model with this projected gradient
                index = 0
                for p in self.parameters():
                    if p.requires_grad:
                        n_param = p.numel()  # number of parameters in [p]
                        p.grad.copy_(grad_proj[index:index + n_param].view_as(p))
                        index += n_param

        # Precondition gradient of current data using projection matrix constructed from parameter importance estimates
        if self.precondition:

            if self.importance_weighting=='fisher' and not self.fisher_kfac:
                if self.context_count>0:
                    #--> scale gradients by inverse diagonal Fisher
                    for gen_params in self.param_list:
                        for n, p in gen_params():
                            if p.requires_grad:
                                # Retrieve prior fisher matrix
                                n = n.replace(".", "__")
                                fisher = getattr(self, "{}_EWC_estimated_fisher".format(n))
                                # Scale loss landscape by inverse prior fisher and divide learning rate by data size
                                scale = (fisher + self.alpha**2) ** (-1)
                                p.grad *= scale # scale lr by inverse prior information
                                if self.data_size is not None:
                                    p.grad /= self.data_size # scale lr by prior (necessary for stability in 1st context)

            elif self.importance_weighting=='fisher' and self.fisher_kfac:
                #--> scale gradients by inverse Fisher kronecker factors
                def scale_grad(label, layer):
                    '''Precondition gradients of [layer] with the damped inverse of its stored KFAC factors.'''
                    assert isinstance(layer, fc_layer)
                    info = self.KFAC_FISHER_INFO[label] # get previous KFAC fisher
                    A = info["A"].to(self._device())
                    G = info["G"].to(self._device())
                    linear = layer.linear
                    if linear.bias is not None:
                        # -append bias-gradient as extra column, so weight & bias are preconditioned jointly
                        g = torch.cat( (linear.weight.grad, linear.bias.grad[..., None]), -1).clone()
                    else:
                        g = layer.linear.weight.grad.clone()

                    assert g.shape[-1] == A.shape[-1]
                    assert g.shape[-2] == G.shape[-2]
                    iA = torch.eye(A.shape[0]).to(self._device()) * (self.alpha)
                    iG = torch.eye(G.shape[0]).to(self._device()) * (self.alpha)

                    As, Gs = additive_nearest_kf({"A": A, "G": G}, {"A": iA, "G": iG}) # kronecker sums
                    Ainv = torch.inverse(As)
                    Ginv = torch.inverse(Gs)

                    scaled_g = Ginv @ g @ Ainv
                    if linear.bias is not None:
                        # -the last column of [scaled_g] is the (appended) bias-part, the rest is the weight-part
                        linear.weight.grad = scaled_g[..., 0:-1].detach() / self.data_size
                        linear.bias.grad = scaled_g[..., -1].detach() / self.data_size
                    else:
                        # BUGFIX: without a bias no column was appended, so the full [scaled_g] is the weight-
                        # gradient; the old slice [..., 0:-1, :] wrongly dropped the last output-unit's row
                        linear.weight.grad = scaled_g.detach() / self.data_size

                    # make sure to reset all phantom to have no zeros
                    if not hasattr(layer, 'phantom'):
                        raise ValueError(f"Layer {label} does not have phantom parameters")
                    # make sure phantom stays zero
                    layer.phantom.grad.zero_()
                    layer.phantom.data.zero_()

                scale_grad("classifier", self.classifier)
                for i in range(1, self.fcE.layers + 1):
                    label = f"fcLayer{i}"
                    scale_grad(label, getattr(self.fcE, label))

            elif self.importance_weighting=='owm' and context>1:
                def scale_grad(label, layer):
                    '''Precondition gradients of [layer] with the damped inverse of its stored input-factor [A].'''
                    info = self.KFAC_FISHER_INFO[label] # get previous KFAC fisher
                    A = info['A'].to(self._device())

                    linear = layer.linear
                    if linear.bias is not None:
                        g = torch.cat((linear.weight.grad, linear.bias.grad[..., None]), -1).clone()
                    else:
                        g = layer.linear.weight.grad.clone()

                    assert (g.shape[-1] == A.shape[-1])
                    iA = torch.eye(A.shape[0]).to(self._device()) # * (self.alpha)
                    As = A / self.alpha + iA
                    Ainv = torch.inverse(As)
                    scaled_g = g @ Ainv

                    if linear.bias is not None:
                        linear.weight.grad = scaled_g[..., 0:-1].detach()
                        linear.bias.grad = scaled_g[..., -1].detach()
                    else:
                        # BUGFIX: as above, the full [scaled_g] is the weight-gradient when there is no bias
                        linear.weight.grad = scaled_g.detach()

                scale_grad('classifier', self.classifier)
                for i in range(1, self.fcE.layers + 1):
                    label = f"fcLayer{i}"
                    scale_grad(label, getattr(self.fcE, label))


        ##--(5)-- TAKE THE OPTIMIZATION STEP --##
        self.optimizer.step()


        # Return the dictionary with different training-loss split in categories
        return {
            'loss_total': loss_total.item(),
            'loss_current': loss_cur.item() if x is not None else 0,
            'loss_replay': loss_replay.item() if (loss_replay is not None) and (x is not None) else 0,
            'pred': predL.item() if predL is not None else 0,
            'pred_r': sum(predL_r).item()/n_replays if (x_ is not None and predL_r[0] is not None) else 0,
            'distil_r': sum(distilL_r).item()/n_replays if (x_ is not None and distilL_r[0] is not None) else 0,
            'param_reg': weight_penalty_loss.item() if weight_penalty_loss is not None else 0,
            'accuracy': accuracy if accuracy is not None else 0.,
        }
+
diff --git a/PyTorch/build-in/other/continual-learning/models/classifier_stream.py b/PyTorch/build-in/other/continual-learning/models/classifier_stream.py
new file mode 100644
index 000000000..097d5515d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/classifier_stream.py
@@ -0,0 +1,312 @@
+import numpy as np
+import torch
+from torch.nn import functional as F
+from models.fc.layers import fc_layer, fc_multihead_layer
+from models.fc.nets import MLP, MLP_gates
+from models.conv.nets import ConvLayers
+from models.cl.memory_buffer_stream import MemoryBuffer
+from models.cl.continual_learner import ContinualLearner
+from models.utils import loss_functions as lf, modules
+
+
class Classifier(ContinualLearner, MemoryBuffer):
    '''Model for classifying images, "enriched" as ContinualLearner- and MemoryBuffer-object.

    Variant of the standard classifier for the "stream"-setting: context-ID information can be used
    through XdG-style gates in the fully-connected layers (if [xdg_prob]>0) and/or through a
    context-specific multi-headed output layer (if [multihead]).
    '''

    def __init__(self, image_size, image_channels, classes,
                 # -conv-layers
                 conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
                 num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
                 # -fc-layers
                 fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=True, fc_nl="relu", fc_gated=False,
                 bias=True, excitability=False, excit_buffer=False, phantom=False,
                 # -how to use context-ID?
                 xdg_prob=0., n_contexts=5, multihead=False, device='cpu'):
        '''Set up the model.

        [image_size]      <int> height/width of the (square) input images
        [image_channels]  <int> number of input channels
        [classes]         <int> total number of output units of the classifier head
        [xdg_prob]        <float> if >0, proportion of fc-units gated per context (XdG)
        [n_contexts]      <int> number of contexts (sets size of gate-/head-selection vectors)
        [multihead]       <bool> whether to use a context-specific multi-headed output layer
        (the remaining arguments configure the convolutional and fully-connected layers)
        '''

        # configurations
        super().__init__()
        self.classes = classes
        self.label = "Classifier"
        self.stream_classifier = True
        self.depth = depth
        self.fc_layers = fc_layers
        self.fc_drop = fc_drop
        self.phantom = phantom

        # for using context information
        self.xdg_prob = xdg_prob
        self.n_contexts = n_contexts
        self.multihead = multihead

        # for consolidation-operations, how often to update the model relative to which stay close
        self.update_every = 1

        # settings for training
        self.binaryCE = False #-> use binary (instead of multiclass) prediction error
        self.binaryCE_distill = False #-> for classes from previous contexts, use the by the previous model
                                      #   predicted probs as binary targets (only in Class-IL with binaryCE)

        # check whether there is at least 1 fc-layer
        if fc_layers<1:
            raise ValueError("The classifier needs to have at least 1 fully-connected layer.")


        ######------SPECIFY MODEL------######
        #--> convolutional layers
        self.convE = ConvLayers(
            conv_type=conv_type, block_type="basic", num_blocks=num_blocks, image_channels=image_channels,
            depth=depth, start_channels=start_channels, reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
            global_pooling=global_pooling, gated=conv_gated, output="none" if no_fnl else "normal",
        )
        self.flatten = modules.Flatten() # flatten image to 2D-tensor
        #------------------------------calculate input/output-sizes--------------------------------#
        self.conv_out_units = self.convE.out_units(image_size)
        self.conv_out_size = self.convE.out_size(image_size)
        self.conv_out_channels = self.convE.out_channels
        #------------------------------------------------------------------------------------------#
        #--> fully connected hidden layers (with context-gates, if XdG is used)
        if self.xdg_prob>0.:
            self.fcE = MLP_gates(input_size=self.conv_out_units, output_size=fc_units, layers=fc_layers-1,
                                 hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, bias=bias,
                                 excitability=excitability, excit_buffer=excit_buffer,
                                 gate_size=n_contexts, gating_prop=xdg_prob, final_gate=True, device=device)
        else:
            self.fcE = MLP(input_size=self.conv_out_units, output_size=fc_units, layers=fc_layers-1,
                           hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, bias=bias,
                           excitability=excitability, excit_buffer=excit_buffer, gated=fc_gated, phantom=phantom)
        mlp_output_size = fc_units if fc_layers>1 else self.conv_out_units

        #--> classifier (context-specific multi-head, if requested)
        if self.multihead:
            self.classifier = fc_multihead_layer(mlp_output_size, classes, n_contexts,
                                                 excit_buffer=True, nl='none', drop=fc_drop, device=device)
        else:
            self.classifier = fc_layer(mlp_output_size, classes, excit_buffer=True, nl='none', drop=fc_drop,
                                       phantom=phantom)

        # Flags whether parts of the network are frozen (so they can be set to evaluation mode during training)
        self.convE.frozen = False
        self.fcE.frozen = False


    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        layer_list = []  # -renamed from [list] so the built-in is not shadowed
        layer_list += self.convE.list_init_layers()
        layer_list += self.fcE.list_init_layers()
        layer_list += self.classifier.list_init_layers()
        return layer_list

    @property
    def name(self):
        '''Compact name summarizing the architecture (e.g., for logging / file-names).'''
        if self.depth>0 and self.fc_layers>1:
            return "{}_{}_c{}".format(self.convE.name, self.fcE.name, self.classes)
        elif self.depth>0:
            return "{}_{}c{}".format(self.convE.name, "drop{}-".format(self.fc_drop) if self.fc_drop>0 else "",
                                     self.classes)
        elif self.fc_layers>1:
            return "{}_c{}".format(self.fcE.name, self.classes)
        else:
            return "i{}_{}c{}".format(self.conv_out_units, "drop{}-".format(self.fc_drop) if self.fc_drop>0 else "",
                                      self.classes)


    def forward(self, x, context=None):
        '''Run [x] through the network, optionally conditioned on [context] (int-IDs or per-context probabilities).'''
        # -if needed, convert [context] to one-hot vector
        # NOTE(review): if [xdg_prob]>0 or [multihead] while [context] is None (or already 2D), [context_one_hot]
        #   is never assigned and the lines below raise a NameError; callers appear expected to always
        #   provide [context] as int-IDs in those settings -- confirm against call-sites
        if (self.xdg_prob>0. or self.multihead) and (context is not None) and (type(context)==np.ndarray or context.dim()<2):
            context_one_hot = lf.to_one_hot(context, classes=self.n_contexts, device=self._device())

        hidden = self.convE(x)
        flatten_x = self.flatten(hidden)
        final_features = self.fcE(flatten_x, context_one_hot) if self.xdg_prob>0. else self.fcE(flatten_x)
        out = self.classifier(final_features, context_one_hot) if self.multihead else self.classifier(final_features)
        return out


    def feature_extractor(self, images, context=None):
        '''Return the activations of the final hidden layer for [images], optionally conditioned on [context].'''
        # -if needed, convert [context] to one-hot vector
        if (self.xdg_prob>0. or self.multihead) and (context is not None) and (type(context)==np.ndarray or context.dim()<2):
            context_one_hot = lf.to_one_hot(context, classes=self.n_contexts, device=self._device())

        hidden = self.convE(images)
        flatten_x = self.flatten(hidden)
        final_features = self.fcE(flatten_x, context_one_hot) if self.xdg_prob>0. else self.fcE(flatten_x)
        return final_features


    def classify(self, x, context=None, no_prototypes=False):
        '''For input [x] (image/"intermediate" features), return predicted "scores"/"logits" for [allowed_classes].'''
        if self.prototypes and not no_prototypes:
            return self.classify_with_prototypes(x, context=context)
        else:
            return self.forward(x, context=context)


    def train_a_batch(self, x, y, c=None, x_=None, y_=None, c_=None, scores_=None, rnt=0.5, **kwargs):
        '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_/scores_]).

        [x]        batch of inputs (could be None, in which case only 'replayed' data is used)
        [y]        <1D-tensor> batch of corresponding labels
        [c]        <1D-tensor> or <np.ndarray>; for each batch-element in [x] its context-ID  --OR--
                     <2D-tensor>; for each batch-element in [x] a probability for every context-ID
        [x_]       None or (<list> of) batch of replayed inputs
        [y_]       None or (<list> of) batch of corresponding "replayed" labels
        [c_]       None or context-ID(s) of the replayed inputs (same format as [c])
        [scores_]  None or (<list> of) 2Dtensor:[batch]x[classes] predicted "scores"/"logits" for [x_]
        [rnt]      <number> in [0,1], relative importance of new context

        Returns a <dict> with the total loss and its components (plus the training-accuracy).
        '''

        # Set model to training-mode
        self.train()
        # -however, if some layers are frozen, they should be set to eval() to prevent batch-norm layers from changing
        if self.convE.frozen:
            self.convE.eval()
        if self.fcE.frozen:
            self.fcE.eval()

        # Reset optimizer
        self.optimizer.zero_grad()


        ##--(1)-- REPLAYED DATA --##

        if x_ is not None:
            # Run model
            y_hat = self(x_, c_)

            # Calculate losses
            predL_r, distilL_r = None, None
            if y_ is not None:  # (was "(y_ is not None) and (y_ is not None)" -- redundant duplicate removed)
                if self.binaryCE:
                    binary_targets_ = lf.to_one_hot(y_.cpu(), y_hat.size(1)).to(y_.device)
                    predL_r = F.binary_cross_entropy_with_logits(
                        input=y_hat, target=binary_targets_, reduction='none'
                    ).sum(dim=1).mean() # --> sum over classes, then average over batch
                else:
                    predL_r = F.cross_entropy(y_hat, y_, reduction='mean')
            if scores_ is not None:  # (was "(scores_ is not None) and (scores_ is not None)" -- duplicate removed)
                kd_fn = lf.loss_fn_kd_binary if self.binaryCE else lf.loss_fn_kd
                distilL_r = kd_fn(scores=y_hat, target_scores=scores_, T=self.KD_temp)

            # Weigh losses
            if self.replay_targets == "hard":
                loss_replay = predL_r
            elif self.replay_targets == "soft":
                loss_replay = distilL_r

        # Calculate total replay loss
        loss_replay = None if (x_ is None) else loss_replay

        # If using the replayed loss as an inequality constraint, calculate and store averaged gradient of replayed data
        if self.use_replay in ('inequality', 'both') and x_ is not None:
            # Perform backward pass to calculate gradient of replayed batch (if not yet done)
            if self.use_replay == 'both':
                loss_replay = (1-rnt) * loss_replay
            loss_replay.backward()
            # Reorganize the gradient of the replayed batch as a single vector
            grad_rep = []
            for p in self.parameters():
                if p.requires_grad:
                    grad_rep.append(p.grad.data.view(-1))
            grad_rep = torch.cat(grad_rep)
            # If gradients are only used as inequality constraint, reset them
            if self.use_replay=='inequality':
                self.optimizer.zero_grad()


        ##--(2)-- CURRENT DATA --##

        if x is not None:
            # Run model
            y_hat = self(x, c)

            # Calculate prediction loss
            if self.binaryCE:
                # -binary prediction loss
                binary_targets = lf.to_one_hot(y.cpu(), y_hat.size(1)).to(y.device)
                predL = None if y is None else F.binary_cross_entropy_with_logits(
                    input=y_hat, target=binary_targets, reduction='none'
                ).sum(dim=1).mean() #--> sum over classes, then average over batch
            else:
                # -multiclass prediction loss
                predL = None if y is None else F.cross_entropy(input=y_hat, target=y, reduction='mean')

            # Weigh losses
            loss_cur = predL

            # Calculate training-accuracy
            accuracy = None if y is None else (y == y_hat.max(1)[1]).sum().item() / x.size(0)
        else:
            accuracy = predL = None
            # -> it's possible there is only "replay" [i.e., for offline with incremental context learning]


        # Combine loss from current and replayed batch
        if x_ is None or self.use_replay=='inequality':
            loss_total = loss_cur
        elif self.use_replay=='both':
            # -if the replayed loss is both added to the current loss and used as inequality constraint,
            #  the gradients of the replayed loss are already backpropagated and accumulated
            loss_total = rnt*loss_cur
        else:
            loss_total = loss_replay if (x is None) else rnt*loss_cur+(1-rnt)*loss_replay


        ##--(3)-- PARAMETER REGULARIZATION LOSSES --##

        # Add a parameter regularization penalty to the loss function
        weight_penalty_loss = None
        if self.weight_penalty:
            if self.importance_weighting=='si':
                weight_penalty_loss = self.surrogate_loss()
            elif self.importance_weighting=='fisher':
                if self.fisher_kfac:
                    weight_penalty_loss = self.ewc_kfac_loss()
                else:
                    weight_penalty_loss = self.ewc_loss()
            loss_total += self.reg_strength * weight_penalty_loss


        ##--(4)-- COMPUTE (AND MANIPULATE) GRADIENTS --##

        # Backpropagate errors (for the part of the loss that has not yet been backpropagated)
        loss_total.backward()

        # A-GEM: check whether gradients to be used align with gradients of replayed data, project them if needed
        if self.use_replay in ('inequality', 'both') and x_ is not None:
            # -reorganize the gradients to be used for the optimization step as single vector
            grad_cur = []
            for p in self.parameters():
                if p.requires_grad:
                    grad_cur.append(p.grad.view(-1))
            grad_cur = torch.cat(grad_cur)
            # -check inequality constraint
            angle = (grad_cur * grad_rep).sum()
            if angle < 0:
                # -if violated, project the current gradient onto the gradient of the replayed batch ...
                length_rep = (grad_rep * grad_rep).sum()
                grad_proj = grad_cur - (angle / (length_rep + self.eps_agem)) * grad_rep
                # -...and replace all the gradients within the model with this projected gradient
                index = 0
                for p in self.parameters():
                    if p.requires_grad:
                        n_param = p.numel()  # number of parameters in [p]
                        p.grad.copy_(grad_proj[index:index + n_param].view_as(p))
                        index += n_param


        ##--(5)-- TAKE THE OPTIMIZATION STEP --##
        self.optimizer.step()


        # Return the dictionary with different training-loss split in categories
        return {
            'loss_total': loss_total.item(),
            'loss_current': loss_cur.item() if x is not None else 0,
            'loss_replay': loss_replay.item() if (loss_replay is not None) and (x is not None) else 0,
            'pred': predL.item() if predL is not None else 0,
            'pred_r': predL_r.item() if (x_ is not None and predL_r is not None) else 0,
            'distil_r': distilL_r.item() if (scores_ is not None and distilL_r is not None) else 0,
            'param_reg': weight_penalty_loss.item() if weight_penalty_loss is not None else 0,
            'accuracy': accuracy if accuracy is not None else 0.,
        }
+
diff --git a/PyTorch/build-in/other/continual-learning/models/cond_vae.py b/PyTorch/build-in/other/continual-learning/models/cond_vae.py
new file mode 100644
index 000000000..a33118b15
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/cond_vae.py
@@ -0,0 +1,747 @@
+import numpy as np
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+from models.fc.layers import fc_layer,fc_layer_split,fc_layer_fixed_gates
+from models.fc.nets import MLP,MLP_gates
+from models.conv.nets import ConvLayers,DeconvLayers
+from models.cl.continual_learner import ContinualLearner
+from models.utils import loss_functions as lf, modules
+
+
+class CondVAE(ContinualLearner):
+ """Class for conditional variational auto-encoder (cond-VAE) model."""
+
    def __init__(self, image_size, image_channels, classes,
                 # -conv-layers
                 conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
                 num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
                 # -fc-layers
                 fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=False, fc_nl="relu", fc_gated=False, excit_buffer=False,
                 # -prior
                 prior="standard", z_dim=20, per_class=False, n_modes=1,
                 # -decoder
                 recon_loss='BCE', network_output="sigmoid", deconv_type="standard",
                 dg_gates=False, dg_type="context", dg_prop=0., contexts=5, scenario="task", device='cuda',
                 # -classifier
                 classifier=True, **kwargs):
        '''Set up the conditional VAE.

        [image_size]      <int> height/width of the (square) input images
        [image_channels]  <int> number of input channels
        [classes]         <int> total number of classes
        [prior]           <str> "standard" (N(0,I)) or "GMM" (learnable Gaussian-mixture prior)
        [per_class]       <bool> if True, each class gets [n_modes] own modes in the GMM-prior
        [dg_gates]        <bool> whether the decoder uses class-/context-specific gates
        [dg_type]         <str> "context" or "class"; what the decoder-gates are conditioned on
        [classifier]      <bool> whether to attach a classification head to the encoder'''

        # Set configurations for setting up the model
        super().__init__()
        self.label = "CondVAE"
        self.image_size = image_size
        self.image_channels = image_channels
        self.classes = classes
        self.fc_layers = fc_layers
        self.z_dim = z_dim
        self.fc_units = fc_units
        self.fc_drop = fc_drop
        self.depth = depth
        # -type of loss to be used for reconstruction
        self.recon_loss = recon_loss # options: BCE|MSE
        self.network_output = network_output
        # -settings for class- or context-specific gates in fully-connected hidden layers of decoder
        self.dg_type = dg_type
        self.dg_prop = dg_prop
        # -gates are only active when a positive gating-proportion is given
        self.dg_gates = dg_gates if (dg_prop is not None) and dg_prop>0. else False
        # -size of the one-hot gate-input vector (0 when gates are unused)
        self.gate_size = (contexts if dg_type=="context" else classes) if self.dg_gates else 0
        self.scenario = scenario

        # Optimizer (needs to be set before training starts)
        self.optimizer = None
        self.optim_list = []

        # Prior-related parameters
        self.prior = prior
        self.per_class = per_class
        # -with a per-class prior, every class owns [n_modes] modes
        self.n_modes = n_modes*classes if self.per_class else n_modes
        self.modes_per_class = n_modes if self.per_class else None

        # Weights of different components of the loss function
        self.lamda_rcl = 1.
        self.lamda_vl = 1.
        self.lamda_pl = 1. if classifier else 0.

        self.average = True #--> makes that [reconL] and [variatL] are both divided by number of input-pixels

        # Check whether there is at least 1 fc-layer
        if fc_layers<1:
            raise ValueError("VAE cannot have 0 fully-connected layers!")


        ######------SPECIFY MODEL------######

        ##>----Encoder (= q[z|x])----<##
        self.convE = ConvLayers(conv_type=conv_type, block_type="basic", num_blocks=num_blocks,
                                image_channels=image_channels, depth=self.depth, start_channels=start_channels,
                                reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
                                output="none" if no_fnl else "normal", global_pooling=global_pooling,
                                gated=conv_gated)
        # -flatten image to 2D-tensor
        self.flatten = modules.Flatten()
        #------------------------------calculate input/output-sizes--------------------------------#
        self.conv_out_units = self.convE.out_units(image_size)
        self.conv_out_size = self.convE.out_size(image_size)
        self.conv_out_channels = self.convE.out_channels
        #------------------------------------------------------------------------------------------#
        # -fully connected hidden layers
        self.fcE = MLP(input_size=self.conv_out_units, output_size=fc_units, layers=fc_layers-1,
                       hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, gated=fc_gated,
                       excit_buffer=excit_buffer)
        mlp_output_size = fc_units if fc_layers > 1 else self.conv_out_units
        # -to z
        self.toZ = fc_layer_split(mlp_output_size, z_dim, nl_mean='none', nl_logvar='none')

        ##>----Classifier----<##
        if classifier:
            self.classifier = fc_layer(mlp_output_size, classes, excit_buffer=True, nl='none')

        ##>----Decoder (= p[x|z])----<##
        out_nl = True if fc_layers > 1 else (True if (self.depth > 0 and not no_fnl) else False)
        real_h_dim_down = fc_units if fc_layers > 1 else self.convE.out_units(image_size, ignore_gp=True)
        # -first decoder-layer is gated only when decoder-gates are requested
        if self.dg_gates:
            self.fromZ = fc_layer_fixed_gates(
                z_dim, real_h_dim_down, batch_norm=(out_nl and fc_bn), nl=fc_nl if out_nl else "none",
                gate_size=self.gate_size, gating_prop=dg_prop, device=device
            )
        else:
            self.fromZ = fc_layer(z_dim, real_h_dim_down, batch_norm=(out_nl and fc_bn), nl=fc_nl if out_nl else "none")
        # -> if 'gp' is used in forward pass, size of first/final hidden layer differs between forward and backward pass
        if self.dg_gates:
            self.fcD = MLP_gates(input_size=fc_units, output_size=self.convE.out_units(image_size, ignore_gp=True),
                                 layers=fc_layers-1, hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
                                 gate_size=self.gate_size, gating_prop=dg_prop, device=device,
                                 output=self.network_output if self.depth==0 else 'normal')
        else:
            self.fcD = MLP(input_size=fc_units, output_size=self.convE.out_units(image_size, ignore_gp=True),
                           layers=fc_layers-1, hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
                           gated=fc_gated, output=self.network_output if self.depth==0 else 'normal')
        # to image-shape
        self.to_image = modules.Reshape(image_channels=self.convE.out_channels if self.depth>0 else image_channels)
        # through deconv-layers
        self.convD = DeconvLayers(
            image_channels=image_channels, final_channels=start_channels, depth=self.depth,
            reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl, gated=conv_gated,
            output=self.network_output, deconv_type=deconv_type,
        )

        ##>----Prior----<##
        # -if using the GMM-prior, add its parameters
        if self.prior=="GMM":
            # -create
            self.z_class_means = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
            self.z_class_logvars = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
            # -initialize
            self.z_class_means.data.normal_()
            self.z_class_logvars.data.normal_()

        # Flags whether parts of the network are frozen (so they can be set to evaluation mode during training)
        self.convE.frozen = False
        self.fcE.frozen = False
+
+
+
+ ##------ NAMES --------##
+
+ def get_name(self):
+ convE_label = "{}--".format(self.convE.name) if self.depth>0 else ""
+ fcE_label = "{}--".format(self.fcE.name) if self.fc_layers>1 else "{}{}-".format("h" if self.depth>0 else "i",
+ self.conv_out_units)
+ z_label = "z{}{}".format(self.z_dim, "" if self.prior=="standard" else "-{}{}{}".format(
+ self.prior, self.n_modes, "pc" if self.per_class else ""
+ ))
+ class_label = "-c{}".format(self.classes) if hasattr(self, "classifier") else ""
+ decoder_label = "_{}{}".format("tg" if self.dg_type=="context" else "cg", self.dg_prop) if self.dg_gates else ""
+ return "{}={}{}{}{}{}".format(self.label, convE_label, fcE_label, z_label, class_label, decoder_label)
+
    @property
    def name(self):
        '''Shorthand property for the descriptive model-name produced by [get_name].'''
        return self.get_name()
+
+
+
+ ##------ LAYERS --------##
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ list += self.convE.list_init_layers()
+ list += self.fcE.list_init_layers()
+ if hasattr(self, "classifier"):
+ list += self.classifier.list_init_layers()
+ list += self.toZ.list_init_layers()
+ list += self.fromZ.list_init_layers()
+ list += self.fcD.list_init_layers()
+ list += self.convD.list_init_layers()
+ return list
+
    def layer_info(self):
        '''Return list with shape of all hidden layers.'''
        # create list with hidden convolutional layers
        layer_list = self.convE.layer_info(image_size=self.image_size)
        # add output of final convolutional layer (if there was at least one conv-layer and there's fc-layers after)
        if (self.fc_layers>0 and self.depth>0):
            layer_list.append([self.conv_out_channels, self.conv_out_size, self.conv_out_size])
        # add layers of the MLP
        if self.fc_layers>1:
            for layer_id in range(1, self.fc_layers):
                # NOTE(review): [self.fc_layer_sizes] is not assigned anywhere in this class's __init__,
                # so this branch looks like it would raise AttributeError whenever fc_layers>1 --
                # confirm against the implementation this class was derived from.
                layer_list.append([self.fc_layer_sizes[layer_id]])
        return layer_list
+
+
+
+ ##------ FORWARD FUNCTIONS --------##
+
+ def encode(self, x):
+ '''Pass input through feed-forward connections, to get [z_mean], [z_logvar] and [hE].'''
+ # Forward-pass through conv-layers
+ hidden_x = self.convE(x)
+ image_features = self.flatten(hidden_x)
+ # Forward-pass through fc-layers
+ hE = self.fcE(image_features)
+ # Get parameters for reparametrization
+ (z_mean, z_logvar) = self.toZ(hE)
+ return z_mean, z_logvar, hE, hidden_x
+
+ def classify(self, x, allowed_classes=None, **kwargs):
+ '''For input [x] (image/"intermediate" features), return predicted "scores"/"logits" for [allowed_classes].'''
+ if hasattr(self, "classifier"):
+ image_features = self.flatten(self.convE(x))
+ hE = self.fcE(image_features)
+ scores = self.classifier(hE)
+ return scores if (allowed_classes is None) else scores[:, allowed_classes]
+ else:
+ return None
+
+ def reparameterize(self, mu, logvar):
+ '''Perform "reparametrization trick" to make these stochastic variables differentiable.'''
+ std = logvar.mul(0.5).exp_()
+ eps = std.new(std.size()).normal_()#.requires_grad_()
+ return eps.mul(std).add_(mu)
+
+ def decode(self, z, gate_input=None):
+ '''Decode latent variable activations.
+
+ INPUT: - [z] <2D-tensor>; latent variables to be decoded
+ - [gate_input] <1D-tensor> or ; for each batch-element in [x] its class-/context-ID --OR--
+ <2D-tensor>; for each batch-element in [x] a probability for every class-/context-ID
+
+ OUTPUT: - [image_recon] <4D-tensor>'''
+
+ # -if needed, convert [gate_input] to one-hot vector
+ if self.dg_gates and (gate_input is not None) and (type(gate_input)==np.ndarray or gate_input.dim()<2):
+ gate_input = lf.to_one_hot(gate_input, classes=self.gate_size, device=self._device())
+
+ # -put inputs through decoder
+ hD = self.fromZ(z, gate_input=gate_input) if self.dg_gates else self.fromZ(z)
+ image_features = self.fcD(hD, gate_input=gate_input) if self.dg_gates else self.fcD(hD)
+ image_recon = self.convD(self.to_image(image_features))
+ return image_recon
+
+ def forward(self, x, gate_input=None, full=False, reparameterize=True, **kwargs):
+ '''Forward function to propagate [x] through the encoder, reparametrization and decoder.
+
+ Input: - [x] <4D-tensor> of shape [batch_size]x[channels]x[image_size]x[image_size]
+ - [gate_input] <1D-tensor> or ; for each batch-element in [x] its class-ID (eg, [y]) ---OR---
+ <2D-tensor>; for each batch-element in [x] a probability for each class-ID (eg, [y_hat])
+
+ If [full] is True, output should be a consisting of:
+ - [x_recon] <4D-tensor> reconstructed image (features) in same shape as [x] (or 2 of those: mean & logvar)
+ - [y_hat] <2D-tensor> with predicted logits for each class
+ - [mu] <2D-tensor> with either [z] or the estimated mean of [z]
+ - [logvar] None or <2D-tensor> estimated log(SD^2) of [z]
+ - [z] <2D-tensor> reparameterized [z] used for reconstruction
+ If [full] is False, output is the reconstructed image (i.e., [x_recon]).
+ '''
+ # -encode (forward), reparameterize and decode (backward)
+ mu, logvar, hE, hidden_x = self.encode(x)
+ z = self.reparameterize(mu, logvar) if reparameterize else mu
+ gate_input = gate_input if self.dg_gates else None
+ x_recon = self.decode(z, gate_input=gate_input)
+ # -classify
+ y_hat = self.classifier(hE) if hasattr(self, "classifier") else None
+ # -return
+ return (x_recon, y_hat, mu, logvar, z) if full else x_recon
+
+ def feature_extractor(self, images):
+ '''Extract "final features" (i.e., after both conv- and fc-layers of forward pass) from provided images.'''
+ return self.fcE(self.flatten(self.convE(images)))
+
+
+
+ ##------ SAMPLE FUNCTIONS --------##
+
    def sample(self, size, allowed_classes=None, class_probs=None, sample_mode=None, allowed_domains=None,
               only_x=True, **kwargs):
        '''Generate [size] samples from the model. Outputs are tensors (not "requiring grad"), on the same
        device as the model.

        INPUT:  - [allowed_classes]  None or <list> of [class_ids] from which to sample
                - [class_probs]      None or, per class in [allowed_classes], the probability it is sampled
                - [sample_mode]      None or <int>; sample from this specific mode of the [z]-distribution
                                     (overwrites [allowed_classes])
                - [allowed_domains]  None or <list> of [context_ids] allowed for the 'context-gates' (if used)
                                     NOTE: currently only relevant if [scenario]=="domain"

        OUTPUT: - [X]             <4D-tensor> generated images / image-features
                - [y_used]        labels of classes intended to be sampled
                - [context_used]  labels of domains/contexts used for context-gates in decoder'''

        # set model to eval()-mode
        self.eval()

        # pick for each sample the prior-mode to be used
        if self.prior=="GMM":
            if sample_mode is None:
                if (allowed_classes is None and class_probs is None) or (not self.per_class):
                    # -randomly sample modes from all possible modes (and find their corresponding class, if applicable)
                    sampled_modes = np.random.randint(0, self.n_modes, size)
                    y_used = np.array(
                        [int(mode / self.modes_per_class) for mode in sampled_modes]
                    ) if self.per_class else None
                else:
                    if allowed_classes is None:
                        allowed_classes = [i for i in range(len(class_probs))]
                    # -sample from modes belonging to [allowed_classes], possibly weighted according to [class_probs]
                    allowed_modes = []     # -collect all allowed modes
                    unweighted_probs = []  # -collect unweighted sample-probabilities of those modes
                    for index, class_id in enumerate(allowed_classes):
                        allowed_modes += list(range(class_id * self.modes_per_class, (class_id+1)*self.modes_per_class))
                        if class_probs is not None:
                            # -each mode of a class inherits that class's (unnormalized) probability
                            for i in range(self.modes_per_class):
                                unweighted_probs.append(class_probs[index].item())
                    # -normalize the per-mode probabilities so they sum to one
                    mode_probs = None if class_probs is None else [p / sum(unweighted_probs) for p in unweighted_probs]
                    sampled_modes = np.random.choice(allowed_modes, size, p=mode_probs, replace=True)
                    y_used = np.array([int(mode / self.modes_per_class) for mode in sampled_modes])
            else:
                # -always sample from the provided mode
                sampled_modes = np.repeat(sample_mode, size)
                y_used = np.repeat(int(sample_mode / self.modes_per_class), size) if self.per_class else None
        else:
            y_used = None

        # sample z
        if self.prior=="GMM":
            prior_means = self.z_class_means
            prior_logvars = self.z_class_logvars
            # -for each sample to be generated, select the previously sampled mode
            z_means = prior_means[sampled_modes, :]
            z_logvars = prior_logvars[sampled_modes, :]
            with torch.no_grad():
                z = self.reparameterize(z_means, z_logvars)
        else:
            # -standard prior: z ~ N(0, I)
            z = torch.randn(size, self.z_dim).to(self._device())

        # if no classes are selected yet, but they are needed for the "decoder-gates", select classes to be sampled
        if (y_used is None) and (self.dg_gates):
            if allowed_classes is None and class_probs is None:
                y_used = np.random.randint(0, self.classes, size)
            else:
                if allowed_classes is None:
                    allowed_classes = [i for i in range(len(class_probs))]
                y_used = np.random.choice(allowed_classes, size, p=class_probs, replace=True)
        # if gates in the decoder are "context-gates", convert [y_used] to corresponding contexts (if Task-/Class-IL)
        # or simply sample which contexts should be generated (if Domain-IL) from [allowed_domains]
        context_used = None
        if self.dg_gates and self.dg_type=="context":
            if self.scenario=="domain":
                context_used = np.random.randint(0,self.gate_size,size) if (
                    allowed_domains is None
                ) else np.random.choice(allowed_domains, size, replace=True)
            else:
                # -contexts are contiguous blocks of classes; map each class-ID to its context-ID
                classes_per_context = int(self.classes/self.gate_size)
                context_used = np.array([int(class_id / classes_per_context) for class_id in y_used])

        # decode z into image X
        with torch.no_grad():
            X = self.decode(z,
                            gate_input=(context_used if self.dg_type=="context" else y_used) if self.dg_gates else None)

        # return samples as [batch_size]x[channels]x[image_size]x[image_size] tensor, plus requested additional info
        return X if only_x else (X, y_used, context_used)
+
+
+
+ ##------ LOSS FUNCTIONS --------##
+
+ def calculate_recon_loss(self, x, x_recon, average=False):
+ '''Calculate reconstruction loss for each element in the batch.
+
+ INPUT: - [x] with original input (1st dimension (ie, dim=0) is "batch-dimension")
+ - [x_recon] (tuple of 2x) with reconstructed input in same shape as [x]
+ - [average] , if True, loss is average over all pixels; otherwise it is summed
+
+ OUTPUT: - [reconL] <1D-tensor> of length [batch_size]'''
+
+ batch_size = x.size(0)
+ if self.recon_loss=="MSE":
+ # reconL = F.mse_loss(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1), reduction='none')
+ # reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
+ reconL = -lf.log_Normal_standard(x=x, mean=x_recon, average=average, dim=-1)
+ elif self.recon_loss=="BCE":
+ reconL = F.binary_cross_entropy(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1),
+ reduction='none')
+ reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
+ else:
+ raise NotImplementedError("Wrong choice for type of reconstruction-loss!")
+ # --> if [average]=True, reconstruction loss is averaged over all pixels/elements (otherwise it is summed)
+ # (averaging over all elements in the batch will be done later)
+ return reconL
+
+
    def calculate_log_p_z(self, z, y=None, y_prob=None, allowed_classes=None):
        '''Calculate log-likelihood of sampled [z] under the prior distribution.

        INPUT:  - [z]     <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")

        OPTIONS THAT ARE RELEVANT ONLY IF self.per_class IS TRUE:
            - [y]               None or <1D-tensor> with target-classes (as integers)
            - [y_prob]          None or <2D-tensor> with probabilities for each class (in [allowed_classes])
            - [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)

        OUTPUT: - [log_p_z]   <1D-tensor> of length [batch_size]'''

        if self.prior == "standard":
            log_p_z = lf.log_Normal_standard(z, average=False, dim=1)  # [batch_size]

        if self.prior == "GMM":
            ## Get [means] and [logvars] of all (possible) modes
            allowed_modes = list(range(self.n_modes))
            # -if we don't use the specific modes of a target, we could select modes based on list of classes
            if (y is None) and (allowed_classes is not None) and self.per_class:
                allowed_modes = []
                for class_id in allowed_classes:
                    allowed_modes += list(range(class_id * self.modes_per_class, (class_id + 1) * self.modes_per_class))
            # -calculate/retrieve the means and logvars for the selected modes
            prior_means = self.z_class_means[allowed_modes, :]
            prior_logvars = self.z_class_logvars[allowed_modes, :]
            # -rearrange / select for each batch prior-modes to be used
            z_expand = z.unsqueeze(1)                 # [batch_size] x 1 x [z_dim]
            means = prior_means.unsqueeze(0)          # 1 x [n_modes] x [z_dim]
            logvars = prior_logvars.unsqueeze(0)      # 1 x [n_modes] x [z_dim]

            ## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
            # -number of modes each batch-element is evaluated against (needed for the mixture-weight)
            n_modes = self.modes_per_class if (
                ((y is not None) or (y_prob is not None)) and self.per_class
            ) else len(allowed_modes)
            a = lf.log_Normal_diag(z_expand, mean=means, log_var=logvars, average=False, dim=2) - math.log(n_modes)
            # --> for each element in batch, calculate log-likelihood for all pseudoinputs: [batch_size] x [n_modes]
            if (y is not None) and self.per_class:
                modes_list = list()
                for i in range(len(y)):
                    target = y[i].item()
                    modes_list.append(list(range(target * self.modes_per_class, (target + 1) * self.modes_per_class)))
                modes_tensor = torch.LongTensor(modes_list).to(self._device())
                a = a.gather(dim=1, index=modes_tensor)
                # --> reduce [a] to size [batch_size]x[modes_per_class] (ie, per batch only keep modes of [y])
                #     but within the batch, elements can have different [y], so this reduction couldn't be done before
            a_max, _ = torch.max(a, dim=1)  # [batch_size]
            # --> for each element in batch, take highest log-likelihood over all pseudoinputs
            #     this is calculated and used to avoid underflow in the below computation (log-sum-exp trick)
            a_exp = torch.exp(a - a_max.unsqueeze(1))  # [batch_size] x [n_modes]
            if (y is None) and (y_prob is not None) and self.per_class:
                batch_size = y_prob.size(0)
                y_prob = y_prob.view(-1, 1).repeat(1, self.modes_per_class).view(batch_size, -1)
                # ----> extend probabilities per class to probabilities per mode; y_prob: [batch_size] x [n_modes]
                a_logsum = torch.log(torch.clamp(torch.sum(y_prob * a_exp, dim=1), min=1e-40))
            else:
                a_logsum = torch.log(torch.clamp(torch.sum(a_exp, dim=1), min=1e-40))  # -> sum over modes: [batch_size]
            log_p_z = a_logsum + a_max  # [batch_size]

        return log_p_z
+
+
    def calculate_variat_loss(self, z, mu, logvar, y=None, y_prob=None, allowed_classes=None):
        '''Calculate the variational (KL-divergence) loss for each element in the batch.

        INPUT:  - [z]        <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")
                - [mu]       <2D-tensor> by encoder predicted mean for [z]
                - [logvar]   <2D-tensor> by encoder predicted logvar for [z]

        OPTIONS THAT ARE RELEVANT ONLY IF self.per_class IS TRUE:
            - [y]               None or <1D-tensor> with target-classes (as integers)
            - [y_prob]          None or <2D-tensor> with probabilities for each class (in [allowed_classes])
            - [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)

        OUTPUT: - [variatL]   <1D-tensor> of length [batch_size]'''

        if self.prior == "standard":
            # --> calculate analytically
            # ---- see Appendix B from: Kingma & Welling (2014) Auto-Encoding Variational Bayes, ICLR ----#
            variatL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)

        elif self.prior=="GMM":
            # --> calculate "by estimation" (single-sample Monte-Carlo estimate of the KL-divergence)

            ## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
            log_p_z = self.calculate_log_p_z(z, y=y, y_prob=y_prob, allowed_classes=allowed_classes)
            # ----->  log_p_z: [batch_size]

            ## Calculate "log_q_z_x" (entropy of "reparameterized" [z] given [x])
            log_q_z_x = lf.log_Normal_diag(z, mean=mu, log_var=logvar, average=False, dim=1)
            # ----->  mu: [batch_size] x [z_dim]; logvar: [batch_size] x [z_dim]; z: [batch_size] x [z_dim]
            # ----->  log_q_z_x: [batch_size]

            ## Combine
            variatL = -(log_p_z - log_q_z_x)

        return variatL
+
+
    def loss_function(self, x, y, x_recon, y_hat, scores, mu, z, logvar=None, allowed_classes=None, batch_weights=None):
        '''Calculate and return various losses that could be used for training and/or evaluating the model.

        INPUT:  - [x]           <4D-tensor> original image
                - [y]           <1D-tensor> with target-classes (as integers, corresponding to [allowed_classes])
                - [x_recon]     (tuple of 2x) <4D-tensor> reconstructed image in same shape as [x]
                - [y_hat]       <2D-tensor> with predicted "logits" for each class (corresponding to [allowed_classes])
                - [scores]      <2D-tensor> with target "logits" for each class (corresponding to [allowed_classes])
                - [mu]          <2D-tensor> with either [z] or the estimated mean of [z]
                - [z]           <2D-tensor> with reparameterized [z]
                - [logvar]      None or <2D-tensor> with estimated log(SD^2) of [z]
                - [batch_weights]   <1D-tensor> with a weight for each batch-element (if None, normal average over batch)
                - [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)

        OUTPUT: - [reconL]      reconstruction loss indicating how well [x] and [x_recon] match
                - [variatL]     variational (KL-divergence) loss "indicating how close distribution [z] is to prior"
                - [predL]       prediction loss indicating how well targets [y] are predicted
                - [distilL]     knowledge distillation (KD) loss indicating how well the predicted "logits" ([y_hat])
                                match the target "logits" ([scores])'''

        ###-----Reconstruction loss-----###
        batch_size = x.size(0)
        reconL = self.calculate_recon_loss(x=x.view(batch_size, -1), average=True,
                                           x_recon=x_recon.view(batch_size, -1))  # -> average over pixels
        reconL = lf.weighted_average(reconL, weights=batch_weights, dim=0)        # -> average over batch

        ###-----Variational loss-----###
        if logvar is not None:
            # -map [y] (indices into [allowed_classes]) back to absolute class-IDs, if needed
            actual_y = torch.tensor([allowed_classes[i.item()] for i in y]).to(self._device()) if (
                (allowed_classes is not None) and (y is not None)
            ) else y
            if (y is None and scores is not None):
                # -derive soft class-probabilities from the provided target "logits"
                y_prob = F.softmax(scores / self.KD_temp, dim=1)
                if allowed_classes is not None and len(allowed_classes) > y_prob.size(1):
                    # -pad with zero-probability for classes [scores] does not cover
                    n_batch = y_prob.size(0)
                    zeros_to_add = torch.zeros(n_batch, len(allowed_classes) - y_prob.size(1))
                    zeros_to_add = zeros_to_add.to(self._device())
                    y_prob = torch.cat([y_prob, zeros_to_add], dim=1)
            else:
                y_prob = None
            # ---> if [y] is not provided but [scores] is, calculate variational loss using weighted sum of prior-modes
            variatL = self.calculate_variat_loss(z=z, mu=mu, logvar=logvar, y=actual_y, y_prob=y_prob,
                                                 allowed_classes=allowed_classes)
            variatL = lf.weighted_average(variatL, weights=batch_weights, dim=0)  # -> average over batch
            variatL /= (self.image_channels * self.image_size ** 2)               # -> divide by # of input-pixels
        else:
            variatL = torch.tensor(0., device=self._device())

        ###-----Prediction loss-----###
        if y is not None and y_hat is not None:
            predL = F.cross_entropy(input=y_hat, target=y, reduction='none')
            #--> no reduction needed, summing over classes is "implicit"
            predL = lf.weighted_average(predL, weights=batch_weights, dim=0)  # -> average over batch
        else:
            predL = torch.tensor(0., device=self._device())

        ###-----Distillation loss-----###
        if scores is not None and y_hat is not None:
            # n_classes_to_consider = scores.size(1) #--> with this version, no zeroes would be added to [scores]!
            n_classes_to_consider = y_hat.size(1)    #--> zeros will be added to [scores] to make it this size!
            distilL = lf.loss_fn_kd(scores=y_hat[:, :n_classes_to_consider], target_scores=scores, T=self.KD_temp,
                                    weights=batch_weights)  #--> summing over classes & averaging over batch in function
        else:
            distilL = torch.tensor(0., device=self._device())

        # Return a tuple of the calculated losses
        return reconL, variatL, predL, distilL
+
+
+
+ ##------ TRAINING FUNCTIONS --------##
+
    def train_a_batch(self, x, y=None, x_=None, y_=None, scores_=None, contexts_=None, rnt=0.5,
                      active_classes=None, context=1, **kwargs):
        '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_]).

        [x]               batch of inputs (could be None, in which case only 'replayed' data is used)
        [y]               None or batch of corresponding labels
        [x_]              None or (<list> of) batch of replayed inputs
        [y_]              None or (<list> of) <1Dtensor>:[batch] of corresponding "replayed" labels
        [scores_]         None or (<list> of) <2Dtensor>:[batch]x[classes] target "scores"/"logits" for [x_]
        [contexts_]       None or (<list> of) <1Dtensor>:[batch] of context-IDs of replayed samples (as <int>)
        [rnt]             <number> in [0,1], relative importance of new context
        [active_classes]  None or (<list> of) <list> with "active" classes
        [context]         <int>, for setting context-specific mask'''

        # Set model to training-mode
        self.train()
        # -however, if some layers are frozen, they should be set to eval() to prevent batch-norm layers from changing
        if self.convE.frozen:
            self.convE.eval()
        if self.fcE.frozen:
            self.fcE.eval()

        # Reset optimizer
        self.optimizer.zero_grad()


        ##--(1)-- CURRENT DATA --##
        accuracy = 0.
        if x is not None:
            # If using context-gates, create [context_tensor] as it's needed in the decoder
            context_tensor = None
            if self.dg_gates and self.dg_type=="context":
                # -all elements of the batch belong to the current context (contexts are 0-indexed internally)
                context_tensor = torch.tensor(np.repeat(context-1, x.size(0))).to(self._device())

            # Run the model
            recon_batch, y_hat, mu, logvar, z = self(
                x, gate_input=(context_tensor if self.dg_type=="context" else y) if self.dg_gates else None, full=True,
                reparameterize=True
            )
            # --if needed, remove predictions for classes not active in the current context
            if active_classes is not None:
                class_entries = active_classes[-1] if type(active_classes[0])==list else active_classes
                if y_hat is not None:
                    y_hat = y_hat[:, class_entries]

            # Calculate all losses
            reconL, variatL, predL, _ = self.loss_function(
                x=x, y=y, x_recon=recon_batch, y_hat=y_hat, scores=None, mu=mu, z=z, logvar=logvar,
                allowed_classes=class_entries if active_classes is not None else None
            )  #--> [allowed_classes] will be used only if [y] is not provided

            # Weigh losses as requested
            loss_cur = self.lamda_rcl*reconL + self.lamda_vl*variatL + self.lamda_pl*predL

            # Calculate training-accuracy
            if y is not None and y_hat is not None:
                _, predicted = y_hat.max(1)
                accuracy = (y == predicted).sum().item() / x.size(0)


        ##--(2)-- REPLAYED DATA --##
        if x_ is not None:
            # If there are different predictions per context, [y_] or [scores_] are lists and [x_] must be evaluated
            # separately on each of them (although [x_] could be a list as well!)
            PerContext = (type(y_)==list) if (y_ is not None) else (type(scores_)==list)
            if not PerContext:
                # -wrap in single-element lists so the loop below handles both cases uniformly
                y_ = [y_]
                scores_ = [scores_]
                active_classes = [active_classes] if (active_classes is not None) else None
            n_replays = len(y_) if (y_ is not None) else len(scores_)

            # Prepare lists to store losses for each replay
            loss_replay = [torch.tensor(0., device=self._device())]*n_replays
            reconL_r = [torch.tensor(0., device=self._device())]*n_replays
            variatL_r = [torch.tensor(0., device=self._device())]*n_replays
            predL_r = [torch.tensor(0., device=self._device())]*n_replays
            distilL_r = [torch.tensor(0., device=self._device())]*n_replays

            # Run model (if [x_] is not a list with separate replay per context and there is no context-specific mask)
            if (not type(x_)==list) and (not (self.dg_gates and PerContext)):
                # -if needed in the decoder-gates, find class-tensor [y_predicted]
                y_predicted = None
                if self.dg_gates and self.dg_type=="class":
                    if y_[0] is not None:
                        y_predicted = y_[0]
                    else:
                        # -no hard labels: use soft class-probabilities derived from the target "logits"
                        y_predicted = F.softmax(scores_[0] / self.KD_temp, dim=1)
                        if y_predicted.size(1) < self.classes:
                            # in case of Class-IL, add zeros at the end:
                            n_batch = y_predicted.size(0)
                            zeros_to_add = torch.zeros(n_batch, self.classes - y_predicted.size(1))
                            zeros_to_add = zeros_to_add.to(self._device())
                            y_predicted = torch.cat([y_predicted, zeros_to_add], dim=1)
                # -run full model
                x_temp_ = x_
                gate_input = (contexts_ if self.dg_type=="context" else y_predicted) if self.dg_gates else None
                recon_batch, y_hat_all, mu, logvar, z = self(x_temp_, gate_input=gate_input, full=True)

            # Loop to perform each replay
            for replay_id in range(n_replays):
                # -if [x_] is a list with separate replay per context, evaluate model on this context's replay
                if (type(x_)==list) or (PerContext and self.dg_gates):
                    # -if needed in the decoder-gates, find class-tensor [y_predicted]
                    y_predicted = None
                    if self.dg_gates and self.dg_type == "class":
                        if y_ is not None and y_[replay_id] is not None:
                            y_predicted = y_[replay_id]
                            # because of Task-IL, increase class-ID with number of classes before context being replayed
                            y_predicted = y_predicted + replay_id*len(active_classes[0])
                        else:
                            y_predicted = F.softmax(scores_[replay_id] / self.KD_temp, dim=1)
                            if y_predicted.size(1) < self.classes:
                                # in case of Task-IL, add zeros before and after:
                                n_batch = y_predicted.size(0)
                                zeros_to_add_before = torch.zeros(n_batch, replay_id*y_predicted.size(1))
                                zeros_to_add_before = zeros_to_add_before.to(self._device())
                                zeros_to_add_after = torch.zeros(n_batch,self.classes-(replay_id+1)*y_predicted.size(1))
                                zeros_to_add_after = zeros_to_add_after.to(self._device())
                                y_predicted = torch.cat([zeros_to_add_before, y_predicted, zeros_to_add_after], dim=1)
                    # -run full model
                    x_temp_ = x_[replay_id] if type(x_)==list else x_
                    gate_input = (
                        contexts_[replay_id] if self.dg_type=="context" else y_predicted
                    ) if self.dg_gates else None
                    recon_batch, y_hat_all, mu, logvar, z = self(x_temp_, full=True, gate_input=gate_input)

                # --if needed, remove predictions for classes not active in the replayed context
                y_hat = y_hat_all if (
                    active_classes is None or y_hat_all is None
                ) else y_hat_all[:, active_classes[replay_id]]

                # Calculate all losses
                reconL_r[replay_id],variatL_r[replay_id],predL_r[replay_id],distilL_r[replay_id] = self.loss_function(
                    x=x_temp_, y=y_[replay_id] if (y_ is not None) else None, x_recon=recon_batch, y_hat=y_hat,
                    scores=scores_[replay_id] if (scores_ is not None) else None, mu=mu, z=z, logvar=logvar,
                    allowed_classes=active_classes[replay_id] if active_classes is not None else None,
                )

                # Weigh losses as requested
                loss_replay[replay_id] = self.lamda_rcl*reconL_r[replay_id] + self.lamda_vl*variatL_r[replay_id]
                if self.replay_targets=="hard":
                    loss_replay[replay_id] += self.lamda_pl*predL_r[replay_id]
                elif self.replay_targets=="soft":
                    loss_replay[replay_id] += self.lamda_pl*distilL_r[replay_id]


        # Calculate total loss
        # NOTE(review): if both [x] and [x_] are None, [loss_total] is None and .backward() below will fail --
        # presumably callers always provide at least one of them; confirm at the call-sites.
        loss_replay = None if (x_ is None) else sum(loss_replay)/n_replays
        loss_total = loss_replay if (x is None) else (loss_cur if x_ is None else rnt*loss_cur+(1-rnt)*loss_replay)


        ##--(3)-- PARAMETER REGULARIZATION LOSSES --##

        # Add a parameter regularization penalty to the loss function
        weight_penalty_loss = None
        if self.weight_penalty:
            if self.importance_weighting=='si':
                weight_penalty_loss = self.surrogate_loss()
            elif self.importance_weighting=='fisher':
                if self.fisher_kfac:
                    weight_penalty_loss = self.ewc_kfac_loss()
                else:
                    weight_penalty_loss = self.ewc_loss()
            loss_total += self.reg_strength * weight_penalty_loss


        # Backpropagate errors
        loss_total.backward()
        # Take optimization-step
        self.optimizer.step()


        # Return the dictionary with different training-loss split in categories
        return {
            'loss_total': loss_total.item(), 'accuracy': accuracy,
            'recon': reconL.item() if x is not None else 0,
            'variat': variatL.item() if x is not None else 0,
            'pred': predL.item() if x is not None else 0,
            'recon_r': sum(reconL_r).item()/n_replays if x_ is not None else 0,
            'variat_r': sum(variatL_r).item()/n_replays if x_ is not None else 0,
            'pred_r': sum(predL_r).item()/n_replays if (x_ is not None and predL_r[0] is not None) else 0,
            'distil_r': sum(distilL_r).item()/n_replays if (x_ is not None and distilL_r[0] is not None) else 0,
            'param_reg': weight_penalty_loss.item() if weight_penalty_loss is not None else 0,
        }
diff --git a/PyTorch/build-in/other/continual-learning/models/conv/__init__.py b/PyTorch/build-in/other/continual-learning/models/conv/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/models/conv/layers.py b/PyTorch/build-in/other/continual-learning/models/conv/layers.py
new file mode 100644
index 000000000..266edb396
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/conv/layers.py
@@ -0,0 +1,347 @@
+import torch.nn as nn
+from models.utils import modules
+
+
+#-----------------------------------------------------------------------------------------------------------#
+
+#####################
+### ResNet-blocks ###
+#####################
+
+class BasicBlock(nn.Module):
+ '''Standard building block for ResNets.'''
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1, batch_norm=True, nl="relu", no_fnl=False):
+ super(BasicBlock, self).__init__()
+
+ # normal block-layers
+ self.block_layer1 = nn.Sequential(
+ nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False if batch_norm else True),
+ nn.BatchNorm2d(planes) if batch_norm else modules.Identity(),
+ nn.ReLU() if nl=="relu" else nn.LeakyReLU()
+ )
+ self.block_layer2 = nn.Sequential(
+ nn.Conv2d(planes, self.expansion*planes, kernel_size=3, stride=1, padding=1,
+ bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else modules.Identity()
+ )
+
+ # shortcut block-layer
+ self.shortcut = modules.Identity()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride,
+ bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else modules.Identity()
+ )
+
+ # final non-linearity
+ self.nl = (nn.ReLU() if nl=="relu" else nn.LeakyReLU()) if not no_fnl else modules.Identity()
+
+ def forward(self, x):
+ out = self.block_layer2(self.block_layer1(x))
+ out += self.shortcut(x)
+ return self.nl(out)
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = [self.block_layer1[0], self.block_layer2[0]]
+ if not type(self.shortcut) == modules.Identity:
+ list.append(self.shortcut[0])
+ return list
+
+
+class Bottleneck(nn.Module):
+ '''Building block (with "bottleneck") for ResNets.'''
+ expansion = 4
+
+ def __init__(self, in_planes, planes, stride=1, batch_norm=True, nl="relu", no_fnl=False):
+ super(Bottleneck, self).__init__()
+
+ # normal block-layers
+ self.block_layer1 = nn.Sequential(
+ nn.Conv2d(in_planes, planes, kernel_size=1, bias=False if batch_norm else True),
+ nn.BatchNorm2d(planes) if batch_norm else modules.Identity(),
+ nn.ReLU() if nl == "relu" else nn.LeakyReLU()
+ )
+ self.block_layer2 = nn.Sequential(
+ nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False if batch_norm else True),
+ nn.BatchNorm2d(planes) if batch_norm else modules.Identity(),
+ nn.ReLU() if nl == "relu" else nn.LeakyReLU()
+ )
+ self.block_layer3 = nn.Sequential(
+ nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else modules.Identity()
+ )
+
+ # shortcut block-layer
+ self.shortcut = modules.Identity()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride,
+ bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else True
+ )
+
+ # final non-linearity
+ self.nl = (nn.ReLU() if nl == "relu" else nn.LeakyReLU()) if not no_fnl else modules.Identity()
+
+ def forward(self, x):
+ out = self.block_layer3(self.block_layer2(self.block_layer1(x)))
+ out += self.shortcut(x)
+ return self.nl(out)
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = [self.block_layer1[0], self.block_layer2[0], self.block_layer3[0]]
+ if not type(self.shortcut) == modules.Identity:
+ list.append(self.shortcut[0])
+ return list
+
+
+#-----------------------------------------------------------------------------------------------------------#
+
+###################
+### Conv-layers ###
+###################
+
+class conv_layer(nn.Module):
+ '''Standard convolutional layer. Possible to return pre-activations.
+
+ Optional extras: dropout on the input, batch-norm on the output, and a "gate"
+ (a parallel convolution whose sigmoid output multiplies the pre-activations).'''
+
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=1,
+ drop=0, batch_norm=False, nl=nn.ReLU(), bias=True, gated=False):
+ super().__init__()
+ # Optional sub-modules are only created when requested; [forward] checks with hasattr()
+ if drop>0:
+ self.dropout = nn.Dropout2d(drop)
+ self.conv = nn.Conv2d(in_planes, out_planes, stride=stride, kernel_size=kernel_size, padding=padding, bias=bias)
+ if batch_norm:
+ self.bn = nn.BatchNorm2d(out_planes)
+ if gated:
+ # -gate operates on the raw (un-dropped) input, with same geometry as the main conv
+ self.gate = nn.Conv2d(in_planes, out_planes, stride=stride, kernel_size=kernel_size, padding=padding,
+ bias=False)
+ self.sigmoid = nn.Sigmoid()
+ # [nl] may be a module, "relu", "leakyrelu" or "none" (anything else -> Identity)
+ if isinstance(nl, nn.Module):
+ self.nl = nl
+ elif not nl=="none":
+ self.nl = nn.ReLU() if nl=="relu" else (nn.LeakyReLU() if nl=="leakyrelu" else modules.Identity())
+
+ def forward(self, x, return_pa=False):
+ # If [return_pa], the (gated) pre-activations are returned alongside the output
+ input = self.dropout(x) if hasattr(self, 'dropout') else x
+ pre_activ = self.bn(self.conv(input)) if hasattr(self, 'bn') else self.conv(input)
+ gate = self.sigmoid(self.gate(x)) if hasattr(self, 'gate') else None
+ gated_pre_activ = gate * pre_activ if hasattr(self, 'gate') else pre_activ
+ output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
+ return (output, gated_pre_activ) if return_pa else output
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ return [self.conv]
+
+
+class res_layer(nn.Module):
+ '''Convolutional res-net layer. Possible to return pre-activations.
+
+ Stacks [num_blocks] ResNet-blocks after dropout; only the first block may down-sample.
+ The last block suppresses its own final non-linearity, which is instead applied here
+ so that pre-activations can be returned.'''
+
+ def __init__(self, in_planes, out_planes, block=BasicBlock, num_blocks=2, stride=1, drop=0, batch_norm=True,
+ nl="relu", no_fnl=False):
+
+ ## NOTE: should [no_fnl] be changed so that also no batch_norm is applied?? ##
+
+ # Set configurations
+ super().__init__()
+ self.num_blocks = num_blocks
+ self.in_planes = in_planes
+ self.out_planes = out_planes * block.expansion
+
+ # Create layer
+ self.dropout = nn.Dropout2d(drop)
+ for block_id in range(num_blocks):
+ # -first block has given stride, later blocks have stride 1
+ new_block = block(in_planes, out_planes, stride=stride if block_id==0 else 1, batch_norm=batch_norm, nl=nl,
+ no_fnl=True if block_id==(num_blocks-1) else False)
+ setattr(self, "block{}".format(block_id+1), new_block)
+ in_planes = out_planes * block.expansion
+ # self.bn = nn.BatchNorm2d(out_planes * block.expansion) if batch_norm else utils.Identity()
+ self.nl = (nn.ReLU() if nl == "relu" else nn.LeakyReLU()) if not no_fnl else modules.Identity()
+
+ def forward(self, x, return_pa=False):
+ x = self.dropout(x)
+ for block_id in range(self.num_blocks):
+ x = getattr(self, "block{}".format(block_id+1))(x)
+ output = self.nl(x)
+ # -here [x] holds the pre-activations (last block applied no final non-linearity)
+ return (output, x) if return_pa else output
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ for block_id in range(self.num_blocks):
+ list += getattr(self, 'block{}'.format(block_id+1)).list_init_layers()
+ return list
+
+
+#-----------------------------------------------------------------------------------------------------------#
+
+#####################
+### Deconv-blocks ###
+#####################
+
+class DeconvBlock(nn.Module):
+ '''Building block for deconv-layer with multiple blocks.
+
+ Mirrors [BasicBlock], but with transposed convolutions; with [stride]==2 the first
+ (transposed) convolution doubles the spatial size.'''
+ expansion = 1
+
+ def __init__(self, in_planes, planes, stride=1, batch_norm=True, nl="relu", no_fnl=False, smaller_kernel=False):
+ super(DeconvBlock, self).__init__()
+
+ # normal block-layers
+ # -kernel-size & padding are chosen so that stride=2 exactly doubles the spatial size
+ self.block_layer1 = nn.Sequential(
+ nn.ConvTranspose2d(in_planes, planes, stride=stride, bias=False if batch_norm else True,
+ kernel_size=(2 if smaller_kernel else 4) if stride==2 else 3,
+ padding=0 if (stride==2 and smaller_kernel) else 1),
+ nn.BatchNorm2d(planes) if batch_norm else modules.Identity(),
+ nn.ReLU() if nl=="relu" else nn.LeakyReLU()
+ )
+ self.block_layer2 = nn.Sequential(
+ nn.ConvTranspose2d(planes, self.expansion*planes, kernel_size=3, stride=1, padding=1,
+ bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else modules.Identity()
+ )
+
+ # shortcut block-layer (identity, unless spatial size or channel count changes)
+ self.shortcut = modules.Identity()
+ if stride != 1 or in_planes != self.expansion*planes:
+ self.shortcut = nn.Sequential(
+ nn.ConvTranspose2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride,
+ output_padding=0 if stride==1 else 1, bias=False if batch_norm else True),
+ nn.BatchNorm2d(self.expansion*planes) if batch_norm else modules.Identity()
+ )
+
+ # final non-linearity (applied after adding the shortcut; suppressed if [no_fnl])
+ self.nl = (nn.ReLU() if nl=="relu" else nn.LeakyReLU()) if not no_fnl else modules.Identity()
+
+ def forward(self, x):
+ out = self.block_layer2(self.block_layer1(x))
+ out += self.shortcut(x)
+ return self.nl(out)
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = [self.block_layer1[0], self.block_layer2[0]]
+ if not type(self.shortcut) == modules.Identity:
+ list.append(self.shortcut[0])
+ return list
+
+
+#-----------------------------------------------------------------------------------------------------------#
+
+#####################
+### Deconv-layers ###
+#####################
+
+class deconv_layer(nn.Module):
+ '''Standard "deconvolutional" layer. Possible to return pre-activations.
+
+ Same structure as [conv_layer], but with transposed convolutions; with [stride]==2
+ the spatial size of the input is doubled.'''
+
+ def __init__(self, input_channels, output_channels, stride=1, drop=0, batch_norm=True, nl="relu", bias=True,
+ gated=False, smaller_kernel=False):
+ super().__init__()
+ # Optional sub-modules are only created when requested; [forward] checks with hasattr()
+ if drop>0:
+ self.dropout = nn.Dropout2d(drop)
+ # -kernel-size & padding are chosen so that stride=2 exactly doubles the spatial size
+ self.deconv = nn.ConvTranspose2d(input_channels, output_channels, bias=bias, stride=stride,
+ kernel_size=(2 if smaller_kernel else 4) if stride==2 else 3,
+ padding=0 if (stride==2 and smaller_kernel) else 1)
+ if batch_norm:
+ self.bn = nn.BatchNorm2d(output_channels)
+ if gated:
+ self.gate = nn.ConvTranspose2d(input_channels, output_channels, bias=False, stride=stride,
+ kernel_size=(2 if smaller_kernel else 4) if stride==2 else 3,
+ padding=0 if (stride==2 and smaller_kernel) else 1)
+ self.sigmoid = nn.Sigmoid()
+ # [nl] may be a module or one of: "sigmoid", "hardtanh", "relu", "leakyrelu", "none"
+ if isinstance(nl, nn.Module):
+ self.nl = nl
+ elif nl in ("sigmoid", "hardtanh"):
+ # NOTE(review): hardtanh range [-4.5, 0] presumably clamps predicted log-variances -- confirm
+ self.nl = nn.Sigmoid() if nl=="sigmoid" else nn.Hardtanh(min_val=-4.5, max_val=0)
+ elif not nl=="none":
+ self.nl = nn.ReLU() if nl == "relu" else (nn.LeakyReLU() if nl == "leakyrelu" else modules.Identity())
+
+ def forward(self, x, return_pa=False):
+ # If [return_pa], the (gated) pre-activations are returned alongside the output
+ input = self.dropout(x) if hasattr(self, 'dropout') else x
+ pre_activ = self.bn(self.deconv(input)) if hasattr(self, 'bn') else self.deconv(input)
+ gate = self.sigmoid(self.gate(x)) if hasattr(self, 'gate') else None
+ gated_pre_activ = gate * pre_activ if hasattr(self, 'gate') else pre_activ
+ output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
+ return (output, gated_pre_activ) if return_pa else output
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ return [self.deconv]
+
+
+class deconv_layer_split(nn.Module):
+ '''"Deconvolutional" layer outputing [mean] and [logvar] for each unit.'''
+
+ def __init__(self, input_channels, output_channels, nl_mean="sigmoid", nl_logvar="hardtanh",
+ stride=1, drop=0, batch_norm=True, bias=True, gated=False, smaller_kernel=False):
+ super().__init__()
+ self.mean = deconv_layer(input_channels, output_channels, nl=nl_mean, smaller_kernel=smaller_kernel,
+ stride=stride, drop=drop, batch_norm=batch_norm, bias=bias, gated=gated)
+ self.logvar = deconv_layer(input_channels, output_channels, nl=nl_logvar, smaller_kernel=smaller_kernel,
+ stride=stride, drop=drop, batch_norm=batch_norm, bias=False, gated=gated)
+
+ def forward(self, x, return_pa=False):
+ mean, pre_activ = self.mean(x, return_pa=True)
+ logvar = self.logvar(x)
+ return ((mean, logvar), pre_activ) if return_pa else (mean, logvar)
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ list += self.mean.list_init_layers()
+ list += self.logvar.list_init_layers()
+ return list
+
+
+class deconv_res_layer(nn.Module):
+ '''Deconvolutional res-net layer. Possible to return pre-activations.'''
+
+ def __init__(self, in_planes, out_planes, block=DeconvBlock, num_blocks=2, stride=1, drop=0, batch_norm=True,
+ nl="relu", smaller_kernel=False, output="normal"):
+
+ ## NOTE: should [output=="none"] be changed so that also no batch_norm is applied?? ##
+
+ # Set configurations
+ super().__init__()
+ self.num_blocks = num_blocks
+ self.in_planes = in_planes
+ self.out_planes = out_planes * block.expansion
+
+ # Create layer
+ self.dropout = nn.Dropout2d(drop)
+ for block_id in range(num_blocks):
+ # -first block has given stride, later blocks have stride 1
+ new_block = block(in_planes, out_planes, stride=stride if block_id==0 else 1, batch_norm=batch_norm, nl=nl,
+ no_fnl=True if block_id==(num_blocks-1) else False, smaller_kernel=smaller_kernel)
+ setattr(self, "block{}".format(block_id+1), new_block)
+ in_planes = out_planes * block.expansion
+ # self.bn = nn.BatchNorm2d(out_planes * block.expansion) if batch_norm else utils.Identity()
+ if output=="sigmoid":
+ self.nl = nn.Sigmoid()
+ elif output=="normal":
+ self.nl = nn.ReLU() if nl=="relu" else nn.LeakyReLU()
+ elif output=="none":
+ self.nl = modules.Identity()
+ else:
+ raise NotImplementedError("Ouptut '{}' not implemented for deconvolutional ResNet layer.".format(output))
+
+ def forward(self, x, return_pa=False):
+ x = self.dropout(x)
+ for block_id in range(self.num_blocks):
+ x = getattr(self, "block{}".format(block_id+1))(x)
+ output = self.nl(x)
+ return (output, x) if return_pa else output
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ for block_id in range(self.num_blocks):
+ list += getattr(self, 'block{}'.format(block_id+1)).list_init_layers()
+ return list
+
diff --git a/PyTorch/build-in/other/continual-learning/models/conv/nets.py b/PyTorch/build-in/other/continual-learning/models/conv/nets.py
new file mode 100644
index 000000000..93a6f65da
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/conv/nets.py
@@ -0,0 +1,252 @@
+from torch import nn
+import numpy as np
+import models.conv.layers as conv_layers
+from models.utils import modules
+
+
+class ConvLayers(nn.Module):
+ '''Convolutional feature extractor model for (natural) images. Possible to return (pre)activations of each layer.
+ Also possible to supply a [skip_first]- or [skip_last]-argument to the forward-function to only pass certain layers.
+
+ Input: [batch_size] x [image_channels] x [image_size] x [image_size] tensor
+ Output: [batch_size] x [out_channels] x [out_size] x [out_size] tensor
+ - with [out_channels] = [start_channels] x 2**[reducing_layers] x [block.expansion]
+ [out_size] = [image_size] / 2**[reducing_layers]'''
+
+ def __init__(self, conv_type="standard", block_type="basic", num_blocks=2,
+ image_channels=3, depth=5, start_channels=16, reducing_layers=None, batch_norm=True, nl="relu",
+ output="normal", global_pooling=False, gated=False):
+ '''Initialize stacked convolutional layers (either "standard" or "res-net" ones--1st layer is always standard).
+
+ [conv_type] type of conv-layers to be used: [standard|resnet]
+ [block_type] block-type to be used: [basic|bottleneck] (only relevant if [type]=resNet)
+ [num_blocks] or (with len=[depth]-1) of # blocks in each layer
+ [image_channels] # channels of input image to encode
+ [depth] # layers
+ [start_channels] # channels in 1st layer, doubled in every "rl" (=reducing layer)
+ [reducing_layers] # layers in which image-size is halved & # channels doubled (default=[depth]-1)
+ ("rl"'s are the last conv-layers; in 1st layer # channels cannot double)
+ [batch_norm] whether to use batch-norm after each convolution-operation
+ [nl] non-linearity to be used: [relu|leakyrelu]
+ [output] if - "normal", final layer is same as all others
+ - "none", final layer has no batchnorm or non-linearity
+ [global_pooling] whether to include global average pooling layer at very end
+ [gated] whether conv-layers should be gated (not implemented for ResNet-layers)'''
+
+ # Process type and number of blocks
+ conv_type = "standard" if depth<2 else conv_type
+ if conv_type=="resNet":
+ num_blocks = [num_blocks]*(depth-1) if type(num_blocks)==int else num_blocks
+ assert len(num_blocks)==(depth-1)
+ block = conv_layers.Bottleneck if block_type == "bottleneck" else conv_layers.BasicBlock
+
+ # Prepare label
+ type_label = "C" if conv_type=="standard" else "R{}".format("b" if block_type=="bottleneck" else "")
+ channel_label = "{}-{}x{}".format(image_channels, depth, start_channels)
+ block_label = ""
+ if conv_type=="resNet" and depth>1:
+ block_label += "-"
+ for block_num in num_blocks:
+ block_label += "b{}".format(block_num)
+ nd_label = "{bn}{nl}{gp}{gate}{out}".format(bn="b" if batch_norm else "", nl="l" if nl=="leakyrelu" else "",
+ gp="p" if global_pooling else "", gate="g" if gated else "",
+ out="n" if output=="none" else "")
+ nd_label = "" if nd_label=="" else "-{}".format(nd_label)
+
+ # Set configurations
+ super().__init__()
+ self.depth = depth
+ self.rl = depth-1 if (reducing_layers is None) else (reducing_layers if (depth+1)>reducing_layers else depth)
+ rl_label = "" if self.rl==(self.depth-1) else "-rl{}".format(self.rl)
+ self.label = "{}{}{}{}{}".format(type_label, channel_label, block_label, rl_label, nd_label)
+ self.block_expansion = block.expansion if conv_type=="resNet" else 1
+ # -> constant by which # of output channels of each block is multiplied (if >1, it creates "bottleneck"-effect)
+ double_factor = self.rl if self.rl how often # start-channels is doubled
+ self.out_channels = (start_channels * 2**double_factor) * self.block_expansion if depth>0 else image_channels
+ # -> number channels in last layer (as seen from image)
+ self.start_channels = start_channels # -> number channels in 1st layer (doubled in every "reducing layer")
+ self.global_pooling = global_pooling # -> whether or not average global pooling layer should be added at end
+
+ # Conv-layers
+ output_channels = start_channels
+ for layer_id in range(1, depth+1):
+ # should this layer down-sample? --> last [self.rl] layers should be down-sample layers
+ reducing = True if (layer_id > (depth-self.rl)) else False
+ # calculate number of this layer's input and output channels
+ input_channels = image_channels if layer_id==1 else output_channels * self.block_expansion
+ output_channels = output_channels*2 if (reducing and not layer_id==1) else output_channels
+ # define and set the convolutional-layer
+ if conv_type=="standard" or layer_id==1:
+ conv_layer = conv_layers.conv_layer(input_channels, output_channels, stride=2 if reducing else 1,
+ drop=0, nl="no" if output=="none" and layer_id==depth else nl,
+ batch_norm=False if output=="none" and layer_id==depth else batch_norm,
+ gated= False if output=="none" and layer_id==depth else gated)
+ else:
+ conv_layer = conv_layers.res_layer(input_channels, output_channels, block=block,
+ num_blocks=num_blocks[layer_id-2], stride=2 if reducing else 1,
+ drop=0, batch_norm=batch_norm, nl=nl,
+ no_fnl=True if output=="none" and layer_id==depth else False)
+ setattr(self, 'convLayer{}'.format(layer_id), conv_layer)
+ # Perform pooling (if requested)
+ self.pooling = nn.AdaptiveAvgPool2d((1,1)) if global_pooling else modules.Identity()
+
+ def forward(self, x, skip_first=0, skip_last=0, return_lists=False):
+ # Initiate for keeping track of intermediate hidden (pre-)activations
+ if return_lists:
+ hidden_act_list = []
+ pre_act_list = []
+ # Sequentially pass [x] through all conv-layers
+ for layer_id in range(skip_first+1, self.depth+1-skip_last):
+ (x, pre_act) = getattr(self, 'convLayer{}'.format(layer_id))(x, return_pa=True)
+ if return_lists:
+ pre_act_list.append(pre_act) #-> for each layer, store pre-activations
+ if layer_id<(self.depth-skip_last):
+ hidden_act_list.append(x) #-> for all but last layer, store hidden activations
+ # Global average pooling (if requested)
+ x = self.pooling(x)
+ # Return final [x], if requested along with [hidden_act_list] and [pre_act_list]
+ return (x, hidden_act_list, pre_act_list) if return_lists else x
+
+ def out_size(self, image_size, ignore_gp=False):
+ '''Given [image_size] of input, return the size of the "final" image that is outputted.'''
+ out_size = int(np.ceil(image_size / 2**(self.rl))) if self.depth>0 else image_size
+ return 1 if (self.global_pooling and not ignore_gp) else out_size
+
+ def out_units(self, image_size, ignore_gp=False):
+ '''Given [image_size] of input, return the total number of units in the output.'''
+ return self.out_channels * self.out_size(image_size, ignore_gp=ignore_gp)**2
+
+ def layer_info(self, image_size):
+ '''Return list with shape of all hidden layers.'''
+ layer_list = []
+ reduce_number = 0 # keep track how often image-size has been halved
+ double_number = 0 # keep track how often channel number has been doubled
+ for layer_id in range(1, self.depth):
+ reducing = True if (layer_id > (self.depth-self.rl)) else False
+ if reducing:
+ reduce_number += 1
+ if reducing and layer_id>1:
+ double_number += 1
+ pooling = True if self.global_pooling and layer_id==(self.depth-1) else False
+ expansion = 1 if layer_id==1 else self.block_expansion
+ # add shape of this layer to list
+ layer_list.append([(self.start_channels * 2**double_number) * expansion,
+ 1 if pooling else int(np.ceil(image_size / 2**reduce_number)),
+ 1 if pooling else int(np.ceil(image_size / 2**reduce_number))])
+ return layer_list
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ for layer_id in range(1, self.depth+1):
+ list += getattr(self, 'convLayer{}'.format(layer_id)).list_init_layers()
+ return list
+
+ @property
+ def name(self):
+ return self.label
+
+
+
+
+class DeconvLayers(nn.Module):
+ '''"Deconvolutional" feature decoder model for (natural) images. Possible to return (pre)activations of each layer.
+ Also possible to supply a [skip_first]- or [skip_last]-argument to the forward-function to only pass certain layers.
+
+ Input: [batch_size] x [in_channels] x [in_size] x [in_size] tensor
+ Output: (tuple of) [batch_size] x [image_channels] x [final_size] x [final_size] tensor
+ - with [final_size] = [in_size] x 2**[reducing_layers]
+ [in_channels] = [final_channels] x 2**min([reducing_layers], [depth]-1)'''
+
+ def __init__(self, image_channels=3, final_channels=16, depth=5, reducing_layers=None, batch_norm=True, nl="relu",
+ gated=False, output="normal", smaller_kernel=False, deconv_type="standard"):
+ '''[image_channels] # channels of image to decode
+ [final_channels] # channels in layer before output, was halved in every "rl" (=reducing layer) when moving
+ through model; corresponds to [start_channels] in "ConvLayers"-module
+ [depth] # layers (seen from the image, # channels is halved in each layer going to output image)
+ [reducing_layers] # of layers in which image-size is doubled & number of channels halved (default=[depth]-1)
+ ("rl"'s are the first conv-layers encountered--i.e., last conv-layers as seen from image)
+ (note that in the last layer # channels cannot be halved)
+ [batch_norm] whether to use batch-norm after each convolution-operation
+ [nl] what non-linearity to use -- choices: [relu, leakyrelu, sigmoid, none]
+ [gated] whether deconv-layers should be gated
+ [output] ; if - "normal", final layer is same as all others
+ - "none", final layer has no non-linearity
+ - "sigmoid", final layer has sigmoid non-linearity
+ [smaller_kernel] if True, use kernel-size of 2 (instead of 4) & without padding in reducing-layers'''
+
+ # configurations
+ super().__init__()
+ self.depth = depth if depth>0 else 0
+ self.rl = self.depth-1 if (reducing_layers is None) else min(self.depth, reducing_layers)
+ type_label = "Deconv" if deconv_type=="standard" else "DeResNet"
+ nd_label = "{bn}{nl}{gate}{out}".format(bn="-bn" if batch_norm else "", nl="-lr" if nl=="leakyrelu" else "",
+ gate="-gated" if gated else "",
+ out="" if output=="normal" else "-{}".format(output))
+ self.label = "{}-ic{}-{}x{}-rl{}{}{}".format(type_label, image_channels, self.depth, final_channels, self.rl,
+ "s" if smaller_kernel else "", nd_label)
+ if self.depth>0:
+ self.in_channels = final_channels * 2**min(self.rl, self.depth-1) # -> input-channels for deconv
+ self.final_channels = final_channels # -> channels in layer before output
+ self.image_channels = image_channels # -> output-channels for deconv
+
+ # "Deconv"- / "transposed conv"-layers
+ if self.depth>0:
+ output_channels = self.in_channels
+ for layer_id in range(1, self.depth+1):
+ # should this layer down-sample? --> first [self.rl] layers should be down-sample layers
+ reducing = True if (layer_id<(self.rl+1)) else False
+ # update number of this layer's input and output channels
+ input_channels = output_channels
+ output_channels = int(output_channels/2) if reducing else output_channels
+ # define and set the "deconvolutional"-layer
+ if deconv_type=="standard":
+ new_layer = conv_layers.deconv_layer(
+ input_channels, output_channels if layer_id for keeping track of intermediate hidden (pre-)activations
+ if return_lists:
+ hidden_act_list = []
+ pre_act_list = []
+ # Sequentially pass [x] through all "deconv"-layers
+ if self.depth>0:
+ for layer_id in range(skip_first+1, self.depth+1-skip_last):
+ (x, pre_act) = getattr(self, 'deconvLayer{}'.format(layer_id))(x, return_pa=True)
+ if return_lists:
+ pre_act_list.append(pre_act) #-> for each layer, store pre-activations
+ if layer_id<(self.depth-skip_last):
+ hidden_act_list.append(x) #-> for all but last layer, store hidden activations
+ # Return final [x], if requested along with [hidden_act_list] and [pre_act_list]
+ return (x, hidden_act_list, pre_act_list) if return_lists else x
+
+ def image_size(self, in_units):
+ '''Given the number of units fed in, return the size of the target image.'''
+ if self.depth>0:
+ input_image_size = np.sqrt(in_units/self.in_channels) #-> size of image fed to last layer (seen from image)
+ return input_image_size * 2**self.rl
+ else:
+ return np.sqrt(in_units/self.image_channels)
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ for layer_id in range(1, self.depth+1):
+ list += getattr(self, 'deconvLayer{}'.format(layer_id)).list_init_layers()
+ return list
+
+ @property
+ def name(self):
+ return self.label
diff --git a/PyTorch/build-in/other/continual-learning/models/define_models.py b/PyTorch/build-in/other/continual-learning/models/define_models.py
new file mode 100644
index 000000000..fbec558c5
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/define_models.py
@@ -0,0 +1,299 @@
+import utils
+from utils import checkattr
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+def define_classifier(args, config, device, depth=0, stream=False):
+ if checkattr(args, 'separate_networks'):
+ model = define_separate_classifiers(args=args, config=config, device=device, depth=depth)
+ elif checkattr(args, 'feedback'):
+ model = define_rtf_classifier(args=args, config=config, device=device, depth=depth)
+ elif checkattr(args, 'gen_classifier'):
+ model = define_generative_classifer(args=args, config=config, device=device, depth=depth)
+ elif stream:
+ model = define_stream_classifier(args=args, config=config, device=device, depth=depth)
+ else:
+ model = define_standard_classifier(args=args, config=config, device=device, depth=depth)
+ return model
+
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining discriminative classifier model
+def define_stream_classifier(args, config, device, depth=0):
+ '''Define and return a discriminative classifier (stream-variant), moved to [device].
+
+ [config] supplies dataset properties ('size', 'channels', 'output_units');
+ if [depth]>0, convolutional layers (configured from [args]) precede the fc-layers.'''
+ # Import required model
+ from models.classifier_stream import Classifier
+ # Specify model
+ model = Classifier(
+ image_size=config['size'],
+ image_channels=config['channels'],
+ classes=config['output_units'],
+ # -conv-layers (options only relevant when [depth]>0)
+ depth=depth,
+ conv_type=args.conv_type if depth > 0 else None,
+ start_channels=args.channels if depth > 0 else None,
+ reducing_layers=args.rl if depth > 0 else None,
+ num_blocks=args.n_blocks if depth > 0 else None,
+ conv_bn=(True if args.conv_bn == "yes" else False) if depth > 0 else None,
+ conv_nl=args.conv_nl if depth > 0 else None,
+ no_fnl=True if depth > 0 else None,
+ global_pooling=checkattr(args, 'gp') if depth > 0 else None,
+ # -fc-layers
+ fc_layers=args.fc_lay,
+ fc_units=args.fc_units,
+ fc_drop=args.fc_drop,
+ fc_bn=True if args.fc_bn == "yes" else False,
+ fc_nl=args.fc_nl,
+ excit_buffer=True,
+ # -phantom=True when using the KFAC Fisher approximation (args.fisher_kfac)
+ phantom=checkattr(args, 'fisher_kfac'),
+ # -how to use context-ID
+ xdg_prob=args.gating_prop if checkattr(args, 'xdg') else 0.,
+ n_contexts=args.contexts,
+ multihead=((args.scenario=='task') and not checkattr(args, 'singlehead')),
+ device=device
+ ).to(device)
+ # Return model
+ return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining discriminative classifier model
+def define_standard_classifier(args, config, device, depth=0):
+ '''Define and return a standard discriminative classifier, moved to [device].
+
+ [config] supplies dataset properties ('size', 'channels', 'output_units');
+ if [depth]>0, convolutional layers (configured from [args]) precede the fc-layers.'''
+ # Import required model
+ from models.classifier import Classifier
+ # Specify model
+ model = Classifier(
+ image_size=config['size'],
+ image_channels=config['channels'],
+ classes=config['output_units'],
+ # -conv-layers (options only relevant when [depth]>0)
+ depth=depth,
+ conv_type=args.conv_type if depth>0 else None,
+ start_channels=args.channels if depth>0 else None,
+ reducing_layers=args.rl if depth>0 else None,
+ num_blocks=args.n_blocks if depth>0 else None,
+ conv_bn=(True if args.conv_bn=="yes" else False) if depth>0 else None,
+ conv_nl=args.conv_nl if depth>0 else None,
+ no_fnl=True if depth>0 else None,
+ global_pooling=checkattr(args, 'gp') if depth>0 else None,
+ # -fc-layers
+ fc_layers=args.fc_lay,
+ fc_units=args.fc_units,
+ fc_drop=args.fc_drop,
+ fc_bn=True if args.fc_bn=="yes" else False,
+ fc_nl=args.fc_nl,
+ excit_buffer=True,
+ # -phantom=True when using the KFAC Fisher approximation (args.fisher_kfac)
+ phantom=checkattr(args, 'fisher_kfac')
+ ).to(device)
+ # Return model
+ return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining 'replay-through-feedback' model
+def define_rtf_classifier(args, config, device, depth=0):
+ '''Define and return a 'replay-through-feedback' model (a conditional VAE with built-in
+ classifier), moved to [device].
+
+ Options are mostly taken from [args]; attributes that may be absent are read via
+ hasattr()-checks and fall back to defaults.'''
+ # Import required model
+ from models.cond_vae import CondVAE
+ # Specify model
+ model = CondVAE(
+ image_size=config['size'],
+ image_channels=config['channels'],
+ classes=config['output_units'],
+ # -conv-layers (options only relevant when [depth]>0)
+ depth=depth,
+ conv_type=args.conv_type if depth > 0 else None,
+ start_channels=args.channels if depth > 0 else None,
+ reducing_layers=args.rl if depth > 0 else None,
+ num_blocks=args.n_blocks if depth > 0 else None,
+ conv_bn=(True if args.conv_bn == "yes" else False) if depth > 0 else None,
+ conv_nl=args.conv_nl if depth > 0 else None,
+ global_pooling=checkattr(args, 'gp') if depth > 0 else None,
+ # -fc-layers
+ fc_layers=args.fc_lay,
+ fc_units=args.fc_units,
+ fc_drop=args.fc_drop,
+ fc_bn=(args.fc_bn=="yes"),
+ fc_nl=args.fc_nl,
+ excit_buffer=True,
+ # -prior
+ prior=args.prior if hasattr(args, "prior") else "standard",
+ n_modes=args.n_modes if hasattr(args, "prior") else 1,
+ per_class=args.per_class if hasattr(args, "prior") else False,
+ z_dim=args.z_dim,
+ # -decoder
+ recon_loss=args.recon_loss,
+ network_output="none" if checkattr(args, "normalize") else "sigmoid",
+ deconv_type=args.deconv_type if hasattr(args, "deconv_type") else "standard",
+ dg_gates=checkattr(args, 'dg_gates'),
+ dg_type=args.dg_type if hasattr(args, 'dg_type') else "context",
+ dg_prop=args.dg_prop if hasattr(args, 'dg_prop') else 0.,
+ contexts=args.contexts if hasattr(args, 'contexts') else None,
+ scenario=args.scenario if hasattr(args, 'scenario') else None, device=device,
+ # -classifier
+ classifier=True,
+ ).to(device)
+ # -return model
+ return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining classifier model with separate network per context
def define_separate_classifiers(args, config, device, depth=0):
    '''Define and return a model with a separate classification network per context.

    Args:
        args:   parsed command-line arguments with the model's hyper-parameters
        config: dict describing the data (must contain 'size', 'channels' and 'classes_per_context')
        device: torch-device to move the model to
        depth:  number of conv-layers; if 0, all conv-related options are passed as None

    Returns:
        SeparateClassifiers-object (moved to [device])'''
    # Import required model
    from models.separate_classifiers import SeparateClassifiers
    # Specify model
    model = SeparateClassifiers(
        image_size=config['size'],
        image_channels=config['channels'],
        classes_per_context=config['classes_per_context'],
        contexts=args.contexts,
        # -conv-layers
        depth=depth,
        conv_type=args.conv_type if depth>0 else None,
        start_channels=args.channels if depth>0 else None,
        reducing_layers=args.rl if depth>0 else None,
        num_blocks=args.n_blocks if depth>0 else None,
        conv_bn=(args.conv_bn=="yes") if depth>0 else None,
        conv_nl=args.conv_nl if depth>0 else None,
        no_fnl=True if depth>0 else None,
        global_pooling=checkattr(args, 'gp') if depth>0 else None,
        # -fc-layers
        fc_layers=args.fc_lay,
        fc_units=args.fc_units,
        fc_drop=args.fc_drop,
        fc_bn=(args.fc_bn=="yes"),
        fc_nl=args.fc_nl,
        excit_buffer=True,
    ).to(device)
    # -return model
    return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining generative classifier (with separate VAE per class)
def define_generative_classifer(args, config, device, depth=0):
    '''Define and return a generative classifier (with a separate VAE per class).

    NOTE: the function name contains a legacy typo ("classifer"); it is deliberately kept,
    since callers elsewhere in the project use this exact name.

    Args:
        args:   parsed command-line arguments with the model's hyper-parameters
        config: dict describing the data (must contain 'size', 'channels' and 'classes')
        device: torch-device to move the model to
        depth:  number of conv-layers; if 0, all conv-related options are passed as None

    Returns:
        GenerativeClassifier-object (moved to [device])'''
    # Import required model
    from models.generative_classifier import GenerativeClassifier
    # Specify model
    model = GenerativeClassifier(
        image_size=config['size'],
        image_channels=config['channels'],
        classes=config['classes'],
        # -conv-layers
        depth=depth,
        conv_type=args.conv_type if depth>0 else None,
        start_channels=args.channels if depth>0 else None,
        reducing_layers=args.rl if depth>0 else None,
        num_blocks=args.n_blocks if depth>0 else None,
        conv_bn=(args.conv_bn=="yes") if depth>0 else None,
        conv_nl=args.conv_nl if depth>0 else None,
        no_fnl=True if depth>0 else None,
        global_pooling=checkattr(args, 'gp') if depth>0 else None,
        # -fc-layers
        fc_layers=args.fc_lay,
        fc_units=args.fc_units,
        fc_drop=args.fc_drop,
        fc_bn=(args.fc_bn=="yes"),
        fc_nl=args.fc_nl,
        excit_buffer=True,
        # -prior
        prior=args.prior if hasattr(args, "prior") else "standard",
        n_modes=args.n_modes if hasattr(args, "prior") else 1,
        z_dim=args.z_dim,
        # -decoder
        recon_loss=args.recon_loss,
        network_output="none" if checkattr(args, "normalize") else "sigmoid",
        deconv_type=args.deconv_type if hasattr(args, "deconv_type") else "standard",
    ).to(device)
    # Return model
    return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining feature extractor model
def define_feature_extractor(args, config, device):
    '''Define and return a convolutional feature-extractor model.

    Args:
        args:   parsed command-line arguments with the conv-layer hyper-parameters
        config: dict describing the data (must contain 'size' and 'channels')
        device: torch-device to move the model to

    Returns:
        FeatureExtractor-object (moved to [device])'''
    # -import required model
    from models.feature_extractor import FeatureExtractor
    # -create model
    model = FeatureExtractor(
        image_size=config['size'],
        image_channels=config['channels'],
        # -conv-layers
        conv_type=args.conv_type,
        depth=args.depth,
        start_channels=args.channels,
        reducing_layers=args.rl,
        num_blocks=args.n_blocks,
        conv_bn=(args.conv_bn=="yes"),
        conv_nl=args.conv_nl,
        global_pooling=checkattr(args, 'gp'),
    ).to(device)
    # -return model
    return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for defining VAE model
def define_vae(args, config, device, depth=0):
    '''Define and return a variational auto-encoder (VAE).

    Args:
        args:   parsed command-line arguments with the model's hyper-parameters
        config: dict describing the data (must contain 'size' and 'channels')
        device: torch-device to move the model to
        depth:  number of conv-layers; if 0, all conv-related options are passed as None

    Returns:
        VAE-object (moved to [device])'''
    # Import required model
    from models.vae import VAE
    # Specify model
    model = VAE(
        image_size=config['size'],
        image_channels=config['channels'],
        # -conv-layers
        depth=depth,
        conv_type=args.conv_type if depth > 0 else None,
        start_channels=args.channels if depth > 0 else None,
        reducing_layers=args.rl if depth > 0 else None,
        num_blocks=args.n_blocks if depth > 0 else None,
        conv_bn=(args.conv_bn == "yes") if depth > 0 else None,
        conv_nl=args.conv_nl if depth > 0 else None,
        # NOTE(review): unlike the classifier-factories, global pooling is always disabled
        # here (not read from [args.gp]) -- confirm this is intended
        global_pooling=False if depth > 0 else None,
        # -fc-layers (use generator-specific settings, if available)
        fc_layers=args.g_fc_lay if hasattr(args, 'g_fc_lay') else args.fc_lay,
        fc_units=args.g_fc_uni if hasattr(args, 'g_fc_uni') else args.fc_units,
        fc_drop=0,
        fc_bn=(args.fc_bn=="yes"),
        fc_nl=args.fc_nl,
        excit_buffer=True,
        # -prior
        prior=args.prior if hasattr(args, "prior") else "standard",
        n_modes=args.n_modes if hasattr(args, "prior") else 1,
        z_dim=args.g_z_dim if hasattr(args, 'g_z_dim') else args.z_dim,
        # -decoder
        recon_loss=args.recon_loss,
        network_output="none" if checkattr(args, "normalize") else "sigmoid",
        deconv_type=args.deconv_type if hasattr(args, "deconv_type") else "standard",
    ).to(device)
    # Return model
    return model
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+## Function for (re-)initializing the parameters of [model]
def init_params(model, args, verbose=False):
    '''(Re-)initialize the parameters of [model], optionally loading & freezing pre-trained conv-layers.

    Args:
        model:   the model whose parameters to (re-)initialize (may have attributes .convE and .depth)
        args:    parsed command-line arguments controlling initialization, pre-training and freezing
        verbose: whether checkpoint-loading should print a confirmation message'''

    ## Initialization
    # - reinitialize all parameters according to default initialization
    model.apply(utils.weight_reset)
    # - initialize parameters according to chosen custom initialization (if requested)
    # NOTE(review): any value of [args.init_weight] other than "standard" results in "xavier_normal";
    # the specific value is not passed on -- confirm this is intended
    if hasattr(args, 'init_weight') and not args.init_weight=="standard":
        utils.weight_init(model, strategy="xavier_normal")
    # NOTE(review): likewise, any non-"standard" value of [args.init_bias] gives constant bias 0.01
    if hasattr(args, 'init_bias') and not args.init_bias=="standard":
        utils.bias_init(model, strategy="constant", value=0.01)

    ## Use pre-training
    if checkattr(args, "pre_convE") and hasattr(model, 'depth') and model.depth>0:
        # -name of the checkpoint to load: the conv-layers' own name, optionally extended with a
        #  load-tag and (if requested) the random seed
        load_name = model.convE.name if (
            not hasattr(args, 'convE_ltag') or args.convE_ltag=="none"
        ) else "{}-{}{}".format(model.convE.name, args.convE_ltag,
                                "-s{}".format(args.seed) if checkattr(args, 'seed_to_ltag') else "")
        utils.load_checkpoint(model.convE, model_dir=args.m_dir, name=load_name, verbose=verbose)

    ## Freeze some parameters?
    if checkattr(args, "freeze_convE") and hasattr(model, 'convE'):
        for param in model.convE.parameters():
            param.requires_grad = False
        model.convE.frozen = True #--> so they're set to .eval() during training to ensure batchnorm-params do not change
+
+##-------------------------------------------------------------------------------------------------------------------##
diff --git a/PyTorch/build-in/other/continual-learning/models/fc/__init__.py b/PyTorch/build-in/other/continual-learning/models/fc/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/models/fc/excitability_modules.py b/PyTorch/build-in/other/continual-learning/models/fc/excitability_modules.py
new file mode 100644
index 000000000..6d14baf96
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/fc/excitability_modules.py
@@ -0,0 +1,91 @@
+import math
+import torch
+from torch import nn
+from torch.nn.parameter import Parameter
+
+
def linearExcitability(input, weight, excitability=None, bias=None):
    '''Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.

    Shape:
        - input: :math:`(N, *, in_features)`
        - weight: :math:`(out_features, in_features)`
        - excitability: :math:`(out_features)`
        - bias: :math:`(out_features)`
        - output: :math:`(N, *, out_features)`
        (NOTE: `*` means any number of additional dimensions)'''

    # -basic linear map
    transformed = input.matmul(weight.t())
    # -optionally scale each output unit by its excitability
    if excitability is not None:
        transformed = transformed * excitability
    # -optionally add the bias
    if bias is None:
        return transformed
    return transformed + bias
+
+
class LinearExcitability(nn.Module):
    '''Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
        excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
        excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)

    Shape:
        - input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
        - output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape (out_features x in_features)
        excitability: the learnable multiplication terms (out_features)
        bias: the learnable bias of the module of shape (out_features)
        excit_buffer: fixed multiplication variable (out_features)'''

    def __init__(self, in_features, out_features, bias=True, excitability=False, excit_buffer=False):
        super(LinearExcitability, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # -parameters are allocated uninitialized here; [reset_parameters] fills them below
        self.weight = Parameter(torch.empty(out_features, in_features))
        if excitability:
            self.excitability = Parameter(torch.empty(out_features))
        else:
            self.register_parameter('excitability', None)
        if bias:
            self.bias = Parameter(torch.empty(out_features))
        else:
            self.register_parameter('bias', None)
        if excit_buffer:
            # -non-learnable multiplicative term, initialized to 1 (i.e., no effect until set externally)
            self.register_buffer("excit_buffer", torch.ones(out_features))
        else:
            self.register_buffer("excit_buffer", None)
        self.reset_parameters()

    def reset_parameters(self):
        '''Modifies the parameters "in-place" to initialize / reset them at appropriate values.'''
        # -fan-in based uniform initialization (same scheme as torch.nn.Linear's historical default)
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.excitability is not None:
            # -excitability starts out as exactly 1 (i.e., initially a standard linear layer)
            self.excitability.data.fill_(1.)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        '''Running this model's forward step requires/returns:
        -[input]: [batch_size]x[...]x[in_features]
        -[output]: [batch_size]x[...]x[hidden_features]'''
        # -combine learnable excitability and fixed buffer (elementwise product if both are present)
        if self.excit_buffer is None:
            excitability = self.excitability
        elif self.excitability is None:
            excitability = self.excit_buffer
        else:
            excitability = self.excitability*self.excit_buffer
        return linearExcitability(input, self.weight, excitability, self.bias)

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + 'in_features=' + str(self.in_features) \
               + ', out_features=' + str(self.out_features) + ')'
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/fc/layers.py b/PyTorch/build-in/other/continual-learning/models/fc/layers.py
new file mode 100644
index 000000000..d201b924d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/fc/layers.py
@@ -0,0 +1,152 @@
+import numpy as np
+import torch
+from torch import nn
+from models.utils import modules
+from models.fc import excitability_modules as em
+
+
class fc_layer(nn.Module):
    '''Fully connected layer, with possibility of returning "pre-activations".

    Input: [batch_size] x ... x [in_size] tensor
    Output: [batch_size] x ... x [out_size] tensor'''

    def __init__(self, in_size, out_size, nl=nn.ReLU(), drop=0., bias=True, batch_norm=False,
                 excitability=False, excit_buffer=False, gated=False, phantom=False):
        super().__init__()
        # -with batch-norm, a separate additive bias would be redundant
        self.bias = bias if not batch_norm else False
        if drop > 0:
            self.dropout = nn.Dropout(drop)
        self.linear = em.LinearExcitability(in_size, out_size, bias=self.bias,
                                            excitability=excitability, excit_buffer=excit_buffer)
        if batch_norm:
            self.bn = nn.BatchNorm1d(out_size)
        if gated:
            # -learnable gate, controlled by the same input that goes through the layer
            self.gate = nn.Linear(in_size, out_size)
            self.sigmoid = nn.Sigmoid()
        if phantom:
            # -zero-valued parameters added to the pre-activations (used for KFAC Fisher computation)
            self.phantom = nn.Parameter(torch.zeros(out_size), requires_grad=True)
        # -select the non-linearity ("none" means no [nl]-attribute is set at all)
        if isinstance(nl, nn.Module):
            self.nl = nl
        elif nl == "relu":
            self.nl = nn.ReLU()
        elif nl == "leakyrelu":
            self.nl = nn.LeakyReLU()
        elif nl != "none":
            self.nl = modules.Identity()

    def forward(self, x, return_pa=False, **kwargs):
        '''Run the layer; if [return_pa], also return the (gated) pre-activations.'''
        hidden = self.dropout(x) if hasattr(self, 'dropout') else x
        pre_activ = self.linear(hidden)
        if hasattr(self, 'bn'):
            pre_activ = self.bn(pre_activ)
        if hasattr(self, 'gate'):
            pre_activ = self.sigmoid(self.gate(x)) * pre_activ
        if hasattr(self, 'phantom'):
            pre_activ = pre_activ + self.phantom
        output = self.nl(pre_activ) if hasattr(self, 'nl') else pre_activ
        return (output, pre_activ) if return_pa else output

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        modules_to_init = [self.linear]
        if hasattr(self, 'gate'):
            modules_to_init.append(self.gate)
        return modules_to_init
+
+
+
class fc_layer_split(nn.Module):
    '''Fully connected layer outputting [mean] and [logvar] for each unit.

    Input: [batch_size] x ... x [in_size] tensor
    Output: tuple with two [batch_size] x ... x [out_size] tensors'''

    def __init__(self, in_size, out_size, nl_mean=nn.Sigmoid(), nl_logvar=nn.Hardtanh(min_val=-4.5, max_val=0.),
                 drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, gated=False):
        super().__init__()
        # -options shared by both sub-layers
        shared_options = dict(drop=drop, excitability=excitability, excit_buffer=excit_buffer,
                              batch_norm=batch_norm, gated=gated)
        # -sub-layer computing the mean of each output unit
        self.mean = fc_layer(in_size, out_size, nl=nl_mean, bias=bias, **shared_options)
        # -sub-layer computing the log-variance of each output unit (never has an additive bias)
        self.logvar = fc_layer(in_size, out_size, nl=nl_logvar, bias=False, **shared_options)

    def forward(self, x):
        '''Return tuple ([mean], [logvar]), each of shape [batch_size] x ... x [out_size].'''
        return (self.mean(x), self.logvar(x))

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        return self.mean.list_init_layers() + self.logvar.list_init_layers()
+
+
+
class fc_layer_fixed_gates(nn.Module):
    '''Fully connected layer, with possibility of returning "pre-activations". Has fixed gates (of specified dimension).

    Input: [batch_size] x ... x [in_size] tensor & [batch_size] x ... x [gate_size] tensor
    Output: [batch_size] x ... x [out_size] tensor'''

    def __init__(self, in_size, out_size, nl=nn.ReLU(),
                 drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False,
                 gate_size=0, gating_prop=0.8, device='cpu'):
        super().__init__()
        if drop > 0:
            self.dropout = nn.Dropout(drop)
        # -with batch-norm, a separate additive bias would be redundant
        self.linear = em.LinearExcitability(in_size, out_size, bias=False if batch_norm else bias,
                                            excitability=excitability, excit_buffer=excit_buffer)
        if batch_norm:
            self.bn = nn.BatchNorm1d(out_size)
        if gate_size>0:
            # -fixed (non-learnable) binary mask: each entry is 0 with probability [gating_prop],
            #  so [gating_prop] is the expected fraction of connections that is gated off
            self.gate_mask = torch.tensor(
                np.random.choice([0., 1.], size=(gate_size, out_size), p=[gating_prop, 1.-gating_prop]),
                dtype=torch.float, device=device
            )
        if isinstance(nl, nn.Module):
            self.nl = nl
        elif not nl == "none":
            self.nl = nn.ReLU() if nl == "relu" else (nn.LeakyReLU() if nl == "leakyrelu" else modules.Identity())

    def forward(self, x, gate_input=None, return_pa=False):
        '''Run the layer; if a [gate_mask] is set, [gate_input] must be provided to compute the gates.'''
        input = self.dropout(x) if hasattr(self, 'dropout') else x
        pre_activ = self.bn(self.linear(input)) if hasattr(self, 'bn') else self.linear(input)
        gate = torch.matmul(gate_input, self.gate_mask) if hasattr(self, 'gate_mask') else None
        gated_pre_activ = gate * pre_activ if hasattr(self, 'gate_mask') else pre_activ
        output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
        return (output, gated_pre_activ) if return_pa else output

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        # NOTE: removed a dead copy-paste branch that checked for a never-existing 'gate' attribute;
        # this layer's [gate_mask] is a fixed tensor and holds no parameters to initialize
        return [self.linear]
+
+
+
class fc_multihead_layer(nn.Module):
    '''Fully connected layer with a separate head for each context.

    Input: [batch_size] x ... x [in_size] tensor & [batch_size] x ... x [n_contexts] tensor
    Output: [batch_size] x ... x [out_size] tensor'''

    def __init__(self, in_size, classes, n_contexts, nl=nn.ReLU(),
                 drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, device='cpu'):
        super().__init__()
        if drop > 0:
            self.dropout = nn.Dropout(drop)
        # -with batch-norm, a separate additive bias would be redundant
        self.linear = em.LinearExcitability(in_size, classes, bias=False if batch_norm else bias,
                                            excitability=excitability, excit_buffer=excit_buffer)
        if batch_norm:
            self.bn = nn.BatchNorm1d(classes)
        if n_contexts > 0:
            # -fixed binary mask selecting, per context, the block of output units ("head") belonging to it
            #  (assumes [classes] is divisible by [n_contexts]; any remainder units are never gated on)
            self.gate_mask = torch.zeros(size=(n_contexts, classes), dtype=torch.float, device=device)
            classes_per_context = classes // n_contexts
            for context_id in range(n_contexts):
                self.gate_mask[context_id, (context_id * classes_per_context):((context_id + 1) * classes_per_context)] = 1.
        if isinstance(nl, nn.Module):
            self.nl = nl
        elif not nl == "none":
            self.nl = nn.ReLU() if nl == "relu" else (nn.LeakyReLU() if nl == "leakyrelu" else modules.Identity())

    def forward(self, x, gate_input=None, return_pa=False):
        '''Run the layer; [gate_input] (e.g., a one-hot context vector) selects the active head(s).'''
        input = self.dropout(x) if hasattr(self, 'dropout') else x
        pre_activ = self.bn(self.linear(input)) if hasattr(self, 'bn') else self.linear(input)
        gate = torch.matmul(gate_input, self.gate_mask) if hasattr(self, 'gate_mask') else None
        gated_pre_activ = gate * pre_activ if hasattr(self, 'gate_mask') else pre_activ
        output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
        return (output, gated_pre_activ) if return_pa else output

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        # NOTE: removed a dead copy-paste branch that checked for a never-existing 'gate' attribute;
        # this layer's [gate_mask] is a fixed tensor and holds no parameters to initialize
        return [self.linear]
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/fc/nets.py b/PyTorch/build-in/other/continual-learning/models/fc/nets.py
new file mode 100644
index 000000000..810afe333
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/fc/nets.py
@@ -0,0 +1,219 @@
+from torch import nn
+import numpy as np
+from models.utils.modules import Identity
+from models.fc.layers import fc_layer, fc_layer_fixed_gates
+
+
class MLP(nn.Module):
    '''Module for a multi-layer perceptron (MLP).

    Input: [batch_size] x ... x [size_per_layer[0]] tensor
    Output: (tuple of) [batch_size] x ... x [size_per_layer[-1]] tensor'''

    def __init__(self, input_size=1000, output_size=10, layers=2, hid_size=1000, hid_smooth=None, size_per_layer=None,
                 drop=0, batch_norm=False, nl="relu", bias=True, excitability=False, excit_buffer=False, gated=False,
                 phantom=False, output='normal'):
        '''sizes: 0th=[input], 1st=[hid_size], ..., 1st-to-last=[hid_smooth], last=[output].
        [input_size]       # of inputs
        [output_size]      # of units in final layer
        [layers]           # of layers
        [hid_size]         # of units in each hidden layer
        [hid_smooth]       if None, all hidden layers have [hid_size] units, else # of units linearly in-/decreases s.t.
                             final hidden layer has [hid_smooth] units (if only 1 hidden layer, it has [hid_size] units)
        [size_per_layer]   None or <list> with for each layer number of units (1st element = number of inputs)
                             --> overwrites [input_size], [output_size], [layers], [hid_size] and [hid_smooth]
        [drop]             % of each layer's inputs that is randomly set to zero during training
        [batch_norm]       <bool>; if True, batch-normalization is applied to each layer
        [nl]               <str>; type of non-linearity to be used (options: "relu", "leakyrelu", "none")
        [gated]            <bool>; if True, each linear layer has an additional learnable gate
                             (whereby the gate is controlled by the same input as that goes through the gate)
        [phantom]          <bool>; if True, add phantom parameters to pre-activations, used for computing KFAC Fisher
        [output]           <str>; if - "normal", final layer is same as all others
                                     - "none", final layer has no non-linearity
                                     - "sigmoid", final layer has sigmoid non-linearity'''

        super().__init__()
        self.output = output

        # get sizes of all layers
        if size_per_layer is None:
            hidden_sizes = []
            if layers > 1:
                if (hid_smooth is not None):
                    # -linearly interpolate hidden-layer sizes from [hid_size] down/up to [hid_smooth]
                    hidden_sizes = [int(x) for x in np.linspace(hid_size, hid_smooth, num=layers-1)]
                else:
                    hidden_sizes = [int(x) for x in np.repeat(hid_size, layers - 1)]
            size_per_layer = [input_size] + hidden_sizes + [output_size] if layers>0 else [input_size]
        self.layers = len(size_per_layer)-1

        # set label for this module
        # -determine "non-default options"-label
        # NOTE: this label is used in checkpoint-names, so its exact format is behavior-relevant
        nd_label = "{drop}{bias}{exc}{bn}{nl}{gate}".format(
            drop="" if drop==0 else "d{}".format(drop),
            bias="" if bias else "n", exc="e" if excitability else "", bn="b" if batch_norm else "",
            nl="l" if nl=="leakyrelu" else ("n" if nl=="none" else ""), gate="g" if gated else "",
        )
        nd_label = "{}{}".format("" if nd_label=="" else "-{}".format(nd_label),
                                 "" if output=="normal" else "-{}".format(output))
        # -set label
        size_statement = ""
        for i in size_per_layer:
            size_statement += "{}{}".format("-" if size_statement=="" else "x", i)
        self.label = "F{}{}".format(size_statement, nd_label) if self.layers>0 else ""

        # set layers
        for lay_id in range(1, self.layers+1):
            # number of units of this layer's input and output
            in_size = size_per_layer[lay_id-1]
            out_size = size_per_layer[lay_id]
            # define and set the fully connected layer
            # (for the final layer, batch-norm is dropped and the non-linearity follows [output];
            #  dropout is never applied to the network's raw inputs)
            layer = fc_layer(
                in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer,
                batch_norm=False if (lay_id==self.layers and not output=="normal") else batch_norm, gated=gated,
                nl=("none" if output=="none" else nn.Sigmoid()) if (
                    lay_id==self.layers and not output=="normal"
                ) else nl, drop=drop if lay_id>1 else 0., phantom=phantom
            )
            setattr(self, 'fcLayer{}'.format(lay_id), layer)

        # if no layers, add "identity"-module to indicate in this module's representation nothing happens
        if self.layers<1:
            self.noLayers = Identity()

    def forward(self, x, return_intermediate=False):
        # -if requested, [intermediate] maps each layer's name to the input that layer received
        if return_intermediate:
            intermediate = {}
        for lay_id in range(1, self.layers + 1):
            if return_intermediate:
                intermediate[f"fcLayer{lay_id}"] = x
            x = getattr(self, "fcLayer{}".format(lay_id))(x)
        return (x, intermediate) if return_intermediate else x

    @property
    def name(self):
        # -the label doubles as this module's name (e.g., for checkpoint files)
        return self.label

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        list = []
        for layer_id in range(1, self.layers+1):
            list += getattr(self, 'fcLayer{}'.format(layer_id)).list_init_layers()
        return list
+
+
+
class MLP_gates(nn.Module):
    '''Module for a multi-layer perceptron (MLP). Possible to return (pre)activations of each layer.
    Also possible to supply a [skip_first]- or [skip_last]-argument to the forward-function to only pass certain layers.
    With gates controlled by [gate_input] (of size [gate_size]) with a randomly selected mask (prop=[gating_prop]).

    Input: [batch_size] x ... x [size_per_layer[0]] tensor & [batch_size] x [gate_size]
    Output: (tuple of) [batch_size] x ... x [size_per_layer[-1]] tensor'''

    def __init__(self, input_size=1000, output_size=10, layers=2, hid_size=1000, hid_smooth=None, size_per_layer=None,
                 drop=0, batch_norm=False, nl="relu", bias=True, excitability=False, excit_buffer=False, gate_size=0,
                 gating_prop=0., final_gate=False, output='normal', device='cpu'):
        '''sizes: 0th=[input], 1st=[hid_size], ..., 1st-to-last=[hid_smooth], last=[output].
        [input_size]       # of inputs
        [output_size]      # of units in final layer
        [layers]           # of layers
        [hid_size]         # of units in each hidden layer
        [hid_smooth]       if None, all hidden layers have [hid_size] units, else # of units linearly in-/decreases s.t.
                             final hidden layer has [hid_smooth] units (if only 1 hidden layer, it has [hid_size] units)
        [size_per_layer]   None or <list> with for each layer number of units (1st element = number of inputs)
                             --> overwrites [input_size], [output_size], [layers], [hid_size] and [hid_smooth]
        [drop]             % of each layer's inputs that is randomly set to zero during training
        [batch_norm]       <bool>; if True, batch-normalization is applied to each layer
        [nl]               <str>; type of non-linearity to be used (options: "relu", "leakyrelu", "none")
        [gate_size]        <int>; if>0, each linear layer has gate controlled by separate inputs of size [gate_size]
        [gating_prop]      <float>; probability for each unit to be gated
        [final_gate]       <bool>; whether final layer is allowed to have a gate
        [output]           <str>; if - "normal", final layer is same as all others
                                     - "none", final layer has no non-linearity
                                     - "sigmoid", final layer has sigmoid non-linearity'''

        super().__init__()
        self.output = output

        # get sizes of all layers
        if size_per_layer is None:
            hidden_sizes = []
            if layers > 1:
                if (hid_smooth is not None):
                    # -linearly interpolate hidden-layer sizes from [hid_size] down/up to [hid_smooth]
                    hidden_sizes = [int(x) for x in np.linspace(hid_size, hid_smooth, num=layers-1)]
                else:
                    hidden_sizes = [int(x) for x in np.repeat(hid_size, layers - 1)]
            size_per_layer = [input_size] + hidden_sizes + [output_size] if layers>0 else [input_size]
        self.layers = len(size_per_layer)-1

        # set label for this module
        # -determine "non-default options"-label
        # NOTE: this label is used in checkpoint-names, so its exact format is behavior-relevant
        nd_label = "{drop}{bias}{exc}{bn}{nl}{gate}".format(
            drop="" if drop==0 else "d{}".format(drop),
            bias="" if bias else "n", exc="e" if excitability else "", bn="b" if batch_norm else "",
            nl="l" if nl=="leakyrelu" else ("n" if nl=="none" else ""),
            gate="g{}m{}".format(gate_size, gating_prop) if (gate_size>0 and gating_prop>0.) else "",
        )
        nd_label = "{}{}".format("" if nd_label=="" else "-{}".format(nd_label),
                                 "" if output=="normal" else "-{}".format(output))
        # -set label
        size_statement = ""
        for i in size_per_layer:
            size_statement += "{}{}".format("-" if size_statement=="" else "x", i)
        self.label = "F{}{}".format(size_statement, nd_label) if self.layers>0 else ""

        # set layers
        for lay_id in range(1, self.layers+1):
            # number of units of this layer's input and output
            in_size = size_per_layer[lay_id-1]
            out_size = size_per_layer[lay_id]
            # define and set the fully connected layer
            # -ungated layer: when gating is disabled, or for the final layer unless [final_gate]
            if (not gate_size>0.) or (not gating_prop>0.) or (lay_id==self.layers and not final_gate):
                layer = fc_layer(
                    in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer,
                    batch_norm=False if (lay_id==self.layers and not output=="normal") else batch_norm,
                    nl=("none" if output=="none" else nn.Sigmoid()) if (
                        lay_id==self.layers and not output=="normal"
                    ) else nl, drop=drop if lay_id>1 else 0.,
                )
            else:
                # -layer whose units are gated by [gate_input] through a fixed random binary mask
                layer = fc_layer_fixed_gates(
                    in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer,
                    batch_norm=False if (lay_id == self.layers and not output == "normal") else batch_norm,
                    gate_size=gate_size, gating_prop=gating_prop, device=device,
                    nl=("none" if output == "none" else nn.Sigmoid()) if (
                        lay_id == self.layers and not output == "normal"
                    ) else nl, drop=drop if lay_id>1 else 0.,
                )
            setattr(self, 'fcLayer{}'.format(lay_id), layer)

        # if no layers, add "identity"-module to indicate in this module's representation nothing happens
        if self.layers<1:
            self.noLayers = Identity()

    def forward(self, x, gate_input=None, skip_first=0, skip_last=0, return_lists=False):
        # Initiate for keeping track of intermediate hidden-(pre)activations
        if return_lists:
            hidden_act_list = []
            pre_act_list = []
        # Sequentially pass [x] through all fc-layers
        # (ungated fc_layer-objects simply ignore [gate_input] via their **kwargs)
        for lay_id in range(skip_first+1, self.layers+1-skip_last):
            (x, pre_act) = getattr(self, 'fcLayer{}'.format(lay_id))(x, gate_input=gate_input, return_pa=True)
            if return_lists:
                pre_act_list.append(pre_act) #-> for each layer, store pre-activations
                if lay_id<(self.layers-skip_last):
                    hidden_act_list.append(x) #-> for all but last layer, store hidden activations
        # Return final [x], if requested along with [hidden_act_list] and [pre_act_list]
        return (x, hidden_act_list, pre_act_list) if return_lists else x


    @property
    def name(self):
        # -the label doubles as this module's name (e.g., for checkpoint files)
        return self.label

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        list = []
        for layer_id in range(1, self.layers+1):
            list += getattr(self, 'fcLayer{}'.format(layer_id)).list_init_layers()
        return list
diff --git a/PyTorch/build-in/other/continual-learning/models/feature_extractor.py b/PyTorch/build-in/other/continual-learning/models/feature_extractor.py
new file mode 100644
index 000000000..a7b0919d9
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/feature_extractor.py
@@ -0,0 +1,122 @@
+import tqdm
+import torch
+from torch import optim
+from torch.nn import functional as F
+from models.utils import modules
+from models.conv.nets import ConvLayers
+from models.fc.layers import fc_layer
+
+
class FeatureExtractor(torch.nn.Module):
    '''Model for encoding (i.e., feature extraction of) images.'''

    def __init__(self, image_size, image_channels,
                 # -conv-layers
                 conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
                 num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False):

        # Model configurations
        super().__init__()
        self.label = "FeatureExtractor"
        self.depth = depth

        # Optimizer (needs to be set before training starts)
        self.optim_type = None
        self.optimizer = None
        self.optim_list = []

        ######------SPECIFY MODEL------######
        #--> convolutional layers
        self.convE = ConvLayers(
            conv_type=conv_type, block_type="basic", num_blocks=num_blocks, image_channels=image_channels,
            depth=depth, start_channels=start_channels, reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
            global_pooling=global_pooling, gated=conv_gated, output="none" if no_fnl else "normal",
        )
        #------------------------------calculate input/output-sizes--------------------------------#
        self.conv_out_units = self.convE.out_units(image_size)
        self.conv_out_size = self.convE.out_size(image_size)
        self.conv_out_channels = self.convE.out_channels

    @property
    def name(self):
        # -the name of this feature extractor is that of its convolutional layers
        return self.convE.name

    def _device(self):
        '''Return the device this model's parameters are on.'''
        return next(self.parameters()).device

    def _is_on_cuda(self):
        '''Return whether this model's parameters are on a CUDA-device.'''
        return next(self.parameters()).is_cuda

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        list = self.convE.list_init_layers()
        return list

    def forward(self, x):
        # -extract features by passing [x] through the convolutional layers
        return self.convE(x)

    def train_discriminatively(self, train_loader, iters, classes, lr=0.001, optimizer='adam'):
        '''Train the feature extractor for [iters] iterations on data from [train_loader].

        [model]         model to optimize
        [train_loader]  <dataloader> for training [model] on
        [iters]         <int> (max) number of iterations (i.e., batches) to train for
        [classes]       <int> number of possible classes (softmax layer with that many units will be added to model)
        '''

        # Create (temporary) classification output layer
        self.flatten = modules.Flatten()
        self.classifier = fc_layer(self.conv_out_units, classes, excit_buffer=True, nl='none').to(self._device())

        # Define optimizer
        optim_list = [{'params': filter(lambda p: p.requires_grad, self.parameters()), 'lr': lr},]
        self.optimizer = optim.SGD(optim_list) if optimizer=="sgd" else optim.Adam(optim_list, betas=(0.9, 0.999))

        # Set model to training-mode
        self.train()

        # Create progress-bar (with manual control)
        bar = tqdm.tqdm(total=iters)

        iteration = epoch = 0
        # NOTE(review): if [train_loader] yields no batches, this loop never terminates -- confirm
        # callers always pass a non-empty loader
        while iteration < iters:
            epoch += 1

            # Loop over all batches of an epoch
            for batch_idx, (data, y) in enumerate(train_loader):
                iteration += 1

                # Reset optimizer
                self.optimizer.zero_grad()

                # Prepare data
                data, y = data.to(self._device()), y.to(self._device())

                # Run model
                features = self(data)
                y_hat = self.classifier(self.flatten(features))

                # Calculate loss
                loss = F.cross_entropy(input=y_hat, target=y, reduction='mean')

                # Calculate training-accuracy
                # NOTE(review): if [y] were ever None, formatting [accuracy] below would fail;
                # presumably the dataloader always provides labels here -- confirm
                accuracy = None if y is None else (y == y_hat.max(1)[1]).sum().item() / data.size(0)

                # Backpropagate errors
                loss.backward()

                # Take optimization-step
                self.optimizer.step()

                # Update progress bar
                bar.set_description(
                    ' | training loss: {loss:.3} | training accuracy: {prec:.3} |'.format(
                        loss=loss.cpu().item(), prec=accuracy
                    )
                )
                bar.update(1)

                # Break if max-number of iterations is reached
                if iteration == iters:
                    bar.close()
                    break
diff --git a/PyTorch/build-in/other/continual-learning/models/generative_classifier.py b/PyTorch/build-in/other/continual-learning/models/generative_classifier.py
new file mode 100644
index 000000000..d361b9e0d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/generative_classifier.py
@@ -0,0 +1,114 @@
+import numpy as np
+import torch
+from torch import nn
+from models.vae import VAE
+
+
+
+class GenerativeClassifier(nn.Module):
+ """Class for generative classifier with separate VAE for each class to be learned."""
+
+ def __init__(self, image_size, image_channels, classes,
+ # -conv-layers
+ conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
+ num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
+ # -fc-layers
+ fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=False, fc_nl="relu", excit_buffer=False, fc_gated=False,
+ # -prior
+ z_dim=20, prior="standard", n_modes=1,
+ # -decoder
+ recon_loss='BCE', network_output="sigmoid", deconv_type="standard"):
+
+ # Set configurations for setting up the model
+ super().__init__()
+ self.classes = classes
+ self.label = "GenClassifier"
+
+ # Attributes defining how to do inference
+ self.S = "mean" # "mean": use [z_mu] as single (importance) sample; <int>: use that many (importance) samples
+ self.importance = True
+ self.from_latent = False
+
+ # Define a VAE for each class to be learned
+ for class_id in range(classes):
+ new_vae = VAE(image_size, image_channels,
+ # -conv-layers
+ conv_type=conv_type, depth=depth, start_channels=start_channels,
+ reducing_layers=reducing_layers, conv_bn=conv_bn, conv_nl=conv_nl,
+ num_blocks=num_blocks, global_pooling=global_pooling, no_fnl=no_fnl,
+ conv_gated=conv_gated,
+ # -fc-layers
+ fc_layers=fc_layers, fc_units=fc_units, fc_drop=fc_drop, fc_bn=fc_bn,
+ fc_nl=fc_nl, excit_buffer=excit_buffer, fc_gated=fc_gated,
+ # -prior
+ z_dim=z_dim, prior=prior, n_modes=n_modes,
+ # -decoder
+ recon_loss=recon_loss, network_output=network_output, deconv_type=deconv_type)
+ setattr(self, "vae{}".format(class_id), new_vae)
+
+
+ ##------ NAMES --------##
+
+ def get_name(self):
+ return "x{}-{}".format(self.classes, self.vae0.get_name())
+
+ @property
+ def name(self):
+ return self.get_name()
+
+
+ ##------ UTILITIES --------##
+
+ def _device(self):
+ return next(self.parameters()).device
+
+ def _is_on_cuda(self):
+ return next(self.parameters()).is_cuda
+
+
+ ##------ SAMPLE FUNCTIONS --------##
+
+ def sample(self, size, only_x=True, class_id=None, **kwargs):
+ '''Generate [size] samples from the model. Outputs are tensors (not "requiring grad"), on same device.'''
+
+ for sample_id in range(size):
+ # sample from which class-specific VAE to sample
+ selected_class_id = np.random.randint(0, self.classes, 1)[0] if class_id is None else class_id
+ model_to_sample_from = getattr(self, 'vae{}'.format(selected_class_id))
+
+ # sample from that VAE
+ new_sample = model_to_sample_from.sample(1)
+
+ # concatenate generated X (and y)
+ X = torch.cat([X, new_sample], dim=0) if sample_id>0 else new_sample
+ if not only_x:
+ y = torch.cat([y, torch.LongTensor([selected_class_id]).to(self._device())]) if (
+ sample_id>0
+ ) else torch.LongTensor([selected_class_id]).to(self._device())
+
+ # return samples as [size]x[channels]x[image_size]x[image_size] tensor (and labels as [size] tensor)
+ return X if only_x else (X, y)
+
+
+ ##------ CLASSIFICATION FUNCTIONS --------##
+
+ def classify(self, x, allowed_classes=None, **kwargs):
+ '''Given an input [x], get the scores based on [self.S] importance samples (if self.S=='mean', use [z_mu]).
+
+ Input: - [x] <4D-tensor> of shape [batch]x[channels]x[image_size]x[image_size]
+
+ Output: - [scores] <2D-tensor> of shape [batch]x[allowed_classes]
+ '''
+ # If not provided, set [allowed_classes] to all possible classes
+ if allowed_classes is None:
+ allowed_classes = list(range(self.classes))
+ # For each possible class, compute its 'score' (i.e., likelihood of input under generative model of that class)
+ scores = torch.zeros([x.size(0), len(allowed_classes)], dtype=torch.float32, device=self._device())
+ for class_id in allowed_classes:
+ if self.from_latent:
+ scores[:,class_id] = getattr(self, 'vae{}'.format(class_id)).get_latent_lls(x)
+ else:
+ scores[:,class_id] = getattr(self, 'vae{}'.format(class_id)).estimate_lls(
+ x, S=self.S, importance=self.importance
+ )
+ return scores
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/separate_classifiers.py b/PyTorch/build-in/other/continual-learning/models/separate_classifiers.py
new file mode 100644
index 000000000..ab8abae5f
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/separate_classifiers.py
@@ -0,0 +1,85 @@
+from torch import nn
+from models.classifier import Classifier
+
+
+class SeparateClassifiers(nn.Module):
+ '''Model for classifying images with a separate network for each context.'''
+
+ def __init__(self, image_size, image_channels, classes_per_context, contexts,
+ # -conv-layers
+ conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
+ num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
+ # -fc-layers
+ fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=True, fc_nl="relu", fc_gated=False,
+ bias=True, excitability=False, excit_buffer=False):
+
+ # Configurations
+ super().__init__()
+ self.classes_per_context = classes_per_context
+ self.contexts = contexts
+ self.label = "SeparateClassifiers"
+ self.depth = depth
+ self.fc_layers = fc_layers
+ self.fc_drop = fc_drop
+
+ # Check whether there is at least 1 fc-layer
+ if fc_layers<1:
+ raise ValueError("The classifier needs to have at least 1 fully-connected layer.")
+
+ # Define a separate network for each context to be learned
+ for context_id in range(self.contexts):
+ new_network = Classifier(
+ image_size, image_channels, classes_per_context,
+ # -conv-layers
+ conv_type=conv_type, depth=depth, start_channels=start_channels, reducing_layers=reducing_layers,
+ conv_bn=conv_bn, conv_nl=conv_nl, num_blocks=num_blocks, global_pooling=global_pooling, no_fnl=no_fnl,
+ conv_gated=conv_gated,
+ # -fc-layers
+ fc_layers=fc_layers, fc_units=fc_units, fc_drop=fc_drop, fc_bn=fc_bn, fc_nl=fc_nl, fc_gated=fc_gated,
+ bias=bias, excitability=excitability, excit_buffer=excit_buffer
+ )
+ setattr(self, 'context{}'.format(context_id+1), new_network)
+
+
+ def _device(self):
+ return next(self.parameters()).device
+
+ def _is_on_cuda(self):
+ return next(self.parameters()).is_cuda
+
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ for context_id in range(self.contexts):
+ list += getattr(self, 'context{}'.format(context_id+1)).list_init_layers()
+ return list
+
+ @property
+ def name(self):
+ return "SepNets-{}".format(self.context1.name)
+
+
+ def train_a_batch(self, x, y, c=None, context=None, **kwargs):
+ '''Train model for one batch ([x],[y]) from the indicated context.
+
+ [x] batch of inputs (could be None, in which case only 'replayed' data is used)
+ [y] batch of corresponding labels
+ [c] <1D-tensor> or <np.ndarray>; for each batch-element in [x] its context-ID
+ [context] the context, can be used if all elements in [x] are from same context
+ '''
+
+ # Train the sub-network of the indicated context on this batch
+ if context is not None:
+ loss_dict = getattr(self, 'context{}'.format(context)).train_a_batch(x, y)
+ else:
+ for context_id in range(self.contexts):
+ if context_id in c:
+ x_to_use = x[c == context_id]
+ y_to_use = y[c == context_id]
+ loss_dict = getattr(self, 'context{}'.format(context_id+1)).train_a_batch(x_to_use, y_to_use)
+ # NOTE: this way, only the [loss_dict] of the last context in the batch is returned
+
+ # Return the dictionary with different training-loss split in categories
+ return loss_dict
+
diff --git a/PyTorch/build-in/other/continual-learning/models/utils/__init__.py b/PyTorch/build-in/other/continual-learning/models/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/models/utils/loss_functions.py b/PyTorch/build-in/other/continual-learning/models/utils/loss_functions.py
new file mode 100644
index 000000000..62271b307
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/utils/loss_functions.py
@@ -0,0 +1,154 @@
+import numpy as np
+import torch
+from torch.nn import functional as F
+
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+####################
+## Loss functions ##
+####################
+
+def loss_fn_kd(scores, target_scores, T=2., weights=None, norm=False):
+ """Compute knowledge-distillation (KD) loss given [scores] and [target_scores].
+
+ Both [scores] and [target_scores] should be <2D-tensors>, although [target_scores] should be repackaged.
+ 'Hyperparameter': temperature"""
+
+ device = scores.device
+
+ log_scores_norm = F.log_softmax(scores / T, dim=1)
+ targets_norm = F.softmax(target_scores / T, dim=1)
+
+ # If [scores] and [target_scores] do not have equal size, append 0's to [targets_norm]
+ n = scores.size(1)
+ if n>target_scores.size(1):
+ n_batch = scores.size(0)
+ zeros_to_add = torch.zeros(n_batch, n-target_scores.size(1))
+ zeros_to_add = zeros_to_add.to(device)
+ targets_norm = torch.cat([targets_norm, zeros_to_add], dim=1)
+
+ # Calculate distillation loss (see e.g., Li and Hoiem, 2017)
+ KD_loss_unnorm = -targets_norm * log_scores_norm
+
+ # Sum over the prob-scores of all classes (1) and then average over all elements in the batch (2)
+ KD_loss_unnorm = KD_loss_unnorm.sum(dim=1) #-> sum over classes
+ KD_loss_unnorm = weighted_average(KD_loss_unnorm, weights=weights, dim=0) #-> average over batch
+
+ # Normalize
+ KD_loss = (KD_loss_unnorm * T**2) if norm else KD_loss_unnorm
+
+ return KD_loss
+
+
+
+def loss_fn_kd_binary(scores, target_scores, T=2., weights=None, norm=False):
+ """Compute binary knowledge-distillation (KD) loss given [scores] and [target_scores].
+
+ Both [scores] and [target_scores] should be tensors, although [target_scores] should be repackaged.
+ 'Hyperparameter': temperature"""
+
+ device = scores.device
+
+ scores_norm = torch.sigmoid(scores / T)
+ targets_norm = torch.sigmoid(target_scores / T)
+
+ # If [scores] and [target_scores] do not have equal size, append 0's to [targets_norm]
+ n = scores.size(1)
+ if n>target_scores.size(1):
+ n_batch = scores.size(0)
+ zeros_to_add = torch.zeros(n_batch, n-target_scores.size(1))
+ zeros_to_add = zeros_to_add.to(device)
+ targets_norm = torch.cat([targets_norm, zeros_to_add], dim=1)
+
+ # Calculate distillation loss (see e.g., Li and Hoiem, 2017)
+ KD_loss_unnorm = -( targets_norm * torch.log(scores_norm) + (1-targets_norm) * torch.log(1-scores_norm) )
+
+ # Sum over the prob-scores of all classes (1) and then average over all elements in the batch (2)
+ KD_loss_unnorm = KD_loss_unnorm.sum(dim=1) #-> sum over classes
+ KD_loss_unnorm = weighted_average(KD_loss_unnorm, weights=weights, dim=0) #-> average over batch
+
+ # Normalize
+ KD_loss = (KD_loss_unnorm * T**2) if norm else KD_loss_unnorm
+
+
+ return KD_loss
+
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+######################
+## Helper functions ##
+######################
+
+def weighted_average(tensor, weights=None, dim=0):
+ '''Computes weighted average of [tensor] over dimension [dim].'''
+ if weights is None:
+ mean = torch.mean(tensor, dim=dim)
+ else:
+ batch_size = tensor.size(dim) if len(tensor.size())>0 else 1
+ assert len(weights)==batch_size
+ #sum_weights = sum(weights)
+ #norm_weights = torch.Tensor([weight/sum_weights for weight in weights]).to(tensor.device)
+ norm_weights = torch.tensor([weight for weight in weights]).to(tensor.device)
+ mean = torch.mean(norm_weights*tensor, dim=dim)
+ return mean
+
+def to_one_hot(y, classes, device=None):
+ '''Convert a <list> or <nd-array> with integers [y] to a 2D "one-hot" <tensor>.'''
+ if type(y)==torch.Tensor:
+ device=y.device
+ y = y.cpu()
+ c = np.zeros(shape=[len(y), classes], dtype='float32')
+ c[range(len(y)), y] = 1.
+ c = torch.from_numpy(c)
+ return c if device is None else c.to(device)
+
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+########################################################
+## Calculate log-likelihood for various distributions ##
+########################################################
+
+def log_Normal_standard(x, mean=0, average=False, dim=None):
+ '''Calculate log-likelihood of sample [x] under Gaussian distribution(s) with mu=[mean], diag_var=I.
+ NOTES: [dim]=-1 summing / averaging over all but the first dimension
+ [dim]=None summing / averaging is done over all dimensions'''
+ log_normal = -0.5 * torch.pow(x-mean, 2)
+ if dim is not None and dim == -1:
+ log_normal = log_normal.view(log_normal.size(0), -1)
+ dim = 1
+ if average:
+ return torch.mean(log_normal, dim) if dim is not None else torch.mean(log_normal)
+ else:
+ return torch.sum(log_normal, dim) if dim is not None else torch.sum(log_normal)
+
+def log_Normal_diag(x, mean, log_var, average=False, dim=None):
+ '''Calculate log-likelihood of sample [x] under Gaussian distribution(s) with mu=[mean], diag_var=exp[log_var].
+ NOTES: [dim]=-1 summing / averaging over all but the first dimension
+ [dim]=None summing / averaging is done over all dimensions'''
+ log_normal = -0.5 * (log_var + torch.pow(x-mean, 2) / torch.exp(log_var))
+ if dim is not None and dim==-1:
+ log_normal = log_normal.view(log_normal.size(0), -1)
+ dim = 1
+ if average:
+ return torch.mean(log_normal, dim) if dim is not None else torch.mean(log_normal)
+ else:
+ return torch.sum(log_normal, dim) if dim is not None else torch.sum(log_normal)
+
+def log_Bernoulli(x, mean, average=False, dim=None):
+ '''Calculate log-likelihood of sample [x] under Bernoulli distribution(s) with mu=[mean].
+ NOTES: [dim]=-1 summing / averaging over all but the first dimension
+ [dim]=None summing / averaging is done over all dimensions'''
+ probs = torch.clamp(mean, min=1e-5, max=1.-1e-5)
+ log_bernoulli = x*torch.log(probs) + (1. - x)*torch.log(1. - probs)
+ if dim is not None and dim==-1:
+ log_bernoulli = log_bernoulli.view(log_bernoulli.size(0), -1)
+ dim = 1
+ if average:
+ return torch.mean(log_bernoulli, dim) if dim is not None else torch.mean(log_bernoulli)
+ else:
+ return torch.sum(log_bernoulli, dim) if dim is not None else torch.sum(log_bernoulli)
+
+##-------------------------------------------------------------------------------------------------------------------##
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/utils/modules.py b/PyTorch/build-in/other/continual-learning/models/utils/modules.py
new file mode 100644
index 000000000..56f3343b8
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/utils/modules.py
@@ -0,0 +1,68 @@
+import numpy as np
+from torch import nn
+
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+#################################
+## Custom-written "nn-Modules" ##
+#################################
+
+class Identity(nn.Module):
+ '''A nn-module to simply pass on the input data.'''
+ def forward(self, x):
+ return x
+
+ def __repr__(self):
+ tmpstr = self.__class__.__name__ + '()'
+ return tmpstr
+
+
+class Shape(nn.Module):
+ '''A nn-module to shape a tensor of shape [shape].'''
+ def __init__(self, shape):
+ super().__init__()
+ self.shape = shape
+ self.dim = len(shape)
+
+ def forward(self, x):
+ return x.view(*self.shape)
+
+ def __repr__(self):
+ tmpstr = self.__class__.__name__ + '(shape = {})'.format(self.shape)
+ return tmpstr
+
+
+class Reshape(nn.Module):
+ '''A nn-module to reshape a tensor(-tuple) to a 4-dim "image"-tensor(-tuple) with [image_channels] channels.'''
+ def __init__(self, image_channels):
+ super().__init__()
+ self.image_channels = image_channels
+
+ def forward(self, x):
+ if type(x)==tuple:
+ batch_size = x[0].size(0) # first dimension should be batch-dimension.
+ image_size = int(np.sqrt(x[0].nelement() / (batch_size*self.image_channels)))
+ return (x_item.view(batch_size, self.image_channels, image_size, image_size) for x_item in x)
+ else:
+ batch_size = x.size(0) # first dimension should be batch-dimension.
+ image_size = int(np.sqrt(x.nelement() / (batch_size*self.image_channels)))
+ return x.view(batch_size, self.image_channels, image_size, image_size)
+
+ def __repr__(self):
+ tmpstr = self.__class__.__name__ + '(channels = {})'.format(self.image_channels)
+ return tmpstr
+
+
+class Flatten(nn.Module):
+ '''A nn-module to flatten a multi-dimensional tensor to 2-dim tensor.'''
+ def forward(self, x):
+ batch_size = x.size(0) # first dimension should be batch-dimension.
+ return x.view(batch_size, -1)
+
+ def __repr__(self):
+ tmpstr = self.__class__.__name__ + '()'
+ return tmpstr
+
+
+##-------------------------------------------------------------------------------------------------------------------##
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/models/utils/ncl.py b/PyTorch/build-in/other/continual-learning/models/utils/ncl.py
new file mode 100644
index 000000000..44fd94d8f
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/utils/ncl.py
@@ -0,0 +1,24 @@
+"""NCL utility functions"""
+import torch
+
+def additive_nearest_kf(B, C):
+ """Here it is assumed that all these matrices are symmetric, which is NOT CHECKED explicitly"""
+ BR, BL = B["A"], B["G"]
+ CR, CL = C["A"], C["G"]
+
+ trBL, trBR, trCL, trCR = (
+ torch.trace(BL),
+ torch.trace(BR),
+ torch.trace(CL),
+ torch.trace(CR),
+ )
+ if min(trBL, trBR) <= 0:
+ print("zero trace!")
+ return CR, CL
+ elif min(trCL, trCR) <= 0:
+ print("zero trace!")
+ return BR, BL
+
+ pi = torch.sqrt(torch.trace(BL) * torch.trace(CR)) / torch.sqrt(torch.trace(CL) * torch.trace(BR))
+
+ return BR + CR / pi, BL + CL * pi
diff --git a/PyTorch/build-in/other/continual-learning/models/vae.py b/PyTorch/build-in/other/continual-learning/models/vae.py
new file mode 100644
index 000000000..6397aaeaa
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/models/vae.py
@@ -0,0 +1,535 @@
+import numpy as np
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+from models.fc.layers import fc_layer,fc_layer_split
+from models.fc.nets import MLP
+from models.conv.nets import ConvLayers,DeconvLayers
+from models.cl.continual_learner import ContinualLearner
+from models.utils import loss_functions as lf, modules
+
+
+class VAE(ContinualLearner):
+ """Class for variational auto-encoder (VAE) model."""
+
+ def __init__(self, image_size, image_channels,
+ # -conv-layers
+ conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
+ num_blocks=2, global_pooling=False, no_fnl=True, conv_gated=False,
+ # -fc-layers
+ fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=False, fc_nl="relu", fc_gated=False, excit_buffer=False,
+ # -prior
+ prior="standard", z_dim=20, n_modes=1,
+ # -decoder
+ recon_loss='BCE', network_output="sigmoid", deconv_type="standard", **kwargs):
+ '''Class for variational auto-encoder (VAE) models.'''
+
+ # Set configurations for setting up the model
+ super().__init__()
+ self.label = "VAE"
+ self.image_size = image_size
+ self.image_channels = image_channels
+ self.fc_layers = fc_layers
+ self.z_dim = z_dim
+ self.fc_units = fc_units
+ self.fc_drop = fc_drop
+ self.depth = depth
+ # -type of loss to be used for reconstruction
+ self.recon_loss = recon_loss # options: BCE|MSE
+ self.network_output = network_output
+
+ # Optimizer (needs to be set before training starts))
+ self.optimizer = None
+ self.optim_list = []
+
+ # Prior-related parameters
+ self.prior = prior
+ self.n_modes = n_modes
+
+ # Weights of different components of the loss function
+ self.lamda_rcl = 1.
+ self.lamda_vl = 1.
+
+ self.average = True #--> makes that [reconL] and [variatL] are both divided by number of input-pixels
+
+ # Check whether there is at least 1 fc-layer
+ if fc_layers<1:
+ raise ValueError("VAE cannot have 0 fully-connected layers!")
+
+
+ ######------SPECIFY MODEL------######
+
+ ##>----Encoder (= q[z|x])----<##
+ self.convE = ConvLayers(conv_type=conv_type, block_type="basic", num_blocks=num_blocks,
+ image_channels=image_channels, depth=self.depth, start_channels=start_channels,
+ reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
+ output="none" if no_fnl else "normal", global_pooling=global_pooling,
+ gated=conv_gated)
+ # -flatten image to 2D-tensor
+ self.flatten = modules.Flatten()
+ #------------------------------calculate input/output-sizes--------------------------------#
+ self.conv_out_units = self.convE.out_units(image_size)
+ self.conv_out_size = self.convE.out_size(image_size)
+ self.conv_out_channels = self.convE.out_channels
+ #------------------------------------------------------------------------------------------#
+ # -fully connected hidden layers
+ self.fcE = MLP(input_size=self.conv_out_units, output_size=fc_units, layers=fc_layers-1,
+ hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, gated=fc_gated,
+ excit_buffer=excit_buffer)
+ mlp_output_size = fc_units if fc_layers > 1 else self.conv_out_units
+ # -to z
+ self.toZ = fc_layer_split(mlp_output_size, z_dim, nl_mean='none', nl_logvar='none')
+
+ ##>----Decoder (= p[x|z])----<##
+ out_nl = True if fc_layers > 1 else (True if (self.depth > 0 and not no_fnl) else False)
+ real_h_dim_down = fc_units if fc_layers > 1 else self.convE.out_units(image_size, ignore_gp=True)
+ self.fromZ = fc_layer(z_dim, real_h_dim_down, batch_norm=(out_nl and fc_bn), nl=fc_nl if out_nl else "none")
+ # -> if 'gp' is used in forward pass, size of first/final hidden layer differs between forward and backward pass
+ self.fcD = MLP(input_size=fc_units, output_size=self.convE.out_units(image_size, ignore_gp=True),
+ layers=fc_layers-1, hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
+ gated=fc_gated, output=self.network_output if self.depth==0 else 'normal')
+ # to image-shape
+ self.to_image = modules.Reshape(image_channels=self.convE.out_channels if self.depth>0 else image_channels)
+ # through deconv-layers
+ self.convD = DeconvLayers(
+ image_channels=image_channels, final_channels=start_channels, depth=self.depth,
+ reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl, gated=conv_gated,
+ output=self.network_output, deconv_type=deconv_type,
+ )
+
+ ##>----Prior----<##
+ # -if using the GMM-prior, add its parameters
+ if self.prior=="GMM":
+ # -create
+ self.z_class_means = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
+ self.z_class_logvars = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
+ # -initialize
+ self.z_class_means.data.normal_()
+ self.z_class_logvars.data.normal_()
+
+ # Flags whether parts of the network are frozen (so they can be set to evaluation mode during training)
+ self.convE.frozen = False
+ self.fcE.frozen = False
+
+
+
+ ##------ NAMES --------##
+
+ def get_name(self):
+ convE_label = "{}--".format(self.convE.name) if self.depth>0 else ""
+ fcE_label = "{}--".format(self.fcE.name) if self.fc_layers>1 else "{}{}-".format("h" if self.depth>0 else "i",
+ self.conv_out_units)
+ z_label = "z{}{}".format(self.z_dim, "" if self.prior=="standard" else "-{}{}".format(self.prior, self.n_modes))
+ decoder_label = "--{}".format(self.network_output)
+ return "{}={}{}{}{}".format(self.label, convE_label, fcE_label, z_label, decoder_label)
+
+ @property
+ def name(self):
+ return self.get_name()
+
+
+
+ ##------ LAYERS --------##
+
+ def list_init_layers(self):
+ '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
+ list = []
+ list += self.convE.list_init_layers()
+ list += self.fcE.list_init_layers()
+ list += self.toZ.list_init_layers()
+ list += self.fromZ.list_init_layers()
+ list += self.fcD.list_init_layers()
+ list += self.convD.list_init_layers()
+ return list
+
+ def layer_info(self):
+ '''Return list with shape of all hidden layers.'''
+ # create list with hidden convolutional layers
+ layer_list = self.convE.layer_info(image_size=self.image_size)
+ # add output of final convolutional layer (if there was at least one conv-layer and there's fc-layers after)
+ if (self.fc_layers>0 and self.depth>0):
+ layer_list.append([self.conv_out_channels, self.conv_out_size, self.conv_out_size])
+ # add layers of the MLP
+ if self.fc_layers>1:
+ for layer_id in range(1, self.fc_layers):
+ layer_list.append([self.fc_layer_sizes[layer_id]])
+ return layer_list
+
+
+
+ ##------ FORWARD FUNCTIONS --------##
+
+ def encode(self, x):
+ '''Pass input through feed-forward connections, to get [z_mean], [z_logvar] and [hE].'''
+ # Forward-pass through conv-layers
+ hidden_x = self.convE(x)
+ image_features = self.flatten(hidden_x)
+ # Forward-pass through fc-layers
+ hE = self.fcE(image_features)
+ # Get parameters for reparametrization
+ (z_mean, z_logvar) = self.toZ(hE)
+ return z_mean, z_logvar, hE, hidden_x
+
+ def reparameterize(self, mu, logvar):
+ '''Perform "reparametrization trick" to make these stochastic variables differentiable.'''
+ std = logvar.mul(0.5).exp_()
+ eps = std.new(std.size()).normal_()#.requires_grad_()
+ return eps.mul(std).add_(mu)
+
+ def decode(self, z):
+ '''Decode latent variable activations [z] (=<2D-tensor>) into [image_recon] (=<4D-tensor>).'''
+ hD = self.fromZ(z)
+ image_features = self.fcD(hD)
+ image_recon = self.convD(self.to_image(image_features))
+ return image_recon
+
+ def forward(self, x, full=False, reparameterize=True, **kwargs):
+ '''Forward function to propagate [x] through the encoder, reparametrization and decoder.
+
+ Input: - [x] <4D-tensor> of shape [batch_size]x[channels]x[image_size]x[image_size]
+
+ If [full] is True, output should be a <tuple> consisting of:
+ - [x_recon] <4D-tensor> reconstructed image (features) in same shape as [x] (or 2 of those: mean & logvar)
+ - [mu] <2D-tensor> with either [z] or the estimated mean of [z]
+ - [logvar] None or <2D-tensor> estimated log(SD^2) of [z]
+ - [z] <2D-tensor> reparameterized [z] used for reconstruction
+ If [full] is False, output is the reconstructed image (i.e., [x_recon]).
+ '''
+ # -encode (forward), reparameterize and decode (backward)
+ mu, logvar, hE, hidden_x = self.encode(x)
+ z = self.reparameterize(mu, logvar) if reparameterize else mu
+ x_recon = self.decode(z)
+ # -return
+ return (x_recon, mu, logvar, z) if full else x_recon
+
+ def feature_extractor(self, images):
+ '''Extract "final features" (i.e., after both conv- and fc-layers of forward pass) from provided images.'''
+ return self.fcE(self.flatten(self.convE(images)))
+
+
+
+ ##------ SAMPLE FUNCTIONS --------##
+
+ def sample(self, size, sample_mode=None, **kwargs):
+ '''Generate [size] samples from the model. Outputs are tensors (not "requiring grad"), on same device as <self>.
+
+ INPUT: - [sample_mode] to sample from specific mode of [z]-distribution
+
+ OUTPUT: - [X] <4D-tensor> generated images / image-features'''
+
+ # set model to eval()-mode
+ self.eval()
+
+ # pick for each sample the prior-mode to be used
+ if self.prior=="GMM":
+ if sample_mode is None:
+ # -randomly sample modes from all possible modes
+ sampled_modes = np.random.randint(0, self.n_modes, size)
+ else:
+ # -always sample from the provided mode
+ sampled_modes = np.repeat(sample_mode, size)
+
+ # sample z
+ if self.prior=="GMM":
+ prior_means = self.z_class_means
+ prior_logvars = self.z_class_logvars
+ # -for each sample to be generated, select the previously sampled mode
+ z_means = prior_means[sampled_modes, :]
+ z_logvars = prior_logvars[sampled_modes, :]
+ with torch.no_grad():
+ z = self.reparameterize(z_means, z_logvars)
+ else:
+ z = torch.randn(size, self.z_dim).to(self._device())
+
+ # decode z into image X
+ with torch.no_grad():
+ X = self.decode(z)
+
+ # return samples as [batch_size]x[channels]x[image_size]x[image_size] tensor
+ return X
+
+
+
+ ##------ LOSS FUNCTIONS --------##
+
+ def calculate_recon_loss(self, x, x_recon, average=False):
+ '''Calculate reconstruction loss for each element in the batch.
+
+ INPUT: - [x] with original input (1st dimension (ie, dim=0) is "batch-dimension")
+ - [x_recon] (tuple of 2x) with reconstructed input in same shape as [x]
+ - [average] <bool>, if True, loss is averaged over all pixels; otherwise it is summed
+
+ OUTPUT: - [reconL] <1D-tensor> of length [batch_size]'''
+
+ batch_size = x.size(0)
+ if self.recon_loss=="MSE":
+ # reconL = F.mse_loss(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1), reduction='none')
+ # reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
+ reconL = -lf.log_Normal_standard(x=x, mean=x_recon, average=average, dim=-1)
+ elif self.recon_loss=="BCE":
+ reconL = F.binary_cross_entropy(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1),
+ reduction='none')
+ reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
+ else:
+ raise NotImplementedError("Wrong choice for type of reconstruction-loss!")
+ # --> if [average]=True, reconstruction loss is averaged over all pixels/elements (otherwise it is summed)
+ # (averaging over all elements in the batch will be done later)
+ return reconL
+
+
+ def calculate_log_p_z(self, z):
+ '''Calculate log-likelihood of sampled [z] under the prior distribution.
+
+ INPUT: - [z] <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")
+
+ OUTPUT: - [log_p_z] <1D-tensor> of length [batch_size]'''
+
+ if self.prior == "standard":
+ log_p_z = lf.log_Normal_standard(z, average=False, dim=1) # [batch_size]
+
+ if self.prior == "GMM":
+ ## Get [means] and [logvars] of all (possible) modes
+ allowed_modes = list(range(self.n_modes))
+ # -calculate/retrieve the means and logvars for the selected modes
+ prior_means = self.z_class_means[allowed_modes, :]
+ prior_logvars = self.z_class_logvars[allowed_modes, :]
+ # -rearrange / select for each batch prior-modes to be used
+ z_expand = z.unsqueeze(1) # [batch_size] x 1 x [z_dim]
+ means = prior_means.unsqueeze(0) # 1 x [n_modes] x [z_dim]
+ logvars = prior_logvars.unsqueeze(0) # 1 x [n_modes] x [z_dim]
+
+ ## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
+ n_modes = len(allowed_modes)
+ a = lf.log_Normal_diag(z_expand, mean=means, log_var=logvars, average=False, dim=2) - math.log(n_modes)
+ # --> for each element in batch, calculate log-likelihood for all pseudoinputs: [batch_size] x [n_modes]
+ a_max, _ = torch.max(a, dim=1) # [batch_size]
+ # --> for each element in batch, take highest log-likelihood over all pseudoinputs
+ # this is calculated and used to avoid underflow in the below computation
+ a_exp = torch.exp(a - a_max.unsqueeze(1)) # [batch_size] x [n_modes]
+ a_logsum = torch.log(torch.clamp(torch.sum(a_exp, dim=1), min=1e-40)) # -> sum over modes: [batch_size]
+ log_p_z = a_logsum + a_max # [batch_size]
+
+ return log_p_z
+
+
+ def calculate_variat_loss(self, z, mu, logvar):
+ '''Calculate reconstruction loss for each element in the batch.
+
+ INPUT: - [z] <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")
+ - [mu] <2D-tensor> by encoder predicted mean for [z]
+ - [logvar] <2D-tensor> by encoder predicted logvar for [z]
+
+ OUTPUT: - [variatL] <1D-tensor> of length [batch_size]'''
+
+ if self.prior == "standard":
+ # --> calculate analytically
+ # ---- see Appendix B from: Kingma & Welling (2014) Auto-Encoding Variational Bayes, ICLR ----#
+ variatL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
+
+ elif self.prior=="GMM":
+ # --> calculate "by estimation"
+
+ ## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
+ log_p_z = self.calculate_log_p_z(z)
+ # -----> log_p_z: [batch_size]
+
+ ## Calculate "log_q_z_x" (entropy of "reparameterized" [z] given [x])
+ log_q_z_x = lf.log_Normal_diag(z, mean=mu, log_var=logvar, average=False, dim=1)
+ # -----> mu: [batch_size] x [z_dim]; logvar: [batch_size] x [z_dim]; z: [batch_size] x [z_dim]
+ # -----> log_q_z_x: [batch_size]
+
+ ## Combine
+ variatL = -(log_p_z - log_q_z_x)
+
+ return variatL
+
+
+ def loss_function(self, x, x_recon, mu, z, logvar=None, batch_weights=None):
+ '''Calculate and return various losses that could be used for training and/or evaluating the model.
+
+ INPUT: - [x] <4D-tensor> original image
+ - [x_recon] (tuple of 2x) <4D-tensor> reconstructed image in same shape as [x]
+ - [mu] <2D-tensor> with either [z] or the estimated mean of [z]
+ - [z] <2D-tensor> with reparameterized [z]
+ - [logvar] None or <2D-tensor> with estimated log(SD^2) of [z]
+ - [batch_weights] <1D-tensor> with a weight for each batch-element (if None, normal average over batch)
+
+ OUTPUT: - [reconL] reconstruction loss indicating how well [x] and [x_recon] match
+ - [variatL] variational (KL-divergence) loss "indicating how close distribion [z] is to prior"
+ '''
+
+ ###-----Reconstruction loss-----###
+ batch_size = x.size(0)
+ x_recon = (x_recon[0].view(batch_size, -1), x_recon[1].view(batch_size, -1)) if self.network_output=='split' else x_recon.view(batch_size, -1)
+ reconL = self.calculate_recon_loss(x=x.view(batch_size, -1), average=True, x_recon=x_recon)#-average over pixels
+ reconL = lf.weighted_average(reconL, weights=batch_weights, dim=0) #-average over batch
+
+ ###-----Variational loss-----###
+ if logvar is not None:
+ variatL = self.calculate_variat_loss(z=z, mu=mu, logvar=logvar)
+ variatL = lf.weighted_average(variatL, weights=batch_weights, dim=0) # -> average over batch
+ variatL /= (self.image_channels * self.image_size ** 2) # -> divide by # of input-pixels
+ else:
+ variatL = torch.tensor(0., device=self._device())
+
+ # Return a tuple of the calculated losses
+ return reconL, variatL
+
+
+
+ ##------ EVALUATION FUNCTIONS --------##
+
+ def get_latent_lls(self, x):
+ '''Encode [x] as [z!x] and return log-likelihood.
+
+ Input: - [x] <4D-tensor> of shape [batch]x[channels]x[image_size]x[image_size]
+
+ Output: - [log_likelihood] <1D-tensor> of shape [batch]
+ '''
+
+ # Run forward pass of model to get [z_mu] and [z_logvar]
+ z_mu, z_logvar, _, _ = self.encode(x)
+
+ # Calculate log_p_z
+ log_p_z = self.calculate_log_p_z(z_mu)
+
+ ## NOTE: we could additionally use [z_logvar] and compute KL-divergence with prior?
+ return log_p_z
+
+
    def estimate_lls(self, x, S='mean', importance=True):
        '''Estimate log-likelihood for [x] using [S] importance samples (or Monte Carlo samples, if [importance]=False).

        Input:  - [x]           <4D-tensor> of shape [batch]x[channels]x[image_size]x[image_size]
                - [S]           (= # importance samples) or 'mean' (= use [z_mu] as single importance sample)
                - [importance]  if True do importance sampling, otherwise do Monte Carlo sampling

        Output: - [log_likelihood]  <1D-tensor> of shape [batch]
        '''
        # Run forward pass of model to get [z_mu] and [z_logvar]
        # (only needed for importance sampling, where the encoder defines the proposal distribution q(z|x))
        if importance:
            z_mu, z_logvar, _, _ = self.encode(x)

        if S=='mean':
            if importance:
                # --> Use [z_mu] as a 'single importance sample'
                # Calculate log_p_z
                log_p_z = self.calculate_log_p_z(z_mu)
                # Calculate log_q_z_x (log-density of the sample [z_mu] under q(z|x), whose mean is [z_mu] itself)
                z_mu_dummy = torch.zeros_like(z_mu)  # to avoid unnecessary gradient tracking in next computation
                log_q_z_x = lf.log_Normal_diag(z_mu_dummy, mean=z_mu_dummy, log_var=z_logvar, average=False, dim=1)
            else:
                # --> Use the overall prior mean as a 'single Monte Carlo sample'
                if self.prior=="GMM":
                    sampled_modes = np.random.randint(0, self.n_modes, x.size(0))
                    z_mu = self.z_class_means[sampled_modes, :]
                    ## NOTE: if using a GMM-prior with multiple modes, this does not really make sense!!!
                else:
                    z_mu = torch.zeros(x.size(0), self.z_dim).to(self._device())
            # Calculate p_x_z
            # -reconstruct input
            x_recon = self.decode(z_mu)
            # -calculate p_x_z (under Gaussian observation model with unit variance)
            log_p_x_z = lf.log_Normal_standard(x=x, mean=x_recon, average=False, dim=-1)
            # Calculate log-likelihood (for importance sampling: log p(x,z) - log q(z|x); for MC: log p(x|z))
            log_likelihood = (log_p_x_z + log_p_z - log_q_z_x) if importance else log_p_x_z
        else:
            #--> Use [S] importance/Monte Carlo samples
            # Define tensor in which to store the log-likelihoods of each sample
            all_lls = torch.zeros([S, x.size(0)], dtype=torch.float32, device=self._device())
            # For each sample, calculate log_likelihood
            for s_id in range(S):
                if importance:
                    # Reparameterize (i.e., sample z_s from q(z|x))
                    z = self.reparameterize(z_mu, z_logvar)
                    # Calculate log_p_z
                    log_p_z = self.calculate_log_p_z(z)
                    # Calculate log_q_z_x
                    log_q_z_x = lf.log_Normal_diag(z, mean=z_mu, log_var=z_logvar, average=False, dim=1)
                else:
                    # Sample z_s (from the prior, since this is plain Monte Carlo sampling)
                    if self.prior == "GMM":
                        # -randomly pick for each sample the prior-mode to be used
                        sampled_modes = np.random.randint(0, self.n_modes, x.size(0))
                        # -for each sample to be generated, select the mean & logvar for the sampled mode
                        z_means = self.z_class_means[sampled_modes, :]
                        z_logvars = self.z_class_logvars[sampled_modes, :]
                        # -sample using the selected means & logvars
                        z = self.reparameterize(z_means, z_logvars)
                    else:
                        # -sample from standard normal distribution
                        z = torch.randn(x.size(0), self.z_dim).to(self._device())
                # Calculate p_x_z
                # -reconstruct input
                x_recon = self.decode(z)
                # -calculate p_x_z (under Gaussian observation model with unit variance)
                log_p_x_z = lf.log_Normal_standard(x=x, mean=x_recon, average=False, dim=-1)
                # Calculate log-likelihoods for this importance sample
                all_lls[s_id] = (log_p_x_z + log_p_z - log_q_z_x) if importance else log_p_x_z
            # Calculate average log-likelihood over all (importance) samples for this test sample
            # (for this, convert log-likelihoods back to likelihoods before summing them!)
            log_likelihood = all_lls.logsumexp(dim=0) - np.log(S)
        return log_likelihood
+
+
+
+ ##------ TRAINING FUNCTIONS --------##
+
    def train_a_batch(self, x, x_=None, rnt=0.5, **kwargs):
        '''Train model for one batch ([x]), possibly supplemented with replayed data ([x_]).

        [x]    batch of inputs (could be None, in which case only 'replayed' data is used)
        [x_]   None or batch of replayed inputs
        [rnt]  in [0,1], relative importance of new context
        '''

        # Set model to training-mode
        self.train()
        # -however, if some layers are frozen, they should be set to eval() to prevent batch-norm layers from changing
        if self.convE.frozen:
            self.convE.eval()
        if self.fcE.frozen:
            self.fcE.eval()

        # Reset optimizer
        self.optimizer.zero_grad()

        ##--(1)-- CURRENT DATA --##
        if x is not None:
            # Run the model
            recon_batch, mu, logvar, z = self(x, full=True, reparameterize=True)

            # Calculate losses
            reconL, variatL = self.loss_function(x=x, x_recon=recon_batch, mu=mu, z=z, logvar=logvar)

            # Weigh losses as requested
            loss_cur = self.lamda_rcl*reconL + self.lamda_vl*variatL

        ##--(2)-- REPLAYED DATA --##
        if x_ is not None:
            # Run the model
            recon_batch, mu, logvar, z = self(x_, full=True, reparameterize=True)

            # Calculate losses
            reconL_r, variatL_r = self.loss_function(x=x_, x_recon=recon_batch, mu=mu, z=z, logvar=logvar)

            # Weigh losses as requested
            loss_replay = self.lamda_rcl*reconL_r + self.lamda_vl*variatL_r

        # Calculate total loss
        # NOTE(review): assumes at least one of [x] / [x_] is provided; if both are None, [loss_total] is undefined
        loss_total = loss_replay if (x is None) else (loss_cur if x_ is None else rnt*loss_cur+(1-rnt)*loss_replay)

        # Backpropagate errors
        loss_total.backward()
        # Take optimization-step
        self.optimizer.step()

        # Return the dictionary with different training-loss split in categories
        return {
            'loss_total': loss_total.item(),
            'recon': reconL.item() if x is not None else 0,
            'variat': variatL.item() if x is not None else 0,
            'recon_r': reconL_r.item() if x_ is not None else 0,
            'variat_r': variatL_r.item() if x_ is not None else 0,
        }
diff --git a/PyTorch/build-in/other/continual-learning/params/__init__.py b/PyTorch/build-in/other/continual-learning/params/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/params/options.py b/PyTorch/build-in/other/continual-learning/params/options.py
new file mode 100644
index 000000000..76d5559e4
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/params/options.py
@@ -0,0 +1,351 @@
+import argparse
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+# Where to store the data / results / models / plots
+store = "./store"
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+####################
+## Define options ##
+####################
+
def define_args(filename, description):
    '''Return an [argparse.ArgumentParser] whose program name is derived from [filename].'''
    prog_name = './{}.py'.format(filename)
    return argparse.ArgumentParser(prog_name, description=description)
+
def add_general_options(parser, main=False, comparison=False, compare_hyper=False, pretrain=False, **kwargs):
    '''Add general options (seed, device, saving/loading tags, storage directories) to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    # NOTE(review): source indentation was lost in extraction; nesting of the conditional blocks below was
    #               reconstructed from context — verify against the original file
    if main:
        parser.add_argument('--get-stamp', action='store_true', help='print param-stamp & exit')
    parser.add_argument('--seed', type=int, default=0, help='[first] random seed (for each random-module used)')
    if comparison and (not compare_hyper):
        parser.add_argument('--n-seeds', type=int, default=1, help='how often to repeat?')
    parser.add_argument('--no-gpus', action='store_false', dest='gpu', help="don't use GPUs")
    parser.add_argument('--no-save', action='store_false', dest='save', help="don't save trained models")
    parser.add_argument('--full-stag', type=str, metavar='STAG', default='none', help="tag for saving full model")
    parser.add_argument('--full-ltag', type=str, metavar='LTAG', default='none', help="tag for loading full model")
    if pretrain:
        parser.add_argument('--convE-stag', type=str, metavar='STAG', default='none',
                            help="tag for saving convE-layers")
        parser.add_argument('--seed-to-stag', action='store_true', help="add seed to tag for saving convE-layers")
    if main:
        parser.add_argument('--test', action='store_false', dest='train', help='evaluate previously saved model')
    parser.add_argument('--data-dir', type=str, default='{}/datasets'.format(store), dest='d_dir',
                        help="default: %(default)s")
    parser.add_argument('--model-dir', type=str, default='{}/models'.format(store), dest='m_dir',
                        help="default: %(default)s")
    if not pretrain:
        parser.add_argument('--plot-dir', type=str, default='{}/plots'.format(store), dest='p_dir',
                            help="default: %(default)s")
        parser.add_argument('--results-dir', type=str, default='{}/results'.format(store), dest='r_dir',
                            help="default: %(default)s")
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def add_eval_options(parser, main=False, comparison=False, pretrain=False, compare_replay=False, no_boundaries=False,
                     **kwargs):
    '''Add options relating to evaluation (timing, logging intervals, plotting of samples) to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    # NOTE(review): source indentation was lost in extraction; nesting of the conditional blocks below was
    #               reconstructed from context — verify against the original file
    eval_params = parser.add_argument_group('Evaluation Parameters')
    if not pretrain:
        eval_params.add_argument('--time', action='store_true', help="keep track of total training time")
    if main:
        eval_params.add_argument('--pdf', action='store_true', help="generate pdf with results")
        eval_params.add_argument('--visdom', action='store_true', help="use visdom for on-the-fly plots")
        eval_params.add_argument('--results-dict', action='store_true', help="output dict with results after each task")
    if not comparison:
        eval_params.add_argument('--loss-log', type=int, metavar="N",
                                 help="# iters after which to plot loss (def: # iters)")
        eval_params.add_argument('--acc-log', type=int, metavar="N",
                                 help="# iters after which to plot accuracy (def: # iters)")
        eval_params.add_argument('--acc-n', type=int, default=1024,
                                 help="# samples to evaluate accuracy (after each context)")
    if (not no_boundaries) and (not comparison) and (not pretrain):
        eval_params.add_argument('--sample-log', type=int, metavar="N",
                                 help="# iters after which to plot samples (def: # iters)")
    if (not no_boundaries) and (not pretrain) and (not compare_replay):
        eval_params.add_argument('--sample-n', type=int, default=64, help="# images to show")
        eval_params.add_argument('--no-samples', action='store_true', help="don't plot generated images")
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def add_problem_options(parser, pretrain=False, no_boundaries=False, **kwargs):
    '''Add options specifying the (continual learning) problem to be solved to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    # NOTE(review): source indentation was lost in extraction; nesting of the conditional blocks below was
    #               reconstructed from context — verify against the original file
    problem_params = parser.add_argument_group('Problem Specification')
    cl_protocols = ['splitMNIST', 'permMNIST', 'CIFAR10', 'CIFAR100']
    problem_params.add_argument('--experiment', type=str, default='CIFAR10' if pretrain else 'splitMNIST',
                                choices=['CIFAR10', 'CIFAR100', 'MNIST', 'MNIST32'] if pretrain else cl_protocols)
    if no_boundaries:
        problem_params.add_argument('--stream', type=str, default='fuzzy-boundaries',
                                    choices=['fuzzy-boundaries', 'academic-setting', 'random'])
        problem_params.add_argument('--fuzziness', metavar='ITERS', type=int, default=500, help='amount of fuzziness')
    if not pretrain:
        problem_params.add_argument('--scenario', type=str, default='class', choices=['task', 'domain', 'class'])
        problem_params.add_argument('--contexts', type=int, metavar='N', help='number of contexts')
        problem_params.add_argument('--iters', type=int, help="# iterations (mini-batches) per context")
        problem_params.add_argument('--batch', type=int, help="mini batch size (# observations per iteration)")
    if pretrain:
        problem_params.add_argument('--augment', action='store_true',
                                    help="augment training data (random crop & horizontal flip)")
    problem_params.add_argument('--no-norm', action='store_false', dest='normalize',
                                help="don't normalize images (only for CIFAR)")
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def add_model_options(parser, pretrain=False, compare_replay=False, **kwargs):
    '''Add options specifying the architecture of the main model to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    arch_params = parser.add_argument_group('Parameters Main Model')
    # 'Convenience-commands' that select the defaults for specific architectures
    arch_params.add_argument('--reducedResNet', action='store_true',
                             help="select defaults for 'Reduced ResNet-18' (e.g., as in Hess et al, 2023)")
    # -convolutional layers
    arch_params.add_argument('--conv-type', type=str, default="standard", choices=["standard", "resNet"])
    arch_params.add_argument('--n-blocks', type=int, default=2, help="# blocks per conv-layer (only for 'resNet')")
    arch_params.add_argument('--depth', type=int, default=None, help="# of convolutional layers (0 = only fc-layers)")
    arch_params.add_argument('--reducing-layers', type=int, dest='rl', default=None,
                             help="# of layers with stride (=image-size halved)")
    arch_params.add_argument('--channels', type=int, default=None,
                             help="# of channels 1st conv-layer (doubled every 'rl')")
    arch_params.add_argument('--conv-bn', type=str, default="yes", help="use batch-norm in the conv-layers (yes|no)")
    arch_params.add_argument('--conv-nl', type=str, default="relu", choices=["relu", "leakyrelu"])
    arch_params.add_argument('--global-pooling', action='store_true', dest='gp',
                             help="ave global pool after conv-layers")
    # -fully connected layers
    arch_params.add_argument('--fc-layers', type=int, default=None, dest='fc_lay', help="# of fully-connected layers")
    arch_params.add_argument('--fc-units', type=int, metavar="N", help="# of units in hidden fc-layers")
    arch_params.add_argument('--fc-drop', type=float, default=0., help="dropout probability for fc-units")
    arch_params.add_argument('--fc-bn', type=str, default="no", help="use batch-norm in the fc-layers (no|yes)")
    arch_params.add_argument('--fc-nl', type=str, default="relu", choices=["relu", "leakyrelu", "none"])
    # -latent space (not relevant when pretraining or when only comparing replay budgets)
    if (not pretrain) and (not compare_replay):
        arch_params.add_argument('--z-dim', type=int, default=100,
                                 help='size of latent representation (if used, def=100)')
    if not pretrain:
        arch_params.add_argument('--singlehead', action='store_true',
                                 help="for Task-IL: use a 'single-headed' output layer (instead of a 'multi-headed' one)")
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def add_train_options(parser, main=False, no_boundaries=False, pretrain=False, compare_replay=False, **kwargs):
    '''Add options specifying how the model should be trained (optimizer, initialization, losses) to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    # NOTE(review): source indentation was lost in extraction; nesting of the conditional blocks below was
    #               reconstructed from context — verify against the original file

    ## Training hyperparameters
    train_params = parser.add_argument_group('Training Parameters')
    if pretrain:
        # -when pretraining, either a number of epochs or a number of iterations can be specified (not both)
        iter_epochs = train_params.add_mutually_exclusive_group(required=False)
        iter_epochs.add_argument('--epochs', type=int, default=10, metavar='N', help='# epochs (default: %(default)d)')
        iter_epochs.add_argument('--iters', type=int, metavar='N', help='# iterations (replaces "--epochs")')
        train_params.add_argument('--batch', type=int, help="mini batch size")
    train_params.add_argument('--lr', type=float, help="learning rate")
    if not pretrain:
        train_params.add_argument('--optimizer', type=str, default='adam',
                                  choices=['adam', 'sgd'] if no_boundaries else ['adam', 'adam_reset', 'sgd'])
        train_params.add_argument("--momentum", type=float, default=0., help="momentum (if using SGD optimizer)")
    # -initialization / pretraining
    train_params.add_argument('--pre-convE', action='store_true', help="use pretrained convE-layers")
    train_params.add_argument('--convE-ltag', type=str, metavar='LTAG', default='e100',
                              help="tag for loading convE-layers")
    train_params.add_argument('--seed-to-ltag', action='store_true', help="add seed to tag when loading convE-layers")
    train_params.add_argument('--freeze-convE', action='store_true', help="freeze convE-layers")
    # -for Class-IL, which output units should be set to 'active'?
    if (not pretrain) and (not no_boundaries):
        train_params.add_argument('--active-classes', type=str, default='all', choices=["all", "all-so-far", "current"],
                                  dest='neg_samples', help="for Class-IL: which classes to set to 'active'?")
        #--> the above command controls which output units will be set to "active" (the active classes can also
        #    be thought of as 'negative classes', see Li et al., 2020, https://arxiv.org/abs/2011.12216):
        #    - "all-so-far": the output units of all classes seen so far are set to active
        #    - "all": always the output units of all classes are set to active
        #    - "current": only output units of the classes in the current context are set to active

    ## Loss function(s) to be used
    if (not pretrain) and (not compare_replay):
        loss_params = parser.add_argument_group('Loss Parameters')
        loss_params.add_argument('--recon-loss', type=str, choices=['MSE', 'BCE'])
        if main:
            loss_params.add_argument('--bce', action='store_true',
                                     help="use binary (instead of multi-class) classification loss")
        if main and (not no_boundaries):
            loss_params.add_argument('--bce-distill', action='store_true',
                                     help='distilled loss on previous classes for new'
                                          ' examples (if --bce & --scenario="class")')
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def add_cl_options(parser, main=False, compare_all=False, compare_replay=False, compare_hyper=False,
                   no_boundaries=False, **kwargs):
    '''Add options relating to the continual learning methods (baselines, regularization, replay, buffers) to [parser].

    The boolean flags indicate which script the parser is built for; they control which options are registered.'''
    # NOTE(review): source indentation was lost in extraction; nesting of the conditional blocks below was
    #               reconstructed from context — verify against the original file

    ## Baselines
    if main and (not no_boundaries):
        baseline_options = parser.add_argument_group('Baseline Options')
        baseline_options.add_argument('--joint', action='store_true', help="train once on data of all contexts")
        baseline_options.add_argument('--cummulative', action='store_true',
                                      help="train incrementally on data of all contexts so far")
        #---> Explanation for these two "upper-target" baselines:
        #     - "joint": means that the network is trained on a single dataset consisting of the data of all contexts
        #     - "cummulative": means that the network is incrementally trained on all contexts, whereby the training data
        #                      always consists of the training data from all contexts seen so far

    ## Stream-specific options
    if no_boundaries:
        stream_options = parser.add_argument_group('Stream Options')
        stream_options.add_argument('--update-every', metavar='N', type=int, default=100,
                                    help='after how many iterations to consolidate model')
        if compare_all:
            stream_options.add_argument('--replay-update', metavar='N', type=int, default=1,
                                        help='after how many iterations to start replaying observed samples')

    ## Context-specific components
    context_spec = parser.add_argument_group('Context-Specific Component')
    xdg_message = "use 'Context-dependent Gating' (Masse et al, 2018)" if main else "combine all methods with XdG"
    context_spec.add_argument('--xdg', action='store_true', help=xdg_message)
    context_spec.add_argument('--gating-prop', type=float, metavar="PROP",
                              help="-> XdG: prop neurons per layer to gate")
    if main:
        context_spec.add_argument('--separate-networks', action='store_true', help="train separate network per context")
    if compare_all:
        context_spec.add_argument('--fc-units-sep', type=int, metavar="N",
                                  help="# of hidden units with separate network per context")

    ## Parameter regularization
    if not compare_replay:
        param_reg = parser.add_argument_group('Parameter Regularization')
        if main and no_boundaries:
            # With the flexible, 'task-free' CL experiments, currently the only supported param reg option is SI
            param_reg.add_argument('--si', action='store_true', help="select defaults for 'SI' (Zenke et al, 2017)")
            param_reg.add_argument("--weight-penalty", action='store_true',
                                   help="penalize parameters important for past contexts")
            param_reg.add_argument('--reg-strength', type=float, metavar='LAMDA',
                                   help="regularisation strength for weight penalty")
        if main and not no_boundaries:
            # 'Convenience-commands' that select the defaults for specific methods
            param_reg.add_argument('--ewc', action='store_true',
                                   help="select defaults for 'EWC' (Kirkpatrick et al, 2017)")
            param_reg.add_argument('--si', action='store_true', help="select defaults for 'SI' (Zenke et al, 2017)")
            param_reg.add_argument("--ncl", action="store_true",
                                   help="select defaults for 'NCL' (Kao, Jensen et al., 2021)")
            param_reg.add_argument("--ewc-kfac", action="store_true",
                                   help="select defaults for 'KFAC-EWC' (Ritter et al. 2018)")
            param_reg.add_argument("--owm", action="store_true", help="select defaults for 'OWM' (Zeng et al. 2019)")
            # Custom commands for specifying how parameter regularization should be performed
            param_reg.add_argument("--weight-penalty", action='store_true',
                                   help="penalize parameters important for past contexts")
            param_reg.add_argument('--reg-strength', type=float, metavar='LAMDA',
                                   help="regularisation strength for weight penalty")
            param_reg.add_argument("--precondition", action='store_true',
                                   help="parameter regularization by gradient projection")
            param_reg.add_argument("--alpha", type=float, default=1e-10,
                                   help="small constant stabilizing inversion importance matrix")
            param_reg.add_argument("--importance-weighting", type=str, choices=['fisher', 'si', 'owm'])
        if not no_boundaries:
            param_reg.add_argument('--fisher-n', type=int, help="-> Fisher: sample size estimating Fisher Information")
            param_reg.add_argument('--fisher-batch', type=int, default=1, metavar='N',
                                   help="-> Fisher: batch size estimating FI (should be 1)")
            param_reg.add_argument('--fisher-labels', type=str, default='all', choices=['all', 'sample', 'pred', 'true'],
                                   help="-> Fisher: what labels to use to calculate FI?")
            param_reg.add_argument("--fisher-kfac", action='store_true',
                                   help="-> Fisher: use KFAC approximation rather than diagonal")
            param_reg.add_argument("--fisher-init", action='store_true', help="-> Fisher: start with prior (as in NCL)")
            param_reg.add_argument("--fisher-prior", type=float, metavar='SIZE', dest='data_size',
                                   help="-> Fisher: prior-strength in 'data_size' (as in NCL)")
        param_reg.add_argument('--epsilon', type=float, default=0.1, dest="epsilon", help="-> SI: dampening parameter")
        if main and not no_boundaries:
            param_reg.add_argument('--offline', action='store_true',
                                   help="separate penalty term per context (as original EWC)")
            param_reg.add_argument('--gamma', type=float, default=1.,
                                   help="forgetting coefficient Fishers (as in Online EWC)")
        # For the comparison script in which EWC and SI are both run, to enable different hyper-params for both:
        if compare_all and not no_boundaries:
            param_reg.add_argument('--lambda', type=float, dest="ewc_lambda", help="-> EWC: regularisation strength")
        if compare_all:
            param_reg.add_argument('--c', type=float, dest="si_c", help="-> SI: regularisation strength")

    ## Functional regularization
    func_reg = parser.add_argument_group('Functional Regularization')
    if main:
        func_reg.add_argument('--lwf', action='store_true', help="select defaults for 'LwF' (Li & Hoiem, 2017)")
        func_reg.add_argument('--distill', action='store_true', help="use distillation-loss for the replayed data")
    if not compare_replay:
        func_reg.add_argument('--temp', type=float, default=2., dest='temp', help="temperature for distillation loss")
    if main and not no_boundaries:
        func_reg.add_argument('--fromp', action='store_true', help="use 'FROMP' (Pan et al, 2020)")
    if (not compare_hyper) and not no_boundaries:
        func_reg.add_argument('--tau', type=float, help="-> FROMP: regularization strength")
    if compare_replay:
        func_reg.add_argument('--tau-per-budget', action='store_true',
                              help="-> FROMP: use separate tau for each different budget")

    ## Memory buffer parameters (if data is stored)
    buffer = parser.add_argument_group('Memory Buffer Parameters')
    if not compare_replay:
        buffer.add_argument('--budget', type=int, help="how many samples can be stored{}".format(
            " (total budget)" if no_boundaries else " of each class?"
        ), default=1000 if no_boundaries else None)
        if not no_boundaries:
            buffer.add_argument('--use-full-capacity', action='store_true',
                                help="use budget of future classes to initially store more")
    if main and not no_boundaries:
        buffer.add_argument('--sample-selection', type=str, choices=['random', 'herding', 'fromp'])
        buffer.add_argument('--add-buffer', action='store_true',
                            help="add memory buffer to current context's training data")

    ## Replay
    replay_params = parser.add_argument_group('Replay')
    if main:
        replay_choices = ['none', 'current', 'buffer'] if no_boundaries else ['none', 'all', 'generative',
                                                                              'current', 'buffer']
        replay_params.add_argument('--replay', type=str, default='none', choices=replay_choices)
        replay_params.add_argument('--use-replay', type=str, default='normal', choices=['normal', 'inequality', 'both'])
        #---> Explanation for these three ways to use replay:
        #     - "normal":     add the loss on the replayed data to the loss on the data of the current context
        #     - "inequality": use the gradient of the loss on the replayed data as an inequality constraint (as in A-GEM)
        #     - "both":       do both of the above
        replay_params.add_argument('--agem', action='store_true',
                                   help="select defaults for 'A-GEM' (Chaudhry et al, 2019)")
        replay_params.add_argument('--eps-agem', type=float, default=1e-7,
                                   help="parameter to ensure numerical stability of A-GEM")
    if (not compare_replay) and (not no_boundaries):
        # -parameters for the generative model (if it is a separate model)
        if not compare_hyper:
            replay_params.add_argument('--g-z-dim', type=int, help='size latent space generator (def: as classifier)')
            replay_params.add_argument('--g-fc-lay', type=int, help='[fc_layers] in generator (def: as classifier)')
            replay_params.add_argument('--g-fc-uni', type=int, help='[fc_units] in generator (def: as classifier)')
            replay_params.add_argument('--g-iters', type=int, help="# batches to train generator (def: as classifier)")
            replay_params.add_argument('--lr-gen', type=float, help="learning rate generator (def: as classifier)")
        # -parameters for brain-inspired replay
        if main:
            replay_params.add_argument('--brain-inspired', action='store_true',
                                       help="select defaults for 'BI-R' (van de Ven et al, 2020)")
            replay_params.add_argument('--feedback', action="store_true",
                                       help="equip main model with feedback connections")
        replay_params.add_argument('--prior', type=str, default="standard", choices=["standard", "GMM"])
        replay_params.add_argument('--per-class', action='store_true',
                                   help="if selected, each class has its own modes")
        replay_params.add_argument('--n-modes', type=int, default=1,
                                   help="how many modes for prior (per class)? (def=1)")
        if main:
            replay_params.add_argument('--dg-gates', action='store_true', help="use context-specific gates in decoder")
            replay_params.add_argument('--dg-type', type=str, metavar="TYPE",
                                       help="decoder-gates: based on contexts or classes?")
        if not compare_hyper:
            replay_params.add_argument('--dg-prop', type=float, help="decoder-gates: masking-prop")
        if main:
            replay_params.add_argument('--hidden', action="store_true",
                                       help="gen models at 'internal level' (after conv-layers)")

    ## Template-based classification
    if not compare_replay:
        templ_cl = parser.add_argument_group('Template-Based Classification')
        if main:
            templ_cl.add_argument('--icarl', action='store_true',
                                  help="select defaults for '{}iCaRL' (Rebuffi et al, 2017)".format(
                                      'Modified ' if no_boundaries else ''
                                  ))
            templ_cl.add_argument('--prototypes', action='store_true', help="classify using nearest-exemplar-mean rule")
            templ_cl.add_argument('--gen-classifier', action='store_true',
                                  help="use 'Generative Classifier' (van de Ven et al, 2021)")
        if not compare_hyper:
            templ_cl.add_argument('--eval-s', type=int, default=50,
                                  help="-> Generative Classifier: number of importance samples")
        if compare_all:
            templ_cl.add_argument('--fc-units-gc', type=int, metavar="N",
                                  help="# of hidden units with generative classifier")
            templ_cl.add_argument('--fc-lay-gc', type=int, metavar="N", help="# fc-layers with generative classifier")
            templ_cl.add_argument('--z-dim-gc', type=int, metavar="N", help="size latent space generative classifier")
    return parser
+
+##-------------------------------------------------------------------------------------------------------------------##
diff --git a/PyTorch/build-in/other/continual-learning/params/param_stamp.py b/PyTorch/build-in/other/continual-learning/params/param_stamp.py
new file mode 100644
index 000000000..2ddb3cf34
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/params/param_stamp.py
@@ -0,0 +1,201 @@
+from data.load import get_context_set
+from models import define_models as define
+from utils import checkattr
+
+
def visdom_name(args):
    '''Compose the name used for the graph in visdom, based on [args].'''
    # -iCaRL is 'active' only when all four of its component options are selected
    uses_icarl = all(checkattr(args, option) for option in ('prototypes', 'add_buffer', 'bce', 'bce_distill'))

    parts = []
    # -one-model (replay-through-feedback) marker
    if checkattr(args, 'feedback'):
        parts.append("1M-")
    # -replay settings
    replay_part = args.replay
    if checkattr(args, 'distill'):
        replay_part += "D"
    if hasattr(args, 'use_replay') and args.use_replay=='inequality':
        replay_part += "-aGEM"
    parts.append(replay_part)
    # -parameter regularization
    if checkattr(args, 'weight_penalty'):
        parts.append("-par{}-{}".format(args.reg_strength, args.importance_weighting))
    # -context-specific gating (XdG)
    if checkattr(args, 'xdg') and args.gating_prop != 0:
        parts.append("-XdG{}".format(args.gating_prop))
    if uses_icarl:
        parts.append("-iCaRL")
    # -functional regularization (FROMP)
    if checkattr(args, 'fromp'):
        parts.append("-FROMP{}".format(args.tau))
    # -memory budget (shown whenever a memory buffer is used for replay or iCaRL)
    if args.replay=='buffer' or uses_icarl:
        parts.append("-bud{}".format(args.budget))
    return "".join(parts)
+
+
def get_param_stamp_from_args(args, no_boundaries=False):
    '''To get param-stamp a bit quicker.

    Builds just enough of the model architectures on the CPU to obtain their names
    (no training is performed), then delegates to [get_param_stamp] to compose the
    full parameter-stamp string.

    Args:
        args: parsed command-line arguments
        no_boundaries (bool, optional): whether the task-free ("stream") protocol is used

    Returns:
        str: the parameter-stamp identifying this experimental setting
    '''

    # Only the configuration of the context set is needed (not the data itself)
    config = get_context_set(
        name=args.experiment, scenario=args.scenario, contexts=args.contexts, data_dir=args.d_dir, only_config=True,
        normalize=checkattr(args, "normalize"), verbose=False, singlehead=checkattr(args, 'singlehead'),
    )

    # -get feature extractor architecture (if used)
    feature_extractor_name = None
    depth = args.depth if hasattr(args, 'depth') else 0
    use_feature_extractor = checkattr(args, 'hidden') or (
        checkattr(args, 'freeze_convE') and (not args.replay=="generative") and (not checkattr(args, "add_buffer"))
        and (not checkattr(args, "augment")) and (not checkattr(args, 'gen_classifier'))
    )
    if use_feature_extractor:
        feature_extractor = define.define_feature_extractor(args=args, config=config, device='cpu')
        feature_extractor_name = feature_extractor.name if depth > 0 else None
        config = config.copy()  # -> make a copy to avoid overwriting info in the original config-file
        # -the classifier then operates on the feature extractor's output rather than on the raw inputs
        config['size'] = feature_extractor.conv_out_size
        config['channels'] = feature_extractor.conv_out_channels
        depth = 0
    # -get classifier architecture
    model = define.define_classifier(args=args, config=config, device='cpu', depth=depth, stream=no_boundaries)
    # -get generator architecture (only if a separate generative model is trained for replay)
    train_gen = (args.replay=="generative" and not checkattr(args, 'feedback'))
    if train_gen:
        generator = define.define_vae(args=args, config=config, device='cpu', depth=depth)

    model_name = model.name
    replay_model_name = generator.name if train_gen else None
    param_stamp = get_param_stamp(args, model_name, verbose=False, replay_model_name=replay_model_name,
                                  feature_extractor_name=feature_extractor_name, no_boundaries=no_boundaries)
    return param_stamp
+
+
def get_param_stamp(args, model_name, verbose=True, replay_model_name=None, feature_extractor_name=None,
                    no_boundaries=False):
    '''Based on the input-arguments, produce a "parameter-stamp".

    The parameter-stamp is a single string identifying the experimental setting; it is
    assembled from sub-stamps for the problem, the model, the training settings and the
    selected continual-learning options.

    Args:
        args: parsed command-line arguments
        model_name (str): name of the main model / classifier
        verbose (bool, optional): if ``True``, print the individual sub-stamps to the screen
        replay_model_name (str or None, optional): name of the separate generative model (if one is trained)
        feature_extractor_name (str or None, optional): name of the fixed feature extractor (if one is used)
        no_boundaries (bool, optional): whether the task-free ("stream") protocol is used

    Returns:
        str: the parameter-stamp
    '''

    # -for problem specification (experiment, scenario, number of contexts, possible baselines)
    multi_n_stamp = "{n}{joint}{cum}-{sce}".format(n=args.contexts, joint="-Joint" if checkattr(args, 'joint') else "",
                                                   cum="-Cummulative" if checkattr(args, 'cummulative') else "",
                                                   sce=args.scenario) if hasattr(args, "contexts") else ""
    stream_stamp = "-{stream}{fuzz}".format(
        stream=args.stream, fuzz="{}-".format(args.fuzziness) if args.stream=="fuzzy-boundaries" else "-"
    ) if no_boundaries else ""
    problem_stamp = "{exp}{stream}{norm}{aug}{multi_n}".format(
        exp=args.experiment, stream=stream_stamp, norm="-N" if hasattr(args, 'normalize') and args.normalize else "",
        aug="+" if hasattr(args, "augment") and args.augment else "", multi_n=multi_n_stamp
    )
    if verbose:
        print(" --> problem: "+problem_stamp)

    # -for model (classifier architecture, possibly prefixed by the feature extractor's name)
    model_stamp = model_name if feature_extractor_name is None else "H{}--{}".format(feature_extractor_name, model_name)
    if verbose:
        print(" --> model: "+model_stamp)

    # -for training settings (iterations, learning rate, batch size, optimizer, pre-trained/frozen conv-layers, ...)
    if checkattr(args, "pre_convE") and hasattr(args, 'depth') and args.depth>0:
        ltag = "" if ((not hasattr(args, "convE_ltag")) or args.convE_ltag=="none") else "-{}{}".format(
            args.convE_ltag, "-ps" if checkattr(args, 'seed_to_ltag') else ""
        )
        pre = "-pCvE{}".format(ltag)
    else:
        pre = ""
    # -freezing of conv-layers is only noted here if no separate feature extractor is listed in the model-stamp
    freeze_conv = (checkattr(args, "freeze_convE") and hasattr(args, 'depth') and args.depth>0)
    freeze = "-fCvE" if (freeze_conv and (feature_extractor_name is None)) else ""
    train_stamp = "i{num}-lr{lr}-b{bsz}{pre}{freeze}-{optim}{mom}{neg}{recon}".format(
        num=args.iters, lr=args.lr, bsz=args.batch, pre=pre, freeze=freeze, optim=args.optimizer, mom="-m{}".format(
            args.momentum
        ) if args.optimizer=='sgd' and hasattr(args, 'momentum') and args.momentum>0 else "",
        neg="-{}".format(args.neg_samples) if (
            args.scenario=="class" and (not checkattr(args, 'gen_classifier')) and (not no_boundaries)
        ) else "",
        recon="-{}".format(args.recon_loss) if (
            checkattr(args, 'gen_classifier') or (hasattr(args, 'replay') and args.replay=="generative")
        ) else "",
    )
    if verbose:
        print(" --> train-params: " + train_stamp)

    # -for parameter regularization
    param_reg_stamp = ""
    if checkattr(args, 'weight_penalty') or checkattr(args, 'precondition'):
        param_reg_stamp = "-"
        # -how is parameter regularization done (weight penalty and/or preconditioning)?
        if checkattr(args, 'weight_penalty'):
            param_reg_stamp += "-PReg{}".format(args.reg_strength)
        if checkattr(args, 'precondition'):
            param_reg_stamp += "-PreC{}".format(args.alpha)
        # -how is the parameter importance computed?
        if args.importance_weighting=='fisher':
            param_reg_stamp += "-FI{}{}{}{}{}{}".format(
                "kfac" if checkattr(args, 'fisher_kfac') else 'diag',
                "I{}".format(args.data_size) if checkattr(args, 'fisher_init') else "",
                "N" if args.fisher_n is None else args.fisher_n,
                "Emp" if args.fisher_labels=="true" else ("Pred" if args.fisher_labels=="pred" else (
                    "Sam" if args.fisher_labels=="sample" else "All"
                )),
                "B{}".format(args.fisher_batch) if (hasattr(args, 'fisher_batch') and args.fisher_batch>1) else "",
                # -use a separate term per task or a forgetting coefficient:
                "-offline" if checkattr(args, 'offline') else (
                    "-forg{}".format(args.gamma) if hasattr(args, 'gamma') and args.gamma < 1 else ""
                ),
            )
        elif args.importance_weighting=='si':
            param_reg_stamp += "-SI{}".format(args.epsilon)
        elif args.importance_weighting=='owm':
            param_reg_stamp += "-OWM"

    # -for context-specific components
    xdg_stamp = ""
    if checkattr(args, 'xdg') and args.gating_prop>0:
        xdg_stamp = "--XdG{}".format(args.gating_prop)
        if verbose:
            print(" --> XdG: " + "gating = {}".format(args.gating_prop))

    # -for replay / functional regularization (except FROMP)
    replay_stamp = ""
    if hasattr(args, 'replay') and not args.replay=="none":
        replay_stamp = "{rep}{KD}{use}{model}{gi}{lrg}".format(
            rep=args.replay,
            KD="-KD{}".format(args.temp) if checkattr(args, 'distill') else "",
            use="-{}{}".format(
                "A-GEM" if args.use_replay=='inequality' else "both",
                "" if ((not hasattr(args, 'eps_agem')) or args.eps_agem==0) else args.eps_agem
            ) if hasattr(args, 'use_replay') and (not args.use_replay=='normal') else "",
            model="" if (replay_model_name is None) else "-{}".format(replay_model_name),
            # -generator-specific settings are only noted if they differ from the main model's settings
            gi="-gi{}".format(args.gen_iters) if (
                hasattr(args, "gen_iters") and (replay_model_name is not None) and (not args.iters==args.gen_iters)
            ) else "",
            lrg="-glr{}".format(args.lr_gen) if (
                hasattr(args, "lr_gen") and (replay_model_name is not None) and (not args.lr==args.lr_gen)
            ) else "",
        )
        if verbose:
            print(" --> replay: " + replay_stamp)
        replay_stamp = "--{}".format(replay_stamp)

    # -for memory-buffer & its use (e.g., FROMP, iCaRL)
    memory_buffer_stamp = ""
    use_memory_buffer = checkattr(args, 'prototypes') or checkattr(args, 'add_buffer') or args.replay=="buffer" \
                        or checkattr(args, 'fromp')
    if use_memory_buffer:
        buffer_opts = "b{bud}{cap}{sel}".format(
            bud=args.budget, cap="-FC" if checkattr(args, 'use_full_capacity') else "",
            sel=args.sample_selection if hasattr(args, 'sample_selection') else 'random'
        )
        use = "{}{}{}".format("addB-" if checkattr(args, 'add_buffer') else "",
                              "useB-" if checkattr(args, 'prototypes') else "",
                              "fromp{}-".format(args.tau) if checkattr(args, 'fromp') else "")
        memory_buffer_stamp = "--{}{}".format(use, buffer_opts)
        if verbose:
            print(" --> memory buffer: " + "{}{}".format(use, buffer_opts))

    # -for binary classification loss (e.g., iCaRL)
    bin_stamp = ""
    if checkattr(args, 'bce'):
        bin_stamp = '--BCE_dist' if (checkattr(args, 'bce_distill') and args.scenario=="class") else '--BCE'

    # -specific to task-free protocol: how often to update the 'previous_model' relative to which to stay close
    stream_stamp = ""
    if no_boundaries and hasattr(args, 'update_every') and not args.update_every==1:
        if use_memory_buffer or replay_stamp or param_reg_stamp:
            stream_stamp = '--upEv{}'.format(args.update_every)

    # --> combine all sub-stamps (plus the random seed, if it differs from the default of 0)
    param_stamp = "{}--{}--{}{}{}{}{}{}{}{}".format(
        problem_stamp, model_stamp, train_stamp, param_reg_stamp, xdg_stamp, replay_stamp, memory_buffer_stamp,
        bin_stamp, stream_stamp, "-s{}".format(args.seed) if not args.seed==0 else ""
    )

    ## Print param-stamp on screen and return
    if verbose:
        print(param_stamp)
    return param_stamp
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/params/param_values.py b/PyTorch/build-in/other/continual-learning/params/param_values.py
new file mode 100644
index 000000000..15ec765e7
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/params/param_values.py
@@ -0,0 +1,220 @@
+from utils import checkattr
+
+
def set_method_options(args, **kwargs):
    '''If a 'convenience' option for a specific method is selected, set the corresponding default options.'''
    # Per convenience-flag, the attribute values it implies (applied in this order, so later
    # flags overwrite earlier ones when multiple convenience-options are selected).
    method_defaults = {
        'ewc': dict(weight_penalty=True, importance_weighting='fisher', offline=True),
        'si': dict(weight_penalty=True, importance_weighting='si'),
        'ncl': dict(weight_penalty=True, precondition=True, importance_weighting='fisher',
                    fisher_kfac=True, fisher_init=True),
        'kfac_ewc': dict(weight_penalty=True, importance_weighting='fisher', fisher_kfac=True),
        'owm': dict(precondition=True, importance_weighting='owm'),
        'lwf': dict(replay="current", distill=True),
        'agem': dict(replay="buffer", use_replay="inequality"),
        'brain_inspired': dict(
            replay="generative",
            feedback=True,      # --> replay-through-feedback
            prior='GMM',        # --> conditional replay
            per_class=True,     # --> conditional replay
            dg_gates=True,      # --> gating based on internal context (has hyper-param 'dg_prop')
            hidden=True,        # --> internal replay
            pre_convE=True,     # --> internal replay
            distill=True,       # --> distillation
        ),
        'icarl': dict(prototypes=True, add_buffer=True, bce=True, bce_distill=True,
                      sample_selection='herding'),
    }
    for flag, defaults in method_defaults.items():
        if checkattr(args, flag):
            for attribute, value in defaults.items():
                setattr(args, attribute, value)
+
+
def set_default_values(args, also_hyper_params=True, single_context=False, no_boundaries=False):
    '''Fill in default values for arguments that were not explicitly set on the command line.

    Defaults depend on the chosen experiment and scenario; every assignment below only takes
    effect when the corresponding argument is still ``None`` (i.e., was not set by the user).

    Args:
        args: parsed command-line arguments (modified in-place)
        also_hyper_params (bool, optional): whether to also set method hyper-parameters
            (regularization strengths, gating proportions, FROMP's tau, ...)
        single_context (bool, optional): if ``True``, skip defaults that only make sense for
            multi-context training (number of contexts, per-context logging intervals)
        no_boundaries (bool, optional): whether the task-free ("stream") protocol is used
    '''
    # -set default-values for certain arguments based on chosen experiment
    args.normalize = args.normalize if args.experiment in ('CIFAR10', 'CIFAR100') else False
    args.depth = (
        5 if (args.experiment in ('CIFAR10', 'CIFAR100')) or checkattr(args, 'reducedResNet') else 0
    ) if args.depth is None else args.depth
    args.fc_lay = (1 if checkattr(args, 'reducedResNet') else 3) if args.fc_lay is None else args.fc_lay
    args.channels = (20 if checkattr(args, 'reducedResNet') else 16) if args.channels is None else args.channels
    args.rl = 3 if checkattr(args, 'reducedResNet') and (args.rl is None) else args.rl
    args.gp = True if checkattr(args, 'reducedResNet') else args.gp
    args.conv_type = 'resNet' if checkattr(args, 'reducedResNet') else args.conv_type
    if not single_context:
        args.contexts = (
            5 if args.experiment in ('splitMNIST', 'CIFAR10') else 10
        ) if args.contexts is None else args.contexts
    args.iters = (2000 if args.experiment == 'splitMNIST' else 5000) if args.iters is None else args.iters
    args.lr = (0.001 if args.experiment == 'splitMNIST' else 0.0001) if args.lr is None else args.lr
    args.batch = (128 if args.experiment in ('splitMNIST', 'permMNIST') else 256) if args.batch is None else args.batch
    # -fully-connected layer size (smaller default when each context gets its own network)
    if checkattr(args, 'separate_networks'):
        args.fc_units = (100 if args.experiment == 'splitMNIST' else 400) if args.fc_units is None else args.fc_units
    else:
        args.fc_units = (400 if args.experiment == 'splitMNIST' else (
            1000 if args.experiment == 'permMNIST' else 2000
        )) if args.fc_units is None else args.fc_units
    if hasattr(args, 'fc_units_sep'):
        args.fc_units_sep = (
            100 if args.experiment == 'splitMNIST' else 400
        ) if args.fc_units_sep is None else args.fc_units_sep
    # -architecture of the generative classifier (if its options are present)
    if hasattr(args, 'fc_units_gc'):
        args.fc_units_gc = 85 if args.fc_units_gc is None else args.fc_units_gc
        args.fc_lay_gc = (3 if args.experiment == 'splitMNIST' else 2) if args.fc_lay_gc is None else args.fc_lay_gc
        args.z_dim_gc = (5 if args.experiment == 'splitMNIST' else 20) if args.z_dim_gc is None else args.z_dim_gc
    if hasattr(args, 'recon_loss'):
        args.recon_loss = (
            "MSE" if args.experiment in ('CIFAR10', 'CIFAR100') else "BCE"
        ) if args.recon_loss is None else args.recon_loss
    if hasattr(args, "dg_type"):
        args.dg_type = ("context" if args.scenario == 'domain' else "class") if args.dg_type is None else args.dg_type
    if hasattr(args, 'budget'):
        args.budget = (10 if args.experiment == 'permMNIST' else 100) if args.budget is None else args.budget
    if hasattr(args, 'sample_selection'):
        args.sample_selection = ('fromp' if checkattr(args, 'fromp') else (
            'herding' if checkattr(args, 'icarl') else 'random'
        )) if args.sample_selection is None else args.sample_selection
    # -set other default arguments (if they were not selected)
    # (generator settings default to the corresponding settings of the main model)
    if hasattr(args, 'lr_gen'):
        args.lr_gen = args.lr if args.lr_gen is None else args.lr_gen
        args.g_iters = args.iters if args.g_iters is None else args.g_iters
        args.g_z_dim = args.z_dim if args.g_z_dim is None else args.g_z_dim
        args.g_fc_lay = args.fc_lay if args.g_fc_lay is None else args.g_fc_lay
        args.g_fc_uni = args.fc_units if args.g_fc_uni is None else args.g_fc_uni
    # -unless the number of iterations after which to log is explicitly set, set them equal to # of iters per context
    if not single_context:
        args.acc_log = args.iters if (not hasattr(args, 'acc_log')) or args.acc_log is None else args.acc_log
        args.loss_log = args.iters if (not hasattr(args, 'loss_log')) or args.loss_log is None else args.loss_log
        args.sample_log = args.iters if (not hasattr(args,'sample_log')) or args.sample_log is None else args.sample_log

    # -set default-values for certain arguments based on chosen scenario & experiment
    if hasattr(args, 'scenario') and args.scenario == 'task' and hasattr(args, 'gating_prop'):
        # -context-specific gating
        args.gating_prop = (
            0.85 if args.experiment == 'CIFAR100' else (0.9 if args.experiment == 'splitMNIST' else 0.6)
        ) if args.gating_prop is None else args.gating_prop
    if also_hyper_params:
        # -regularization strength
        # (defaults differ per scenario; presumably tuned per experiment -- TODO confirm source of these values)
        if not hasattr(args, 'si_c'):
            args.si_c = None
        if not hasattr(args, 'ewc_lambda'):
            args.ewc_lambda = None
        if no_boundaries:
            args.si_c = 10. if args.si_c is None else args.si_c
        elif args.scenario == 'task':
            args.si_c = (
                10. if args.experiment == 'splitMNIST' else (100. if args.experiment == 'CIFAR100' else 10.)
            ) if args.si_c is None else args.si_c
            args.ewc_lambda = (
                100000. if args.experiment == 'splitMNIST' else (1000. if args.experiment == 'CIFAR100' else 100.)
            ) if args.ewc_lambda is None else args.ewc_lambda
        elif args.scenario == 'domain':
            args.si_c = (
                50000. if args.experiment == 'splitMNIST' else (500. if args.experiment == 'CIFAR100' else 10.)
            ) if args.si_c is None else args.si_c
            args.ewc_lambda = (
                10000000000. if args.experiment == 'splitMNIST' else (1000. if args.experiment == 'CIFAR100' else 100.)
            ) if args.ewc_lambda is None else args.ewc_lambda
        elif args.scenario == 'class':
            args.si_c = (5000. if args.experiment == 'splitMNIST' else 5.) if args.si_c is None else args.si_c
            args.ewc_lambda = (
                1000000000. if args.experiment == 'splitMNIST' else 100.
            ) if args.ewc_lambda is None else args.ewc_lambda
        if hasattr(args, 'reg_strength'):
            args.reg_strength = (
                args.si_c if checkattr(args, 'si') else (args.ewc_lambda if checkattr(args, 'ewc') else 1.)
            ) if args.reg_strength is None else args.reg_strength
        # -use a prior for the Fisher (as in NCL)
        if hasattr(args, 'data_size'):
            args.data_size = (12000 if args.experiment == 'splitMNIST' else (
                60000 if args.experiment == 'permMNIST' else (5000 if args.experiment == 'CIFAR100' else 10000)
            )) if args.data_size is None else args.data_size
        # -gating based on internal context (brain-inspired replay)
        if args.scenario == 'task' and hasattr(args, 'dg_prop'):
            args.dg_prop = (0. if args.experiment == 'splitMNIST' else 0.) if args.dg_prop is None else args.dg_prop
        elif args.scenario == 'domain' and hasattr(args, 'dg_prop'):
            args.dg_prop = (0.1 if args.experiment == 'splitMNIST' else 0.5) if args.dg_prop is None else args.dg_prop
        elif args.scenario == 'class' and hasattr(args, 'dg_prop'):
            args.dg_prop = (0.1 if args.experiment == 'splitMNIST' else 0.7) if args.dg_prop is None else args.dg_prop
        if hasattr(args, 'tau'):
            # -fromp
            args.tau = ((0.01 if args.scenario == 'task' else (
                10. if args.scenario == 'domain' else 1000.
            )) if args.experiment == 'splitMNIST' else 1.) if args.tau is None else args.tau
+
+
def check_for_errors(args, pretrain=False, **kwargs):
    '''Check [args] for illegal or unsupported combinations of options.

    Raises ``ValueError`` for invalid combinations and ``NotImplementedError`` for
    combinations that are valid in principle but not (yet) supported by this code-base.
    For some combinations that are supported but perhaps surprising (e.g., methods only
    applied to the fully connected layers), a warning is printed instead.

    Args:
        args: parsed command-line arguments
        pretrain (bool, optional): whether the arguments are for the pre-training script
            (which has different restrictions than the main continual-learning scripts)
    '''
    if pretrain:
        if checkattr(args, 'augment') and not args.experiment in ('CIFAR10', 'CIFAR100'):
            raise ValueError("Augmentation is only supported for 'CIFAR10' or 'CIFAR-100'.")
    if not pretrain:
        if (checkattr(args, 'separate_networks') or checkattr(args, 'xdg')) and (not args.scenario == "task"):
            raise ValueError("'XdG' or 'SeparateNetworks' can only be used with --scenario='task'.")
        # -Replay-through-Feedback model is not (yet) implemented with all possible options
        if checkattr(args, 'feedback') and (checkattr(args, 'precondition') or (
                hasattr(args, 'use_replay') and args.use_replay in ('inequality', 'both')
        )):
            raise NotImplementedError('Replay-through-Feedback currently does not support gradient projection.')
        if checkattr(args, 'feedback') and checkattr(args, 'xdg'):
            raise NotImplementedError('Replay-through-Feedback currently does not support XdG (in the encoder).')
        if checkattr(args, 'feedback') and args.importance_weighting=='fisher' and checkattr(args, 'fisher_kfac'):
            raise NotImplementedError('Replay-through-Feedback currently does not support using KFAC Fisher.')
        if checkattr(args, 'feedback') and checkattr(args, 'bce'):
            raise NotImplementedError('Replay-through-Feedback currently does not support binary classification loss.')
        # -if 'BCEdistill' is selected for other than scenario=="class", give error
        if checkattr(args, 'bce_distill') and not args.scenario=="class":
            raise ValueError("BCE-distill can only be used for class-incremental learning.")
        # -with parameter regularization, not (yet) all combinations are implemented
        if hasattr(args, 'importance_weighting') and args.importance_weighting=='owm' and \
                checkattr(args, 'weight_penalty'):
            raise NotImplementedError('OWM-based importance weighting not supported with parameter weight penalty.')
        if hasattr(args, 'importance_weighting') and args.importance_weighting=='si' and \
                checkattr(args, 'precondition'):
            raise NotImplementedError('SI-based importance weighting not supported with parameter pre-conditioning.')
        # -FROMP has a limited range of options it can be combined with
        if checkattr(args, 'fromp') and hasattr(args, 'optimizer') and args.optimizer=="sgd":
            raise NotImplementedError('FROMP is only supported with ADAM optimizer.')
        if checkattr(args, 'fromp') and hasattr(args, 'replay') and not args.replay=="none":
            raise NotImplementedError('FROMP is not supported combined with replay.')
        if checkattr(args, 'fromp') and (checkattr(args, 'weight_penalty') or checkattr(args, 'precondition')):
            raise NotImplementedError('FROMP is not supported combined with parameter regularization.')
        # -the Generative Classifier implemented here cannot be combined with other approaches
        if checkattr(args, 'gen_classifier') and hasattr(args, 'replay') and not args.replay == "none":
            raise NotImplementedError('The Generative Classifier is not supported with replay.')
        if checkattr(args, 'gen_classifier') and (checkattr(args, 'weight_penalty') or checkattr(args, 'precondition')):
            raise NotImplementedError('The Generative Classifier is not supported with parameter regularization.')
        if checkattr(args, 'gen_classifier') and checkattr(args, 'fromp'):
            raise NotImplementedError('The Generative Classifier is not supported with FROMP.')
        # -a conditional generative model for GR is only supported in combination with Replay-through-Feedback
        if (checkattr(args, 'per_class') or checkattr(args, 'dg_gates')) and not checkattr(args, 'feedback'):
            raise NotImplementedError('A VAE with separate mode per class or context-specific gates in the decoder is '
                                      'only supported in combination with the replay-through-feedback model.')
        # -warning about that XdG and FROMP and KFAC are only applied to fully connected layers?
        # (conv-layers are trainable when present, unless they are frozen and not replaced by internal replay)
        trainable_conv = hasattr(args, 'depth') and args.depth>0 and ((not checkattr(args, 'freeze_convE')) or
                                                                      checkattr(args, 'hidden'))
        if checkattr(args, 'xdg') and trainable_conv:
            print('Note that XdG is only applied to the fully connected layers of the network.')
        if checkattr(args, 'fromp') and trainable_conv:
            print('Note that FROMP is only applied to the fully connected layers of the network.')
        if checkattr(args, 'fisher_kfac') and trainable_conv:
            print('Note that parameter regularization based on KFAC Fisher is only applied to '
                  'the fully connected layers of the network.')
        if hasattr(args, 'importance_weighting') and args.importance_weighting=='owm' and trainable_conv:
            print('Note that OWM is only applied to the fully connected layers of the network.')
+
diff --git a/PyTorch/build-in/other/continual-learning/requirements.txt b/PyTorch/build-in/other/continual-learning/requirements.txt
new file mode 100644
index 000000000..0934f044d
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/requirements.txt
@@ -0,0 +1,11 @@
+numpy
+scipy
+pandas
+torch
+torchvision
+tqdm
+scikit-learn
+matplotlib
+visdom
+jupyterlab
+ipywidgets
diff --git a/PyTorch/build-in/other/continual-learning/store/README.md b/PyTorch/build-in/other/continual-learning/store/README.md
new file mode 100644
index 000000000..0b0163d75
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/store/README.md
@@ -0,0 +1,7 @@
+All outputs generated by `main.py`, `main_task_free.py` and `main_pretrain.py` will by default be stored
+in this directory, organized into the following subdirectories:
+
+- datasets
+- models
+- plots
+- results
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/train/__init__.py b/PyTorch/build-in/other/continual-learning/train/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/train/train_standard.py b/PyTorch/build-in/other/continual-learning/train/train_standard.py
new file mode 100644
index 000000000..e355fb98b
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/train/train_standard.py
@@ -0,0 +1,43 @@
+import tqdm
+
+
def train(model, train_loader, iters, loss_cbs=list(), eval_cbs=list()):
    '''Train a model with a "train_a_batch" method for [iters] iterations on data from [train_loader].

    [model]        model to optimize, must have a built-in `train_a_batch`-method
    [train_loader] <dataloader> for training [model] on
    [iters]        <int> (max) number of iterations (i.e., batches) to train for
    [loss_cbs]     <list> of callback-functions to keep track of training progress
    [eval_cbs]     <list> of callback-functions to evaluate model on separate data-set'''

    device = model._device()

    # Create progress-bar (with manual control)
    bar = tqdm.tqdm(total=iters)

    iteration = epoch = 0
    try:
        while iteration < iters:
            epoch += 1
            batches_in_epoch = 0

            # Loop over all batches of an epoch
            for data, y in train_loader:
                batches_in_epoch += 1
                iteration += 1

                # Perform training-step on this batch
                data, y = data.to(device), y.to(device)
                loss_dict = model.train_a_batch(data, y=y)

                # Fire training-callbacks (for visualization of training-progress)
                for loss_cb in loss_cbs:
                    if loss_cb is not None:
                        loss_cb(bar, iteration, loss_dict)

                # Fire evaluation-callbacks (to be executed every [eval_log] iterations, as specified within the functions)
                for eval_cb in eval_cbs:
                    if eval_cb is not None:
                        eval_cb(model, iteration)

                # Break if max-number of iterations is reached
                if iteration == iters:
                    break

            # Guard against an empty data-loader, which would otherwise cause an infinite loop
            if batches_in_epoch == 0:
                break
    finally:
        # Always close the progress-bar, also if training stops early or an error is raised
        # (the original only closed it on the exact-iters path, leaking the bar otherwise)
        bar.close()
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/train/train_stream.py b/PyTorch/build-in/other/continual-learning/train/train_stream.py
new file mode 100644
index 000000000..a4a13059c
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/train/train_stream.py
@@ -0,0 +1,143 @@
+import torch
+import tqdm
+import copy
+from utils import checkattr
+from models.cl.continual_learner import ContinualLearner
+
+
def train_on_stream(model, datastream, iters=2000, loss_cbs=list(), eval_cbs=list()):
    '''Incrementally train a model on a ('task-free') stream of data.
    Args:
        model (Classifier): model to be trained, must have a built-in `train_a_batch`-method
        datastream (DataStream): iterator-object that returns for each iteration the training data
        iters (int, optional): max number of iterations, could be smaller if `datastream` runs out (default: ``2000``)
        *_cbs (list of callback-functions, optional): for evaluating training-progress (defaults: empty lists)
    '''

    # Define tqdm progress bar(s)
    progress = tqdm.tqdm(range(1, iters + 1))

    ##--> SI: Register starting parameter values
    if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
        # -[start_new_W] signals that a fresh set of running importance estimates should be started
        start_new_W = True
        model.register_starting_param_values()

    # -frozen copy of [model], serving as source for replay and/or distillation targets
    previous_model = None

    for batch_id, (x,y,c) in enumerate(datastream, 1):

        # Stop when the maximum number of iterations is reached
        if batch_id > iters:
            break

        ##--> SI: Prepare to store running importance estimates and param-values before update
        if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
            if start_new_W:
                W, p_old = model.prepare_importance_estimates_dicts()
                start_new_W = False

        # Move data to correct device
        # (c presumably holds context-identifiers for the samples in the batch -- confirm against DataStream)
        x = x.to(model._device())
        y = y.to(model._device())
        if c is not None:
            c = c.to(model._device())

        # If using separate networks, the y-targets need to be adjusted
        # (each sub-network uses 'within-context' labels, so subtract the offset of the sample's context)
        if model.label == "SeparateClassifiers":
            for sample_id in range(x.shape[0]):
                y[sample_id] = y[sample_id] - model.classes_per_context * c[sample_id]

        # Add replay...
        (x_, y_, c_, scores_) = (None, None, None, None)
        if hasattr(model, 'replay_mode') and model.replay_mode=='buffer' and previous_model is not None:
            # ... from the memory buffer
            (x_, y_, c_) = previous_model.sample_from_buffer(x.shape[0])
            if model.replay_targets=='soft':
                with torch.no_grad():
                    scores_ = previous_model.classify(x_, c_, no_prototypes=True)
        elif hasattr(model, 'replay_mode') and model.replay_mode=='current' and previous_model is not None:
            # ... using the data from the current batch (as in LwF)
            x_ = x
            if c is not None:
                c_ = previous_model.sample_contexts(x_.shape[0]).to(model._device())
            with torch.no_grad():
                scores_ = previous_model.classify(x, c_, no_prototypes=True)
            # -hard targets for the replayed data are the previous model's predicted labels
            _, y_ = torch.max(scores_, dim=1)
        # -only keep [y_] or [scores_], depending on whether replay is with 'hard' or 'soft' targets
        y_ = y_ if (hasattr(model, 'replay_targets') and model.replay_targets == "hard") else None
        scores_ = scores_ if (hasattr(model, 'replay_targets') and model.replay_targets == "soft") else None

        # Train the model on this batch
        # (NOTE(review): rnt=0.5 presumably weighs current and replayed data equally -- confirm in [train_a_batch])
        loss_dict = model.train_a_batch(x, y, c, x_=x_, y_=y_, c_=c_, scores_=scores_, rnt=0.5)

        ##--> SI: Update running parameter importance estimates in W (needed for SI)
        if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
            model.update_importance_estimates(W, p_old)

        # Add the observed data to the memory buffer (if selected by the algorithm that fills the memory buffer)
        if checkattr(model, 'use_memory_buffer'):
            model.add_new_samples(x, y, c)
        if hasattr(model, 'replay_mode') and model.replay_mode == 'current' and c is not None:
            model.keep_track_of_contexts_so_far(c)

        # Fire callbacks (for visualization of training-progress / evaluating performance after each task)
        for loss_cb in loss_cbs:
            if loss_cb is not None:
                loss_cb(progress, batch_id, loss_dict)
        for eval_cb in eval_cbs:
            if eval_cb is not None:
                eval_cb(model, batch_id, context=None)

        ##--> SI: calculate and update the normalized path integral
        # (every [update_every] batches, consolidate the running estimates W into omega and start a fresh W)
        if isinstance(model, ContinualLearner) and model.importance_weighting=='si' and model.weight_penalty:
            if (batch_id % model.update_every)==0:
                model.update_omega(W, model.epsilon)
                start_new_W = True

        ##--> Replay: update source for replay
        # (every [update_every] batches, freeze a copy of the current model to serve as 'previous model')
        if hasattr(model, 'replay_mode') and (not model.replay_mode=="none"):
            if (batch_id % model.update_every)==0:
                previous_model = copy.deepcopy(model).eval()

    # Close progress-bar(s)
    progress.close()
+
+#------------------------------------------------------------------------------------------------------------#
+
def train_gen_classifier_on_stream(model, datastream, iters=2000, loss_cbs=list(), eval_cbs=list()):
    '''Incrementally train a generative classifier model on a ('task-free') stream of data.
    Args:
        model (Classifier): generative classifier, each generative model must have a built-in `train_a_batch`-method
        datastream (DataStream): iterator-object that returns for each iteration the training data
        iters (int, optional): max number of iterations, could be smaller if `datastream` runs out (default: ``2000``)
        *_cbs (list of callback-functions, optional): for evaluating training-progress (defaults: empty lists)
    '''

    # Progress bar (with manual control)
    progress = tqdm.tqdm(range(1, iters + 1))

    batch_id = 0
    for x, y, _ in datastream:
        batch_id += 1

        # Stop once the maximum number of iterations has been reached
        if batch_id > iters:
            break

        # Transfer the current batch to the model's device
        x = x.to(model._device())
        y = y.to(model._device())

        # For every class present in this batch, take a training step on the corresponding generative model
        for class_id in range(model.classes):
            if class_id in y:
                class_samples = x[y==class_id]
                loss_dict = getattr(model, "vae{}".format(class_id)).train_a_batch(class_samples)
                # NOTE: this way, only the [loss_dict] of the last class present in the batch enters the [loss_cb]

        # Fire callbacks (for visualization of training-progress / evaluating performance after each task)
        for callback in loss_cbs:
            if callback is not None:
                callback(progress, batch_id, loss_dict)
        for callback in eval_cbs:
            if callback is not None:
                callback(model, batch_id, context=None)

    # Close progress-bar(s)
    progress.close()
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/train/train_task_based.py b/PyTorch/build-in/other/continual-learning/train/train_task_based.py
new file mode 100644
index 000000000..9232e86d7
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/train/train_task_based.py
@@ -0,0 +1,635 @@
+import torch
+from torch import optim
+from torch.utils.data.dataloader import DataLoader
+from torch.utils.data import ConcatDataset
+import numpy as np
+import tqdm
+import copy
+from utils import get_data_loader,checkattr
+from data.manipulate import SubDataset, MemorySetDataset
+from models.cl.continual_learner import ContinualLearner
+
+
def train_cl(model, train_datasets, iters=2000, batch_size=32, baseline='none',
             loss_cbs=list(), eval_cbs=list(), sample_cbs=list(), context_cbs=list(),
             generator=None, gen_iters=0, gen_loss_cbs=list(), **kwargs):
    '''Train a model (with a "train_a_batch" method) on multiple contexts.

    [model]           main model to optimize across all contexts
    [train_datasets]  list with for each context the training dataset
    [iters]           number of optimization-steps (i.e., number of mini-batches) per context
    [batch_size]      number of samples per mini-batch
    [baseline]        'joint': model trained once on data from all contexts
                      'cummulative': model trained incrementally, always using data of all contexts so far
    [generator]       None, or a separate generative model to be trained (for [gen_iters] per context)
    [*_cbs]           lists of call-back functions to evaluate training-progress

    NOTE(review): this function's source was corrupted in transit (spans between '<' and '>'
    characters were stripped); the corrupted spots are flagged inline below. Restore from the
    upstream repository before running. In particular, the section that defines
    [previous_datasets] / [previous_generator] and sets the Replay* flags appears to be lost.
    '''

    # Set model in training-mode
    model.train()

    # Use cuda?
    cuda = model._is_on_cuda()
    device = model._device()

    # Initiate possible sources for replay (no replay for 1st context)
    ReplayStoredData = ReplayGeneratedData = ReplayCurrentData = False
    previous_model = None

    # Register starting parameter values (needed for SI)
    if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
        model.register_starting_param_values()

    # Are there different active classes per context (or just potentially a different mask per context)?
    per_context = (model.scenario=="task" or (model.scenario=="class" and model.neg_samples=="current"))
    per_context_singlehead = per_context and (model.scenario=="task" and model.singlehead)

    # Loop over all contexts.
    for context, train_dataset in enumerate(train_datasets, 1):

        # If using the "joint" baseline, skip to last context, as model is only be trained once on data of all contexts
        if baseline=='joint':
            # NOTE(review): source corrupted -- the condition below reads "context1" because the span
            # between "context" and "1" was stripped (it presumably contained the remainder of the
            # 'joint'/'cummulative' baseline handling and the start of the memory-buffer handling,
            # e.g. "<len(train_datasets): continue ... and context>"). TODO: restore from upstream.
            if context1:
                if model.scenario=="domain" or per_context_singlehead:
                    # -map global class labels to the within-context range
                    target_transform = (lambda y, x=model.classes_per_context: y % x)
                else:
                    target_transform = None
                memory_dataset = MemorySetDataset(model.memory_sets, target_transform=target_transform)
                training_dataset = ConcatDataset([train_dataset, memory_dataset])
            else:
                training_dataset = train_dataset

        # Prepare to store running importance estimates and param-values before update (needed for SI)
        if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
            W, p_old = model.prepare_importance_estimates_dicts()

        # Find [active_classes]
        if model.scenario=="task":
            if not model.singlehead:
                # -for Task-IL scenario, create for each context so far a list with its active classes
                active_classes = [list(
                    range(model.classes_per_context * i, model.classes_per_context * (i+1))
                ) for i in range(context)]
            else:
                #--> if a single-headed output layer is used in the Task-IL scenario, all output units are always active
                active_classes = None
        elif model.scenario=="domain":
            # -for Domain-IL scenario, always all classes are active
            active_classes = None
        elif model.scenario=="class":
            # -for Class-IL scenario, the active classes are determined by [model.neg_samples]
            if model.neg_samples=="all-so-far":
                # --> one list with the active classes of all contexts so far
                active_classes = list(range(model.classes_per_context * context))
            elif model.neg_samples=="all":
                #--> always all classes are active
                active_classes = None
            elif model.neg_samples=="current":
                #--> only those classes in the current or replayed context are active (i.e., train "as if Task-IL")
                active_classes = [list(
                    range(model.classes_per_context * i, model.classes_per_context * (i + 1))
                ) for i in range(context)]

        # Reset state of optimizer(s) for every context (if requested)
        if (not model.label=="SeparateClassifiers") and model.optim_type=="adam_reset":
            model.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))
        if (generator is not None) and generator.optim_type=="adam_reset":
            # NOTE(review): this resets the generator's optimizer with [model.optim_list] rather than
            # [generator.optim_list] -- looks like a copy-paste bug; confirm against upstream.
            generator.optimizer = optim.Adam(model.optim_list, betas=(0.9, 0.999))

        # Initialize # iters left on current data-loader(s)
        iters_left = iters_left_previous = 1
        if per_context:
            up_to_context = context if baseline=="cummulative" else context-1
            iters_left_previous = [1]*up_to_context
            data_loader_previous = [None]*up_to_context

        # Define tqdm progress bar(s)
        progress = tqdm.tqdm(range(1, iters+1))
        if generator is not None:
            progress_gen = tqdm.tqdm(range(1, gen_iters+1))

        # Loop over all iterations
        iters_to_use = iters if (generator is None) else max(iters, gen_iters)
        for batch_index in range(1, iters_to_use+1):

            # Update # iters left on current data-loader(s) and, if needed, create new one(s)
            iters_left -= 1
            if iters_left==0:
                data_loader = iter(get_data_loader(training_dataset, batch_size, cuda=cuda, drop_last=True))
                # NOTE:  [train_dataset]    is training-set of current context
                #      [training_dataset] is training-set of current context with stored samples added (if requested)
                iters_left = len(data_loader)
            if ReplayStoredData:
                if per_context:
                    up_to_context = context if baseline=="cummulative" else context-1
                    batch_size_replay = int(np.ceil(batch_size/up_to_context)) if (up_to_context>1) else batch_size
                    # -if different active classes per context (e.g., Task-IL), need separate replay for each context
                    for context_id in range(up_to_context):
                        batch_size_to_use = min(batch_size_replay, len(previous_datasets[context_id]))
                        iters_left_previous[context_id] -= 1
                        if iters_left_previous[context_id]==0:
                            data_loader_previous[context_id] = iter(get_data_loader(
                                previous_datasets[context_id], batch_size_to_use, cuda=cuda, drop_last=True
                            ))
                            iters_left_previous[context_id] = len(data_loader_previous[context_id])
                else:
                    iters_left_previous -= 1
                    if iters_left_previous==0:
                        batch_size_to_use = min(batch_size, len(ConcatDataset(previous_datasets)))
                        data_loader_previous = iter(get_data_loader(ConcatDataset(previous_datasets),
                                                                    batch_size_to_use, cuda=cuda, drop_last=True))
                        iters_left_previous = len(data_loader_previous)


            # -----------------Collect data------------------#

            #####-----CURRENT BATCH-----#####
            if baseline=="cummulative" and per_context:
                x = y = scores = None
            else:
                x, y = next(data_loader)  #--> sample training data of current context
                y = y-model.classes_per_context*(context-1) if per_context and not per_context_singlehead else y
                # --> adjust the y-targets to the 'active range'
                x, y = x.to(device), y.to(device)  #--> transfer them to correct device
                # If --bce & --bce-distill, calculate scores for past classes of current batch with previous model
                binary_distillation = hasattr(model, "binaryCE") and model.binaryCE and model.binaryCE_distill
                if binary_distillation and model.scenario in ("class", "all") and (previous_model is not None):
                    with torch.no_grad():
                        scores = previous_model.classify(
                            x, no_prototypes=True
                        )[:, :(model.classes_per_context * (context - 1))]
                else:
                    scores = None


            #####-----REPLAYED BATCH-----#####
            if not ReplayStoredData and not ReplayGeneratedData and not ReplayCurrentData:
                x_ = y_ = scores_ = context_used = None  #-> if no replay

            ##-->> Replay of stored data <<--##
            if ReplayStoredData:
                scores_ = context_used = None
                if not per_context:
                    # Sample replayed training data, move to correct device
                    x_, y_ = next(data_loader_previous)
                    x_ = x_.to(device)
                    y_ = y_.to(device) if (model.replay_targets=="hard") else None
                    # If required, get target scores (i.e, [scores_]) -- using previous model, with no_grad()
                    if (model.replay_targets=="soft"):
                        with torch.no_grad():
                            scores_ = previous_model.classify(x_, no_prototypes=True)
                        if model.scenario=="class" and model.neg_samples=="all-so-far":
                            scores_ = scores_[:, :(model.classes_per_context*(context-1))]
                            #-> if [scores_] is not same length as [x_], zero probs are added in [loss_fn_kd]-function
                else:
                    # Sample replayed training data, move to correct device and store in lists
                    x_ = list()
                    y_ = list()
                    up_to_context = context if baseline=="cummulative" else context-1
                    for context_id in range(up_to_context):
                        x_temp, y_temp = next(data_loader_previous[context_id])
                        x_.append(x_temp.to(device))
                        # -only keep [y_] if required (as otherwise unnecessary computations will be done)
                        if model.replay_targets=="hard":
                            if not per_context_singlehead:
                                y_temp = y_temp - (model.classes_per_context*context_id)  #-> adjust y to 'active range'
                            y_.append(y_temp.to(device))
                        else:
                            y_.append(None)
                    # If required, get target scores (i.e, [scores_]) -- using previous model, with no_grad()
                    if (model.replay_targets=="soft") and (previous_model is not None):
                        scores_ = list()
                        for context_id in range(up_to_context):
                            with torch.no_grad():
                                scores_temp = previous_model.classify(x_[context_id], no_prototypes=True)
                            if active_classes is not None:
                                scores_temp = scores_temp[:, active_classes[context_id]]
                            scores_.append(scores_temp)

            ##-->> Generative / Current Replay <<--##

            #---INPUTS---#
            if ReplayCurrentData:
                x_ = x  #--> use current context inputs
                context_used = None

            if ReplayGeneratedData:
                # NOTE(review): [previous_generator] is presumably set in the (lost) end-of-context
                # "REPLAY: update source for replay" section -- confirm against upstream.
                conditional_gen = True if previous_generator.label=='CondVAE' and \
                                          ((previous_generator.per_class and previous_generator.prior=="GMM")
                                           or checkattr(previous_generator, 'dg_gates')) else False
                if conditional_gen and per_context:
                    # -if a cond generator is used with different active classes per context, generate data per context
                    x_ = list()
                    context_used = list()
                    for context_id in range(context-1):
                        allowed_domains = list(range(context - 1))
                        allowed_classes = list(
                            range(model.classes_per_context*context_id, model.classes_per_context*(context_id+1))
                        )
                        batch_size_to_use = int(np.ceil(batch_size / (context-1)))
                        x_temp_ = previous_generator.sample(batch_size_to_use, allowed_domains=allowed_domains,
                                                            allowed_classes=allowed_classes, only_x=False)
                        x_.append(x_temp_[0])
                        context_used.append(x_temp_[2])
                else:
                    # -which classes are allowed to be generated? (relevant if conditional generator / decoder-gates)
                    allowed_classes = None if model.scenario=="domain" else list(
                        range(model.classes_per_context*(context-1))
                    )
                    # -which contexts are allowed to be generated? (only relevant if "Domain-IL" with context-gates)
                    allowed_domains = list(range(context-1))
                    # -generate inputs representative of previous contexts
                    x_temp_ = previous_generator.sample(batch_size, allowed_classes=allowed_classes,
                                                        allowed_domains=allowed_domains, only_x=False)
                    x_ = x_temp_[0] if type(x_temp_)==tuple else x_temp_
                    context_used = x_temp_[2] if type(x_temp_)==tuple else None

            #---OUTPUTS---#
            if ReplayGeneratedData or ReplayCurrentData:
                # Get target scores and labels (i.e., [scores_] / [y_]) -- using previous model, with no_grad()
                if not per_context:
                    # -if replay does not need to be evaluated separately for each context
                    with torch.no_grad():
                        scores_ = previous_model.classify(x_, no_prototypes=True)
                    if model.scenario == "class" and model.neg_samples == "all-so-far":
                        scores_ = scores_[:, :(model.classes_per_context * (context - 1))]
                        # -> if [scores_] is not same length as [x_], zero probs are added in [loss_fn_kd]-function
                    # -also get the 'hard target'
                    _, y_ = torch.max(scores_, dim=1)
                else:
                    # -[x_] needs to be evaluated according to each past context, so make list with entry per context
                    scores_ = list()
                    y_ = list()
                    # -if no context-mask and no conditional generator, all scores can be calculated in one go
                    if previous_model.mask_dict is None and not type(x_)==list:
                        with torch.no_grad():
                            all_scores_ = previous_model.classify(x_, no_prototypes=True)
                    for context_id in range(context-1):
                        # -if there is a context-mask (i.e., XdG), obtain predicted scores for each context separately
                        if previous_model.mask_dict is not None:
                            previous_model.apply_XdGmask(context=context_id+1)
                        if previous_model.mask_dict is not None or type(x_)==list:
                            with torch.no_grad():
                                all_scores_ = previous_model.classify(x_[context_id] if type(x_)==list else x_,
                                                                      no_prototypes=True)
                        temp_scores_ = all_scores_
                        if active_classes is not None:
                            temp_scores_ = temp_scores_[:, active_classes[context_id]]
                        scores_.append(temp_scores_)
                        # - also get hard target
                        _, temp_y_ = torch.max(temp_scores_, dim=1)
                        y_.append(temp_y_)

                # Only keep predicted y/scores if required (as otherwise unnecessary computations will be done)
                y_ = y_ if (model.replay_targets == "hard") else None
                scores_ = scores_ if (model.replay_targets == "soft") else None


            #---> Train MAIN MODEL
            if batch_index <= iters:

                # Train the main model with this batch
                loss_dict = model.train_a_batch(x, y, x_=x_, y_=y_, scores=scores, scores_=scores_, rnt = 1./context,
                                                contexts_=context_used, active_classes=active_classes, context=context)

                # Update running parameter importance estimates in W (needed for SI)
                if isinstance(model, ContinualLearner) and model.importance_weighting=='si':
                    model.update_importance_estimates(W, p_old)

                # Fire callbacks (for visualization of training-progress / evaluating performance after each context)
                for loss_cb in loss_cbs:
                    if loss_cb is not None:
                        loss_cb(progress, batch_index, loss_dict, context=context)
                for eval_cb in eval_cbs:
                    if eval_cb is not None:
                        eval_cb(model, batch_index, context=context)
                if model.label == "VAE":
                    for sample_cb in sample_cbs:
                        if sample_cb is not None:
                            sample_cb(model, batch_index, context=context)


            #---> Train GENERATOR
            if generator is not None and batch_index <= gen_iters:

                # Train the generator with this batch
                loss_dict = generator.train_a_batch(x, x_=x_, rnt=1./context)

                # Fire callbacks on each iteration
                for loss_cb in gen_loss_cbs:
                    if loss_cb is not None:
                        loss_cb(progress_gen, batch_index, loss_dict, context=context)
                for sample_cb in sample_cbs:
                    if sample_cb is not None:
                        sample_cb(generator, batch_index, context=context)


        ##----------> UPON FINISHING EACH CONTEXT...

        # Close progres-bar(s)
        progress.close()
        if generator is not None:
            progress_gen.close()

        # Parameter regularization: update and compute the parameter importance estimates
        # NOTE(review): source corrupted -- the line below presumably started as
        # "if context<len(train_datasets) ...:" followed by a comment "##--> EWC/NCL: estimate the
        # Fisher Information matrix"; the span in between was stripped. Restore from upstream.
        if context EWC/NCL: estimate the Fisher Information matrix
        if model.importance_weighting=='fisher' and (model.weight_penalty or model.precondition):
            if model.fisher_kfac:
                model.estimate_kfac_fisher(training_dataset, allowed_classes=allowed_classes)
            else:
                model.estimate_fisher(training_dataset, allowed_classes=allowed_classes)
        ##--> OWM: calculate and update the projection matrix
        if model.importance_weighting=='owm' and (model.weight_penalty or model.precondition):
            model.estimate_owm_fisher(training_dataset, allowed_classes=allowed_classes)
        ##--> SI: calculate and update the normalized path integral
        if model.importance_weighting=='si' and (model.weight_penalty or model.precondition):
            model.update_omega(W, model.epsilon)

        # MEMORY BUFFER: update the memory buffer
        if checkattr(model, 'use_memory_buffer'):
            samples_per_class = model.budget_per_class if (not model.use_full_capacity) else int(
                np.floor((model.budget_per_class*len(train_datasets))/context)
            )
            # reduce examplar-sets (only needed when '--use-full-capacity' is selected)
            model.reduce_memory_sets(samples_per_class)
            # for each new class trained on, construct examplar-set
            new_classes = list(range(model.classes_per_context)) if (
                model.scenario=="domain" or per_context_singlehead
            ) else list(range(model.classes_per_context*(context-1), model.classes_per_context*context))
            for class_id in new_classes:
                # create new dataset containing only all examples of this class
                class_dataset = SubDataset(original_dataset=train_dataset, sub_labels=[class_id])
                # based on this dataset, construct new memory-set for this class
                allowed_classes = active_classes[-1] if per_context and not per_context_singlehead else active_classes
                model.construct_memory_set(dataset=class_dataset, n=samples_per_class, label_set=allowed_classes)
            model.compute_means = True

        # Run the callbacks after finishing each context
        for context_cb in context_cbs:
            if context_cb is not None:
                context_cb(model, iters, context=context)
# NOTE(review): source corrupted here -- lost in transit: the "REPLAY: update source for replay"
# section that closes train_cl() (which originally set [previous_model] / [previous_datasets] /
# [previous_generator] and the Replay* flags), the `def`-line of the next function (FROMP-based
# training over multiple contexts; its body uses `model.optimizer.init_context` /
# `model.optimizer.step`), and the opening of that function's docstring. The fragment below is
# what survives; restore from the upstream repository.
if context main model to optimize across all contexts
    [train_datasets] with for each context the training
    [iters] , # of optimization-steps (i.e., # of mini-batches) per context
    [batch_size] , # of samples per mini-batch
    [*_cbs] of call-back functions to evaluate training-progress
    '''

    # Set model in training-mode
    model.train()

    # Use cuda?
    cuda = model._is_on_cuda()
    device = model._device()

    # Are there different active classes per context (or just potentially a different mask per context)?
    per_context = (model.scenario=="task" or (model.scenario=="class" and model.neg_samples=="current"))
    per_context_singlehead = per_context and (model.scenario=="task" and model.singlehead)

    # Loop over all contexts.
    for context, train_dataset in enumerate(train_datasets, 1):

        # Find [active_classes]
        if model.scenario=="task":
            if not model.singlehead:
                # -for Task-IL scenario, create for each context so far a list with its active classes
                active_classes = [list(
                    range(model.classes_per_context * i, model.classes_per_context * (i+1))
                ) for i in range(context)]
            else:
                #--> if a single-headed output layer is used in the Task-IL scenario, all output units are always active
                active_classes = None
        elif model.scenario=="domain":
            # -for Domain-IL scenario, always all classes are active
            active_classes = None
        elif model.scenario=="class":
            # -for Class-IL scenario, the active classes are determined by [model.neg_samples]
            if model.neg_samples=="all-so-far":
                # --> one list with the active classes of all contexts so far
                active_classes = list(range(model.classes_per_context * context))
            elif model.neg_samples=="all":
                #--> always all classes are active
                active_classes = None
            elif model.neg_samples=="current":
                #--> only those classes in the current or replayed context are active (i.e., train "as if Task-IL")
                active_classes = [list(
                    range(model.classes_per_context * i, model.classes_per_context * (i + 1))
                ) for i in range(context)]

        # Find [label_sets] (i.e., when replaying/revisiting/regularizing previous contexts, which labels to consider)
        label_sets = active_classes if (per_context and not per_context_singlehead) else [active_classes]*context
        # NOTE: With Class-IL, when revisiting previous contexts, consider all labels up to *now*
        #       (and not up to when that context was encountered!)

        # FROMP: calculate and store regularisation-term-related quantities
        if context > 1:
            model.optimizer.init_context(context-1, reset=(model.optim_type=="adam_reset"),
                                         classes_per_context=model.classes_per_context, label_sets=label_sets)

        # Initialize # iters left on current data-loader(s)
        iters_left = 1

        # Define tqdm progress bar(s)
        progress = tqdm.tqdm(range(1, iters+1))

        # Loop over all iterations
        for batch_index in range(1, iters+1):

            # Update # iters left on current data-loader(s) and, if needed, create new one(s)
            iters_left -= 1
            if iters_left==0:
                data_loader = iter(get_data_loader(train_dataset, batch_size, cuda=cuda, drop_last=True))
                iters_left = len(data_loader)

            # -----------------Collect data------------------#
            x, y = next(data_loader)  #--> sample training data of current context
            y = y - model.classes_per_context * (context - 1) if (per_context and not per_context_singlehead) else y
            # --> adjust the y-targets to the 'active range'
            x, y = x.to(device), y.to(device)  # --> transfer them to correct device

            #---> Train MAIN MODEL
            if batch_index <= iters:

                # Optimiser step (the FROMP-regularized update is implemented inside the optimizer)
                loss_dict = model.optimizer.step(x, y, label_sets, context-1, model.classes_per_context)

                # Fire callbacks (for visualization of training-progress / evaluating performance after each context)
                for loss_cb in loss_cbs:
                    if loss_cb is not None:
                        loss_cb(progress, batch_index, loss_dict, context=context)
                for eval_cb in eval_cbs:
                    if eval_cb is not None:
                        eval_cb(model, batch_index, context=context)

        ##----------> UPON FINISHING EACH CONTEXT...

        # Close progres-bar(s)
        progress.close()

        # MEMORY BUFFER: update the memory buffer
        if checkattr(model, 'use_memory_buffer'):
            samples_per_class = model.budget_per_class if (not model.use_full_capacity) else int(
                np.floor((model.budget_per_class*len(train_datasets))/context)
            )
            # reduce examplar-sets (only needed when '--use-full-capacity' is selected)
            model.reduce_memory_sets(samples_per_class)
            # for each new class trained on, construct examplar-set
            new_classes = list(range(model.classes_per_context)) if (
                model.scenario=="domain" or per_context_singlehead
            ) else list(range(model.classes_per_context*(context-1), model.classes_per_context*context))
            for class_id in new_classes:
                # create new dataset containing only all examples of this class
                class_dataset = SubDataset(original_dataset=train_dataset, sub_labels=[class_id])
                # based on this dataset, construct new memory-set for this class
                allowed_classes = active_classes[-1] if per_context and not per_context_singlehead else active_classes
                model.construct_memory_set(dataset=class_dataset, n=samples_per_class, label_set=allowed_classes)
            model.compute_means = True

        # FROMP: update covariance (\Sigma)
# NOTE(review): source corrupted here -- lost in transit: the FROMP covariance-update call that
# closes the previous function and the `def`-line of the generative-classifiers training function
# (which, judging from the body below, also takes [epochs] plus [sample_cbs]/[context_cbs]
# callback-lists in addition to the arguments listed in the docstring fragment). The fragment
# below is what survives; restore from the upstream repository.
if context the generative classifier to train
    [train_datasets] with for each class the training
    [iters] , # of optimization-steps (i.e., # of mini-batches) per class
    [batch_size] , # of samples per mini-batch
    [*_cbs] of call-back functions to evaluate training-progress
    '''

    # Use cuda?
    device = model._device()
    cuda = model._is_on_cuda()

    # Loop over all contexts.
    classes_in_current_context = 0
    context = 1
    for class_id, train_dataset in enumerate(train_datasets):

        # Initialize # iters left on data-loader(s)
        iters_left = 1

        # If [epochs] is given, it overrules [iters]: train for that many full passes over the data
        if epochs is not None:
            data_loader = iter(get_data_loader(train_dataset, batch_size, cuda=cuda, drop_last=False))
            iters = len(data_loader)*epochs

        # Define a tqdm progress bar(s)
        progress = tqdm.tqdm(range(1, iters+1))

        # Loop over all iterations
        for batch_index in range(1, iters+1):

            # Update # iters left on current data-loader(s) and, if needed, create new one(s)
            iters_left -= 1
            if iters_left==0:
                data_loader = iter(get_data_loader(train_dataset, batch_size, cuda=cuda,
                                                   drop_last=True if epochs is None else False))
                iters_left = len(data_loader)

            # Collect data
            x, y = next(data_loader)  #--> sample training data of current context
            x, y = x.to(device), y.to(device)  #--> transfer them to correct device
            #y = y.expand(1) if len(y.size())==1 else y  #--> hack for if batch-size is 1

            # Select model to be trained (each class has its own VAE, stored as attribute "vae{class_id}")
            model_to_be_trained = getattr(model, "vae{}".format(class_id))

            # Train the VAE model of this class with this batch
            loss_dict = model_to_be_trained.train_a_batch(x)

            # Fire callbacks (for visualization of training-progress)
            for loss_cb in loss_cbs:
                if loss_cb is not None:
                    loss_cb(progress, batch_index, loss_dict, class_id=class_id)
            for eval_cb in eval_cbs:
                if eval_cb is not None:
                    eval_cb(model, batch_index+classes_in_current_context*iters, context=context)
            for sample_cb in sample_cbs:
                if sample_cb is not None:
                    sample_cb(model_to_be_trained, batch_index, class_id=class_id)

        # Close progres-bar(s)
        progress.close()

        # Did a context just finish?
        classes_in_current_context += 1
        if classes_in_current_context==model.classes_per_context:
            # Run the callbacks after finishing each context
            for context_cb in context_cbs:
                if context_cb is not None:
                    context_cb(model, iters, context=context)
            # Updated counts
            classes_in_current_context = 0
            context += 1
diff --git a/PyTorch/build-in/other/continual-learning/utils.py b/PyTorch/build-in/other/continual-learning/utils.py
new file mode 100644
index 000000000..095d3ac0f
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/utils.py
@@ -0,0 +1,233 @@
+import os
+import numpy as np
+import pickle
+import torch
+from torchvision import transforms
+import copy
+import tqdm
+from torch import nn
+from torch.utils.data import DataLoader,TensorDataset
+from models.fc import excitability_modules as em
+from data.available import AVAILABLE_TRANSFORMS
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+#######################
+## General utilities ##
+#######################
+
def checkattr(args, attr):
    '''Return True only if [args] has attribute [attr], it is a proper bool, and it is set to True.'''
    if not hasattr(args, attr):
        return False
    value = getattr(args, attr)
    return type(value)==bool and value
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+#############################
+## Data-handling functions ##
+#############################
+
def get_data_loader(dataset, batch_size, cuda=False, drop_last=False, augment=False):
    '''Wrap [dataset] in a shuffling DataLoader and return it.

    If [augment] is True, a deep copy of [dataset] is made and the 'augment' transforms are
    appended to its transform, so the caller's dataset object is left untouched.
    If [cuda] is True, the loader pins memory (with 0 workers) for faster host-to-device copies.
    '''
    to_load = dataset
    if augment:
        # Work on a copy so the original dataset keeps its original transform
        to_load = copy.deepcopy(dataset)
        to_load.transform = transforms.Compose([dataset.transform, *AVAILABLE_TRANSFORMS['augment']])

    loader_kwargs = {'num_workers': 0, 'pin_memory': True} if cuda else {}
    return DataLoader(to_load, batch_size=batch_size, shuffle=True, drop_last=drop_last,
                      **loader_kwargs)
+
def to_one_hot(y, classes):
    '''Convert integer labels [y] into a 2D float32 "one-hot" tensor with [classes] columns.'''
    hot = np.zeros(shape=[len(y), classes], dtype='float32')
    # Set, for each row, the column of that row's label to 1
    hot[np.arange(len(y)), y] = 1.
    return torch.from_numpy(hot)
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+##########################################
+## Object-saving and -loading functions ##
+##########################################
+
def save_object(object, path):
    '''Pickle [object] to the file "[path].pkl" (the extension is appended automatically).'''
    with open('{}.pkl'.format(path), 'wb') as f:
        pickle.dump(object, f, protocol=pickle.HIGHEST_PROTOCOL)
+
def load_object(path):
    '''Unpickle and return the object stored at "[path].pkl" (the extension is appended automatically).'''
    with open('{}.pkl'.format(path), 'rb') as f:
        loaded = pickle.load(f)
    return loaded
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+#########################################
+## Model-saving and -loading functions ##
+#########################################
+
def save_checkpoint(model, model_dir, verbose=True, name=None):
    '''Save state of [model] as dictionary to [model_dir] (if name is None, use "model.name").

    The stored dictionary contains the model's state_dict under key 'state' and, if the model
    has a non-None `mask_dict` attribute (XdG), that dictionary under key 'mask_dict'.
    A failed save is reported on screen but does not raise.
    '''
    # -name/path to store the checkpoint
    name = model.name if name is None else name
    path = os.path.join(model_dir, name)
    # -if required, create directory in which to save checkpoint
    #  (exist_ok avoids the check-then-create race of a separate `os.path.exists` guard)
    os.makedirs(model_dir, exist_ok=True)
    # -create the dictionary containing the checkpoint
    checkpoint = {'state': model.state_dict()}
    if hasattr(model, 'mask_dict') and model.mask_dict is not None:
        checkpoint['mask_dict'] = model.mask_dict
    # -(try to) save the checkpoint
    try:
        torch.save(checkpoint, path)
        if verbose:
            print(' --> saved model {name} to {path}'.format(name=name, path=model_dir))
    except OSError:
        print(" --> saving model '{}' failed!!".format(name))
+
def load_checkpoint(model, model_dir, verbose=True, name=None, strict=True, map_location=None):
    '''Load saved state (in form of dictionary) at [model_dir] (if name is None, use "model.name") to [model].

    Args:
        strict: passed to `load_state_dict`; if False, missing/unexpected keys are tolerated
        map_location: passed to `torch.load`; e.g. 'cpu' allows loading a GPU-saved checkpoint
            on a machine without CUDA (default None keeps torch's own device-mapping behavior,
            so existing callers are unaffected)
    '''
    # -path from where to load checkpoint
    name = model.name if name is None else name
    path = os.path.join(model_dir, name)
    # load parameters (i.e., [model] will now have the state of the loaded model)
    checkpoint = torch.load(path, map_location=map_location)
    model.load_state_dict(checkpoint['state'], strict=strict)
    if 'mask_dict' in checkpoint:
        # -restore the XdG mask-dictionary, if it was stored with the checkpoint
        model.mask_dict = checkpoint['mask_dict']
    # notify that we succesfully loaded the checkpoint
    if verbose:
        print(' --> loaded checkpoint of {name} from {path}'.format(name=name, path=model_dir))
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+################################
+## Model-inspection functions ##
+################################
+
def count_parameters(model, verbose=True):
    '''Count number of parameters, print to screen.

    Returns:
        (total_params, learnable_params, fixed_params): element counts over all of [model]'s
        parameter tensors, split by whether they require gradients.
    '''
    total_params = learnable_params = fixed_params = 0
    for param in model.parameters():
        # `numel` replaces the manual dimension-product of the original code; it also counts
        # 0-dim (scalar) parameters as 1, which the manual product counted as 0
        n_params = param.numel()
        total_params += n_params
        if param.requires_grad:
            learnable_params += n_params
        else:
            fixed_params += n_params
    if verbose:
        print("--> this network has {} parameters (~{} million)"
              .format(total_params, round(total_params / 1000000, 1)))
        print("       of which: - learnable: {} (~{} million)".format(learnable_params,
                                                                      round(learnable_params / 1000000, 1)))
        print("                  - fixed: {} (~{} million)".format(fixed_params, round(fixed_params / 1000000, 1)))
    return total_params, learnable_params, fixed_params
+
def print_model_info(model, message=None):
    '''Print information on [model] onto the screen (its structure and its parameter counts).'''
    # Header: a plain divider, or [message] centered in a divider of the same width
    header = 55*"-" if message is None else ' {} '.format(message).center(55, '-')
    print(header)
    print(model)
    print(55*"-")
    _ = count_parameters(model)
+
+##-------------------------------------------------------------------------------------------------------------------##
+
+########################################
+## Parameter-initialization functions ##
+########################################
+
def weight_reset(m):
    '''Reinitializes parameters of [m] according to default initialization scheme.'''
    # Only layer types with their own `reset_parameters` implementation are reset.
    # The or-chain is kept short-circuiting (rather than an isinstance-tuple) so that
    # `em.LinearExcitability` is only looked up when the first two checks fail.
    resettable = isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) \
                 or isinstance(m, em.LinearExcitability)
    if resettable:
        m.reset_parameters()
+
def weight_init(model, strategy="xavier_normal", std=0.01):
    '''Initialize weight-parameters of [model] according to [strategy].

    [xavier_normal]   "normalized initialization" (Glorot & Bengio, 2010) with Gaussian distribution
    [xavier_uniform]  "normalized initialization" (Glorot & Bengio, 2010) with uniform distribution
    [normal]          initialize with Gaussian(mean=0, std=[std])
    [...]             ...'''

    # If [model] has a "list_init_layers"-method, only initialize parameters in those layers
    if hasattr(model, "list_init_layers"):
        params = [p for layer in model.list_init_layers() for p in layer.parameters()]
    else:
        params = list(model.parameters())

    # Weight-parameters are those with at least 2 dimensions (1-dim biases are left untouched)
    for tensor in params:
        if tensor.dim() < 2:
            continue
        if strategy=="xavier_normal":
            nn.init.xavier_normal_(tensor)
        elif strategy=="xavier_uniform":
            nn.init.xavier_uniform_(tensor)
        elif strategy=="normal":
            nn.init.normal_(tensor, std=std)
        else:
            raise ValueError("Invalid weight-initialization strategy {}".format(strategy))
+
def bias_init(model, strategy="constant", value=0.01):
    '''Initialize bias-parameters of [model] according to [strategy].

    [zero]      set them all to zero
    [constant]  set them all to [value]
    [positive]  initialize with Uniform(a=0, b=[value])
    [any]       initialize with Uniform(a=-[value], b=[value])
    [...]       ...'''

    # If [model] has a "list_init_layers"-method, only initialize parameters in those layers
    if hasattr(model, "list_init_layers"):
        params = [p for layer in model.list_init_layers() for p in layer.parameters()]
    else:
        params = list(model.parameters())

    # Bias-parameters are those with exactly 1 dimension (weight matrices are left untouched)
    ## NOTE: be careful if excitability-parameters are added to the model!!!!
    for tensor in params:
        if tensor.dim() != 1:
            continue
        if strategy == "zero":
            nn.init.constant_(tensor, val=0)
        elif strategy == "constant":
            nn.init.constant_(tensor, val=value)
        elif strategy == "positive":
            nn.init.uniform_(tensor, a=0, b=value)
        elif strategy == "any":
            nn.init.uniform_(tensor, a=-value, b=value)
        else:
            raise ValueError("Invalid bias-initialization strategy {}".format(strategy))
+
+##-------------------------------------------------------------------------------------------------------------------##
+
def preprocess(feature_extractor, dataset_list, config, batch=128, message=''):
    '''Put a list of datasets through a feature-extractor, to return a new list of pre-processed datasets.

    Each returned dataset is a TensorDataset of (features, labels), where the features have shape
    (n_samples, config['channels'], config['size'], config['size']); extraction runs on the
    device of [feature_extractor], results are moved back to CPU.
    '''
    device = feature_extractor._device()
    processed = []
    progress_bar = tqdm.tqdm(total=len(dataset_list))
    progress_bar.set_description('{} | dataset {}/{} |'.format(message, 0, len(dataset_list)))
    for index, dataset in enumerate(dataset_list):
        loader = get_data_loader(dataset, batch_size=batch, drop_last=False,
                                 cuda=feature_extractor._is_on_cuda())
        n_samples = len(loader.dataset)
        # -pre-allocate the output tensors, then fill them batch-by-batch
        features = torch.empty((n_samples, config['channels'], config['size'], config['size']))
        labels = torch.empty((n_samples,), dtype=torch.long)
        filled = 0
        for x, y in loader:
            x = feature_extractor(x.to(device)).cpu()
            n_batch = x.shape[0]
            features[filled:(filled + n_batch)] = x
            labels[filled:(filled + n_batch)] = y
            filled += n_batch
        processed.append(TensorDataset(features, labels))
        progress_bar.update(1)
        progress_bar.set_description('{} | dataset {}/{} |'.format(message, index + 1, len(dataset_list)))
    progress_bar.close()
    return processed
\ No newline at end of file
diff --git a/PyTorch/build-in/other/continual-learning/visual/__init__.py b/PyTorch/build-in/other/continual-learning/visual/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/PyTorch/build-in/other/continual-learning/visual/visual_plt.py b/PyTorch/build-in/other/continual-learning/visual/visual_plt.py
new file mode 100644
index 000000000..c5ec0deba
--- /dev/null
+++ b/PyTorch/build-in/other/continual-learning/visual/visual_plt.py
@@ -0,0 +1,380 @@
+import matplotlib
+matplotlib.use('Agg')
+# above 2 lines set the matplotlib backend to 'Agg', which
+# enables matplotlib-plots to also be generated if no X-server
+# is defined (e.g., when running in basic Docker-container)
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_pdf import PdfPages
+from torchvision.utils import make_grid
+import numpy as np
+
+
def open_pdf(full_path):
    '''Return a matplotlib PdfPages-object for writing (multiple) figures to the pdf-file at [full_path].

    NOTE: the caller is responsible for calling `.close()` on the returned object (or using it
    as a context manager) -- the pdf-file is only finalized upon closing.
    '''
    return PdfPages(full_path)
+
+
+def plot_images_from_tensor(image_tensor, pdf=None, nrow=8, title=None):
+ '''Plot images in [image_tensor] as a grid with [nrow] into [pdf].
+
+ [image_tensor]