From e9649ff593dae61409ac38cd1488e1d1320acd6f Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Tue, 21 Jan 2025 22:42:00 +0100 Subject: [PATCH 1/7] starting new priority-based adjustments for multiple training parameters --- .github/workflows/ci.yml | 4 +- .gitignore | 3 + edgetrain/adjust_train_parameters.py | 48 ++++++++++++++++ edgetrain/calculate_priorities.py | 37 ++++++++++++ edgetrain/calculate_scores.py | 29 ++++++++++ edgetrain/dynamic_train.py | 14 ++++- edgetrain/resource_adjust.py | 2 + tests/test_adjust_pruning.py | 84 ++++++++++++++++++++++++++++ 8 files changed, 217 insertions(+), 4 deletions(-) create mode 100644 edgetrain/adjust_train_parameters.py create mode 100644 edgetrain/calculate_priorities.py create mode 100644 edgetrain/calculate_scores.py create mode 100644 tests/test_adjust_pruning.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dd0df31..2fbccf9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,9 +4,7 @@ on: push: branches: - main - pull_request: - branches: - - main + jobs: test: diff --git a/.gitignore b/.gitignore index 218fd06..73c0e95 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,9 @@ # Ignore Databricks folder .databricks/ +# virtual environment +venv/ + # Ignore edgetrain folders models/ logs/ diff --git a/edgetrain/adjust_train_parameters.py b/edgetrain/adjust_train_parameters.py new file mode 100644 index 0000000..cd095fc --- /dev/null +++ b/edgetrain/adjust_train_parameters.py @@ -0,0 +1,48 @@ +def adjust_training_parameters(priority_scores, batch_size, pruning_ratio, lr, system_resources): + """ + Adjust the training parameters (batch size, pruning ratio, learning rate) based on the highest priority score. + + Parameters: + - priority_scores (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. + - batch_size (int): Current batch size. + - pruning_ratio (float): Current pruning ratio. + - lr (float): Current learning rate. 
+ - system_resources (dict): System resource usage data (CPU and GPU). + + Returns: + - adjusted_batch_size (int): Adjusted batch size. + - adjusted_pruning_ratio (float): Adjusted pruning ratio. + - adjusted_lr (float): Adjusted learning rate. + """ + # Determine which parameter has the highest priority score + highest_priority = max(priority_scores, key=priority_scores.get) + + # Adjust the parameter based on system resources and highest priority score + if highest_priority == "batch_size": + # Only adjust batch size if memory usage is high + if system_resources["cpu_memory_percent"] > 75 or system_resources["gpu_memory_percent"] > 75: + adjusted_batch_size = max(16, batch_size // 2) # Halve batch size + else: + adjusted_batch_size = batch_size + adjusted_pruning_ratio = pruning_ratio + adjusted_lr = lr + + elif highest_priority == "pruning": + # Adjust pruning ratio if memory usage is high + if system_resources["cpu_memory_percent"] > 75 or system_resources["gpu_memory_percent"] > 75: + adjusted_pruning_ratio = min(0.8, pruning_ratio + 0.1) # Increase pruning + else: + adjusted_pruning_ratio = pruning_ratio + adjusted_batch_size = batch_size + adjusted_lr = lr + + elif highest_priority == "learning_rate": + # Adjust learning rate based on accuracy stagnation + if accuracy_stagnation_score > 0.05: # Example threshold for stagnation + adjusted_lr = max(1e-5, lr * 0.5) # Reduce learning rate if stagnation detected + else: + adjusted_lr = lr + adjusted_batch_size = batch_size + adjusted_pruning_ratio = pruning_ratio + + return adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr diff --git a/edgetrain/calculate_priorities.py b/edgetrain/calculate_priorities.py new file mode 100644 index 0000000..502c6ed --- /dev/null +++ b/edgetrain/calculate_priorities.py @@ -0,0 +1,37 @@ +def define_priorities(memory_usage_score, accuracy_stagnation_score, loss_stagnation_score, user_priorities=None): + """ + Calculate priority scores for adjustments based on resource usage, 
accuracy, and loss. + + Parameters: + - memory_usage_score (float): Score indicating memory usage pressure (0-1). + - accuracy_stagnation_score (float): Score indicating stagnation in accuracy improvement (0-1). + - loss_stagnation_score (float): Score indicating stagnation in loss reduction (0-1). + - user_priorities (dict): Optional user-defined priorities for resource conservation, accuracy, and loss. + + Returns: + - priority_scores (dict): A dictionary of priority scores for batch size, pruning, and learning rate. + """ + # Default weights if user priorities are not provided + default_priorities = { + "resource_conservation": 0.4, + "accuracy_improvement": 0.4, + "loss_reduction": 0.2, + } + + # Use user-defined priorities if available + if user_priorities: + priorities = user_priorities + else: + priorities = default_priorities + + # Calculate weighted priority scores + priority_scores = { + "batch_size": priorities["resource_conservation"] * memory_usage_score, + "pruning": priorities["resource_conservation"] * memory_usage_score, + "learning_rate": ( + priorities["accuracy_improvement"] * accuracy_stagnation_score + + priorities["loss_reduction"] * loss_stagnation_score + ), + } + + return priority_scores diff --git a/edgetrain/calculate_scores.py b/edgetrain/calculate_scores.py new file mode 100644 index 0000000..7254cb2 --- /dev/null +++ b/edgetrain/calculate_scores.py @@ -0,0 +1,29 @@ +def compute_scores(system_resources, previous_accuracy, current_accuracy, previous_loss, current_loss, max_accuracy_range, max_loss_range): + """ + Compute memory usage, accuracy stagnation, and loss stagnation scores, and normalize them. + + Parameters: + - system_resources (dict): Dictionary containing system resource metrics (CPU, GPU memory usage). + - previous_accuracy (float): Accuracy from the previous epoch. + - current_accuracy (float): Current accuracy. + - previous_loss (float): Loss from the previous epoch. + - current_loss (float): Current loss. 
+ - max_accuracy_range (float): Maximum possible accuracy improvement. + - max_loss_range (float): Maximum possible loss reduction. + + Returns: + - normalized_scores (dict): Dictionary of normalized scores. + """ + # Calculate memory usage score (average of CPU and GPU memory utilization) + memory_usage_score = (system_resources["cpu_memory_percent"] + system_resources["gpu_memory_percent"]) / 2 + + # Calculate accuracy stagnation score + accuracy_stagnation_score = max(0, previous_accuracy - current_accuracy) + + # Calculate loss stagnation score + loss_stagnation_score = max(0, current_loss - previous_loss) + + # Normalize the scores + normalized_scores = normalize_scores(memory_usage_score, accuracy_stagnation_score, loss_stagnation_score, max_accuracy_range, max_loss_range) + + return normalized_scores diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index 1177051..73b5348 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -1,6 +1,6 @@ import tensorflow as tf from tensorflow.keras.optimizers import Adam -from edgetrain import log_usage_once, adjust_threads, adjust_batch_size, adjust_learning_rate, create_model_tf, get_edgetrain_folder +from edgetrain import log_usage_once, adjust_batch_size, adjust_learning_rate, create_model_tf, sys_resources def dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, grad_accum=1, log_file="resource_log.csv", dynamic_adjustments=True): """ @@ -25,10 +25,22 @@ def dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, grad_accum=1 # Create the MirroredStrategy for distributed training strategy = tf.distribute.MirroredStrategy() + # Initialize a few key variables history_list = [] + prev_accuracy = 0.0 + prev_loss = 0.0 + for epoch in range(epochs): print(f"Epoch {epoch + 1}/{epochs}") + # Calculate performance and usage scores + scores = calculate_scores(system_resources, current_accuracy, previous_accuracy, current_loss, previous_loss) + + priorities = 
calculat_priorities + + train_param = adjust_param + + # Adjust resources dynamically based on system usage if dynamic_adjustments: batch_size=adjust_batch_size(batch_size=batch_size) diff --git a/edgetrain/resource_adjust.py b/edgetrain/resource_adjust.py index 8e6b6b8..5b883a8 100644 --- a/edgetrain/resource_adjust.py +++ b/edgetrain/resource_adjust.py @@ -111,9 +111,11 @@ def adjust_pruning(model, pruning_ratio, cpu_threshold=[20, 80], gpu_threshold=[ if cpu_memory_percent > cpu_threshold[1] or gpu_memory_percent > gpu_threshold[1]: print(f"High memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") new_pruning_ratio = min(pruning_ratio + increment, max_pruning_ratio) # Increase pruning ratio + elif cpu_memory_percent < cpu_threshold[0] and gpu_memory_percent < gpu_threshold[0]: print(f"Low memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") new_pruning_ratio = max(pruning_ratio - increment, min_pruning_ratio) # Decrease pruning ratio + else: print(f"Memory usage is under control: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") new_pruning_ratio = pruning_ratio # Keep pruning ratio the same diff --git a/tests/test_adjust_pruning.py b/tests/test_adjust_pruning.py new file mode 100644 index 0000000..3714b06 --- /dev/null +++ b/tests/test_adjust_pruning.py @@ -0,0 +1,84 @@ +import pytest +from unittest.mock import patch, MagicMock +import tensorflow as tf +from your_module import adjust_pruning # Replace `your_module` with the actual module name. 
+ +# Mock sys_resources to simulate CPU and GPU memory usage +@pytest.fixture +def mock_sys_resources(): + with patch("your_module.sys_resources") as mock: + yield mock + +# Helper function to create a simple model +@pytest.fixture +def simple_model(): + model = tf.keras.Sequential([ + tf.keras.layers.Dense(10, input_shape=(10,)), + tf.keras.layers.Dense(1) + ]) + return model + +def test_pruning_ratio_increase(mock_sys_resources, simple_model): + mock_sys_resources.return_value = { + 'cpu_memory_percent': 85, # Above the high threshold + 'gpu_memory_percent': 85 + } + model = simple_model + initial_pruning_ratio = 0.1 + _, new_pruning_ratio = adjust_pruning( + model, pruning_ratio=initial_pruning_ratio, + cpu_threshold=[20, 80], gpu_threshold=[20, 80] + ) + assert new_pruning_ratio > initial_pruning_ratio # Pruning ratio should increase + +def test_pruning_ratio_decrease(mock_sys_resources, simple_model): + mock_sys_resources.return_value = { + 'cpu_memory_percent': 15, # Below the low threshold + 'gpu_memory_percent': 15 + } + model = simple_model + initial_pruning_ratio = 0.2 + _, new_pruning_ratio = adjust_pruning( + model, pruning_ratio=initial_pruning_ratio, + cpu_threshold=[20, 80], gpu_threshold=[20, 80] + ) + assert new_pruning_ratio < initial_pruning_ratio # Pruning ratio should decrease + +def test_pruning_ratio_no_change(mock_sys_resources, simple_model): + mock_sys_resources.return_value = { + 'cpu_memory_percent': 50, # Within the threshold + 'gpu_memory_percent': 50 + } + model = simple_model + initial_pruning_ratio = 0.3 + _, new_pruning_ratio = adjust_pruning( + model, pruning_ratio=initial_pruning_ratio, + cpu_threshold=[20, 80], gpu_threshold=[20, 80] + ) + assert new_pruning_ratio == initial_pruning_ratio # Pruning ratio should not change + +def test_pruning_ratio_min_boundary(mock_sys_resources, simple_model): + mock_sys_resources.return_value = { + 'cpu_memory_percent': 15, # Below the low threshold + 'gpu_memory_percent': 15 + } + model = 
simple_model + initial_pruning_ratio = 0.1 # At the minimum boundary + _, new_pruning_ratio = adjust_pruning( + model, pruning_ratio=initial_pruning_ratio, + cpu_threshold=[20, 80], gpu_threshold=[20, 80] + ) + assert new_pruning_ratio == 0.1 # Pruning ratio should not go below minimum + +def test_pruning_ratio_max_boundary(mock_sys_resources, simple_model): + mock_sys_resources.return_value = { + 'cpu_memory_percent': 85, # Above the high threshold + 'gpu_memory_percent': 85 + } + model = simple_model + initial_pruning_ratio = 0.8 # At the maximum boundary + _, new_pruning_ratio = adjust_pruning( + model, pruning_ratio=initial_pruning_ratio, + cpu_threshold=[20, 80], gpu_threshold=[20, 80] + ) + assert new_pruning_ratio == 0.8 # Pruning ratio should not exceed maximum From 93f6e363f728c10d6873c0496482d85b91c6f52b Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Mon, 27 Jan 2025 17:17:21 +0100 Subject: [PATCH 2/7] Preparing new strategy of scores -> priorities -> param adjustments. Still needs a lot of testing --- edgetrain/adjust_train_parameters.py | 33 +++-- edgetrain/calculate_priorities.py | 27 ++-- edgetrain/calculate_scores.py | 65 +++++++-- edgetrain/dynamic_train.py | 100 +++++++------ edgetrain/resource_adjust.py | 210 --------------------------- edgetrain/resource_monitor.py | 6 +- setup.py | 2 +- tests/test_adjust_batch_size.py | 31 ---- tests/test_adjust_learning_rate.py | 31 ---- tests/test_adjust_pruning.py | 84 ----------- 10 files changed, 144 insertions(+), 445 deletions(-) delete mode 100644 edgetrain/resource_adjust.py delete mode 100644 tests/test_adjust_batch_size.py delete mode 100644 tests/test_adjust_learning_rate.py delete mode 100644 tests/test_adjust_pruning.py diff --git a/edgetrain/adjust_train_parameters.py b/edgetrain/adjust_train_parameters.py index cd095fc..3e8bc07 100644 --- a/edgetrain/adjust_train_parameters.py +++ b/edgetrain/adjust_train_parameters.py @@ -1,45 +1,58 @@ -def adjust_training_parameters(priority_scores, 
batch_size, pruning_ratio, lr, system_resources): +from edgetrain import sys_resources + +def adjust_training_parameters(priority_value, batch_size, pruning_ratio, lr, accuracy_score): """ - Adjust the training parameters (batch size, pruning ratio, learning rate) based on the highest priority score. + Adjust the training parameters (batch size, pruning ratio, learning rate) based on the highest priority score, + moving parameters in the opposite direction if resource usage or accuracy trends improve. Parameters: - priority_scores (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. - batch_size (int): Current batch size. - pruning_ratio (float): Current pruning ratio. - lr (float): Current learning rate. - - system_resources (dict): System resource usage data (CPU and GPU). + - accuracy_score (float): Current accuracy score from the latest epoch (0-1). Returns: - adjusted_batch_size (int): Adjusted batch size. - adjusted_pruning_ratio (float): Adjusted pruning ratio. - adjusted_lr (float): Adjusted learning rate. 
""" + + # Get system resources + resources = sys_resources() + # Determine which parameter has the highest priority score highest_priority = max(priority_scores, key=priority_scores.get) # Adjust the parameter based on system resources and highest priority score if highest_priority == "batch_size": - # Only adjust batch size if memory usage is high - if system_resources["cpu_memory_percent"] > 75 or system_resources["gpu_memory_percent"] > 75: + # Adjust batch size based on memory usage + if resources["cpu_memory_percent"] > 75 or resources["gpu_memory_percent"] > 75: adjusted_batch_size = max(16, batch_size // 2) # Halve batch size + elif resources["cpu_memory_percent"] < 50 and resources["gpu_memory_percent"] < 50: + adjusted_batch_size = min(128, batch_size * 2) # Double batch size else: adjusted_batch_size = batch_size adjusted_pruning_ratio = pruning_ratio adjusted_lr = lr elif highest_priority == "pruning": - # Adjust pruning ratio if memory usage is high - if system_resources["cpu_memory_percent"] > 75 or system_resources["gpu_memory_percent"] > 75: + # Adjust pruning ratio based on memory usage + if resources["cpu_memory_percent"] > 75 or resources["gpu_memory_percent"] > 75: adjusted_pruning_ratio = min(0.8, pruning_ratio + 0.1) # Increase pruning + elif resources["cpu_memory_percent"] < 50 and resources["gpu_memory_percent"] < 50: + adjusted_pruning_ratio = max(0.1, pruning_ratio - 0.1) # Decrease pruning else: adjusted_pruning_ratio = pruning_ratio adjusted_batch_size = batch_size adjusted_lr = lr elif highest_priority == "learning_rate": - # Adjust learning rate based on accuracy stagnation - if accuracy_stagnation_score > 0.05: # Example threshold for stagnation - adjusted_lr = max(1e-5, lr * 0.5) # Reduce learning rate if stagnation detected + # Adjust learning rate based on accuracy score + if accuracy_score < 0.05: # Example threshold for low accuracy + adjusted_lr = max(1e-5, lr * 0.5) # Reduce learning rate + elif accuracy_score > 0.95: # Example 
threshold for high accuracy + adjusted_lr = min(1e-2, lr * 1.2) # Slightly increase learning rate else: adjusted_lr = lr adjusted_batch_size = batch_size diff --git a/edgetrain/calculate_priorities.py b/edgetrain/calculate_priorities.py index 502c6ed..ca57a82 100644 --- a/edgetrain/calculate_priorities.py +++ b/edgetrain/calculate_priorities.py @@ -1,4 +1,4 @@ -def define_priorities(memory_usage_score, accuracy_stagnation_score, loss_stagnation_score, user_priorities=None): +def define_priorities(normalized_scores, user_priorities=None): """ Calculate priority scores for adjustments based on resource usage, accuracy, and loss. @@ -9,29 +9,24 @@ def define_priorities(memory_usage_score, accuracy_stagnation_score, loss_stagna - user_priorities (dict): Optional user-defined priorities for resource conservation, accuracy, and loss. Returns: - - priority_scores (dict): A dictionary of priority scores for batch size, pruning, and learning rate. + - priority_value (dict): A dictionary of priority scores for batch size, pruning, and learning rate. 
""" # Default weights if user priorities are not provided default_priorities = { - "resource_conservation": 0.4, + "batch_size_adjustment": 0.3, + "pruning_adjustment": 0.3, "accuracy_improvement": 0.4, - "loss_reduction": 0.2, } # Use user-defined priorities if available - if user_priorities: - priorities = user_priorities - else: - priorities = default_priorities + priorities = user_priorities if user_priorities else default_priorities # Calculate weighted priority scores - priority_scores = { - "batch_size": priorities["resource_conservation"] * memory_usage_score, - "pruning": priorities["resource_conservation"] * memory_usage_score, - "learning_rate": ( - priorities["accuracy_improvement"] * accuracy_stagnation_score + - priorities["loss_reduction"] * loss_stagnation_score - ), + priority_value = { + "batch_size": priorities["batch_size_adjustment"] * normalized_scores.get('memory_score'), + "pruning": (priorities["pruning_adjustment"] * normalized_scores.get('memory_score') + + priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')) / 2, + "learning_rate": (priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')), } - return priority_scores + return priority_value diff --git a/edgetrain/calculate_scores.py b/edgetrain/calculate_scores.py index 7254cb2..8231bb6 100644 --- a/edgetrain/calculate_scores.py +++ b/edgetrain/calculate_scores.py @@ -1,29 +1,66 @@ -def compute_scores(system_resources, previous_accuracy, current_accuracy, previous_loss, current_loss, max_accuracy_range, max_loss_range): +from edgetrain import sys_resources + +def compute_scores(previous_accuracy, current_accuracy, score_ranges=None): """ - Compute memory usage, accuracy stagnation, and loss stagnation scores, and normalize them. + Compute memory, accuracy, and loss scores, and normalize them. Parameters: - - system_resources (dict): Dictionary containing system resource metrics (CPU, GPU memory usage). 
- previous_accuracy (float): Accuracy from the previous epoch. - current_accuracy (float): Current accuracy. - - previous_loss (float): Loss from the previous epoch. - - current_loss (float): Current loss. - - max_accuracy_range (float): Maximum possible accuracy improvement. - - max_loss_range (float): Maximum possible loss reduction. + - score_ranges (float): Maximum possible accuracy improvement. Returns: - normalized_scores (dict): Dictionary of normalized scores. """ - # Calculate memory usage score (average of CPU and GPU memory utilization) - memory_usage_score = (system_resources["cpu_memory_percent"] + system_resources["gpu_memory_percent"]) / 2 + + # Get system resources + resources = sys_resources() - # Calculate accuracy stagnation score - accuracy_stagnation_score = max(0, previous_accuracy - current_accuracy) + # Default score ranges + if score_ranges is None: + score_ranges = { + "memory_score_range": 100, # Default 0-100 range for memory score + "accuracy_score_range": 1, # Default 0-1 range for accuracy score + } - # Calculate loss stagnation score - loss_stagnation_score = max(0, current_loss - previous_loss) + # Calculate memory score + # # If there is a gpu average gpu and cpu for memory score, otherwise, just use cpu + if resources.get('num_gpus') > 0: + memory_score = (resources.get('cpu_memory_percent') + resources.get('gpu_memory_percent')) / 2 + else: + memory_score = resources.get('cpu_memory_percent') + + # Calculate accuracy score + accuracy_score = max(0, previous_accuracy - current_accuracy) + + # store all three scores in a dictionary + raw_scores = { + "memory_score": memory_score, + "accuracy_score": accuracy_score + } # Normalize the scores - normalized_scores = normalize_scores(memory_usage_score, accuracy_stagnation_score, loss_stagnation_score, max_accuracy_range, max_loss_range) + normalized_scores = normalize_scores(raw_scores, score_ranges) return normalized_scores + + +def normalize_scores(raw_scores, score_ranges): + """ + 
Normalize raw scores based on predefined score ranges. + + Parameters: + - raw_scores (dict): Dictionary of raw scores. + - score_ranges (dict): Dictionary of maximum possible improvements for each score. + + Returns: + - normalized_scores (dict): Dictionary of normalized scores. + """ + normalized_scores = {} + + for score_name, score_value in raw_scores.items(): + score_range = score_ranges.get(score_name, 1) # Default range is 1 if not specified + normalized_score = score_value / score_range + normalized_scores[score_name] = normalized_score + + return normalized_scores \ No newline at end of file diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index 73b5348..a61c537 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -1,8 +1,16 @@ import tensorflow as tf from tensorflow.keras.optimizers import Adam -from edgetrain import log_usage_once, adjust_batch_size, adjust_learning_rate, create_model_tf, sys_resources +from edgetrain import log_usage_once, create_model_tf, compute_scores, define_priorities, adjust_training_parameters -def dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, grad_accum=1, log_file="resource_log.csv", dynamic_adjustments=True): +def dynamic_train( + train_dataset, + epochs=10, + batch_size=32, + lr=1e-3, + pruning_ratio=0.2, + log_file="resource_log.csv", + dynamic_adjustments=True +): """ Train the model with optional dynamic resource adjustment. @@ -11,7 +19,7 @@ def dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, grad_accum=1 - epochs (int): Number of epochs to train the model. - batch_size (int): The base batch size to use. - lr (float): The initial learning rate. - - grad_accum (int): The number of gradient accumulation steps. + - pruning_ratio (float): Initial pruning ratio (for dynamic adjustment). - log_file (str): The path to the log file where resource usage is saved. 
- dynamic_adjustments (bool): A flag to control if dynamic adjustments are enabled (True) or not (False). @@ -19,61 +27,65 @@ def dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, grad_accum=1 - history_list (list): A list of training history for each epoch. """ - # Log resource usage (regardless of dynamic adjustments) - log_usage_once(log_file, lr=lr, batch_size=batch_size, grad_accum=grad_accum, num_epoch=0) - + # Log initial resource usage + log_usage_once(log_file, lr=lr, batch_size=batch_size, num_epoch=0) + # Create the MirroredStrategy for distributed training strategy = tf.distribute.MirroredStrategy() - # Initialize a few key variables + # Initialize variables history_list = [] prev_accuracy = 0.0 - prev_loss = 0.0 + + # Create the model once + train_images, train_labels = train_dataset['images'], train_dataset['labels'] + with strategy.scope(): + model = create_model_tf(input_shape=train_images[0].shape) for epoch in range(epochs): print(f"Epoch {epoch + 1}/{epochs}") - - # Calculate performance and usage scores - scores = calculate_scores(system_resources, current_accuracy, previous_accuracy, current_loss, previous_loss) - - priorities = calculat_priorities - train_param = adjust_param - - - # Adjust resources dynamically based on system usage - if dynamic_adjustments: - batch_size=adjust_batch_size(batch_size=batch_size) - lr=adjust_learning_rate(lr=lr) - # grad_accum=adjust_grad_accum(grad_accum=grad_accum) - else: - # Keep default batch size and workers fixed - batch_size=batch_size - lr=lr - grad_accum=grad_accum - - # Deploy training - train_images, train_labels = train_dataset['images'], train_dataset['labels'] + # Compile the model with the current parameters with strategy.scope(): - model = create_model_tf(input_shape=train_images[0].shape) - - # Create optimizer with learning rate optimizer = Adam(learning_rate=lr) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) - # Train for 1 
epoch at a time - history = model.fit( - train_images, - train_labels, - batch_size=batch_size, - epochs=1 - ) - - # save results - history_list.append(history.history) + # Train for 1 epoch + history = model.fit( + train_images, + train_labels, + batch_size=batch_size, + epochs=1 + ) + # Save training history + history_list.append(history.history) + + # Update accuracy + curr_accuracy = history.history['accuracy'][-1] + # Log resource usage for the current epoch - log_usage_once(log_file, lr=lr, batch_size=batch_size, grad_accum=grad_accum, num_epoch=epoch) + log_usage_once(log_file, lr=lr, batch_size=batch_size, num_epoch=epoch + 1) - return history_list + if dynamic_adjustments: + # Calculate performance and resource usage scores + normalized_scores = compute_scores(prev_accuracy, curr_accuracy) + + # Define priority values based on normalized scores + priority_value = define_priorities(normalized_scores) + # Adjust training parameters + batch_size, pruning_ratio, lr = adjust_training_parameters( + priority_scores=priority_value, + batch_size=batch_size, + pruning_ratio=pruning_ratio, + lr=lr, + accuracy_score=curr_accuracy + ) + + print(f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning_ratio}, learning_rate={lr}") + + # Update previous accuracy + prev_accuracy = curr_accuracy + + return history_list diff --git a/edgetrain/resource_adjust.py b/edgetrain/resource_adjust.py deleted file mode 100644 index 5b883a8..0000000 --- a/edgetrain/resource_adjust.py +++ /dev/null @@ -1,210 +0,0 @@ -import tensorflow as tf -import psutil, GPUtil -from edgetrain import sys_resources - -def adjust_batch_size(batch_size, cpu_threshold=[20, 80], gpu_threshold=[20, 80], increment=8, resources=None): - """ - Adjusts the batch size based on CPU and GPU memory usage. - - Parameters: - - batch_size (int): The initial batch size to be adjusted. - - cpu_threshold (list): CPU memory usage upper and lower threshold (%) to trigger adjustment. 
- - gpu_threshold (list): GPU memory usage upper and lower threshold (%) to trigger adjustment. - - increment (int): The amount by which to increase or decrease the batch size. - - Returns: - - batch_size (int): The adjusted batch size. - """ - - if resources is None: - resources = sys_resources() - cpu_memory_percent = resources.get('cpu_memory_percent') - gpu_memory_percent = resources.get('gpu_memory_percent') - - min_batch_size = 8 - max_batch_size = 128 - - if cpu_memory_percent > cpu_threshold[1] or gpu_memory_percent > gpu_threshold[1]: - print(f"High memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - batch_size_new = max(batch_size - increment, min_batch_size) # Reduce batch_size if above threshold - - elif cpu_memory_percent < cpu_threshold[0] and gpu_memory_percent < gpu_threshold[0]: - print(f"Low memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - batch_size_new = min(batch_size + increment, max_batch_size) # Increase batch_size if below threshold - - else: - print(f"Memory usage is under control: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - batch_size_new = batch_size # Keep batch_size the same - - print(f"Updated batch size:{batch_size_new}") - - batch_size = batch_size_new - return batch_size - - -def adjust_learning_rate(lr, cpu_threshold=[20, 80], gpu_threshold=[20, 80], increment=0.01, resources=None): - """ - Dynamically adjust the learning rate based on CPU and GPU resource usage. - - Parameters: - - lr (float): Current learning rate. - - cpu_threshold (list): CPU compute usage upper and lower threshold (%) to trigger adjustment. - - gpu_threshold (list): GPU compute usage upper and lower threshold (%) to trigger adjustment. - - increment (float): Learning rate adjustment per iteration. - - Returns: - - adjusted_lr (float): Updated learning rate. 
- """ - - if resources is None: - resources = sys_resources() - cpu_compute_percent = resources.get('cpu_compute_percent') - gpu_compute_percent = resources.get('gpu_compute_percent') - - min_lr = 1e-6 - max_lr = 0.1 - - # Adjust learning rate based on CPU/GPU compute - if cpu_compute_percent > cpu_threshold[1] or gpu_compute_percent > gpu_threshold[1]: - print(f"High resource usage detected: CPU={cpu_compute_percent}%, GPU={gpu_compute_percent}%") - adjusted_lr = max(lr*(1-increment), min_lr) # Decrease learning rate to slow down training - - elif cpu_compute_percent < cpu_threshold[0] or gpu_compute_percent < gpu_threshold[0]: - print(f"Low resource usage detected: CPU={cpu_compute_percent}%, GPU={gpu_compute_percent}%") - adjusted_lr = min(lr*(1+increment), max_lr) # Increase learning rate to speed up training - - else: - adjusted_lr = lr # Keep the current learning rate if within acceptable thresholds - - print(f"Adjusted learning rate: {adjusted_lr}") - - lr = adjusted_lr - return lr - - -def adjust_pruning(model, pruning_ratio, cpu_threshold=[20, 80], gpu_threshold=[20, 80], increment=0.05): - """ - Adjusts the pruning ratio of a model based on CPU and GPU memory usage. - - Parameters: - - model (tf.keras.Model): The current model to be pruned. - - pruning_ratio (float): The initial pruning ratio (fraction of weights to prune, e.g., 0.1 for 10%). - - cpu_threshold (list): CPU memory usage upper and lower threshold (%) to trigger adjustment. - - gpu_threshold (list): GPU memory usage upper and lower threshold (%) to trigger adjustment. - - increment (float): The amount by which to increase or decrease the pruning ratio. - - Returns: - - pruned_model (tf.keras.Model): The pruned model. - - new_pruning_ratio (float): The adjusted pruning ratio. 
- """ - import tensorflow_model_optimization as tfmot - - # Monitor memory usage - resources = sys_resources() - cpu_memory_percent = resources.get('cpu_memory_percent') - gpu_memory_percent = resources.get('gpu_memory_percent') - - # Adjust pruning ratio - min_pruning_ratio = 0.1 - max_pruning_ratio = 0.8 - - if cpu_memory_percent > cpu_threshold[1] or gpu_memory_percent > gpu_threshold[1]: - print(f"High memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - new_pruning_ratio = min(pruning_ratio + increment, max_pruning_ratio) # Increase pruning ratio - - elif cpu_memory_percent < cpu_threshold[0] and gpu_memory_percent < gpu_threshold[0]: - print(f"Low memory usage detected: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - new_pruning_ratio = max(pruning_ratio - increment, min_pruning_ratio) # Decrease pruning ratio - - else: - print(f"Memory usage is under control: CPU={cpu_memory_percent}%, GPU={gpu_memory_percent}%") - new_pruning_ratio = pruning_ratio # Keep pruning ratio the same - - # Apply pruning to the model - prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude - pruning_params = { - 'pruning_schedule': tfmot.sparsity.keras.ConstantSparsity(new_pruning_ratio, begin_step=0), - } - pruned_model = prune_low_magnitude(model, **pruning_params) - - print(f"Updated pruning ratio: {new_pruning_ratio}") - - return pruned_model, new_pruning_ratio - - -def adjust_grad_accum(cpu_threshold=[20, 80], gpu_memory_threshold=[50, 90], current_grad_accum=1, increment=1): - """ - Dynamically adjust the number of gradient accumulation steps based on memory usage and compute resource utilization. - - Parameters: - - cpu_threshold (list): CPU computation usage upper and lower threshold (%) to trigger adjustment. - - gpu_memory_threshold (list): GPU memory usage upper and lower threshold (%) to trigger adjustment. - - current_grad_accum (int): Current number of gradient accumulation steps. 
- - increment (int): Number of gradient accumulation steps to increase or decrease at a time. - - Returns: - - grad_accum_steps (int): Updated number of gradient accumulation steps. - """ - - resources = sys_resources() - cpu_compute_percent = resources.get('cpu_compute_percent') - gpu_memory_percent = resources.get('gpu_memory_percent') - - # Adjust gradient accumulation based on memory and compute - if gpu_memory_percent > gpu_memory_threshold[1]: - print(f"High GPU memory usage detected: {gpu_memory_percent}%") - grad_accum_steps = min(current_grad_accum + increment, 16) # Increase gradient accumulation (larger batch sim.) - elif gpu_memory_percent < gpu_memory_threshold[0] and cpu_compute_percent < cpu_threshold[0]: - print(f"Low resource usage detected: CPU={cpu_compute_percent}%, GPU={gpu_memory_percent}%") - grad_accum_steps = max(current_grad_accum - increment, 1) # Decrease gradient accumulation to speed up training - else: - grad_accum_steps = current_grad_accum - - print(f"Adjusted gradient accumulation steps: {grad_accum_steps}") - return grad_accum_steps - - -def adjust_threads(cpu_threshold=[20, 80], gpu_threshold=[20, 80], increment=1): - """ - Dynamically adjust the number of inter-op and intra-op threads based on CPU and GPU computation usage. - - Parameters: - - cpu_threshold (list): CPU computation usage upper and lower threshold (%) to trigger adjustment. - - gpu_threshold (list): GPU computation usage upper and lower threshold (%) to trigger adjustment. - - increment (int): Number of threads to increase or decrease at a time. - - Returns: - - inter_threads (int): Updated inter-op threads. - - intra_threads (int): Updated intra-op threads. 
- """ - # Get current resource usage - resources = sys_resources() - cpu_compute_percent = resources.get('cpu_compute_percent') - gpu_compute_percent = resources.get('gpu_compute_percent') - - # Get current threading settings - inter_threads = tf.config.threading.get_inter_op_parallelism_threads() - intra_threads = tf.config.threading.get_intra_op_parallelism_threads() - - # Set min and max thresholds for inter and intra threads - min_threads = 1 - max_threads = resources.get('cpu_cores') - - if cpu_compute_percent > cpu_threshold[1] or gpu_compute_percent > gpu_threshold[1]: - print(f"High resource usage detected: CPU={cpu_compute_percent}%, GPU={gpu_compute_percent}%") - inter_threads = max(inter_threads - increment, min_threads) # Reduce threads if above threshold - intra_threads = max(intra_threads - increment, min_threads) - elif cpu_compute_percent < cpu_threshold[0] or gpu_compute_percent < gpu_threshold[0]: - print(f"Low resource usage detected: CPU={cpu_compute_percent}%, GPU={gpu_compute_percent}%") - inter_threads = min(inter_threads + increment, max_threads) # Increase threads if below threshold - intra_threads = min(intra_threads + increment, max_threads) - else: - print(f"Resources are under control: CPU={cpu_compute_percent}%, GPU={gpu_compute_percent}%") - - # Apply adjusted thread settings - tf.config.threading.set_inter_op_parallelism_threads(inter_threads) - tf.config.threading.set_intra_op_parallelism_threads(intra_threads) - - print(f"Updated thread settings: inter-op = {inter_threads}, intra-op = {intra_threads}") - - return inter_threads, intra_threads diff --git a/edgetrain/resource_monitor.py b/edgetrain/resource_monitor.py index 7263bc7..464cfec 100644 --- a/edgetrain/resource_monitor.py +++ b/edgetrain/resource_monitor.py @@ -53,7 +53,7 @@ def sys_resources(): # Function to log resource usage and batch size -def log_usage_once(log_file, lr, batch_size, grad_accum, num_epoch=0, resources=None): +def log_usage_once(log_file, lr, batch_size, 
num_epoch=0, resources=None): """ Log GPU and CPU resource usage once. @@ -61,7 +61,6 @@ def log_usage_once(log_file, lr, batch_size, grad_accum, num_epoch=0, resources= - log_file (str): Path to the log file. - lr (float): Learning rate. - batch_size (int): Current batch size. - - grad_accum (int): Gradient accumulation steps. - num_epoch (int, optional): Current epoch number. Default is 0. """ @@ -97,8 +96,7 @@ def log_usage_once(log_file, lr, batch_size, grad_accum, num_epoch=0, resources= gpu_compute_percent, gpu_memory_percent, batch_size, - lr, - grad_accum + lr ] # Append log entry to the file diff --git a/setup.py b/setup.py index 7f068c8..fddb64e 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ "pandas>=1.5.0", "numpy>=1.24.0", "pynvml>=8.0.0", - "torch>=2.5.1", + "torch>=2.5.1", ], extras_require={ 'dev': [ diff --git a/tests/test_adjust_batch_size.py b/tests/test_adjust_batch_size.py deleted file mode 100644 index 5e36b5a..0000000 --- a/tests/test_adjust_batch_size.py +++ /dev/null @@ -1,31 +0,0 @@ -from edgetrain import adjust_batch_size - -def test_adjust_batch_size_high_usage(): - # Mock system resources with high memory usage - mock_resources = {'cpu_memory_percent': 85, 'gpu_memory_percent': 90} - batch_size = adjust_batch_size(32, resources=mock_resources) - assert batch_size < 32, f"Expected batch size to decrease under high memory usage, but got {batch_size}." - -def test_adjust_batch_size_low_usage(): - # Mock system resources with low memory usage - mock_resources = {'cpu_memory_percent': 15, 'gpu_memory_percent': 10} - batch_size = adjust_batch_size(32, resources=mock_resources) - assert batch_size > 32, f"Expected batch size to increase under low memory usage, but got {batch_size}." 
- -def test_adjust_batch_size_normal_usage(): - # Mock system resources with normal memory usage - mock_resources = {'cpu_memory_percent': 50, 'gpu_memory_percent': 50} - batch_size = adjust_batch_size(32, resources=mock_resources) - assert batch_size == 32, f"Expected batch size to remain unchanged under normal memory usage, but got {batch_size}." - -def test_adjust_batch_size_min_limit(): - # Mock system resources with high memory usage, ensuring batch size doesn't go below the minimum - mock_resources = {'cpu_memory_percent': 85, 'gpu_memory_percent': 90} - batch_size = adjust_batch_size(8, resources=mock_resources) - assert batch_size == 8, f"Expected batch size to remain at 8 (min limit), but got {batch_size}." - -def test_adjust_batch_size_max_limit(): - # Mock system resources with low memory usage, ensuring batch size doesn't exceed the maximum - mock_resources = {'cpu_memory_percent': 10, 'gpu_memory_percent': 10} - batch_size = adjust_batch_size(128, resources=mock_resources) - assert batch_size == 128, f"Expected batch size to remain at 128 (max limit), but got {batch_size}." diff --git a/tests/test_adjust_learning_rate.py b/tests/test_adjust_learning_rate.py deleted file mode 100644 index e3b269d..0000000 --- a/tests/test_adjust_learning_rate.py +++ /dev/null @@ -1,31 +0,0 @@ -from edgetrain import adjust_learning_rate - -def test_adjust_learning_rate_high_usage(): - # Mock system resources with high compute usage - mock_resources = {'cpu_compute_percent': 85, 'gpu_compute_percent': 90} - adjusted_lr = adjust_learning_rate(0.05, resources=mock_resources) - assert adjusted_lr < 0.05, "Learning rate should decrease under high resource usage." 
- -def test_adjust_learning_rate_low_usage(): - # Mock system resources with low compute usage - mock_resources = {'cpu_compute_percent': 10, 'gpu_compute_percent': 15} - adjusted_lr = adjust_learning_rate(0.05, resources=mock_resources) - assert adjusted_lr > 0.05, "Learning rate should increase under low resource usage." - -def test_adjust_learning_rate_normal_usage(): - # Mock system resources with normal compute usage - mock_resources = {'cpu_compute_percent': 50, 'gpu_compute_percent': 50} - adjusted_lr = adjust_learning_rate(0.05, resources=mock_resources) - assert adjusted_lr == 0.05, "Learning rate should remain unchanged under normal resource usage." - -def test_adjust_learning_rate_min_limit(): - # Mock system resources with high compute usage and ensure learning rate doesn't drop below min - mock_resources = {'cpu_compute_percent': 85, 'gpu_compute_percent': 90} - adjusted_lr = adjust_learning_rate(1e-6, resources=mock_resources) - assert adjusted_lr == 1e-6, "Learning rate should not go below the minimum limit." - -def test_adjust_learning_rate_max_limit(): - # Mock system resources with low compute usage and ensure learning rate doesn't exceed max - mock_resources = {'cpu_compute_percent': 10, 'gpu_compute_percent': 15} - adjusted_lr = adjust_learning_rate(0.1, resources=mock_resources) - assert adjusted_lr == 0.1, "Learning rate should not exceed the maximum limit." diff --git a/tests/test_adjust_pruning.py b/tests/test_adjust_pruning.py deleted file mode 100644 index 3714b06..0000000 --- a/tests/test_adjust_pruning.py +++ /dev/null @@ -1,84 +0,0 @@ -import pytest -from unittest.mock import patch, MagicMock -import tensorflow as tf -from your_module import adjust_pruning # Replace `your_module` with the actual module name. 
- -# Mock sys_resources to simulate CPU and GPU memory usage -@pytest.fixture -def mock_sys_resources(): - with patch("your_module.sys_resources") as mock: - yield mock - -# Helper function to create a simple model -@pytest.fixture -def simple_model(): - model = tf.keras.Sequential([ - tf.keras.layers.Dense(10, input_shape=(10,)), - tf.keras.layers.Dense(1) - ]) - return model - -def test_pruning_ratio_increase(mock_sys_resources, simple_model): - mock_sys_resources.return_value = { - 'cpu_memory_percent': 85, # Above the high threshold - 'gpu_memory_percent': 85 - } - model = simple_model - initial_pruning_ratio = 0.1 - _, new_pruning_ratio = adjust_pruning( - model, pruning_ratio=initial_pruning_ratio, - cpu_threshold=[20, 80], gpu_threshold=[20, 80] - ) - assert new_pruning_ratio > initial_pruning_ratio # Pruning ratio should increase - -def test_pruning_ratio_decrease(mock_sys_resources, simple_model): - mock_sys_resources.return_value = { - 'cpu_memory_percent': 15, # Below the low threshold - 'gpu_memory_percent': 15 - } - model = simple_model - initial_pruning_ratio = 0.2 - _, new_pruning_ratio = adjust_pruning( - model, pruning_ratio=initial_pruning_ratio, - cpu_threshold=[20, 80], gpu_threshold=[20, 80] - ) - assert new_pruning_ratio < initial_pruning_ratio # Pruning ratio should decrease - -def test_pruning_ratio_no_change(mock_sys_resources, simple_model): - mock_sys_resources.return_value = { - 'cpu_memory_percent': 50, # Within the threshold - 'gpu_memory_percent': 50 - } - model = simple_model - initial_pruning_ratio = 0.3 - _, new_pruning_ratio = adjust_pruning( - model, pruning_ratio=initial_pruning_ratio, - cpu_threshold=[20, 80], gpu_threshold=[20, 80] - ) - assert new_pruning_ratio == initial_pruning_ratio # Pruning ratio should not change - -def test_pruning_ratio_min_boundary(mock_sys_resources, simple_model): - mock_sys_resources.return_value = { - 'cpu_memory_percent': 15, # Below the low threshold - 'gpu_memory_percent': 15 - } - model = 
simple_model - initial_pruning_ratio = 0.1 # At the minimum boundary - _, new_pruning_ratio = adjust_pruning( - model, pruning_ratio=initial_pruning_ratio, - cpu_threshold=[20, 80], gpu_threshold=[20, 80] - ) - assert new_pruning_ratio == 0.1 # Pruning ratio should not go below minimum - -def test_pruning_ratio_max_boundary(mock_sys_resources, simple_model): - mock_sys_resources.return_value = { - 'cpu_memory_percent': 85, # Above the high threshold - 'gpu_memory_percent': 85 - } - model = simple_model - initial_pruning_ratio = 0.8 # At the maximum boundary - _, new_pruning_ratio = adjust_pruning( - model, pruning_ratio=initial_pruning_ratio, - cpu_threshold=[20, 80], gpu_threshold=[20, 80] - ) - assert new_pruning_ratio == 0.8 # Pruning ratio should not exceed maximum From 9e8a8979f0050e665ff860f7b9d0b9ef353bf958 Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Thu, 30 Jan 2025 14:11:06 +0100 Subject: [PATCH 3/7] All tests pass for new scoring/priority system...still need to implement actual pruning :/ --- .gitignore | 3 +- edgetrain/__init__.py | 4 +- edgetrain/adjust_train_parameters.py | 9 +- edgetrain/calculate_scores.py | 7 +- edgetrain/dynamic_train.py | 23 ++++-- edgetrain/resource_monitor.py | 21 ++++- tests/__init__.py | 4 +- tests/test_adjust_train_parameters.py | 113 ++++++++++++++++++++++++++ tests/test_calculate_priorities.py | 64 +++++++++++++++ tests/test_calculate_scores.py | 81 ++++++++++++++++++ tests/test_log_usage_once.py | 37 ++++++--- 11 files changed, 332 insertions(+), 34 deletions(-) create mode 100644 tests/test_adjust_train_parameters.py create mode 100644 tests/test_calculate_priorities.py create mode 100644 tests/test_calculate_scores.py diff --git a/.gitignore b/.gitignore index 73c0e95..4635fc3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ -# Ignore Databricks folder +# Ignore Databricks, vscode .databricks/ +.vscode/ # virtual environment venv/ diff --git a/edgetrain/__init__.py b/edgetrain/__init__.py index 
f4fbcee..d1cce49 100644 --- a/edgetrain/__init__.py +++ b/edgetrain/__init__.py @@ -1,5 +1,7 @@ from .resource_monitor import sys_resources, log_usage_once -from .resource_adjust import adjust_threads, adjust_batch_size, adjust_grad_accum, adjust_learning_rate +from .calculate_scores import compute_scores, normalize_scores +from .calculate_priorities import define_priorities +from .adjust_train_parameters import adjust_training_parameters from .edgetrain_folder import get_edgetrain_folder from .train_visualize import log_usage_plot, log_train_time, training_history_plot from .create_model import create_model_tf, create_model_torch diff --git a/edgetrain/adjust_train_parameters.py b/edgetrain/adjust_train_parameters.py index 3e8bc07..c53ea9e 100644 --- a/edgetrain/adjust_train_parameters.py +++ b/edgetrain/adjust_train_parameters.py @@ -1,12 +1,12 @@ from edgetrain import sys_resources -def adjust_training_parameters(priority_value, batch_size, pruning_ratio, lr, accuracy_score): +def adjust_training_parameters(priority_values, batch_size, pruning_ratio, lr, accuracy_score, resources=None): """ Adjust the training parameters (batch size, pruning ratio, learning rate) based on the highest priority score, moving parameters in the opposite direction if resource usage or accuracy trends improve. Parameters: - - priority_scores (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. + - priority_values (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. - batch_size (int): Current batch size. - pruning_ratio (float): Current pruning ratio. - lr (float): Current learning rate. 
@@ -19,10 +19,11 @@ def adjust_training_parameters(priority_value, batch_size, pruning_ratio, lr, ac """ # Get system resources - resources = sys_resources() + if resources is None: + resources = sys_resources() # Determine which parameter has the highest priority score - highest_priority = max(priority_scores, key=priority_scores.get) + highest_priority = max(priority_values, key=priority_values.get) # Adjust the parameter based on system resources and highest priority score if highest_priority == "batch_size": diff --git a/edgetrain/calculate_scores.py b/edgetrain/calculate_scores.py index 8231bb6..4dc5005 100644 --- a/edgetrain/calculate_scores.py +++ b/edgetrain/calculate_scores.py @@ -1,6 +1,6 @@ from edgetrain import sys_resources -def compute_scores(previous_accuracy, current_accuracy, score_ranges=None): +def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=None): """ Compute memory, accuracy, and loss scores, and normalize them. @@ -14,7 +14,8 @@ def compute_scores(previous_accuracy, current_accuracy, score_ranges=None): """ # Get system resources - resources = sys_resources() + if resources is None: + resources = sys_resources() # Default score ranges if score_ranges is None: @@ -59,7 +60,7 @@ def normalize_scores(raw_scores, score_ranges): normalized_scores = {} for score_name, score_value in raw_scores.items(): - score_range = score_ranges.get(score_name, 1) # Default range is 1 if not specified + score_range = score_ranges.get(f'{score_name}_range', 1) # Default range is 1 if not specified normalized_score = score_value / score_range normalized_scores[score_name] = normalized_score diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index a61c537..9df0af8 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -7,7 +7,7 @@ def dynamic_train( epochs=10, batch_size=32, lr=1e-3, - pruning_ratio=0.2, + pruning=0.2, log_file="resource_log.csv", dynamic_adjustments=True ): @@ -28,7 +28,9 @@ 
def dynamic_train( """ # Log initial resource usage - log_usage_once(log_file, lr=lr, batch_size=batch_size, num_epoch=0) + normalized_scores = {"memory_score": 0, "accuracy_score": 0} + priority_value = {"batch_size": 0, "pruning": 0, "learning_rate": 0,} + log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch=0, resources=None) # Create the MirroredStrategy for distributed training strategy = tf.distribute.MirroredStrategy() @@ -64,9 +66,6 @@ def dynamic_train( # Update accuracy curr_accuracy = history.history['accuracy'][-1] - # Log resource usage for the current epoch - log_usage_once(log_file, lr=lr, batch_size=batch_size, num_epoch=epoch + 1) - if dynamic_adjustments: # Calculate performance and resource usage scores normalized_scores = compute_scores(prev_accuracy, curr_accuracy) @@ -75,15 +74,21 @@ def dynamic_train( priority_value = define_priorities(normalized_scores) # Adjust training parameters - batch_size, pruning_ratio, lr = adjust_training_parameters( - priority_scores=priority_value, + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters( + priority_values=priority_value, batch_size=batch_size, - pruning_ratio=pruning_ratio, + pruning_ratio=pruning, lr=lr, accuracy_score=curr_accuracy ) + batch_size = adjusted_batch_size + pruning = adjusted_pruning_ratio + lr = adjusted_lr - print(f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning_ratio}, learning_rate={lr}") + print(f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning}, learning_rate={lr}") + + # Log resource usage for the current epoch + log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch=epoch + 1, resources=None) # Update previous accuracy prev_accuracy = curr_accuracy diff --git a/edgetrain/resource_monitor.py b/edgetrain/resource_monitor.py index 464cfec..74e6d26 100644 --- a/edgetrain/resource_monitor.py +++ 
b/edgetrain/resource_monitor.py @@ -53,7 +53,7 @@ def sys_resources(): # Function to log resource usage and batch size -def log_usage_once(log_file, lr, batch_size, num_epoch=0, resources=None): +def log_usage_once(log_file, batch_size, pruning, lr, normalize_scores, priority_value, num_epoch=0, resources=None): """ Log GPU and CPU resource usage once. @@ -74,10 +74,13 @@ def log_usage_once(log_file, lr, batch_size, num_epoch=0, resources=None): header = [ 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', 'GPU RAM (%)', 'GPU Usage (%)', - 'Batch Size', 'Learning Rate', 'Grad Accum' + 'Mem Score', 'Acc Score', + 'Priority Batch Size', 'Priority Pruning', 'Priority Learning Rate', + 'Batch Size', 'Pruning', 'Learning Rate', ] writer.writerow(header) + # Get resource usage if resources is None: resources = sys_resources() @@ -85,7 +88,13 @@ def log_usage_once(log_file, lr, batch_size, num_epoch=0, resources=None): cpu_memory_percent = resources.get('cpu_memory_percent') gpu_compute_percent = resources.get('gpu_compute_percent') gpu_memory_percent = resources.get('gpu_memory_percent') - + + memory_score = normalize_scores.get('memory_score') + accuracy_score = normalize_scores.get('accuracy_score') + batch_size_priority_value = priority_value.get('batch_size') + pruning_priority_value = priority_value.get('pruning') + learning_rate_priority_value = priority_value.get('learning_rate') + # Prepare log entry log_entry = [ @@ -95,7 +104,13 @@ def log_usage_once(log_file, lr, batch_size, num_epoch=0, resources=None): cpu_memory_percent, gpu_compute_percent, gpu_memory_percent, + memory_score, + accuracy_score, + batch_size_priority_value, + pruning_priority_value, + learning_rate_priority_value, batch_size, + pruning, lr ] diff --git a/tests/__init__.py b/tests/__init__.py index 3ca42cb..5b81147 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -7,7 +7,9 @@ # Import modules from scaleml from edgetrain.resource_monitor import sys_resources, log_usage_once -from 
edgetrain.resource_adjust import adjust_threads, adjust_batch_size, adjust_grad_accum, adjust_learning_rate +from edgetrain.calculate_scores import compute_scores, normalize_scores +from edgetrain.calculate_priorities import define_priorities +from edgetrain.adjust_train_parameters import adjust_training_parameters from edgetrain.edgetrain_folder import get_edgetrain_folder from edgetrain.train_visualize import log_usage_plot, log_train_time, training_history_plot from edgetrain.create_model import create_model_tf, create_model_torch diff --git a/tests/test_adjust_train_parameters.py b/tests/test_adjust_train_parameters.py new file mode 100644 index 0000000..71d57f1 --- /dev/null +++ b/tests/test_adjust_train_parameters.py @@ -0,0 +1,113 @@ +import pytest +from unittest.mock import patch +from edgetrain import adjust_training_parameters + +@pytest.fixture +def default_parameters(): + return { + "priority_values": {"batch_size": 0.5, "pruning": 0.3, "learning_rate": 0.2}, + "batch_size": 32, + "pruning_ratio": 0.4, + "lr": 0.001, + "accuracy_score": 0.8 + } + + +def test_adjust_batch_size_high_memory(default_parameters): + # Simulate high memory usage + sys_resources = {"cpu_memory_percent": 80, "gpu_memory_percent": 85} + params = default_parameters.copy() + params["priority_values"]["batch_size"] = 0.8 # Highest priority + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + + assert adjusted_batch_size == 16, "Batch size adjustment for high memory usage failed." + assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." + assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." 
+ + +def test_adjust_batch_size_low_memory(default_parameters): + # Simulate low memory usage + sys_resources = {"cpu_memory_percent": 40, "gpu_memory_percent": 35} + params = default_parameters.copy() + params["priority_values"]["batch_size"] = 0.8 # Highest priority + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + + assert adjusted_batch_size == 64, "Batch size adjustment for low memory usage failed." + assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." + assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." + + +def test_adjust_pruning_high_memory(default_parameters): + # Simulate high memory usage + sys_resources = {"cpu_memory_percent": 85, "gpu_memory_percent": 90} + params = default_parameters.copy() + params["priority_values"]["pruning"] = 0.8 # Highest priority + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + print(adjusted_pruning_ratio) + assert adjusted_pruning_ratio == pytest.approx(0.5, rel=1e-3), "Pruning ratio adjustment for high memory usage failed." + assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." + assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." + + +def test_adjust_pruning_low_memory(default_parameters): + # Simulate low memory usage + sys_resources = {"cpu_memory_percent": 40, "gpu_memory_percent": 30} + params = default_parameters.copy() + params["priority_values"]["pruning"] = 0.8 # Highest priority + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + print(adjusted_pruning_ratio) + assert adjusted_pruning_ratio == pytest.approx(0.3, rel=1e-3), "Pruning ratio adjustment for low memory usage failed." 
+ assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." + assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." + + +def test_adjust_learning_rate_low_accuracy(default_parameters): + # Simulate low accuracy + sys_resources = {"cpu_memory_percent": 60, "gpu_memory_percent": 60} + params = default_parameters.copy() + params["priority_values"]["learning_rate"] = 0.8 # Highest priority + params["accuracy_score"] = 0.03 + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + + assert adjusted_lr == pytest.approx(0.0005, rel=1e-2), "Learning rate adjustment for low accuracy failed." + assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." + assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." + + +def test_adjust_learning_rate_high_accuracy(default_parameters): + # Simulate high accuracy + sys_resources = {"cpu_memory_percent": 60, "gpu_memory_percent": 60} + params = default_parameters.copy() + params["priority_values"]["learning_rate"] = 0.8 # Highest priority + params["accuracy_score"] = 0.97 + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + + assert adjusted_lr == pytest.approx(0.0012, rel=1e-2), "Learning rate adjustment for high accuracy failed." + assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." + assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." 
+ + +def test_no_adjustments_when_no_priorities(default_parameters): + # Simulate balanced memory and low priorities + sys_resources = {"cpu_memory_percent": 60, "gpu_memory_percent": 60} + params = default_parameters.copy() + params["priority_values"] = {"batch_size": 0.0, "pruning": 0.0, "learning_rate": 0.0} + params["resources"] = sys_resources + + adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + + assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged with no priorities." + assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged with no priorities." + assert adjusted_lr == params["lr"], "Learning rate should remain unchanged with no priorities." diff --git a/tests/test_calculate_priorities.py b/tests/test_calculate_priorities.py new file mode 100644 index 0000000..db5d13e --- /dev/null +++ b/tests/test_calculate_priorities.py @@ -0,0 +1,64 @@ +import pytest +from edgetrain import define_priorities + +def test_define_priorities_with_default_priorities(): + # Test default priorities with normalized scores + normalized_scores = { + "memory_score": 0.8, + "accuracy_score": 0.4 + } + + priority_value = define_priorities(normalized_scores) + + # Default priorities: batch_size: 0.3, pruning: 0.3, accuracy_improvement: 0.4 + assert priority_value["batch_size"] == pytest.approx(0.24, rel=1e-3), "Batch size priority calculation failed." + assert priority_value["pruning"] == pytest.approx(0.20, rel=1e-3), "Pruning priority calculation failed." + assert priority_value["learning_rate"] == pytest.approx(0.16, rel=1e-3), "Learning rate priority calculation failed." 
+ + +def test_define_priorities_with_custom_priorities(): + # Test user-defined priorities with normalized scores + normalized_scores = { + "memory_score": 0.5, + "accuracy_score": 0.7 + } + user_priorities = { + "batch_size_adjustment": 0.4, + "pruning_adjustment": 0.4, + "accuracy_improvement": 0.2 + } + + priority_value = define_priorities(normalized_scores, user_priorities) + + # Custom priorities: batch_size: 0.4, pruning: 0.4, accuracy_improvement: 0.2 + assert priority_value["batch_size"] == pytest.approx(0.2, rel=1e-3), "Batch size priority with custom priorities failed." + assert priority_value["pruning"] == pytest.approx(0.17, rel=1e-3), "Pruning priority with custom priorities failed." + assert priority_value["learning_rate"] == pytest.approx(0.14, rel=1e-3), "Learning rate priority with custom priorities failed." + + +def test_define_priorities_with_zero_scores(): + # Test edge case where all normalized scores are zero + normalized_scores = { + "memory_score": 0.0, + "accuracy_score": 0.0 + } + + priority_value = define_priorities(normalized_scores) + + assert priority_value["batch_size"] == 0.0, "Batch size priority with zero scores failed." + assert priority_value["pruning"] == 0.0, "Pruning priority with zero scores failed." + assert priority_value["learning_rate"] == 0.0, "Learning rate priority with zero scores failed." + + +def test_define_priorities_with_extreme_scores(): + # Test edge case with extreme normalized scores + normalized_scores = { + "memory_score": 1.0, + "accuracy_score": 1.0 + } + + priority_value = define_priorities(normalized_scores) + + assert priority_value["batch_size"] == 0.3, "Batch size priority with extreme scores failed." + assert priority_value["pruning"] == 0.35, "Pruning priority with extreme scores failed." + assert priority_value["learning_rate"] == 0.4, "Learning rate priority with extreme scores failed." 
diff --git a/tests/test_calculate_scores.py b/tests/test_calculate_scores.py new file mode 100644 index 0000000..7c3ec04 --- /dev/null +++ b/tests/test_calculate_scores.py @@ -0,0 +1,81 @@ +import pytest +from edgetrain import compute_scores, normalize_scores + +def test_normalize_scores(): + # Test normalization of raw scores with specified ranges + raw_scores = {"memory_score": 50, "accuracy_score": 0.5} + score_ranges = {"memory_score_range": 100, "accuracy_score_range": 1} + normalized = normalize_scores(raw_scores, score_ranges) + assert normalized["memory_score"] == 0.5, "Memory score normalization failed." + assert normalized["accuracy_score"] == 0.5, "Accuracy score normalization failed." + + +def test_compute_scores(): + # Mock system resource usage + mock_resources = { + "num_gpus": 1, + "cpu_memory_percent": 60, + "gpu_memory_percent": 40 + } + + # Test with a decrease in accuracy + previous_accuracy = 0.8 + current_accuracy = 0.6 + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + assert scores["memory_score"] == 0.5, "Memory score calculation failed with GPUs." + assert scores["accuracy_score"] == pytest.approx(0.2, rel=1e-3), "Accuracy score calculation failed." + + # Test with no GPUs + mock_resources = { + "num_gpus": 0, + "cpu_memory_percent": 75 + } + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + assert scores["memory_score"] == 0.75, "Memory score calculation failed without GPUs." + + # Test edge case with accuracy stagnation + previous_accuracy = 0.7 + current_accuracy = 0.7 + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + assert scores["accuracy_score"] == 0, "Accuracy score should be 0 for stagnation." 
+ + # Test edge case where current accuracy is higher (clamped at 0) + previous_accuracy = 0.6 + current_accuracy = 0.8 + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + assert scores["accuracy_score"] == 0, "Accuracy score should not be negative." + + +def test_compute_scores_with_custom_ranges(): + # Mock resources + mock_resources = { + "num_gpus": 1, + "cpu_memory_percent": 80, + "gpu_memory_percent": 60 + } + + # Test custom ranges + score_ranges = { + "memory_score_range": 200, + "accuracy_score_range": 0.5 + } + + previous_accuracy=0.8 + current_accuracy=0.6 + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=score_ranges, resources=mock_resources) + assert scores["memory_score"] == pytest.approx(0.35, rel=1e-3), "Memory score normalization with custom range failed." + assert scores["accuracy_score"] == pytest.approx(0.4, rel=1e-3), "Accuracy score normalization with custom range failed." + + +def test_compute_scores_with_acc_improvement(): + # Mock resources + mock_resources = { + "num_gpus": 1, + "cpu_memory_percent": 80, + "gpu_memory_percent": 60 + } + + previous_accuracy=0.6 + current_accuracy=0.8 + scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + assert scores["accuracy_score"] == pytest.approx(0, rel=1e-3), "Accuracy score normalization with custom range failed." 
diff --git a/tests/test_log_usage_once.py b/tests/test_log_usage_once.py index 3adfb01..6f48a13 100644 --- a/tests/test_log_usage_once.py +++ b/tests/test_log_usage_once.py @@ -1,13 +1,12 @@ -import os -import csv +import os, csv from datetime import datetime -from unittest.mock import patch, MagicMock -from edgetrain import log_usage_once, sys_resources +from edgetrain import log_usage_once, compute_scores, define_priorities def test_log_usage_once(tmpdir): # Mock resource usage mock_resources = { - 'cpu_compute_percent': 35.0, + 'num_gpus': 0, + 'cpu_compute_percent': 30.0, 'cpu_memory_percent': 40.0, 'gpu_compute_percent': 45.0, 'gpu_memory_percent': 50.0 @@ -19,9 +18,16 @@ def test_log_usage_once(tmpdir): # Call the function to log usage lr = 0.001 batch_size = 32 - grad_accum = 1 + pruning = 0.5 num_epoch = 5 - log_usage_once(log_file, lr, batch_size, grad_accum, num_epoch, resources=mock_resources) + prev_accuracy = 0.8 + curr_accuracy = 0.6 + + # Calculate performance and resource usage scores + normalized_scores = compute_scores(prev_accuracy, curr_accuracy, score_ranges=None, resources=mock_resources) + priority_value = define_priorities(normalized_scores) + + log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch, resources=mock_resources) # Verify the log file is created assert os.path.exists(log_file), "Log file was not created." @@ -33,19 +39,26 @@ def test_log_usage_once(tmpdir): # Check if the header is correct expected_header = [ - 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', - 'GPU RAM (%)', 'GPU Usage (%)', - 'Batch Size', 'Learning Rate', 'Grad Accum' - ] + 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', + 'GPU RAM (%)', 'GPU Usage (%)', + 'Mem Score', 'Acc Score', + 'Priority Batch Size', 'Priority Pruning', 'Priority Learning Rate', + 'Batch Size', 'Pruning', 'Learning Rate', + ] assert reader.fieldnames == expected_header, "Log file header is incorrect." 
# Check if the log entry contains expected values assert len(rows) == 1, "Log file should contain one entry." log_entry = rows[0] assert log_entry['Epoch #'] == str(num_epoch), "Epoch number mismatch." + assert log_entry['Mem Score'] == str(normalized_scores.get('memory_score')), "Mem score mismatch." + assert log_entry['Acc Score'] == str(normalized_scores.get('accuracy_score')), "Acc score mismatch." + assert log_entry['Priority Batch Size'] == str(priority_value.get('batch_size')), "Priority batch size mismatch." + assert log_entry['Priority Pruning'] == str(priority_value.get('pruning')), "Priority pruning mismatch." + assert log_entry['Priority Learning Rate'] == str(priority_value.get('learning_rate')), "Priority learning rate mismatch." assert log_entry['Batch Size'] == str(batch_size), "Batch size mismatch." + assert log_entry['Pruning'] == str(pruning), "Pruning mismatch." assert log_entry['Learning Rate'] == str(lr), "Learning rate mismatch." - assert log_entry['Grad Accum'] == str(grad_accum), "Grad accumulation mismatch." 
# Validate timestamp format try: From 0fc4b8cb86ff99bd2dcc863b0cd7f3bee48aac8d Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Tue, 4 Feb 2025 17:28:20 +0100 Subject: [PATCH 4/7] Pytests pass, example is stable, want to tweak some things before v1.0.0 release --- edgetrain/adjust_train_parameters.py | 21 +------ edgetrain/calculate_priorities.py | 12 ++-- edgetrain/create_model.py | 2 +- edgetrain/dynamic_train.py | 83 ++++++++++++++----------- edgetrain/resource_monitor.py | 12 ++-- notebooks/EdgeTrain_example.ipynb | 89 ++++++++++++++++++++++----- requirements.txt | 8 ++- tests/__init__.py | 25 ++------ tests/test_adjust_train_parameters.py | 50 +++------------ tests/test_calculate_priorities.py | 21 +++---- tests/test_log_usage_once.py | 11 ++-- 11 files changed, 168 insertions(+), 166 deletions(-) diff --git a/edgetrain/adjust_train_parameters.py b/edgetrain/adjust_train_parameters.py index c53ea9e..224453c 100644 --- a/edgetrain/adjust_train_parameters.py +++ b/edgetrain/adjust_train_parameters.py @@ -1,20 +1,18 @@ from edgetrain import sys_resources -def adjust_training_parameters(priority_values, batch_size, pruning_ratio, lr, accuracy_score, resources=None): +def adjust_training_parameters(priority_values, batch_size, lr, accuracy_score, resources=None): """ - Adjust the training parameters (batch size, pruning ratio, learning rate) based on the highest priority score, + Adjust the training parameters (batch size, learning rate) based on the highest priority score, moving parameters in the opposite direction if resource usage or accuracy trends improve. Parameters: - priority_values (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. - batch_size (int): Current batch size. - - pruning_ratio (float): Current pruning ratio. - lr (float): Current learning rate. - accuracy_score (float): Current accuracy score from the latest epoch (0-1). Returns: - adjusted_batch_size (int): Adjusted batch size. 
- - adjusted_pruning_ratio (float): Adjusted pruning ratio. - adjusted_lr (float): Adjusted learning rate. """ @@ -34,18 +32,6 @@ def adjust_training_parameters(priority_values, batch_size, pruning_ratio, lr, a adjusted_batch_size = min(128, batch_size * 2) # Double batch size else: adjusted_batch_size = batch_size - adjusted_pruning_ratio = pruning_ratio - adjusted_lr = lr - - elif highest_priority == "pruning": - # Adjust pruning ratio based on memory usage - if resources["cpu_memory_percent"] > 75 or resources["gpu_memory_percent"] > 75: - adjusted_pruning_ratio = min(0.8, pruning_ratio + 0.1) # Increase pruning - elif resources["cpu_memory_percent"] < 50 and resources["gpu_memory_percent"] < 50: - adjusted_pruning_ratio = max(0.1, pruning_ratio - 0.1) # Decrease pruning - else: - adjusted_pruning_ratio = pruning_ratio - adjusted_batch_size = batch_size adjusted_lr = lr elif highest_priority == "learning_rate": @@ -57,6 +43,5 @@ def adjust_training_parameters(priority_values, batch_size, pruning_ratio, lr, a else: adjusted_lr = lr adjusted_batch_size = batch_size - adjusted_pruning_ratio = pruning_ratio - return adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr + return adjusted_batch_size, adjusted_lr diff --git a/edgetrain/calculate_priorities.py b/edgetrain/calculate_priorities.py index ca57a82..bd85b9a 100644 --- a/edgetrain/calculate_priorities.py +++ b/edgetrain/calculate_priorities.py @@ -3,9 +3,8 @@ def define_priorities(normalized_scores, user_priorities=None): Calculate priority scores for adjustments based on resource usage, accuracy, and loss. Parameters: - - memory_usage_score (float): Score indicating memory usage pressure (0-1). - - accuracy_stagnation_score (float): Score indicating stagnation in accuracy improvement (0-1). - - loss_stagnation_score (float): Score indicating stagnation in loss reduction (0-1). + - memory_score (float): Score indicating memory usage pressure (0-100). 
+ - accuracy_score (float): Score indicating stagnation in accuracy improvement (0-1). - user_priorities (dict): Optional user-defined priorities for resource conservation, accuracy, and loss. Returns: @@ -13,9 +12,8 @@ def define_priorities(normalized_scores, user_priorities=None): """ # Default weights if user priorities are not provided default_priorities = { - "batch_size_adjustment": 0.3, - "pruning_adjustment": 0.3, - "accuracy_improvement": 0.4, + "batch_size_adjustment": 0.05, + "accuracy_improvement": 0.95, } # Use user-defined priorities if available @@ -24,8 +22,6 @@ def define_priorities(normalized_scores, user_priorities=None): # Calculate weighted priority scores priority_value = { "batch_size": priorities["batch_size_adjustment"] * normalized_scores.get('memory_score'), - "pruning": (priorities["pruning_adjustment"] * normalized_scores.get('memory_score') + - priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')) / 2, "learning_rate": (priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')), } diff --git a/edgetrain/create_model.py b/edgetrain/create_model.py index 4e8222a..61d6cad 100644 --- a/edgetrain/create_model.py +++ b/edgetrain/create_model.py @@ -22,7 +22,7 @@ def create_model_tf(input_shape, model_path=None): else: # Define a Sequential model with input layer, Conv2D, MaxPooling2D, Flatten, and Dense layers model = models.Sequential([ - layers.InputLayer(shape=input_shape), + layers.Input(shape=input_shape), layers.Conv2D(32, (3, 3), activation='relu'), layers.MaxPooling2D((2, 2)), layers.Conv2D(64, (3, 3), activation='relu'), diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index 9df0af8..40002b5 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -1,6 +1,10 @@ import tensorflow as tf -from tensorflow.keras.optimizers import Adam -from edgetrain import log_usage_once, create_model_tf, compute_scores, define_priorities, adjust_training_parameters +from 
tensorflow import keras +import tensorflow_model_optimization as tfmot +from edgetrain import ( + log_usage_once, create_model_tf, compute_scores, + define_priorities, adjust_training_parameters +) def dynamic_train( train_dataset, @@ -13,84 +17,93 @@ def dynamic_train( ): """ Train the model with optional dynamic resource adjustment. - + Parameters: - train_dataset (dict): The training dataset. - epochs (int): Number of epochs to train the model. - batch_size (int): The base batch size to use. - lr (float): The initial learning rate. - - pruning_ratio (float): Initial pruning ratio (for dynamic adjustment). - - log_file (str): The path to the log file where resource usage is saved. - - dynamic_adjustments (bool): A flag to control if dynamic adjustments are enabled (True) or not (False). - + - pruning (float): Initial pruning ratio (for dynamic adjustment). + - log_file (str): Path to the log file where resource usage is saved. + - dynamic_adjustments (bool): Flag to enable/disable dynamic adjustments. + Returns: - - history_list (list): A list of training history for each epoch. + - final_model: The trained and stripped model. + - history_list: A list of training history for each epoch. 
""" - + # Log initial resource usage - normalized_scores = {"memory_score": 0, "accuracy_score": 0} - priority_value = {"batch_size": 0, "pruning": 0, "learning_rate": 0,} - log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch=0, resources=None) + normalized_scores = {"memory_score": 0, "accuracy_score": 0} + priority_value = {"batch_size": 0, "learning_rate": 0} + log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch=0, resources=None) - # Create the MirroredStrategy for distributed training + # Create MirroredStrategy for distributed training strategy = tf.distribute.MirroredStrategy() - # Initialize variables + # Initialize training variables history_list = [] prev_accuracy = 0.0 - # Create the model once + # Prepare training data train_images, train_labels = train_dataset['images'], train_dataset['labels'] + + # Create model within scope and apply initial pruning with strategy.scope(): - model = create_model_tf(input_shape=train_images[0].shape) + base_model = create_model_tf(input_shape=train_images[0].shape) + optimizer = keras.optimizers.Adam(learning_rate=lr) + + pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(pruning, begin_step=0) + model = tfmot.sparsity.keras.prune_low_magnitude(base_model, pruning_schedule=pruning_schedule) + model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) + # Training model one epoch at a time for epoch in range(epochs): print(f"Epoch {epoch + 1}/{epochs}") - # Compile the model with the current parameters - with strategy.scope(): - optimizer = Adam(learning_rate=lr) - model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) + # Add pruning update callback + callbacks = [tfmot.sparsity.keras.UpdatePruningStep()] - # Train for 1 epoch history = model.fit( train_images, train_labels, batch_size=batch_size, - epochs=1 + epochs=1, + callbacks=callbacks ) - + # 
Save training history history_list.append(history.history) - - # Update accuracy + + # Update "current" accuracy curr_accuracy = history.history['accuracy'][-1] + # If dynamic adjustments are enabled if dynamic_adjustments: - # Calculate performance and resource usage scores + # Compute scores & priorities normalized_scores = compute_scores(prev_accuracy, curr_accuracy) - - # Define priority values based on normalized scores priority_value = define_priorities(normalized_scores) - # Adjust training parameters - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters( + # Adjust highest priority parameter + adjusted_batch_size, adjusted_lr = adjust_training_parameters( priority_values=priority_value, batch_size=batch_size, - pruning_ratio=pruning, lr=lr, accuracy_score=curr_accuracy ) + batch_size = adjusted_batch_size - pruning = adjusted_pruning_ratio lr = adjusted_lr print(f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning}, learning_rate={lr}") - # Log resource usage for the current epoch - log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch=epoch + 1, resources=None) + # Log resource usage + log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch=epoch + 1, resources=None) # Update previous accuracy prev_accuracy = curr_accuracy - return history_list + # Strip pruning for final model deployment + final_model = tfmot.sparsity.keras.strip_pruning(model) + print("Pruning stripped. 
Model ready for deployment.") + + return final_model, history_list diff --git a/edgetrain/resource_monitor.py b/edgetrain/resource_monitor.py index 74e6d26..2b364d1 100644 --- a/edgetrain/resource_monitor.py +++ b/edgetrain/resource_monitor.py @@ -1,4 +1,4 @@ -import psutil, GPUtil, time, csv +import psutil, GPUtil, csv from datetime import datetime from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetUtilizationRates, nvmlShutdown @@ -53,7 +53,7 @@ def sys_resources(): # Function to log resource usage and batch size -def log_usage_once(log_file, batch_size, pruning, lr, normalize_scores, priority_value, num_epoch=0, resources=None): +def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority_value, num_epoch=0, resources=None): """ Log GPU and CPU resource usage once. @@ -75,8 +75,8 @@ def log_usage_once(log_file, batch_size, pruning, lr, normalize_scores, priority 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', 'GPU RAM (%)', 'GPU Usage (%)', 'Mem Score', 'Acc Score', - 'Priority Batch Size', 'Priority Pruning', 'Priority Learning Rate', - 'Batch Size', 'Pruning', 'Learning Rate', + 'Priority Batch Size', 'Priority Learning Rate', + 'Pruning', 'Batch Size', 'Learning Rate', ] writer.writerow(header) @@ -92,7 +92,6 @@ def log_usage_once(log_file, batch_size, pruning, lr, normalize_scores, priority memory_score = normalize_scores.get('memory_score') accuracy_score = normalize_scores.get('accuracy_score') batch_size_priority_value = priority_value.get('batch_size') - pruning_priority_value = priority_value.get('pruning') learning_rate_priority_value = priority_value.get('learning_rate') @@ -107,10 +106,9 @@ def log_usage_once(log_file, batch_size, pruning, lr, normalize_scores, priority memory_score, accuracy_score, batch_size_priority_value, - pruning_priority_value, learning_rate_priority_value, - batch_size, pruning, + batch_size, lr ] diff --git a/notebooks/EdgeTrain_example.ipynb b/notebooks/EdgeTrain_example.ipynb 
index 433210b..3437618 100644 --- a/notebooks/EdgeTrain_example.ipynb +++ b/notebooks/EdgeTrain_example.ipynb @@ -2,7 +2,17 @@ "cells": [ { "cell_type": "code", - "execution_count": 0, + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -20,14 +30,11 @@ "source": [ "\n", "# Import libraries\n", - "import tensorflow as tf\n", - "from tensorflow.keras import layers, models\n", "from tensorflow.keras.datasets import mnist\n", - "import time\n", "from datetime import datetime\n", "\n", "import sys\n", - "sys.path.append('/Workspace/Users/bjedelma@gmail.com/EdgeTrain/edgetrain')\n", + "sys.path.append('C:/Users/bedelman/Documents/GitHub/EdgeTrain')\n", "from edgetrain import dynamic_train, log_usage_plot, training_history_plot, get_edgetrain_folder\n", "\n", "from IPython.display import clear_output\n", @@ -36,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 3, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -56,12 +63,12 @@ "(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n", "train_images = train_images.reshape(-1, 28, 28, 1).astype('float32') / 255.0\n", "test_images = test_images.reshape(-1, 28, 28, 1).astype('float32') / 255.0\n", - "train_dataset = {'images': train_images, 'labels': train_labels}\n" + "train_dataset = {'images': train_images, 'labels': train_labels}" ] }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 4, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -75,7 +82,17 @@ "title": "" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EdgeTrain folder and subfolders are set up at: c:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\n", + "\n", + 
"c:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain/logs/20250203_115319_resource_usage_log.csv\n" + ] + } + ], "source": [ "# Create folders for saving\n", "edgetrain_folder = get_edgetrain_folder()\n", @@ -90,7 +107,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 5, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -104,11 +121,41 @@ "title": "" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:CPU:0',)\n" + ] + }, + { + "ename": "ValueError", + "evalue": "`prune_low_magnitude` can only prune an object of the following types: keras.models.Sequential, keras functional model, keras.layers.Layer, list of keras.layers.Layer. You passed an object of type: Sequential.", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[5], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Perform dynamic training with edgetrain and log resource usage\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m history_list \u001b[38;5;241m=\u001b[39m \u001b[43mdynamic_train\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrain_dataset\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 6\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mlr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1e-3\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 7\u001b[0m \u001b[43m \u001b[49m\u001b[43mpruning\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 8\u001b[0m \u001b[43m \u001b[49m\u001b[43mlog_file\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlog_file\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mdynamic_adjustments\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[0;32m 10\u001b[0m \u001b[43m)\u001b[49m\n\u001b[0;32m 11\u001b[0m clear_output(wait\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n", + "File \u001b[1;32mC:\\Users/bedelman/Documents/GitHub/EdgeTrain\\edgetrain\\dynamic_train.py:53\u001b[0m, in \u001b[0;36mdynamic_train\u001b[1;34m(train_dataset, epochs, batch_size, lr, pruning, log_file, dynamic_adjustments)\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[38;5;66;03m# Apply initial pruning\u001b[39;00m\n\u001b[0;32m 52\u001b[0m pruning_schedule \u001b[38;5;241m=\u001b[39m tfmot\u001b[38;5;241m.\u001b[39msparsity\u001b[38;5;241m.\u001b[39mkeras\u001b[38;5;241m.\u001b[39mConstantSparsity(pruning, begin_step\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m---> 53\u001b[0m pruned_model \u001b[38;5;241m=\u001b[39m \u001b[43mtfmot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msparsity\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mkeras\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mprune_low_magnitude\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpruning_schedule\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpruning_schedule\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 54\u001b[0m 
pruned_model\u001b[38;5;241m.\u001b[39mcompile(optimizer\u001b[38;5;241m=\u001b[39moptimizer, loss\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msparse_categorical_crossentropy\u001b[39m\u001b[38;5;124m'\u001b[39m, metrics\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124maccuracy\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m 56\u001b[0m \u001b[38;5;66;03m# Train the model one epoch at a time and adjust parameters, if specified\u001b[39;00m\n", + "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\keras\\metrics.py:74\u001b[0m, in \u001b[0;36mMonitorBoolGauge.__call__..inner\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 72\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m error:\n\u001b[0;32m 73\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbool_gauge\u001b[38;5;241m.\u001b[39mget_cell(MonitorBoolGauge\u001b[38;5;241m.\u001b[39m_FAILURE_LABEL)\u001b[38;5;241m.\u001b[39mset(\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m---> 74\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error\n", + "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\keras\\metrics.py:69\u001b[0m, in \u001b[0;36mMonitorBoolGauge.__call__..inner\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 66\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[0;32m 67\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21minner\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m 68\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m---> 69\u001b[0m results \u001b[38;5;241m=\u001b[39m func(\u001b[38;5;241m*\u001b[39margs, 
\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m 70\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbool_gauge\u001b[38;5;241m.\u001b[39mget_cell(MonitorBoolGauge\u001b[38;5;241m.\u001b[39m_SUCCESS_LABEL)\u001b[38;5;241m.\u001b[39mset(\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m 71\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m results\n", + "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\sparsity\\keras\\prune.py:216\u001b[0m, in \u001b[0;36mprune_low_magnitude\u001b[1;34m(to_prune, pruning_schedule, block_size, block_pooling_type, pruning_policy, sparsity_m_by_n, **kwargs)\u001b[0m\n\u001b[0;32m 214\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m pruning_wrapper\u001b[38;5;241m.\u001b[39mPruneLowMagnitude(to_prune, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mparams)\n\u001b[0;32m 215\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 216\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 217\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m`prune_low_magnitude` can only prune an object of the following \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 218\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtypes: keras.models.Sequential, keras functional model, \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 219\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mkeras.layers.Layer, list of keras.layers.Layer. 
You passed \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 220\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124man object of type: \u001b[39m\u001b[38;5;132;01m{input}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28minput\u001b[39m\u001b[38;5;241m=\u001b[39mto_prune\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m)\n\u001b[0;32m 221\u001b[0m )\n", + "\u001b[1;31mValueError\u001b[0m: `prune_low_magnitude` can only prune an object of the following types: keras.models.Sequential, keras functional model, keras.layers.Layer, list of keras.layers.Layer. You passed an object of type: Sequential." + ] + } + ], "source": [ "# Perform dynamic training with edgetrain and log resource usage\n", - "history_list = dynamic_train(train_dataset, epochs=25, batch_size=32, lr=1e-2, grad_accum=1, log_file=log_file, dynamic_adjustments=True)\n", - "\n", + "history_list = dynamic_train(\n", + " train_dataset, \n", + " epochs=10, \n", + " batch_size=32, \n", + " lr=1e-3, \n", + " pruning=0.2, \n", + " log_file=log_file, \n", + " dynamic_adjustments=True\n", + ")\n", "clear_output(wait=False)" ] }, @@ -172,8 +219,22 @@ "notebookName": "EdgeTrain_example", "widgets": {} }, + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.8" } }, "nbformat": 4, diff --git a/requirements.txt b/requirements.txt index 3cdb8ba..fa92b3c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,14 @@ -tensorflow>=2.0.0 psutil>=5.0.0 GPUtil>=1.4.0 matplotlib>=3.7.0 pandas>=1.5.0 -numpy>=1.24.0 +# numpy>=1.24.0 pynvml>=8.0.0 +# Last stable combination of TF + 
Keras + TFMOT +tensorflow==2.12.0 +keras==2.12.0 +tensorflow-model-optimization==0.7.3 +jupyter diff --git a/tests/__init__.py b/tests/__init__.py index 5b81147..efd313d 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,26 +1,13 @@ import sys import os -# Add the 'edgetrain' directory to sys.path to make the modules accessible -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'edgetrain'))) -print(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'edgetrain'))) +# Debug: Print the path being added (for troubleshooting) +edgetrain_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'edgetrain')) -# Import modules from scaleml -from edgetrain.resource_monitor import sys_resources, log_usage_once -from edgetrain.calculate_scores import compute_scores, normalize_scores -from edgetrain.calculate_priorities import define_priorities -from edgetrain.adjust_train_parameters import adjust_training_parameters -from edgetrain.edgetrain_folder import get_edgetrain_folder -from edgetrain.train_visualize import log_usage_plot, log_train_time, training_history_plot -from edgetrain.create_model import create_model_tf, create_model_torch -from edgetrain.dynamic_train import dynamic_train +# Ensure edgetrain is in the path BEFORE importing anything +if edgetrain_path not in sys.path: + sys.path.insert(0, edgetrain_path) -# Import other testing libraries +# Import pytest (only needed here) import pytest -import tensorflow as tf -import psutil -import GPUtil - - - diff --git a/tests/test_adjust_train_parameters.py b/tests/test_adjust_train_parameters.py index 71d57f1..c89b45f 100644 --- a/tests/test_adjust_train_parameters.py +++ b/tests/test_adjust_train_parameters.py @@ -5,9 +5,8 @@ @pytest.fixture def default_parameters(): return { - "priority_values": {"batch_size": 0.5, "pruning": 0.3, "learning_rate": 0.2}, + "priority_values": {"batch_size": 0.6, "learning_rate": 0.4}, "batch_size": 32, - "pruning_ratio": 0.4, 
"lr": 0.001, "accuracy_score": 0.8 } @@ -20,10 +19,9 @@ def test_adjust_batch_size_high_memory(default_parameters): params["priority_values"]["batch_size"] = 0.8 # Highest priority params["resources"] = sys_resources - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) assert adjusted_batch_size == 16, "Batch size adjustment for high memory usage failed." - assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." @@ -34,38 +32,9 @@ def test_adjust_batch_size_low_memory(default_parameters): params["priority_values"]["batch_size"] = 0.8 # Highest priority params["resources"] = sys_resources - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) assert adjusted_batch_size == 64, "Batch size adjustment for low memory usage failed." - assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." - assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." - - -def test_adjust_pruning_high_memory(default_parameters): - # Simulate high memory usage - sys_resources = {"cpu_memory_percent": 85, "gpu_memory_percent": 90} - params = default_parameters.copy() - params["priority_values"]["pruning"] = 0.8 # Highest priority - params["resources"] = sys_resources - - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) - print(adjusted_pruning_ratio) - assert adjusted_pruning_ratio == pytest.approx(0.5, rel=1e-3), "Pruning ratio adjustment for high memory usage failed." - assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." 
- assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." - - -def test_adjust_pruning_low_memory(default_parameters): - # Simulate low memory usage - sys_resources = {"cpu_memory_percent": 40, "gpu_memory_percent": 30} - params = default_parameters.copy() - params["priority_values"]["pruning"] = 0.8 # Highest priority - params["resources"] = sys_resources - - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) - print(adjusted_pruning_ratio) - assert adjusted_pruning_ratio == pytest.approx(0.3, rel=1e-3), "Pruning ratio adjustment for low memory usage failed." - assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." @@ -77,12 +46,10 @@ def test_adjust_learning_rate_low_accuracy(default_parameters): params["accuracy_score"] = 0.03 params["resources"] = sys_resources - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) - + adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) assert adjusted_lr == pytest.approx(0.0005, rel=1e-2), "Learning rate adjustment for low accuracy failed." assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." - assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." - + def test_adjust_learning_rate_high_accuracy(default_parameters): # Simulate high accuracy @@ -92,11 +59,10 @@ def test_adjust_learning_rate_high_accuracy(default_parameters): params["accuracy_score"] = 0.97 params["resources"] = sys_resources - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) + adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) assert adjusted_lr == pytest.approx(0.0012, rel=1e-2), "Learning rate adjustment for high accuracy failed." 
assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." - assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged." def test_no_adjustments_when_no_priorities(default_parameters): @@ -106,8 +72,6 @@ def test_no_adjustments_when_no_priorities(default_parameters): params["priority_values"] = {"batch_size": 0.0, "pruning": 0.0, "learning_rate": 0.0} params["resources"] = sys_resources - adjusted_batch_size, adjusted_pruning_ratio, adjusted_lr = adjust_training_parameters(**params) - + adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged with no priorities." - assert adjusted_pruning_ratio == params["pruning_ratio"], "Pruning ratio should remain unchanged with no priorities." assert adjusted_lr == params["lr"], "Learning rate should remain unchanged with no priorities." diff --git a/tests/test_calculate_priorities.py b/tests/test_calculate_priorities.py index db5d13e..4c5586b 100644 --- a/tests/test_calculate_priorities.py +++ b/tests/test_calculate_priorities.py @@ -10,10 +10,9 @@ def test_define_priorities_with_default_priorities(): priority_value = define_priorities(normalized_scores) - # Default priorities: batch_size: 0.3, pruning: 0.3, accuracy_improvement: 0.4 - assert priority_value["batch_size"] == pytest.approx(0.24, rel=1e-3), "Batch size priority calculation failed." - assert priority_value["pruning"] == pytest.approx(0.20, rel=1e-3), "Pruning priority calculation failed." - assert priority_value["learning_rate"] == pytest.approx(0.16, rel=1e-3), "Learning rate priority calculation failed." + # Default priorities: batch_size: 0.35, accuracy_improvement: 0.65 + assert priority_value["batch_size"] == pytest.approx(0.28, rel=1e-3), "Batch size priority calculation failed." 
+ assert priority_value["learning_rate"] == pytest.approx(0.26, rel=1e-3), "Learning rate priority calculation failed." def test_define_priorities_with_custom_priorities(): @@ -23,16 +22,14 @@ def test_define_priorities_with_custom_priorities(): "accuracy_score": 0.7 } user_priorities = { - "batch_size_adjustment": 0.4, - "pruning_adjustment": 0.4, + "batch_size_adjustment": 0.8, "accuracy_improvement": 0.2 } priority_value = define_priorities(normalized_scores, user_priorities) - # Custom priorities: batch_size: 0.4, pruning: 0.4, accuracy_improvement: 0.2 - assert priority_value["batch_size"] == pytest.approx(0.2, rel=1e-3), "Batch size priority with custom priorities failed." - assert priority_value["pruning"] == pytest.approx(0.17, rel=1e-3), "Pruning priority with custom priorities failed." + # Custom priorities: batch_size: 0.8, accuracy_improvement: 0.2 + assert priority_value["batch_size"] == pytest.approx(0.4, rel=1e-3), "Batch size priority with custom priorities failed." assert priority_value["learning_rate"] == pytest.approx(0.14, rel=1e-3), "Learning rate priority with custom priorities failed." @@ -46,7 +43,6 @@ def test_define_priorities_with_zero_scores(): priority_value = define_priorities(normalized_scores) assert priority_value["batch_size"] == 0.0, "Batch size priority with zero scores failed." - assert priority_value["pruning"] == 0.0, "Pruning priority with zero scores failed." assert priority_value["learning_rate"] == 0.0, "Learning rate priority with zero scores failed." @@ -59,6 +55,5 @@ def test_define_priorities_with_extreme_scores(): priority_value = define_priorities(normalized_scores) - assert priority_value["batch_size"] == 0.3, "Batch size priority with extreme scores failed." - assert priority_value["pruning"] == 0.35, "Pruning priority with extreme scores failed." - assert priority_value["learning_rate"] == 0.4, "Learning rate priority with extreme scores failed." 
+ assert priority_value["batch_size"] == 0.35, "Batch size priority with extreme scores failed." + assert priority_value["learning_rate"] == 0.65, "Learning rate priority with extreme scores failed." diff --git a/tests/test_log_usage_once.py b/tests/test_log_usage_once.py index 6f48a13..46066fc 100644 --- a/tests/test_log_usage_once.py +++ b/tests/test_log_usage_once.py @@ -18,7 +18,7 @@ def test_log_usage_once(tmpdir): # Call the function to log usage lr = 0.001 batch_size = 32 - pruning = 0.5 + pruning = 0.2 num_epoch = 5 prev_accuracy = 0.8 curr_accuracy = 0.6 @@ -27,7 +27,7 @@ def test_log_usage_once(tmpdir): normalized_scores = compute_scores(prev_accuracy, curr_accuracy, score_ranges=None, resources=mock_resources) priority_value = define_priorities(normalized_scores) - log_usage_once(log_file, batch_size, pruning, lr, normalized_scores, priority_value, num_epoch, resources=mock_resources) + log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch, resources=mock_resources) # Verify the log file is created assert os.path.exists(log_file), "Log file was not created." @@ -42,8 +42,8 @@ def test_log_usage_once(tmpdir): 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', 'GPU RAM (%)', 'GPU Usage (%)', 'Mem Score', 'Acc Score', - 'Priority Batch Size', 'Priority Pruning', 'Priority Learning Rate', - 'Batch Size', 'Pruning', 'Learning Rate', + 'Priority Batch Size', 'Priority Learning Rate', + 'Pruning', 'Batch Size', 'Learning Rate', ] assert reader.fieldnames == expected_header, "Log file header is incorrect." @@ -54,10 +54,9 @@ def test_log_usage_once(tmpdir): assert log_entry['Mem Score'] == str(normalized_scores.get('memory_score')), "Mem score mismatch." assert log_entry['Acc Score'] == str(normalized_scores.get('accuracy_score')), "Acc score mismatch." assert log_entry['Priority Batch Size'] == str(priority_value.get('batch_size')), "Priority batch size mismatch." 
- assert log_entry['Priority Pruning'] == str(priority_value.get('pruning')), "Priority pruning mismatch." assert log_entry['Priority Learning Rate'] == str(priority_value.get('learning_rate')), "Priority learning rate mismatch." + assert log_entry['Pruning'] == str(pruning), "Pruning ratio mismatch." assert log_entry['Batch Size'] == str(batch_size), "Batch size mismatch." - assert log_entry['Pruning'] == str(pruning), "Pruning mismatch." assert log_entry['Learning Rate'] == str(lr), "Learning rate mismatch." # Validate timestamp format From 15bdd35c6118b5bc9c10034c9548f9d1e3130c79 Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Mon, 10 Feb 2025 17:20:42 +0100 Subject: [PATCH 5/7] Fixed accuracy score issue, unit tests pass, example works. need to fix history plot issue and all good...? --- .gitignore | 4 +- edgetrain/__init__.py | 2 +- edgetrain/calculate_priorities.py | 18 +++--- edgetrain/calculate_scores.py | 11 ++-- edgetrain/create_model.py | 44 +++++---------- edgetrain/dynamic_train.py | 6 +- edgetrain/edgetrain_folder.py | 6 +- edgetrain/resource_monitor.py | 40 +++++++------- edgetrain/train_visualize.py | 55 +++++++++++------- notebooks/EdgeTrain_example.ipynb | 89 +++++++++++++++++++----------- tests/__init__.py | 10 +--- tests/test_calculate_priorities.py | 10 ++-- tests/test_calculate_scores.py | 10 ++-- 13 files changed, 164 insertions(+), 141 deletions(-) diff --git a/.gitignore b/.gitignore index 4635fc3..d48cdb9 100644 --- a/.gitignore +++ b/.gitignore @@ -2,8 +2,8 @@ .databricks/ .vscode/ -# virtual environment -venv/ +# virtual environments +venv*/ # Ignore edgetrain folders models/ diff --git a/edgetrain/__init__.py b/edgetrain/__init__.py index d1cce49..0a87bd6 100644 --- a/edgetrain/__init__.py +++ b/edgetrain/__init__.py @@ -4,5 +4,5 @@ from .adjust_train_parameters import adjust_training_parameters from .edgetrain_folder import get_edgetrain_folder from .train_visualize import log_usage_plot, log_train_time, training_history_plot -from 
.create_model import create_model_tf, create_model_torch +from .create_model import create_model_tf, check_sparsity from .dynamic_train import dynamic_train \ No newline at end of file diff --git a/edgetrain/calculate_priorities.py b/edgetrain/calculate_priorities.py index bd85b9a..9fb9922 100644 --- a/edgetrain/calculate_priorities.py +++ b/edgetrain/calculate_priorities.py @@ -1,19 +1,21 @@ def define_priorities(normalized_scores, user_priorities=None): """ - Calculate priority scores for adjustments based on resource usage, accuracy, and loss. + Calculate priority scores for adjustments based on resource usage and accuracy. Parameters: - - memory_score (float): Score indicating memory usage pressure (0-100). - - accuracy_score (float): Score indicating stagnation in accuracy improvement (0-1). - - user_priorities (dict): Optional user-defined priorities for resource conservation, accuracy, and loss. + - normalized_scores (dict): Dictionary containing normalized scores for memory usage and accuracy. + - memory_score (float): Score indicating memory usage pressure (0-100). + - accuracy_score (float): Score indicating stagnation in accuracy improvement (0-1). + - user_priorities (dict, optional): Optional user-defined priorities for resource conservation and accuracy improvement. Returns: - - priority_value (dict): A dictionary of priority scores for batch size, pruning, and learning rate. + - priority_value (dict): A dictionary of priority scores for batch size and learning rate. 
""" + # Default weights if user priorities are not provided default_priorities = { - "batch_size_adjustment": 0.05, - "accuracy_improvement": 0.95, + "batch_size_adjustment": 0.4, + "accuracy_improvement": 0.6, } # Use user-defined priorities if available @@ -25,4 +27,4 @@ def define_priorities(normalized_scores, user_priorities=None): "learning_rate": (priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')), } - return priority_value + return priority_value \ No newline at end of file diff --git a/edgetrain/calculate_scores.py b/edgetrain/calculate_scores.py index 4dc5005..1eda006 100644 --- a/edgetrain/calculate_scores.py +++ b/edgetrain/calculate_scores.py @@ -2,12 +2,13 @@ def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=None): """ - Compute memory, accuracy, and loss scores, and normalize them. + Compute memory and accuracy scores, and normalize them. Parameters: - previous_accuracy (float): Accuracy from the previous epoch. - current_accuracy (float): Current accuracy. - - score_ranges (float): Maximum possible accuracy improvement. + - score_ranges (dict, optional): Dictionary of maximum possible improvements for each score. + - resources (dict, optional): Dictionary containing system resource usage metrics. If None, system resources will be fetched. Returns: - normalized_scores (dict): Dictionary of normalized scores. 
@@ -25,16 +26,16 @@ def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resou } # Calculate memory score - # # If there is a gpu average gpu and cpu for memory score, otherwise, just use cpu + # If there is a GPU, average GPU and CPU for memory score, otherwise, just use CPU if resources.get('num_gpus') > 0: memory_score = (resources.get('cpu_memory_percent') + resources.get('gpu_memory_percent')) / 2 else: memory_score = resources.get('cpu_memory_percent') # Calculate accuracy score - accuracy_score = max(0, previous_accuracy - current_accuracy) + accuracy_score = 1 - max(0, current_accuracy - previous_accuracy) - # store all three scores in a dictionary + # Store all scores in a dictionary raw_scores = { "memory_score": memory_score, "accuracy_score": accuracy_score diff --git a/edgetrain/create_model.py b/edgetrain/create_model.py index 61d6cad..f3a4786 100644 --- a/edgetrain/create_model.py +++ b/edgetrain/create_model.py @@ -1,5 +1,6 @@ import tensorflow as tf from tensorflow.keras import layers, models +import numpy as np def create_model_tf(input_shape, model_path=None): """ @@ -35,39 +36,24 @@ def create_model_tf(input_shape, model_path=None): return model - -def create_model_torch(input_shape, model_path=None): +def check_sparsity(model): """ - Create a Convolutional Neural Network (CNN) model. + Calculate the sparsity of a given model. Parameters: - - input_shape (tuple): the shape of the input data (e.g., (1, 28, 28) for MNIST). - - model_path (str): the path to load the model. + - model (tf.keras.Model): The TensorFlow model to check sparsity for. Returns: - - model: A compiled pytorch model. + - float: The sparsity of the model, defined as the ratio of zero-valued parameters to the total number of parameters. 
""" - import torch.nn as nn - import torch.nn.functional as F - - class CNN(nn.Module): - def __init__(self): - super(CNN, self).__init__() - self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3) - self.pool = nn.MaxPool2d(kernel_size=2, stride=2) - self.conv2 = nn.Conv2d(32, 64, 3) - self.flatten = nn.Flatten() - self.fc1 = nn.Linear(64 * 5 * 5, 128) - self.fc2 = nn.Linear(128, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = self.flatten(x) - x = F.relu(self.fc1(x)) - x = self.fc2(x) - return x - - model = CNN() - return model \ No newline at end of file + total_params = 0 + zero_params = 0 + for layer in model.layers: + if hasattr(layer, 'weights'): + for weight in layer.weights: + weight_values = weight.numpy() + total_params += np.prod(weight_values.shape) + zero_params += np.sum(np.isclose(weight_values, 0)) + sparsity = (zero_params / total_params) if total_params > 0 else 0 + return sparsity \ No newline at end of file diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index 40002b5..cdd100c 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -19,7 +19,7 @@ def dynamic_train( Train the model with optional dynamic resource adjustment. Parameters: - - train_dataset (dict): The training dataset. + - train_dataset (dict): The training dataset containing 'images' and 'labels'. - epochs (int): Number of epochs to train the model. - batch_size (int): The base batch size to use. - lr (float): The initial learning rate. @@ -28,8 +28,8 @@ def dynamic_train( - dynamic_adjustments (bool): Flag to enable/disable dynamic adjustments. Returns: - - final_model: The trained and stripped model. - - history_list: A list of training history for each epoch. + - final_model (tf.keras.Model): The trained and stripped model. + - history_list (list): A list of training history for each epoch. 
""" # Log initial resource usage diff --git a/edgetrain/edgetrain_folder.py b/edgetrain/edgetrain_folder.py index 25408d2..c96ac4f 100644 --- a/edgetrain/edgetrain_folder.py +++ b/edgetrain/edgetrain_folder.py @@ -2,14 +2,14 @@ def get_edgetrain_folder(): """ - Creates the necessary folder structure for the EdgeTrain project. + Create the necessary folder structure for the EdgeTrain project. This function navigates from the current working directory to the root directory, - then creates a "EdgeTrain" directory with subdirectories for models, logs, and images + then creates an "EdgeTrain" directory with subdirectories for models, logs, and images if they do not already exist. Returns: - str: The path to the "EdgeTrain" directory. + - str: The path to the "EdgeTrain" directory. """ # Get the current working directory (assumed to be within the 'notebooks' folder) diff --git a/edgetrain/resource_monitor.py b/edgetrain/resource_monitor.py index 2b364d1..4e5a2a4 100644 --- a/edgetrain/resource_monitor.py +++ b/edgetrain/resource_monitor.py @@ -7,14 +7,15 @@ def sys_resources(): Monitor system resources, including CPU and GPU utilization and memory usage. Returns: - - cpu_cores: Number of logical CPU cores. - - cpu_compute_percent: CPU utilization as a percentage. - - cpu_memory_percent: RAM usage as a percentage. - - gpu_compute_percent: Average GPU compute utilization as a percentage. - - gpu_memory_usage: Total GPU memory used across all GPUs (in MB). - - gpu_memory_total: Total available GPU memory across all GPUs (in MB). - - gpu_memory_percent: Average GPU memory utilization as a percentage. - - num_gpus: Number of GPUs available. + - dict: A dictionary containing the following keys: + - cpu_cores (int): Number of logical CPU cores. + - cpu_compute_percent (float): CPU utilization as a percentage. + - cpu_memory_percent (float): RAM usage as a percentage. + - gpu_compute_percent (float): Average GPU compute utilization as a percentage. 
+ - gpu_memory_usage (float): Total GPU memory used across all GPUs (in MB). + - gpu_memory_total (float): Total available GPU memory across all GPUs (in MB). + - gpu_memory_percent (float): Average GPU memory utilization as a percentage. + - num_gpus (int): Number of GPUs available. """ # Check CPU usage (compute and RAM) @@ -52,16 +53,19 @@ def sys_resources(): } -# Function to log resource usage and batch size def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority_value, num_epoch=0, resources=None): """ Log GPU and CPU resource usage once. Parameters: - log_file (str): Path to the log file. - - lr (float): Learning rate. + - pruning (bool): Whether pruning is enabled. - batch_size (int): Current batch size. + - lr (float): Learning rate. + - normalize_scores (dict): Dictionary of normalized scores. + - priority_value (dict): Dictionary of priority values. - num_epoch (int, optional): Current epoch number. Default is 0. + - resources (dict, optional): Dictionary containing system resource usage metrics. If None, system resources will be fetched. 
""" # Create CSV header if the file doesn't exist @@ -73,20 +77,19 @@ def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority writer = csv.writer(f) header = [ 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', - 'GPU RAM (%)', 'GPU Usage (%)', - 'Mem Score', 'Acc Score', - 'Priority Batch Size', 'Priority Learning Rate', - 'Pruning', 'Batch Size', 'Learning Rate', + 'GPU RAM (%)', 'GPU Usage (%)', + 'Mem Score', 'Acc Score', + 'Priority Batch Size', 'Priority Learning Rate', + 'Pruning', 'Batch Size', 'Learning Rate', ] writer.writerow(header) - # Get resource usage if resources is None: resources = sys_resources() - cpu_compute_percent=resources.get('cpu_compute_percent') + cpu_compute_percent = resources.get('cpu_compute_percent') cpu_memory_percent = resources.get('cpu_memory_percent') - gpu_compute_percent = resources.get('gpu_compute_percent') + gpu_compute_percent = resources.get('gpu_compute_percent') gpu_memory_percent = resources.get('gpu_memory_percent') memory_score = normalize_scores.get('memory_score') @@ -94,7 +97,6 @@ def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority batch_size_priority_value = priority_value.get('batch_size') learning_rate_priority_value = priority_value.get('learning_rate') - # Prepare log entry log_entry = [ datetime.now().strftime("%Y-%m-%d %H:%M:%S"), @@ -115,4 +117,4 @@ def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority # Append log entry to the file with open(log_file, 'a', newline='') as f: writer = csv.writer(f) - writer.writerow(log_entry) + writer.writerow(log_entry) \ No newline at end of file diff --git a/edgetrain/train_visualize.py b/edgetrain/train_visualize.py index 7a42618..4aaaca4 100644 --- a/edgetrain/train_visualize.py +++ b/edgetrain/train_visualize.py @@ -6,10 +6,10 @@ def log_usage_plot(log_file): """ Load the resource usage log from the CSV file and plot CPU and GPU usage, - as well as batch size and number of workers 
over time (epochs). + as well as batch size and learning rate over time (epochs). Parameters: - - log_file: The path to the log file (CSV format) that contains the resource usage data. + - log_file (str): The path to the log file (CSV format) that contains the resource usage data. Returns: - None @@ -23,7 +23,7 @@ def log_usage_plot(log_file): return # Plot CPU and GPU usage over time on the same plot with workers on a separate y axis - fig, ax1 = plt.subplots(3, 1, figsize=(7, 6), sharex=True) + fig, ax1 = plt.subplots(5, 1, figsize=(7, 10), sharex=True) ax1[0].plot(df['Epoch #'], df['CPU Usage (%)'], label='CPU Usage (%)', color='tab:blue', linewidth=1.5) ax1[0].plot(df['Epoch #'], df['GPU Usage (%)'], label='GPU Usage (%)', color='tab:orange', linewidth=1.5) @@ -41,19 +41,36 @@ def log_usage_plot(log_file): ax1[1].legend(loc='upper left') ax1[1].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) - # Plot Batch Size, Learning Rate, and Grad Accum over time on the same plot - ax2 = ax1[2].twinx() - ax1[2].plot(df['Epoch #'], df['Batch Size'], label='Batch Size', color='tab:green', linewidth=1.5) - ax1[2].plot(df['Epoch #'], df['Grad Accum'], label='Grad Accum', color='tab:red', linewidth=1.5) - ax1[2].set_ylim(0, 50) - ax2.plot(df['Epoch #'], df['Learning Rate'], label='Learning Rate', color='tab:purple', linewidth=1.5) - ax2.set_ylabel('Values (Learning Rate)') - ax2.set_ylim(0, .0025) - ax1[2].set_xlabel('Epoch #') - ax1[2].set_ylabel('Values (Batch size, Grad accum)') - ax1[2].set_title('Batch Size, Learning Rate, and Grad Accum Over Time') + # Plot memory and accuracy scores + ax1[2].plot(df['Epoch #'], df['Mem Score'], label='Mem Score', color='tab:blue', linewidth=1.5) + ax1[2].plot(df['Epoch #'], df['Acc Score'], label='Acc Score', color='tab:orange', linewidth=1.5) + ax1[2].set_ylabel('Score') + ax1[2].set_title('Scores over time') ax1[2].legend(loc='upper left') - ax1[2].grid(True, which='both', linestyle='--', linewidth=0.5) + 
ax1[2].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) + + # Plot memory and accuracy scores + ax1[3].plot(df['Epoch #'], df['Priority Batch Size'], label='Priority Batch Size', color='tab:green', linewidth=1.5) + ax1[3].plot(df['Epoch #'], df['Priority Learning Rate'], label='Priority Learning Rate', color='tab:purple', linewidth=1.5) + ax1[3].set_ylabel('Priority') + ax1[3].set_title('Priorities over time') + ax1[3].legend(loc='upper left') + ax1[3].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) + + # Plot Batch Size, Learning Rate, and Grad Accum over time on the same plot + ax2 = ax1[4].twinx() + ax1[4].plot(df['Epoch #'], df['Batch Size'], label='Batch Size', color='tab:green', linewidth=1.5) + ax1[4].set_ylim(0, 50) + ax2.plot(df['Epoch #'], df['Learning Rate']*100, label='Learning Rate', color='tab:purple', linewidth=1.5) + ax2.plot(df['Epoch #'], df['Pruning'], label='Pruning', color='tab:red', linewidth=1.5) + ax2.set_ylabel('Value') + ax2.set_ylim(0, 1) + ax1[4].set_xlabel('Epoch #') + ax1[4].set_ylabel('Values') + ax1[4].set_title('Training Param Over Time') + ax1[4].legend(loc='upper left') + ax2.legend(loc='upper right') + ax1[4].grid(True, which='both', linestyle='--', linewidth=0.5) # Set x tick marks as integers every 5 for ax in ax1: @@ -76,10 +93,10 @@ def log_train_time(log_file): Calculate and print the total training time from the log file based on timestamps. Parameters: - - log_file: The path to the log file (CSV format) containing the timestamps. + - log_file (str): The path to the log file (CSV format) containing the timestamps. Returns: - - None + - total_training_time (timedelta): The total training time. """ # Load the log file into a DataFrame try: @@ -107,9 +124,9 @@ def training_history_plot(history_list, log_file): Plot the training loss and accuracy over epochs. Parameters: - - history_list: List of dictionaries containing 'accuracy' and 'loss' for each epoch. 
+ - history_list (list): List of dictionaries containing 'accuracy' and 'loss' for each epoch. Example: [{'accuracy': 0.8, 'loss': 0.5}, {'accuracy': 0.85, 'loss': 0.4}, ...] - - log_file: The path to the log file (CSV format) that contains the resource usage data. + - log_file (str): The path to the log file (CSV format) that contains the resource usage data. Returns: - None diff --git a/notebooks/EdgeTrain_example.ipynb b/notebooks/EdgeTrain_example.ipynb index 3437618..dfd2098 100644 --- a/notebooks/EdgeTrain_example.ipynb +++ b/notebooks/EdgeTrain_example.ipynb @@ -68,7 +68,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 19, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -89,7 +89,7 @@ "text": [ "EdgeTrain folder and subfolders are set up at: c:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\n", "\n", - "c:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain/logs/20250203_115319_resource_usage_log.csv\n" + "c:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain/logs/20250210_165940_resource_usage_log.csv\n" ] } ], @@ -107,7 +107,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 20, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -121,35 +121,12 @@ "title": "" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:CPU:0',)\n" - ] - }, - { - "ename": "ValueError", - "evalue": "`prune_low_magnitude` can only prune an object of the following types: keras.models.Sequential, keras functional model, keras.layers.Layer, list of keras.layers.Layer. 
You passed an object of type: Sequential.", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[1;32mIn[5], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Perform dynamic training with edgetrain and log resource usage\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m history_list \u001b[38;5;241m=\u001b[39m \u001b[43mdynamic_train\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrain_dataset\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[43mlr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1e-3\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 7\u001b[0m \u001b[43m \u001b[49m\u001b[43mpruning\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 8\u001b[0m \u001b[43m \u001b[49m\u001b[43mlog_file\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlog_file\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mdynamic_adjustments\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[0;32m 10\u001b[0m \u001b[43m)\u001b[49m\n\u001b[0;32m 11\u001b[0m clear_output(wait\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n", - "File 
\u001b[1;32mC:\\Users/bedelman/Documents/GitHub/EdgeTrain\\edgetrain\\dynamic_train.py:53\u001b[0m, in \u001b[0;36mdynamic_train\u001b[1;34m(train_dataset, epochs, batch_size, lr, pruning, log_file, dynamic_adjustments)\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[38;5;66;03m# Apply initial pruning\u001b[39;00m\n\u001b[0;32m 52\u001b[0m pruning_schedule \u001b[38;5;241m=\u001b[39m tfmot\u001b[38;5;241m.\u001b[39msparsity\u001b[38;5;241m.\u001b[39mkeras\u001b[38;5;241m.\u001b[39mConstantSparsity(pruning, begin_step\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m---> 53\u001b[0m pruned_model \u001b[38;5;241m=\u001b[39m \u001b[43mtfmot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msparsity\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mkeras\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mprune_low_magnitude\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpruning_schedule\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpruning_schedule\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 54\u001b[0m pruned_model\u001b[38;5;241m.\u001b[39mcompile(optimizer\u001b[38;5;241m=\u001b[39moptimizer, loss\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msparse_categorical_crossentropy\u001b[39m\u001b[38;5;124m'\u001b[39m, metrics\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124maccuracy\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m 56\u001b[0m \u001b[38;5;66;03m# Train the model one epoch at a time and adjust parameters, if specified\u001b[39;00m\n", - "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\keras\\metrics.py:74\u001b[0m, in \u001b[0;36mMonitorBoolGauge.__call__..inner\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 72\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m 
error:\n\u001b[0;32m 73\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbool_gauge\u001b[38;5;241m.\u001b[39mget_cell(MonitorBoolGauge\u001b[38;5;241m.\u001b[39m_FAILURE_LABEL)\u001b[38;5;241m.\u001b[39mset(\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m---> 74\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error\n", - "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\keras\\metrics.py:69\u001b[0m, in \u001b[0;36mMonitorBoolGauge.__call__..inner\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 66\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[0;32m 67\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21minner\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m 68\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m---> 69\u001b[0m results \u001b[38;5;241m=\u001b[39m func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m 70\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbool_gauge\u001b[38;5;241m.\u001b[39mget_cell(MonitorBoolGauge\u001b[38;5;241m.\u001b[39m_SUCCESS_LABEL)\u001b[38;5;241m.\u001b[39mset(\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m 71\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m results\n", - "File \u001b[1;32mc:\\Users\\bedelman\\Documents\\GitHub\\EdgeTrain\\venv\\lib\\site-packages\\tensorflow_model_optimization\\python\\core\\sparsity\\keras\\prune.py:216\u001b[0m, in \u001b[0;36mprune_low_magnitude\u001b[1;34m(to_prune, pruning_schedule, block_size, block_pooling_type, pruning_policy, sparsity_m_by_n, **kwargs)\u001b[0m\n\u001b[0;32m 214\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m pruning_wrapper\u001b[38;5;241m.\u001b[39mPruneLowMagnitude(to_prune, 
\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mparams)\n\u001b[0;32m 215\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 216\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 217\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m`prune_low_magnitude` can only prune an object of the following \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 218\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtypes: keras.models.Sequential, keras functional model, \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 219\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mkeras.layers.Layer, list of keras.layers.Layer. You passed \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m 220\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124man object of type: \u001b[39m\u001b[38;5;132;01m{input}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28minput\u001b[39m\u001b[38;5;241m=\u001b[39mto_prune\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m)\n\u001b[0;32m 221\u001b[0m )\n", - "\u001b[1;31mValueError\u001b[0m: `prune_low_magnitude` can only prune an object of the following types: keras.models.Sequential, keras functional model, keras.layers.Layer, list of keras.layers.Layer. You passed an object of type: Sequential." 
- ] - } - ], + "outputs": [], "source": [ "# Perform dynamic training with edgetrain and log resource usage\n", "history_list = dynamic_train(\n", " train_dataset, \n", - " epochs=10, \n", + " epochs=20, \n", " batch_size=32, \n", " lr=1e-3, \n", " pruning=0.2, \n", @@ -161,7 +138,28 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "ename": "TypeError", + "evalue": "tuple indices must be integers or slices, not str", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[25], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mhistory_list\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43maccuracy\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\n", + "\u001b[1;31mTypeError\u001b[0m: tuple indices must be integers or slices, not str" + ] + } + ], + "source": [ + "history_list" + ] + }, + { + "cell_type": "code", + "execution_count": 26, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -175,7 +173,21 @@ "title": "" } }, - "outputs": [], + "outputs": [ + { + "ename": "TypeError", + "evalue": "'Sequential' object is not subscriptable", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[26], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# View model training history\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[43mtraining_history_plot\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhistory_list\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlog_file\u001b[49m\u001b[43m)\u001b[49m\n", + "File 
\u001b[1;32mC:\\Users/bedelman/Documents/GitHub/EdgeTrain\\edgetrain\\train_visualize.py:135\u001b[0m, in \u001b[0;36mtraining_history_plot\u001b[1;34m(history_list, log_file)\u001b[0m\n\u001b[0;32m 122\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mtraining_history_plot\u001b[39m(history_list, log_file):\n\u001b[0;32m 123\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 124\u001b[0m \u001b[38;5;124;03m Plot the training loss and accuracy over epochs.\u001b[39;00m\n\u001b[0;32m 125\u001b[0m \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 132\u001b[0m \u001b[38;5;124;03m - None\u001b[39;00m\n\u001b[0;32m 133\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 135\u001b[0m accuracy_values \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray([epoch[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124maccuracy\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m history_list])\n\u001b[0;32m 136\u001b[0m loss_values \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray([epoch[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mloss\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m history_list])\n\u001b[0;32m 138\u001b[0m fig, ax1 \u001b[38;5;241m=\u001b[39m plt\u001b[38;5;241m.\u001b[39msubplots(figsize\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m6\u001b[39m, \u001b[38;5;241m4\u001b[39m))\n", + "File \u001b[1;32mC:\\Users/bedelman/Documents/GitHub/EdgeTrain\\edgetrain\\train_visualize.py:135\u001b[0m, in \u001b[0;36m\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 122\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mtraining_history_plot\u001b[39m(history_list, log_file):\n\u001b[0;32m 123\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 124\u001b[0m \u001b[38;5;124;03m Plot the training loss 
and accuracy over epochs.\u001b[39;00m\n\u001b[0;32m 125\u001b[0m \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 132\u001b[0m \u001b[38;5;124;03m - None\u001b[39;00m\n\u001b[0;32m 133\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 135\u001b[0m accuracy_values \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray([\u001b[43mepoch\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43maccuracy\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m history_list])\n\u001b[0;32m 136\u001b[0m loss_values \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray([epoch[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mloss\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m history_list])\n\u001b[0;32m 138\u001b[0m fig, ax1 \u001b[38;5;241m=\u001b[39m plt\u001b[38;5;241m.\u001b[39msubplots(figsize\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m6\u001b[39m, \u001b[38;5;241m4\u001b[39m))\n", + "\u001b[1;31mTypeError\u001b[0m: 'Sequential' object is not subscriptable" + ] + } + ], "source": [ "# View model training history\n", "training_history_plot(history_list, log_file)" @@ -183,7 +195,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 27, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -197,7 +209,18 @@ "title": "" } }, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAq0AAAPeCAYAAAAmnSNwAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnQd4FFUXhk96J/QSQu+9N+lFUOmCgohSFFEBBVGU34KgiGABC0URQUUEUUAUAaX33nuH0AmEJKSX/Z/vbGbd3Wz6brbc8/IMuzszO3Pnnjubb84991w3nU6nI0EQBEEQBEFwYNztXQBBEARBEARByAoRrYIgCIIgCILDI6JVEARBEARBcHhEtAqCIAiCIAgOj4hWQRAEQRAEweER0SoIgiAIgiA4PCJaBUEQBEEQBIdHRKsgCIIgCILg8IhoFQRBEARBEBweEa2CIOQr7dq140UQ7MmmTZvIzc2NXwVBcA5EtApCPnP+/HkaPnw4VaxYkXx9falAgQLUsmVL+uKLLyguLs6wX/ny5fmPqrYUL16cWrduTcuXLzc5Hvbr1q2bxXPt27ePv7tgwQJyRlJTU+nHH3+khx9+mIoWLUpeXl5cD507d6Zvv/2WEhISTPY3ri93d3cKCQnhfc2FCbaPHDnS4jl/++23bImZ999/n/cLDw+3uL127douI86vXLlCL774Irc1Hx8ftkGvXr1o+/bt5EgMHjzYpA1ktGA/QRCcD097F0AQVGLVqlX0xBNP8B/+Z599loVNYmIibdu2jd544w06fvw4izGN+vXr09ixY/n99evX6ZtvvqHHH3+cZs+ezSLClYGA7927N61du5Yeeughev3116lEiRJ079492rx5M7388su0e/dumjdvnsn3IHBRtzqdji5evEizZs2iDh06cN0/+uijdrseZwXC9LHHHuP3zz//PNWsWZNu3rzJD0J4iMLD1qhRo8gRwMNgp06dDJ9h//fee49eeOEFLqtGpUqVqFmzZtzGvL297VRaQRByiohWQcgn8Ae0f//+VK5cOdqwYQOVKlXKsG3EiBF07tw5FlbGlC5dmgYOHGj4DDFWuXJlmj59usuL1jFjxrBgnTFjBr366qsm2yDkz549S//++2+671WtWtWkziB869aty8cR0ZozIiIiqG/fvuTn58fiFWJP47XXXqMuXbrQ6NGjqVGjRvxgkV/Ex8ez2IQ33ZgWLVrwYtzTANGKdcZtQgM9HYIgOA8SHiAI+cS0adPowYMH7Bk0FqwaEKPm4syckiVLUo0aNVgAWxN4L+HJrFOnDgUGBnLIAgTe4cOHLcYB/vrrrzR58mQKDQ3lP/wdO3Zk0W0OvMYQOhA9TZs2pa1bt2arPGFhYfTdd9/RI488kmGdVKlShb2tWYFrQmiBtessN3z11VdUq1Yt8vf3p0KFClHjxo1p0aJFhu2XL1/ma6pWrRrXWZEiRdgzf+nSpXTHOnLkCLVt25b3gx0+/PBDmj9/PtvHfP/Vq1ezpzEgIICCgoKoa9eu7NXPCnj24VX95JNPTAQrwHl/+OEHPt+kSZNMwlGw3hw8gGDbX3/9ZVh37do1Gjp0KHvQ0fuAuvn+++8ttrnFixfTO++8ww9yqL+oqCiydkwrwjnQ+6HVLc6D+xIhIwAefnhoce2w0bp169IdNzvXJAhC7hBPqyDkE3/++SfHsebFI5WUlMSCDmLGmly4cIFWrFjBAqlChQp069YtFiz4w33ixAmODTXm448/Zi8XhG5kZCQL8qeffpq76zUgztFdi+uFNw7n6NGjBxUuXJjKlCmTaXkgslJSUix6x3LjLcQC8WFP5s6dS6+88gp7LiHE4S2EOEKdDRgwgPfZu3cv7dixgz3yEKIQnwgFgZiCHSCiNGHUvn17Fl3jx49nMQqRD5Fkzk8//USDBg1ir+jUqVMpNjaWj9mqVSs6ePAgx6lm1mbxUPLkk09a3I62guOg5wBd7RDhaON4qME5jVmyZAkLdZQ
DoI01b97cEF9crFgxtvtzzz3HghRtxpgPPviAvatoc4hltlW3PtoKYsRhA9wPqCu8//nnn7lM6OGAvSDkYUvcj3gQyM01CYKQQ3SCINicyMhIHW63nj17Zvs75cqV03Xu3Fl3584dXg4fPqzr378/H2fUqFEm+3Xt2tXiMfbu3cv7z58/P9NzxcfH61JSUkzWXbx4Uefj46ObNGmSYd3GjRv5eDVq1NAlJCQY1n/xxRe8/ujRo/w5MTFRV7x4cV39+vVN9vv22295v7Zt22ZanjFjxvB+hw4dMlmPY2n1gSU8PNxkO77z3HPP8bbbt2/rdu/erevYsSOv/+yzz0z2GzFihMVzL126lLfjWjNjwoQJvB/OZYlatWqZXCdsj3WZERsbm27dzp07+Tw//vijYR3s7+bmpjt48KBh3d27d3WFCxfmfWE7EB0drStYsKBu2LBhJse8efOmLjg4ON16c/DdevXqZbrPK6+8wuc8cuQIfx4/frzOy8tLd+/ePRO74VhDhw41rIOdSpUqlc6GaOMom1YXWpurWLGixfrJjMzav3ZcYzvDXli3aNEiw7pTp07xOnd3d92uXbsM69euXZvu2Nm9JkEQcoeEBwhCPqB1ZWoemezyzz//sLcGS7169Wjp0qX0zDPPsMfMmsBDp8UHwsN59+5dDhNAF+iBAwfS7T9kyBATT5c2yAXeVK2b+Pbt2+yVMt4Po7aDg4OzXV8ogzF///23oT6wID7YHHh4sQ0j3NGVi1hMxF/a28tVsGBBunr1KntTMwLdzsZeddgBHmJ819gOa9as4ThNDNTTgAcb3m5jEPN7//59euqppzjLgbZ4eHhw3WzcuDHTMkdHR2fZZrXtms369evHZV+2bJlJO0Y5sA3gueH333+n7t2783vjssETC++9ebuD59a4fmwF2hw8qxq4B1D/CMtBnWlo77U2n5trEgQhZ0h4gCDkA4gR1URATsAfRsQqorsRXcP4w4k/oDkF388qtRRGgWOkPWI/IVw1LIUilC1b1uQzun21rlUtNlOLOzUGKavQfZwVmhBCDLAxSA2mDb5C96yllEs9e/bkrllcM46DmEJ0n1u7znJ6jDfffJNjIBHbCyGKVFzoZsY1aaCLfcqUKRybihAAvVNYD0SPBurXeMCRhnkIBAarAWRPyKxdZgTqL6s2q23XbIaHq+rVq3M4ALrFAd4jrlgrx507d1jEIubZOFuGMXjoMQ9FyA8QlmFuezxomYe0aA9fWpvPzTUJgpAzRLQKQj4AcYC40GPHjuXoe/hDb5zCxxKIOTTO72oM4he1fTLjo48+onfffZcHkCB2EF47eF7hnYSgNQeeOksYi6y8ANEDUF8QQRrwoGr1sXDhwgxFR1Z1Bs9yXutM257ZcYyPgQeO06dP80AkeErhlcNDAka3T5w4kfdB6igIVtQ7RCmEEQQUPH+W7JAV2ncQ14pBfOZ4emb+JwBlRtwrYkgtxcsCxOXiYcT4AQUeVQzUg5cRYnblypXs7dXOp5ULMcvmsa8ayPhgTH54WTNr21m1+dxckyAIOUNEqyDkExjcAQ/Mzp07LXrJcgu6yDFIxxIQSdo+mYHR0RjYY57zFJ4jCOfclEnz9Bl7+dBtDE+usRC1BDIXQCRg8It5l7c1QPm0usltnWnbsb+5Fw6CFQN04E01Bh5fCDosyM+LnLsQdxhMBYELO0DwfPbZZ4bvYMAW7GB+bkvZGszXaSP+ESqRlZDPqM2ivSIsxdKgOAwUQ0YIHNtYVOL6IMQhzDGKHqEDxl3uePiAmIVHPzflckRc8ZoEwdGQmFZByCfGjRvHogUJ2jHK2NJMWeiizylI/I5YSYz+NwbeMYwoh2Bp2LBhpseAQDT3kkKooIs6N2AUOf6Iz5kzh8WZBhLSmwswSyD8AF5fjLz++uuvre7VRZ3t2rWL9u/fb7IeZYNQRqyoJc+kMUjzhXhdjC4394Li4SQ5OdkkLyziU43Bd5GoH9cBMZ+RHZA
myzhcAyBGEmLy0KFDJmnLUHbz/eDlhyddO4cx6NLODGR/QPvBxBda7KaxmEZsM8oLb7G5hxapxhAWgAUp3tq0aWPYjuvs06cPi1pLvQ9ZlcsRccVrEgRHQzytgpBPwOuFnJzwQuGPuvGMWEhzBJGYm+klMdsP8kAiPQ+EXoMGDVggQSzgjyemQc0qPRA8asi1CRGCFFVHjx5lAZSd+FNLoLsYsbgQPfC04prhYUXXd3aPickA8B10mSNHJwa4QEChyxmxrEjHhEEyueGtt97i+oaQQhkRjoAZxyCqb9y4weXMCpQFYg25Q3EcpPNC3DFs+csvv7CXFWXWwGcIYcSwwvt48uRJFuTImarFg8IO6MpHWAAELYQp4mDN44rxAITwCMz+hfrRUl5B7EO8ajGZEKwQ1Ri8hwcXeDvxMIFpWTGRBcqS0UMBwHnh/UUZ8X3zGbHg2cWDlqU0brA56gceZMS2mk8EgLRpGAiGuO1hw4bxcVF2DFbCNeO9s+GK1yQIDkUusw4IgpBLzpw5w6mGypcvr/P29tYFBQXpWrZsqfvqq6849VR2UlmZExERwWmiKlSowOmGChQooGvfvr1u9erV2fo+zjt27FhO1+Pn58flQaolpAAyTtukpQlCWihjkGLJUmqhWbNmcZmQOqtx48a6LVu2pDtmZiQnJ/MxO3TowOmcPD09dUWLFuU0VnPmzNHFxcWZ7J9ZKitzrl69qnv++ed1pUuX5uPi+N26dTNJa5QdFi5cqGvevLkuICCAr7N69eq6iRMnmtgSfPPNN7o2bdroihQpwvtVqlRJ98Ybb3A6NGM7DhkyhK8xMDBQ16VLF065hLYwaNAgk+Mh3VXr1q35WKGhobopU6bovvzyS64DpLQyBnbDsZB2ydfXl889ePBg3b59+7J1jbAv2mzZsmW5faF8PXr00G3dujXD75w9e5bLgmXbtm0W97l16xbbq0yZMnzckiVLsm2RGi2rNmerlFeW0pJldC9aam/ZuSZBEHKHG/6zt3AWBEEQ8g4GcGFSCGRdyGjgkCAIgrMiMa2CIAhOiHnWAoSEILQAM1SJYBUEwRWRmFZBEAQnBBkoML0r4qMxsA+ZHzBKH6nLBEEQXBERrYIgCE4IMiBgkBQyFWDgFQZKQbgaj9IXBEFwJRwqPGDLli082hZJ2PEjbJ7CR0utgvQpyAmIXHjajC8aGJ2JvI4YNYuZgzBq1XxWHUEQBGcHaazOnDnDOWFjYmIM+VIFQRBcFYcSrfjhRdLxmTNnWtw+bdo0+vLLLzn34+7duznNC/IQIl+gBgTr8ePHeapHzDwDIYyUQIIgCIIgCILz4rDZA+BpXb58OfXq1Ys/o5jwwI4dO5Zef/11w1zcyHeIfIHIP4i8h8iLt3fvXk5uDjBdopZ8Hd8XBEEQBEEQnA+niWlFknEktDbu/kICbiRxRgJuiFa8IiRAE6wA+yOpNTyzvXv3tnhszByERQOz2yDMAIm1tSTdgiAIgiAIjoBOp6Po6Gh2xplP3OHKOI1ohWAF8Kwag8/aNrxilhpjPD09qXDhwoZ9LDFlyhSeJ1sQBEEQBMFZCAsLo9DQUFIFpxGttmT8+PH02muvGT4j7ADTIaIxYECXLb3HFSpUsNnxBcdE7K4mYnc1Eburia3tHhUVRWXKlDFMAa0KTiNaMWc3QD5CZA/QwOf69esb9rl9+7bJ95KTk7mrX/u+JXx8fHgxB4LVlqIVA8hseXzBMRG7q4nYXU3E7mqSX3Z3UyyE0WkCIfDEAuG5fv16kycNxKoiyTbA6/3792n//v2GfTZs2MAxqoh9dTRUikMR/kPsriZidzURu6uJ2F2B7AHIp3ru3Dl+36BBA/r888+pffv2HJOK7vqpU6fSxx9/TD/88AOLWMz8cuTIETpx4gT5+vry9x599FH2viItVlJSEg0ZMoQHZi1atCjb5YAYxiAvhAnIE7IgCIIgCI5ElKI6xaEeBfbt28diFQtAnCneY0IBMG7
cOBo1ahTnXW3SpAmLXKS00gQr+Pnnn6l69erUsWNHTnWFebgxY4wjgusV1EPsriZidzURu6uJ2F2BmFbMo52Z4xexG5MmTeIlI+CVzYlXNS+kpKSwNzcv3zeeGEFQg+zY3cvLizw8PPKtTEL+2F1QD7G7mojdFRCtzgKENVJoIX42L2AqWowwFNQiu3ZHzmHEcasWaO+qIO+zoB5idzURu9sGEa25QBOsyAnr7++fa1GBJzHxpqlHVnbHQxHmk9cyYRhnyxCcF/Mc0oIaiN3VROxuG0S05kJwaII1r09SiMk1jscV1CA7doc3FkC4oq3Jw43zg2mmHTGLiWBbxO5qInZXYCCWM6DFsMLDKgi2RGtjeYmbFgRBEARXQURrLrFGnKF4WdUku3aXWFbXokqVKvYugmAHxO5qIna3DSJa7YiMLlQTsbu6eRUF9RC7q4nY3TaIaLUj0u3rfLRp0ybPKdWQO/j333+3WpkE5wCTngjqIXZXE7G7bRDRqljWA0zOULFiRfLx8aEyZcpQ9+7dTabGLV++PHdLYwkICKCGDRvS0qVLDdsHDx5MvXr1SnfsTZs28XcySgN26dIl3n7o0CGL+XlHjx5Njs7KlSv5h6h///6GdZgAA7mBUZeY2MIY1Bvq1xxMkvHWW2/x9MKCIAiCIGQPEa12JDAwMN/OBdHYqFEj2rBhA33yySd09OhRnk0M0+SOGDHCZF9M3nDjxg06ePAgzzzWr18/2rFjB6nOl19+ydMCa3NK//nnn+x1/eeff2jatGn0/PPPU3h4OG/D1Hpvv/02zZw5M91xHn/8cYqOjqbVq1fn+zUI9kNGEquJ2F1NxO62QUSrHYmJicm3c7388svs6dyzZw/16dOHqlatSrVq1WJP4a5du0z2DQoK4qT22AeiC+mXINDyA+Qoff/996ls2bLsDQ4JCaFXXnnFsP2nn36ixo0bG8o4YMAAQz5TY48oguAx4Ami/IcffkjnBd62bRu1bt2arw1eUpwjM3vcuXOHBb+x5xQpTeAlRnmeeuopnv9ZmzQA3tSXXnqJr8MczIaFKYYXL16c5/oSnIcDBw7YuwiCHRC7q4nY3TaIaLUCnAw+MTnHS0xCzr9jvGQ25a0x9+7dY68qPKro8rc081JGeHp68pSiiYmJlB8g1nP69On0zTff0NmzZ2nFihVUp04dkzjgDz74gA4fPszb4EFGyIIGRGPfvn05hAH7DB8+nD2expw/f54eeeQRFu9HjhyhJUuWsIgdOXJkhuXCdqSgqlGjhmFdvXr1eH7piIgI2r9/P8XFxVHlypV5X/xgGYttY2C3pk2b0tatW/NYW4IzITHsaiJ2VxOxu22QyQWsQFxSCtV8b22+n/fEpC7k7521Cc+dO8dCqXr16jk6PoTqZ599xl3dHTp0oPzgypUr7EHt1KkTi2V4KiHwNIYOHWp4j9hcdNkjhAEJ+xFuAbFbrVo1DoEAeH/s2DGaPHmy4XtTpkyhp59+2hBHC68sjtO2bVuaPXu2xZRUly9fphIlShhCA0CXLl1o4MCBfH54bOHRxUMBPKwLFizgY3311VdUtGhR+vbbb9mzrT0IwIMcFhbGca3GxxRcl0KFCtm7CIIdELuridjdNshfSwXIrkdW480332QBCM/i1KlT6eOPP6auXbtSfvDEE0+wxxKCdNiwYbR8+XJKTk42bIdHE130ELMIEYDQ1MQuOH36NItIY4xFL4AHFqIS16gtEKAQkFr3vjkokyUxi1AGPBQgRrh3794siDXB/eGHH7LXFbGuzz77rOE72AaRi/MlJCTkscYEZwEPKoJ6iN3VROxuG8TTagX8vDzY65lTHjyIocDAgDydNzvAk4iYzlOnTmVr/zfeeIO73CHm4F00TnKPuE14Hc1BvCimGrUUfqB9D8Bra+m7wcHB/B7xpRCe69ato3///ZdjceE13bx5M3t+IS6xYKR+sWLFWKzic07CF+CVRdiApe57SzGoAN5ShAFkBup
34cKFPIDt+++/5/RYKOOTTz7JHmIMvoLQhgBGyAbqSpuuVXB9jh8/LoMzFETsriZid9sgotUKQNRlp5venFRvj1x9L6cgJROEHQZVQaiZC0uIRuO4Vgg0xGZaAt3tGEAEDyEGSmkghrNChQrsRcyoDDguPKWad1RLwAxPJQZ9aUDIwZuKBXG4CGuAJxMe47t377LnF+IWIKbUvHx///23ybq9e/eafEYarxMnTmR4jZZo0KABpwyDcLXU7YOyQQh//vnnLPYxgYAW06S9Gk8qgJAFHFMQBEEQhOwh4QF2xFj02RoIVogmdJVjsBMGOWH0O2I5W7Roke3jIBYUIh3d3RCgEJzwKs6YMYPGjh2b6XeRqeCjjz5iLykGQyGTAY4HbyTSQAF028+bN49F3YULF9hzCRFbrlw59oJ6e3tznCi2IUsABmUZA+EIjydCHM6cOUO//vorHxNoHmNsQwovDLxC3ljUxR9//JHpQCwITIju7du3W9z+3Xff8XVo2QVatmzJ2QaQmQEDy2rWrGl4MIDdMQirc+fO2a53wfmpVKmSvYsg2AGxu5qI3W2ETkhHZGQkgkD51Zy4uDjdiRMn+DWvxMfH6/KT69ev60aMGKErV66cztvbW1e6dGldjx49dBs3bjTsg23Tp0/P9DinT5/W9e7dWxcSEqILCAjQ1atXTzd37lxdampqpt9LTk7Wffnll7o6dero/P39daGhobp+/frpLl68aNhn+fLlumbNmukKFCjAx27evLlu3bp1hu2LFi3SlS9fXufj46Nr0aKFbuXKlWyrgwcPGvb5448/dJUrV+Z92rVrp5s9ezbvY2yzPXv26B5++GFdYGAgn6du3bq6yZMnZ1r+cePG6fr3759u/c2bN7nerl27ZrJ+4sSJusKFC+uqV6+u2717t2H9+fPndV5eXrqwsLBMz2fNtibYn8uXL9u7CIIdELuria3tHpmJTnFl3PCfrQSxs4Iua8RYIv5Si8U0zrGJwTroCrc0MCcnaCPeBduCzAFz5szh0fp5AeEByACAUAh4fnPLmDFjOCcsMgpkhjXbmmB/du/eLTFuCiJ2VxNb2z0qE53iykhMq+ByzJo1izMIFClShLvzMZArs67/7IJUXAhdwOCvvIhWhBFgGldBEARBELKPeFrt6GlF1RuPzBesAzyZmDAAI/QRB/vMM8/Q+PHjOT+qI5Bdu4un1bVATDkybAhqIXZXE1vbPUpRT6sMxLIjSH0kWB8MfLp+/TqLPgzGevfddx1GsAKxu5pgcKGgHmJ3NRG72wYRrXYEyeUF9RC7qwkeogT1ELuridjdNohotSPSZaQmYnc10SbQENRC7K4mYnfbkOc+U8TcIeckZkmKjY3lQSbIaYncnxKHlznIOSqoh9hdTTKabU1wbcTuaiJ2dzBPKxLEI1E9EugiWfuKFStYvCLJ+iOPPMLTf2IKTktTfuaF8uXL8yAW8wUzJ4F27dql2/biiy+SIyKxjWoidlcTzOomqIfYXU3E7g7kaYUnFd4izE+P2ZW0KTU1MMXnzp07ebrPxo0bcwqiJ554wioFxpSc5tNhPvzwwybHHzZsGE2aNMnw2d/f3yrnFgRBEARBEJxItGLud8xlnxGYphIeTyxI7H7p0iWyFgg/MC8LvL3G89lDpCKnpqOTn9O4Co6D2F1N0EskqIfYXU3E7g4UHpCZYDUHCd4bNWpEtiAxMZHnph86dKhJ3kuELmCe+Nq1a3N+TsTaZgY8w8h5ZrzkB5Ii1zlp06YNLVq0KE92b968OfdSCOqQnJxs7yIIdkDsriZid9tg1eSVq1atok2bNnH3fcuWLalPnz5kSxBHe//+fQ5T0BgwYADPVhQSEkJHjhzheNvTp0/TsmXLMjzOlClTaOLEienW79u3jwICAqhhw4Z08uRJjkXEZ3d3d56GE40SXjOIEAhozcsLEawlFsZ2TTQjpALiGts1sB8WHNPPz4+Pa2lfbEtKSuJzmu/r5eXF59J
SbBjvi2OgzNgX05Aih+maNWvo6tWrnJC4cuXK9NRTT1G/fv14PyyoP8z6pF1PlSpVaOzYsbwfyoqHBCQ0/uuvv/i4uH7kQcXsU506deLpUhHTjNROKAfAcU+dOkU1a9bk0BHMWKXFdqKO8D08ZEydOpXPiWvB9y3VIbBU36gXDP7LqL5RL/iepfpGHWKdpfrW6hDT7qKNox779u3Ln8E777xDP/74I5cFYSmoH61eVq5cyQIXoTIA5cO1oT7RNnv37s31oNUhymEc84rzHz58mN8jhvzQoUN8DQULFqTQ0FBDLsCKFSvycZGfFuBB8fjx47wOdsZTP+4HAPuiDtAGtHAf5LNFmTGtMNoEzgMQ+oN60WLT69aty4Mvo6OjuY5q1KjB09qC0qVLc51jO6hTpw63BdyjsCG+i/AegJ4Q1On58+f5M9oF6hUTQqAOcM9hGkRQvHhxHol79uxZ/ly9enUKDw/nBWVDW8Jx0V7wsIoFbQ2g7aKt3r59mz9jWkWUF7YtXLgwl+PEiRO8DT022n0CcFzUGdoE6ht1ocWpYcIH2OHatWv82fg3IigoiLcb1zfKhu+izuvXr0/nzp3j9oM6qFq1Kh08eJD3hU3R5o3rGz1VeJBG28E0wvv37+dt+I3DugsXLvBn3D84PuobdsB59uzZY6hv2BbnBbDbrVu3uL7R7tBesC/aIXqzChUqxG0CVKtWjfe7c+eOob7x24g2BKcE7INr1+obZcWxzesbx0SZ0S61+sa9euPGDf6MUDK0Z7RZ2BsDWbT6RvvFvaC1WdQ3bIzv47pwLO0+0QbAaL9h9erV43aG+sY9ivajtVnUN65f6w1Em8X30GZQt6hTXCsoVaoUf19rs7AF7reIiIh0bRa/f7jv0GaxHW0CbfDu3btsX1yr1mZR32iL+DsF0B7wHdQ3fntw38PmuH7sh2Nr9Y17FdeltVn5jXCc3wisw/2Uk98Ibarx7PxGnEgrk3LorMQ777yjq1mzpu6NN97QjR49WlemTBndyJEjdbakc+fOum7dumW6z/r16+HO1J07dy7DfeLj43WRkZGGJSwsjL+D9+bExcXpTpw4wa95JTo6WpdfnD9/XleyZEld9erVdUuWLOFrwLoVK1boHnvsMd0ff/xh2LdcuXK6SZMm6W7cuKE7ffq07oUXXtC5ubnptm/fztsHDRqk69mzZ7pzbNy4kestIiLCYhkuXrzI2w8ePJhuW9u2bXWvvvqqztHp2LGjbsqUKYbPK1eu1JUoUUK3d+9e3aJFi3S+vr66O3fu8Lb79+/rqlSport8+XI6uycnJ/P3/vrrrwzPZc22JtifXbt22bsIgh0Qu6uJre0eGRmZoU5xZXKdPUB7+tTAtJlYN23aNPbm/fnnn9x1byvwVLdu3Tp6/vnnM90PT01A8zBYAk94eNI0XvKD/BwghkwO8CjARk8++SQ//eLJu2fPnuw97N69u8n+eBLEEyae8GbOnMlPzLBpfoCn0/fff589JrANvDOvvPKKYftPP/3EngqtjPCua0/JGvBw4gka3pL27dvTDz/8wF4LPNFrbNu2jVq3bs3XhidknEPzvFoCno8NGzaY1BWenBG7jfLAE422o3kRxo0bRy+99FK61CewO56UH3vsMYMHVnB94GkR1EPsriZid9uQa9GKNFKjR482dMVCAH322WfcxQH3+OzZs1nw2Ir58+dzl0DXrl0z3U/rwkDXjs1AbGpiTI6X+Kh7ufqeYclmTCy6pP755x9OC4auBksYxwSbo3Vba13ytgaxnnjw+eabb7i7B2Eg6EbSQLfNBx98wN2B2IauPeMQEYhGdN/36tWL9xk+fDi9/fbbJudAlxNSsyGEBV00eOiCiB05cmSG5cJ2CE4Ifg10PeJBAN156MJD1w+6zrAvupiMxbaGFsaBrjykiRPUQOuOFNRC7K4mYncHi2lFLMmnn37KTxOffPIJff/99zRq1CgWG4iFadWqVZ4Gq2QGYj8gWgc
NGmQypzyECM4JDxZirSBGxowZwwNnECtjM5JiiT4KyfHX8uxn/d91Im/LItQYeJnhvURsmjGI69EEFAQt4knNgVDFwwhifjp06ED5AWLK4EFFnCvEMjyVEHgaiBnVwMPSl19+ybFFiP9BvBXELq4V7RLgPeK6kMnCOI756aef5gcvAK8sjoMsFHjgsjQxBrz7iCdDjJTxoMSBAwfy+eGxhUcXDwbwsC5YsICP9dVXX3Fdf/vttxwHp03jCg8yYpjw2fiYgmuS1YBQwTURu6uJ2N3BRCu6NzGQBPlR8Qcaf6i//vpr/kNsaxAWAGFjLF4AAryxbcaMGdzNiy5feNIwUEZIDwZeQDBBvBkPDgOwLeoNohZCEKnFsvJqWwu0KdgQghTeUDyEoEtee0CBRxPhA/CiwsOpiUC0CQTrw9sPEWmMsegF+C4eapBpQgPCHseCp9bYm6oBL6olMYuyYNHAoD5NcH/44Yfc84BBa88++yyXXZvGFSIX50Pd473g2uA+EtRD7K4mYncHzR4AYbF27VqOM4RHE55NbXYqW9G5c2eL6aIgUjdv3kz5jpe/3uuZQ/LsYcN5swG6q9H9r41ONbYdsCSY3njjDe5yx40H76Jx+ADiNi3NdIZ4UQiyjEIQtFhheG0tfVebqxl2RFnxAPLvv/9yPC68prAtPL/wbmKB4MTIW4hVfM5J+AK8sggbsNR9n9H0e/CWQiRn1SWEWG6M9ETvA+4JlBFxxHjIwoharX4wChbvRbCqAUYeC+ohdlcTsbttyLVigsjAQBN4wOCRQ+oehAwgrQRyUCo1hRkEHbrpc7jEJufue4YlkzhUYxAqgVnD4AnPbKCRuUCD2EU3vXm8K7rbkSrF3DuLGE6k8YCH0RJIH4Ljail7NJAiByEMxjHQEHJoW+iyRxo1pMlCm4IoRIwuPL8YRIXUJuaDsFA+84GCWhoVDYS1IGUIrtF80dJqmYO0L0h1kpFwxYMUhPDnn3/OYh9hMlrKL+0V67RuI4Qs4JiCGmgpmQS1ELuridjdwUQr4kkhUtFlDK8YQgQgjhDHh9hB5P1EF7PgGGAqXeT5wyh3DDrCqHfYDV5BCEGtyzo7IJwAQlbr7obghFcRXfrIP5oZr732Gn300UfsJUUMMkIUcDx4Ix9//HHeB21o3rx5LOqQgxJlhIhFLjt4QSEqESeKbcgSgEFZxkA44prQ/pBb8Ndff+VjAk2AY9uOHTt44BUG62HA1x9//JHpQCwITIhu5KO1xHfffcfXoWUXQK5iZBvYtWsXx3ojdAG5/DQwCAu9BoIgCIIgZIPc5soKDAzUnT17lt8j52T58uVNtiO35Pjx43XOSGb5z6yZOzMhIUGXn1y/fp1z51aoUEHn5eXFNmzatKnuk08+0cXExJjkaZ0+fXqmx0L+1t69e+tCQkJ0AQEBunr16unmzp2rS01NzfR7aCtffvmlrk6dOjp/f39daGiorl+/fpzDVWP58uW6Zs2a6QoUKMDHbt68uW7dunWG7ciHivbm4+Oja9GiBedKNc//iryzlStX5n3atWunmz17Nu9jbLc9e/boHn74Ya4HnKdu3bq6yZMnZ1r+cePG6fr3759u/c2bN7nerl27ZrJ+4sSJusKFC3N+3N27dxvsfvXqVbYBcgJnhORpdS1w/wnqIXZXE1vbPVLRPK1u+I9yAbpXMdgEeVIRd7h06VLuxnUF0F2N+ErEXprnbMXAJAzUQTe4pUE5OQExmBl1RQvWBd7/OXPmGGYcyS0ID0AGAIRCwPObW7u/++67HGaAjAIZYc22JtgfzPxk09R7gkMidlcTW9s9KhOd4srkOjwA3cHo+kSXKdJMIbWPkDPyK++pquEQiGNFCAEGCWIgF0Ja8gpifBG6oE0RmVu7I8eweViD4Nrkpc0IzovYXU3E7g6WPQBz45oPdhEERwExqkg3hRH6iINFrO348eOtcmxMWpBXsor9FfKPn3dfJjdyowHNLGeNEARBEJxYtCK
iILMZlATHm8ZVNTDwCYsjInZ3HI5cvU9vLz/G76uUCKQm5Qvb7FyYPU1QD7G7mojdHSg8ADF9mDM9q+5teLuQVQDpiYT0mKeMEtRA7O44zNt20fB+6upTFvM/WwtkyxDUQ+yuJmJ3B/K0It0QUgYh6TvyfyKNEmbCwmARDC5B/kvMvY5cnkghBOEqpAc5OwX1ELs7Btfvx9GqIzf4vbeHO+27HEHrTt6mh2uWsMn5MKGFoB5idzURuzuQaO3YsSPHs0KYIucncm5ihiRMc4k8lhichRyeyL9ZqFAhckW0qUPzgsw3rybZtbs12piQMT/svETJqTpqXrEwNShbiGZvOk/T1pyiDtWLk4e79cOfJCxETcTuaiJ2d8BpXFu1asWLSiBFFUTH9evXOZE8PuclvhdpjQT1yMzu6KJG6M2dO3e4rUlaNOsTk5BMi3brR/c+36oiNalQmD+fvf2Afj9wlZ5sXMbq58TsbYJ6iN3VROzugKJVRSAikDcTOdggXPMa2+jj40POTkqqjuCYksF51rU7ntSR+UA88tZn6b4wio5PpgpFA9iz6u7uRiPaV6KP/j5FM/49Qz3qhZCvV/ZnicsOyO3brFkzqx5TcHzE7moidrcNIlpzATxfEBOYFjUv8YmYm9jZn8Z+3HGJfth5mUoX9KMhLStQ26rFWAAIebM7ptX19PSUBwEbPWR9v/0Svx/aqoKhvT7bojzN336JrkfG0087L9OwNhXtXFJBEATBGBGtuQRiwsvLi5fcEhoa6tQzHWHk9Uf/6EdIXot+QHsWH6XapQvQuC7VqXWVoiK4XNTuzs6/J27RlXuxVNDfi/o0LG1YD8/qmIer0rjfjtDXG8/Rk03KULBf7u9vS3YX1EPsriZid9sg/Y52BJ40Z2XJ3iv0wV8n+P0rHSrTmE5VKcDbg45di6Jnv99DT3+3mw6H3bd3MR0SZ7a7KzBv2wV+fbpZWfL3NrVFn4ahVKV4IEXGJdE3m62bskbsriZidzURu9sGEa125NIlfRels/HXkev01rKj/P6FNhXZO/Vqpyq0ZVx7GtqyAqcP2nH+LvWcuZ1e/nk/nb8jqT9cwe6uwKGw+7T3UgR5ebhxOIA5yBow7hF96Mb32y/SrSjrDZQUu6uJ2F1NxO4OLFqRRPedd96hp556im7fvs3rVq9ezXlaBddi46nbNHrxIUIO9qealqXxj1Y3hAEUCfSh97rXpPVj27LHCqv/PnqTOk/fQuOXHaGbkZIpQXCMyQS61wuhEgUsh2h0qlGcGpcrRPFJqTRj3dl8LqEgCIJgM9G6efNmqlOnDu3evZuWLVtmSKiLwSYTJkzI6+FdGtSbM7Hrwl16ceF+zm2JP/of9qptMW61TGF/+uzJerTm1TYsADDw5Zc9YdT2k400ZfVJioxNIpVxNru70mQCfx/VTybwXKsKGe6HNv3mo3pv66/7wqzWUyB2VxOxu5qI3R1UtL711lv04Ycf0r///muST7JDhw60a9euvB7epblyRZ8n0lnmaH/+h32UkJxKHasXp8+frJdlAvZqJYPou0FN6LcXW1CT8oX4u99svkCtp22gWZvOUVyimjNDOZPdXYkfdlziB6iHKhWhWiHBme7bpHxhwwPXp2tPW+X8Ync1EburidjdQUXr0aNHqXfv3unWFy9enMLDw/N6eJcmMjKSnIEzt6J5cNWDhGRqUbEIzXy6IXl5ZL/pNC5fmH4d3oLmDWpM1UoEUVR8Mk1bc5rafbqRE7onpTjGzE8oB0SKrXEWu7sSaLuL9lzJ0stqzBtdEPpCtPrYTTp4JSLPZRC7q4nYXU3E7g4qWgsWLMiJ9s05ePAglS79XzoZIT3OkPboyt1YGvjdbrofm0T1yhSkuYMa5yrpOrpcO9YoQX+/2pq9tMjreisqgf63/Ch1mb6F54DHTFD5RWqqji7ceUDLDlylCX8c40Fjtd5bSx0+22Tz2FtnsLurTiZQsWgAta9WPFvfQU8BYrPB1DW
n8tw+xe5qInZXE7G7bXDT5fGX+PXXX+d41qVLl1LVqlV5Fohbt27Rs88+y4szxrVGRUVRcHAwPykVKFDAZufBxARIIu+oQLz1nbODrkbEsYd0yfDmVNDfOlOKJiSnsJf16w3n6G5MIq+rUzqY3nykOrWqUpSsze3oeDocFslpuA5fvc+v8PhaonrJIFr6YgsK8rVejk5nsrurAe85vPph9+I4Dntg83LZ/u61+3HU/tNNlJicSguGNKF22RS8FsshdlcSsbua2NruUfmkU1xOtGKO9BEjRtCCBQvYSMhNhtcBAwbwOme8WfOrMUDsO+o0b3cfJFC/b3fRudsPqFwRf1o6vAUVz2C0dV67bb/beoHmbrlAMWkxrq0qF2XxWic087jDzOaVP3I10iBOsWCWI3N8PN2pdulgqhdakOqVCWbv74sLD1D4gwQuw/eDm5C3p7tSdndF1hy7wXbFZAI73+pIft45+02avOoEzd16kWqUKkCrRrXK9YxvYnc1Eburia3tHqWoaM1z9lsMvpo7dy699957HN+K7AENGjSgKlWqWKeEQr4TFZ9Eg+bvYcFaKtiXFj7XzCaCFQT6eNLoTlXZ+wWv68+7L9O2c+G07ett1LVOKRrbuSpVLBaYaRzq6ZvRRgI1ks7ejibz0FTEJlYtHsTiFGEOEKro/jWPzZ0/uAn1+3Ynl+GtZUfosyfqycxeTs53W/VprgY2K5djwQpebleZFu8Jo5M3omjl4evUq4GEPQmCIDilaJ00aRKHCJQpU4YXjbi4OPrkk09YzFqT999/nyZOnGiyrlq1anTq1Cl+Hx8fT2PHjqXFixdTQkICdenShWbNmkUlSpQgR6NUqVLkaGBE/3ML9vLMVkUCvOmn55pxCitbUzTQh97vUYsHyUz/9wwtP3SNVh29QWuO36R+TcrQqx2rUPEgH55+Ewniuav/6n06di2SsxKYExLsqxenZQpS/TIF2aMKgZwV8O5ioBkyJSw7cI29r2M7V3N5u7sqGEC177I2mUD2wwKMKRTgTS+2q0SfrD1Nn/5zmh6tU5J8PHMufsXuaiJ2VxOxu4OGB6D7HwOxkC3AmLt37/I6hApYW7T+9ttvtG7dOsM6hCQULaqPg3zppZdo1apVHJoA1/nIkSPJ3d2dtm/f7nBud2RX0MrtCCBub9iP+2jzmTsU5OtJvwxrzmLPHpy6GUWfrDlN60/pJ6vw9XInPy8PirCQ4xVlhTDVd/PjNTjPnmFMU/vm7/pZv6Y8XocnUnBVu7syIxcdoL+O3OABVcgdnFtiE5Op3Seb6HZ0Ak3oXpOGtMxeBgJjxO5qInZXE1vbPUrCA3IHNK+l7lNMLlC4cGGyBRCpJUuWTLcexps3bx4tWrSI88SC+fPnU40aNThnbPPmzcmRwExijvJjlpySSqOXHGTBCnGIbnJ7CVZQvWQBmje4Ce29dI8+Xn2K9l+O4BmKMEVsjZACVD80mOqX1QvV8kUCch1nmBH9mpSla/fj6cv1Z+mdFceoRAEf6lC9hMvZ3ZW5GhHL6apykuYqI/y9PXmq4reXH6OvNpyjvo1CczxQT+yuJmJ3NRG7O5hoLVSoEItVLMgaYCxc4V1FbOuLL75ItuDs2bMUEhLCKSVatGhBU6ZMobJly9L+/fspKSmJOnXqZNi3evXqvG3nzp0ZilaEEWAxfoJRCaR/Gr/sKE+5ClH4zTONOLeqI4Ak75ic4PBVfc67GqWCctU1mxvGdKrCsyj9tv8qjfj5IGdPqBtaMF/OLVhvMoGWlYtQzZC8eyKebFyG5m29SBfCY3hg1msPV7VKOQVBEAQbi9YZM2awl3Xo0KEcYwo3tfHgrPLly7OgtDYYjYeuf8SxIiwB527dujUdO3aMbt68yedG7lhjEM+KbRkB0WseJwv27dtHAQEB1LBhQzp58iTH6QYFBVGFChXoyJEjvE+5cuUoNTWVwsLC+HP9+vXp3LlzLNrxXQh65KwFoaGhHE5x+fJl/ly
pUiU+LkQyBHitWrVYeANNlF+4cIE/165dm65evUr379/na8R59uzZw9vgdQ4MDOTzAniWkXbs3r177JVu1KgR7wt7FStWjB84zpw5w/uifB+sOknLjkUQHju+fKo++Uddpt27L1CRIkU4xANlBBhch7Li2JotkOIMDwo4Jsp8/Phxw7XFxsYacvg2btyYbYSYY7QVPEhg4B5AW0lOTubrA6hvxCjj+7guHCvhun5WonueZU1mG6lXrx4/0aK+/f39+SEFZdLqG9d/6dIlw7R6+B488qhb1ClsrMUf4fs4FoAtrl+/Tr1D4+jcVW86dCuRnpm7gz5oW5DqVgzh7hg8PGn1ffv2bQ6JgX1xrXv37uV2gfpGj8Pp06cN9Y3vYmQpHvSaNm3KNsf1Yz+0Va2+K1euzNeltV3se+jQIc7YgTaO60OdgooVK3LdoswANoctsA7nQx0bt1k8WGr1jYGTaA8xMTFc3zgvzgMQp47wGq3N1q1bly5evEjR0dHk5+fH167VN/Iyo21iu1bfuC/QZn18fPi7qBetzeL+0Oq7Zs2afJ1os15eXtwGUEcAbRBtRqtv2Bhdb1hQtiZNmhjqG54NLAeOnqCFO/UTAnSr4m84lnGbRX2jHCdOnDC0WdSBVt84LuoMD7Sob9RFz4ruND2caO6W89SpnBfFRejDV7LzGwEboxw5+Y1AnaH92vM3Ar+12O/OnTuG+sZ9gzbkSL8R6N0D+F5+/kZERESka7O4j7XfCJQbdZKT3wgcE/UtvxG2+43QxsGgzcLesI81fiO0Novvof6uXbuW7d+InOiIE2llUo08x7Ru3ryZHnroIW5E9gCNHQb//PPP+QYZMmSIiddUu5Hbt29PU6dOzbanFY3P1rEi+BFAY7Qnn/97hrvAwadP1ONuTyF9Wq4n5+ykEzeiODn97y89xINznNnurs68bRfpg79OUKViAfTvmLZWCx/Bz2WvWTs4UwUGdk3qWTvb3xW7q4nYXU1sbfcoRWNa85yEEk8LeFLG06mlxdbg6QYNA08leLLBEyaErDF46rcUA6uBJzwY3XjJD/A0bU+QH1UTrBN71BLBmgHIOjB/SBPOJICu4ed/3EfxSSlOa3dXB/HZ87frPTnPtapo1XhneL7efESfTQKTY1y+G5Pt74rd1UTsriZidwcdiIUuhczyWFo7e4A5cJ+j++CZZ57h7g54fNevX099+vTh7ehygXi2RahCXrGXdxos3nOFPlyl72J6vXNVGvRQebuVxRkoUcCXZ0TqM3sHDwobvfgQp8byyIUgsqfdVeCfE7d4FrdC/l70eEPr51R9qFJRalO1GG05c4c+/ecMffVUg2x9T+xueSKQsIhYni4a6ezC7sXS5XuxdON+PPVsEMI5cp0dsbuaiN0dNDxAiyPSQBwIYi/QXT958mR6/PHHyZogJ2z37t05JACxOZgmFrE1iO9AbBBSXv39998c9wqP6ahRo/h7O3bsyPY5XN3t/ufh6/TK4oMEyw9vU5HeerS6JNDPJrsv3KVn5u2hxJRUGvxQeU5/JHXnWDw+azsduHKfXulQmV6zco5djePXI6nrl9v4/V+jWtk104ajD/K8FR1vIkqvpAlTvA9/oJ/C2RK4rf4cKXUrCCrqFJt5WhHkbg6CzBFwj8kFrC1aEdj81FNPcUA7RGqrVq04nRXeg+nTp3PgNTytxpMLOCL2mN5vw6lbNGbJIRasA5qVFcGaQ5pVLML5Pkf9cpAW7LhEoYX86PnWFXN0DJnW0XbACw7BiiwYA3M5mUB2qBUSTD3rh9Afh67T1DWneBKOrHBVuyOHbdi9OA6VMBam/D4ijvM/Zwam1y1b2J8nMSmbtqw/eYvWnbxN7/1xjH578SGrp7TLT1zV7kLmiN0dVLRmBEacaqMArQlmusoMjPicOXMmL4IpO8/fpZcWHqDkVB3/wf2gZ20RrLmge70QuhkZT5P/PskhFiWDfalb3RB7F0sgou+36WNZ0b6LB9l
m6mGNsQ9Xo7+P3qCtZ8Np+7lwalnZ9XMyRsYm0cLdl+nsreg0YRpH4Q9MB76a4+nuRqUL+aUTptrnYL/03agdqhennec38QPI7weu0hON/5ttURAEdcmzaDXPaYpoAwzMwsxVSCUhZEx+Ti2LqU+f/2EvT3naqUZxzhSQm3hMQc/zrSvQtftx7G19bclhKhbow17Y7OCIUwq7AvDwrT6mT5/0XOu8TSaQHcoW8aenm5XjNoAJMP4Y0TJTj6Cz2x2e1CHz9/JgRHMgPMsVSS9KsZQK9iVPD/ccx5C/0rEKTVl9ij3ZnWuVtChunQFnt7uQO8TuDipaMXrf3FsH4YqUUVl5RVUnv+JQTt+MpkHf76GYxBRqUbEIfT2gIXnl8I+IYAra/LvdatKNyDhae/wWT3+LVFhVSgRl+V2V4o/yE4jHVB1Rq8pFeUa1/GBkh8q0dF8YHb0WSX8fu5Gpx92Z7b7v0j1u45hGOSTYl55pUZ5FKntLC/lTsL/1BSWmyv11XxidvxNDM9adoQnda5Ez4sx2F3KP2N025Fm5bNy4kTZs2GBYNm3axIOiMKLfEUfsOxJaImRbcik8hgbO202RcUlUv0xBmjuoMfl65c+MUq4OPNVf9G9ADcsWpKj4ZBo8fy/djop3CLurRnR8Ei3ZG5ZvXlaNooE+NKyNPqb507WnKSkl1eXs/sehazRg7m4WrHVDg2nFyJb0UrtK9FidUjxIyhaCFXh7utP7PfRC9cedl+nUTeecqdBZ7S7kDbG7g4rWtm3bmiyYnQqzUWCWEcG+wAv49He76U50AlUvGcQpm5BzVLAeeAD4blATqlA0gMMFhizYy5MRCPkLBCvqvXLxQGpbRT8oM7/AQLwiAd506W6sQTi7AugxQx7nVxcf4mwZXWqVoCUvtLB5rLAxrasUo0drl+TpeN/74ziXSRAEdbFKHzFyoY4cOZI6duzIC95rU6QJGYPp7WwFfuRf+eUgC6nyRfzpx+eaUkH/3M/iJGRM4QBv+mFIUyoa6E3Hr0fRyz8fyNTjZku7qzuZgH4azudaVcj3keZ4EET8Jfhi/VkeTe/sdk9ITqGxSw/zjHnghTYVafbTjcjPO/97ad7pVpN8vdxpz8V7tPKwfgpSZ8KZ7C5YD7G7g4rW33//nednxtzISH+FBfP2Yk5hbBMyRpvr2BZgRqC9lyIowNuDfhzaLF+9IyqCQTnzBjUhPy8PTjr/v2VHM/QK2dLuKoKYYjyc4eGhdwPrTyaQHZ5qWpbjO9GroWUwcFa7349N5FzEyw5c4xCYyb1r0/8eq2G3tFOYiW5ke/0kA5NXnXS6ngxnsbtgXcTuDipax40bR+PHj6edO3fyhAJYkMj/f//7H28TMga5Zm3B+TsP6JO1p/n9/7rWYEEl2J56ZQrS1wMaEP62L91/lb1u+Wl3Vflu2wV+Hdi8nN3itRF/Obazfp7xOZsv0L2YRKe0O2Lge8/awV5NeJC/H9yEMyTYG8QNo8fodnQCfZXBfeWoOIPdBesjdndQ0Yr0Vs8++2y69QMHDuRtQsZ4eHjYJCzgjaWHObUVRlEPaFrW6ucQMqZjjRL0Qa/a/H7GurP0q4UYR1vYXeXJBA6mTSbwTHP7iqvudUOoVkgB9gTO3HjO6ey+99I96j1rO10Mj2HvJrJhtK2av/HBGeHj6WHIHjBv20U6dzuanAVHt7tgG8TuDipa27VrR1u3bk23ftu2bTwoS8gYzBxmbeZtu8AJueElmdq3rkweYAfgmdK6M8cvP0qbTt+2ud1VBe0d9GoQQsWCfOxaFnSfj3ukOr//aedluhoR6zR2X3HwGj2dliGgXmgwLR/xEFUrmXX6tvykffXinGMak6O8v/KEww/KQvnw8PLtCXIqkS1YB0e+35UWrT169KA333yTB18tXLiQF7x/6623qHfv3rRy5UrDIphi7RnD8MP46T/6gRPvdqvB3hLBPqCr+PEGpdnzPeLnA3TsWqRhmy1milN1MoE
1x27y++da5WwqXVvRpkpReqhSER5trw1icmS7Q1ghB+roJfoMAY/UKkmL8zlDQE54r1stDsXYdi6cVqfZ3lFBmi6Eaf1z4hZ1/XIbxzqnIpGwkCEYxIiBla7Anj177F0El8RNl8fHVXf37OleePxSUlLIGcAsX8HBwRQZGWnTBMHWnJsYN3qfOTvpcNh97tJDeivxstoXzLk+ZMEe2n7uLnsBl730EM8YJHNSW4dJf56g77dfpNZVitJPzzlOfeIe7DlzO+H2W/1qa8NEB45md2QIeOv3o7T84DX+PLxNRXrzkep2G3CVXfAwgFRcmORg3di25O/teGn8tp69w3mb8dAaEuhB1x/o//ZhcpdPn6wnDgUjUEfojVq46zJtOnOHigT40BONQ6l/kzJUrkgAORuY4hhjGv4+eJnWv/kwh7Y4s05xOU9rampqthZnEaz5SbFi1osX+3brBf5jGeTrSR/3qSOC1QGAR2j2wEacIxejygfP38Mjs61pd1WJ4skErhjypDragLyudUoR3AHT1ugHRAJHsntETCI9890eFqzIEDDl8To03o4ZAnLCy+0qUWghP7oeGW8xdtjeYCAs0t5BjD3esDT9+FQVjnNHZpGdF+7SI9O30O/7rzp8eIOtCX+QwPZrM20jPffDPtp4+g7fM1g/e9N5avvJJhowdxenOcMDliMTGZtEP+26TD2/3kYPT99C3265QFejU2jDSckg4HCeVlckv55g7t+/z9Pg5pUzt6Kp25fbuHvvk7516YnGZaxSPsE63IyM5wEuNyLjqWn5wvRV32pUomhhexfLqZm75QJN/vskVSkeSP+MaeNwD2kX7jzgP14QLkteaE7NKhax2v2eVzDQauiCvfwa5ONJswY25CT+zsTa4zdp+E/7eQDe2jFteHIPRwAPpci+gLptVK4QLRrWjOIeRLPdse61Xw/xwEGAUAykEysSaN9Y7PwEcgOpGOFVXX3sBiWl6OVHsJ8XPdk4lPo1KUPnbsfQ4r1XaPMZvYgFhfy96PGGofRU0zJUubhjxFrj3t5+Lpy9qmiP6FkDnu5u1KF6cXq0eiHq1qiCzaZMj1LU02oV0YpYLUznirxk8KoagxRYzoYzhQcgif3js3bw3Oe4UeYNauxwf8AFotM3o6nvnB0UHZ9MDUp4Uf9W1XkKUF6CfHhGJZleN/uhMPDOwNM2tU8d6tfEMTNkvL38KP28+wo1KFuQQ0MQ42bv8ACksnrhp310PzaJu6jnD2lCVUs4hgjICfizhe53CJt21YrR/MH2D4fCbzF6UxAOhLr9Y2RLvr+Nf+fRdr/ZcoGm/3uGB5Rh+8eP16FONUuQq0+zDK/+z7uu0Olb/w1Kw9TiSFXXrW6pdL9/GMj4676rtHRfGD/wazQuV4j6Ny3LvRn2mOwCDx+/7Q/jPMbG5UKPWt9GodSrQel0drcFUYqK1jwHA3300Uf0zjvvULVq1ahEiRImPxz2/hFRgW82n2fBWsDXk7v4pM4dE4zE/uaZRjTo+z108FYSHfz9aLp94PWCgMXMWvjRK5L2+t+S9jnIhyeNUNXWGIADwQqh37O+fSYTyA6vdqzCf9jgWcNgHHv7WJcfvEpv/naUe2SQIWDuoMYOO+AqK9D2J3SvSV1mbKFNp+/QupO36WE7Cz/EWEOw+ntjaufGfK+a4+nhTiPaV+ZxB/C6nrn1gJ7/cR/1a1yG3u1e0+Wm2T5xPYoW7r7M2SliE/Vd/AiT6Fk/hMVq7dLBGX43tJA/vfZwVb6PNp+5Tb/sCaMNp27TvssRvEz88zj1ql+a+jctQ7VCMj6ONUAau7+P3KCl+8PYU6wBD3Gv+iHUt1EZql26gLK/yU7laYVQnTp1Kg0ePJhchfx6gomIiKBChQrl+vsnb0RRj6+3cRfL9H71qHeDUKuWT7A+uy7cpUU7zlN0EmK3Ejl+C4vWTZZdMK2lXtj6UDETcevN60oG+1Kd0sEu573Fz1Wvmdvp8NVIGt2pCo3
upE/o76h8svYUzdx4nioVC6DFg+tSsSKF7ZQh4KxhsotHa5ekz5+sbxcvlbWZuuYUxz+WKexH/45pa7f2/tPOS/TuH8d58N23zzQ2EdAZ/c7HJ6XQZ/+cpu+2XeRucMTpwi5NKzh36BCuC13/C3dd4TzKGrgHIFTRzQ+xlxtuRcXTb/uvcvhA2L04w3o8hMH72r1eiNWEPzI97L54j4Xq6qM3KS5JL7oR9t2majF6olEZ6lSzeIYDrfL69z0rohT1tOZZtJYqVYq2bNlCVaro5952BfKrMVy4cIEqVqyY664o/PHGXPedapSguc82kqc8J8Hc7rgFo+KSKTwmgcKjE0zErOl7bE80/HhmhY+nO8dSIg1TqypFqVqJIKdvI/su3aO+c3byILcdb3Ww6M1ytAFjCGVAd/zrbUrRyMca2jdDQNuK9GYXx88QkJMUSR0/28zdtPZ6iNl2NpwGzd/DMY7IvvBSu0o5+p3Hg+zYXw/zVMS4PV9oXZFe61zVZqPObcWVu7H0857LtHTfVcOMcIjv7FKrJIvV5hULW+33B4Jy+/lwWrwnjP45cdPw0A8vd496ISxgIWRzcz6k0vv9wFVejIVxxWIBLFQxVTScArb8+54dokS05o5p06bR9evXacaMGeQqOENM6xfrztL0dWeooL8XD0Rx1m4+FclrrBP+UEO83kkTsnctCNuLd2M4Y4ExxYN8WLwiRVSrysXsnow/N7z4035ac/wmp8P5uE9dcga+23qBPlx1koJ93GhAi4o8aAhepwpFA6lwgLdNMwRgsNKeS/c4Q8CHvWrTUy44Q95fR67TyEUH+SFt3WttOa1cfg64g/MgKj6ZMwV89kS9dEIpO/c7Yj4/+OsEx3ACPGB+3q+ezbu98wqE+sZTtzkEwHjgVKlgX25ruE+LF7Dt36a7DxJYYELAXgiPMYkxRRkQY5qVZzcuMYXWHL/BgnvH+f+mX4XXtnu9Utz937BswRyJYIlpdVDRioFXXbt2pTNnzlDNmjXJy8u0cSxbtoycjfxqDBiY0bRp0xx/7/j1SOr59XYO5P+if32HjusTrGf3nIDb+uztB7TlzB3aejacdl+8S/FJpoMka5QqwF5YjBxvXL6Qw4cSXL4bQ+0+3cR/GPGg5iwDiNBd+vD0zSZeGw08dELEViwayJ6cikUDqEKxACpfJCBP9oCYQoaAS3djOVYaqdfwwOKKoK0//d1uFhvolp/7bON8S3OErCAQShA0i4Y1t2iznNzv/xy/SeOXHaW7MYnk5eHGnuMX21bihw5HAg/Ev+4Lo0W7r7CHWAMPxJhOGYOCEb+b3+0AAw0X7w2jVUdvGEbz42EGg7bgfW1SvpBBeGL/A1ciWKj+deQGx61qtKxchL2q8BLnNozG1r/zUSJacwdmv/ruu++offv26QZigfnz55Oz4ciNATci4lhP3YzmlCmzBzZ0+i5fIX+E04HLEbTlbDgnPkdYiTH4YUcsXZsqxVjcwEvhaO3q/ZXHacGOSxxP9uNQ24p+awMP+MpD13nk8YXwB3TxTgwPJssIVH1IsN9/QhbCtlggv2Jkembd+7sv3KXhC/c7fYaAnCZ0f/SLrfwgj+ttX624Tc+H8Kwh8/fyzFyo4xUjWlqt5wJt5X/LjvLgPYDUWfDglrdzWi9NFCIfKVI8aV3yePB6snEZGtC0rN3LaPxAgYGHGLxlnK0APRz9m5SlpNRUjo29cOc/zyziovs2LEN9GpXmQWCOTpQD6xSHFq1BQUG0ePFi9ra6CvnVGPbv30+NGjXK1WwwyFv3z5i2TtnFqzq5sbu1wR9G5BjcmiZib0WZhhKgXbWuXJRaVy1KLSsXtXv4SWRcErWYsp5HIEOwQrg6u93RJQkRy0L2zoM0Qat/j+7mjMADBjyxELTGYhZ/kDG6+s3fj7CgQDoheB1V+Y2YvOoEzd16ketizejWNo0Jfe+PYzxNK2Iof3vxIaoZUsCq9zv+LENUTfzzBHsAcZ63u9Z
gYZhfD5MoA9rk4av36XBYJAv0c7cfGLYjldvAZuWoq4V0VY4CruFg2H1avOcK/Xn4RrrxAMhk8FidUjwDF3JoWzPW29a/81GKitY8D7MrXLgwVapkGnguZI/k5Iz/MFkC89drM8BghhVV/hipbndbgAFMCCvBgh92/DHSvLAYGILuv2UHr/EC4HmFUET3X5PyhfP9jxT+6ECwVi0RyGVwBbuj2xFix1zwwB4YyAIBC4/s+TTPLATE5buxlJCcyt4jYw+SOY/V0WcIcFQxYQte6ViFVqR5s7/bepFTS9kCeBohWMH0fvUzFay5vd8hTDFJTItKRej1pYdp14V79PbyY/TviVs0rU9dm8SJ4p7HrIoQqYfwGnY/3cMTRF6vBiH0dLPM01U5CqjHhmUL8fJut5o8uxbSb3m6u/OAqsfqlrJZmjFH+J13RfLsaUX3/5o1a/jV39/xXeqO9ARz9uzZbGddwCjgHl9t5z9UiM+Z+XT+jkIW7GN3e4C2hlQ1mhf22DXLoQStKheluqEFOQetLQcUoSu2bdpkAviD/WSTMsraHQNfrkXEmQhZ43ADOOGGt6lE47pUc5kMATkBXcJjlhxmcbV+bFsKKehn1eOjd+LZ7/WZAt7oUi1bwjivdsdI+e+3X6Rpa09zeBi64zGorlvdkFwfMyYhmZ0gLE7TPKnGsakayNJRO6QAT00Mz3376sWpgG/u0lWphq1/56MU9bTmWbQ2aNCAzp8/z96B8uXLpxuIdeDAAXI28qsx4DzZPb6W7xEJ1TEIRaWp/1yNnNjdEcDo3O3n79LWtEFdN6PSx2LC648Rz4idrFYykF+xBFjBiwHvyCu/HOQctNve7OC03kNb2x1ZJZKSdRTsr66owN+hJ7/ZyQngrf1wjwcEZApAqAq8dJ8/mT5TgC3tjum6xyw5ZIhHR4L+ST1qZ2lvzMIFZweEqeZJxbFSzf7y41IwLXK90IIGkYoHUltNQ+rq2Pp+j1JUtOb5L0qvXr0oP5kyZQpnJDh16hT5+fnRQw89xJMbYEYujXbt2tHmzZtNvjd8+HCaM2cOORInT57MVkoM/NAggTbAE7YIVucmu3Z3FNDekPsQixZKAPGK0dqnb0XxqHh0LWJB3JsxGNzwn5jVvyIWM7vxhjgfUkaBZ5qXd1rBmh929/f2JLKdw9spgIic2KM2dftqK48gH3AunGOyrTGw57kFe1mwIpYzJ7MPWsvuuHeWv9ySvtpwlsPE/jh0nXZfuEefPFGXM4Bo98vViDhD9z5ej12PTJc5REtLBYFav2xBfq0TGuxyM3LZE2f7nXcW8txCJ0yYQPkJxOiIESOoSZMmHDPyv//9jzp37kwnTpyggID/Ri4OGzaMJk2aZPjsrKELGPWNmCY8FWO2j0frlLJ3kQSFwR/qKiWCeBnaqoKhqxGeGyynbz7Qv96KZhELQYsF02xqIOE4BstULRlkImjLFvZPl9oH0zUeuRrJ3ZQDm7tejlHB+iDG9NkW5TnTxISVx2n1q63z5C2Ep3LEogMcYxwS7MszXtnr4Qn3wdjO1bibHhMSwPv7zLw9/LfhQXwSzxSnJfY3JsjXM82DGmzwpJawcf5UQbAFntYcKYcnC1CrVi0OG7AFiJ81ZsGCBVS8eHE+f5s2bUxEasmSJcmRqVw563goTL+IfJsYODOpR618KZdgf7s7EwgBaFC2EC/G4I+nJmaRou3MTb2YjY5P5jaNZRXdMImVrVJCH1qAgV94/WHHJd7Wp2Fpp+9hcDW7OzJjHq5Kfx6+zr0CC7ZfomFtcj8zEZL+owcBcbJzB+U8G4Mt7I6BRateaUUfrz7Fg8JwrRreHu5UI6QA1Q8NZnGKpUKRACVjnO2J3O8OKlpv375N/fv3p02bNlHBggV53f379zlvK1JhFStm29Q0iOfQshgY8/PPP9PChQtZuHbv3p3efffdDL2tCQkJvBjHiuQHDx48oCJFimS4HYmPv92iDwv4qHdtKmTDwS5C/pG
V3V0FDM5qXrEILxrovkRM7Omb/3lmEWJw9tYDHhWPQV/mA7/A0JZ6r64zo4rdHQHMgIQpVcf9foRmrDvD8Z+5GXG/cNdl+sEoU0BuZqiyld0RDjKpZ21OgI+sAui9gECtUSrI6aaAdUXkfndQ0Tpq1CiKjo6m48ePU40aNXgduuoHDRpEr7zyCv3yyy9kKzAb1+jRo6lly5ZUu3Ztw/oBAwZQuXLlKCQkhI4cOUJvvvkmnT59OsPZuRAnO3HixHTr9+3bxyEHDRs2ZC9yXFwc56WtUKECHxfgPChHWFgYf65fvz6dO3eOGyy+W7VqVTp48CBvCw0NJQ8PD7p8Wf8jmJKSQrGxsSySfX192UMNjzEoUrwkjfn1HIcFtC7jQy3LB/E14IHA29ubz4MZNwCEeWBgIJ8XwA63bt2ie/fukaenJ+eKw74QDHiIKFSoEM9gBhALjP3u3LlD7u7uHHaB60bZcMPBi6150DESEmXFsQHidTDQLikpiY+J+kY7AEiDhmu7cUPvSWvcuDEdO3aM4uPjOXi8bNmydPToUd6GAXwI9bh6VT+FIeobMcv4Pq4Lxzp8+DBvw/fAlStX+LVevXo8EBD1jYeS6tWrGwb/ob5x/Zcu6b11derU4e/hQQf1jTaDawWlSpXi7+NYALbA9MQRERE8uBBlwrR8AJNoIPAdo0O1+sbD2927d9m+uNa9e/dyu0B944EKtgNoDzjHzZs3uasdM6bA5rh+7Idja/WNJ3VcF/YF2PfQoUOUmJjID4i4PtQpwBzXqFuUGcDmsAXWoayoY+M2C/tq9Y1eEbSHmJgYrm+cF+cBZcqU4Xahtdm6devSxYsX+Z5HTDmuXavv0qVLc9vEdq2+cV+gzfr4+PB3US+gQsmSVLtmMJ33CScq503Vqrekg2fD6FjYXbr2QEfRHkF0+OJtuvEghR6uEkxFvJIM9Q8bh4eH86K1Wa2+ixYtygvaj9ZmYW/Yx7zNor5x7+D3SmuzqAOtvnFc1BkeaFHfqAutzeI3AHa4dk2fEiw7vxHYjmPn5DcCdYb2a+k3Avcb1mGOc4D2DJvKb4T+N6JMyjWqXMiTzkUk07hfdtFL9f1y9Btx5j7R+5v1U3r2r+lPNQskcpvL6W8EtuPac/Ibge+gvrPzGxHqFUOPlkD+1AdUL7Scy/xGoM3i/tDqGzNu4v5BWzSvb7RBtBntN9kRfiOwDrbOyW9ETnTEibQyqUaeswegoaxbt46NZwx+ABFrisZoK1566SVavXo1bdu2jQ2ZERs2bKCOHTtyI7CUU9aSpxWNz9aj8jKbm/ijv0/St1su8HzxyBZQ0F+8rK6CreekdiWQ7sdVujXF7vnPkav3qefM7Tz175IXmlMzI69/djMF9Kofwl7W3Cb1F7uria3tHqVo9oA857LA04F5miuAddhmKzB97F9//UUbN27MVLACreFoXgZz8IQHoxsv+UFG8xLvv3yP5qaNmMYoVRGsroUt56N2NVxFsAKxe/6DPMJPNdV7XjEoC4OqsgJC9bkf9JkCkPbp4z518zQLldhdTcTuDipaO3ToQK+++qqhywHAHT5mzBj2blobOIYhWJcvX84eVLjYs0LrxkAXsCOhlcsYTO34+tIj7Bno0zCUOtYoYZeyCflrd8H1Ebvbhzc6V+OE/BgMiBjVzICoHYlMAXdiOCXUt882ynOmALG7mojdHVS0fv311+ymRjwMut6xQEhi3VdffUXWBumuMMBq0aJFHBeC2BIsiBMBiH/54IMPOAYIcUorV66kZ599ljMLIF7GkUDckTmfrD3NXVMlCvjQe91r2qVcQv7bXXB9xO72AQNYMXsV+OzfMxT+4L9QMHM+XHWScxBzpoBnG1PxoLynhRK7q4nY3UEHYiH2E0HLiGvVApsReN2pUyeyBbNnzzZMIGAMppEdPHgwB3mjLDNmzOCAaZSvT58+9M4775CjoWVb0Nhz8R7N36EPUEeXFEbACq6Hud0FNRC724/+Tcr
SL3uucGaKqatP0SdP1Eu3z8+7L3NuVzC9Xz2qXTrnmQIsIXZXE7G7gw7EckXyK8AZolqbEAFTMD76xVa6fDeW+jUuQ1P7OpZXWLCN3QV1ELvbl/2XI6jP7B38ftnLD3GuU40d58Pp2Xl7KDlVR693rkojO1hvznixu5rY2u5RMhArZyCeFCkoLOU0RSUiHcjWrVvzWj6XRktFAqatOc2CFXFUb3fTpw4TXN/ugjqI3e1Lo3KF6IlG+kG77/1xjFKQT5CILoXH0EsLD7BgRT7XEe2tmxRe7K4mYncHE63ofsdUqZYUPtT/8OHD6fPPP89r+ZRg5/m7hm6pqX3qUgFfCQsQBEGwNm8+Wp2nNEWYwOK9V0wyBSAx/9Q8ZgoQBMFBRSsSOT/yyCMZbkeOVi0JtmAZJHvGvO1v/KZPio3ULG2q2nYGMcEx7C6oh9jd/mA67LEPVzUMen355/10Pi1TwNxn8p4pwBJidzURuzuYaMWMJ5bys2pglhHM6CFkDGYiwdzRVyPiqHRBP3q7q4QFqGJ3QT3E7o7BwOblqHrJILofm0Tbz90lXy93faaAXEzzmh3E7moidncw0Yrp2DKL2cD0ZI6WF9XRWHvoEv2UljdwWt+6FOiT52QOghNgnNNYUAexu2Pg6eFOk3r+N+339CfrWy1TgCXE7moidrcNuVZJjz32GL377rscIoD5r41BztQJEyZQt27drFFGlyQ6PonmHHiQ9uRfllpWLmrvIgmCIChB0wqFac7ARuTp7kadasoELoLg8imvEB7QsGFD8vDw4BmqqlXTJ29GrtaZM2dSSkoK528tUcL5fhDyI5XE+GVHOW9gaCE/Wju6DQWIl1UZkpOTOXxGUAuxu5qI3dXE1naPkpRXOQNidMeOHVS7dm0aP3489e7dm5f//e9/vG7btm1OKVjzy8u69aw+3veTvvVEsCrG8ePH7V0EwQ6I3dVE7K4mYnfbkCe1VK5cOfr7778pIiKCzp07R3DaVqlShQoV+i9ps5CeIF8vWjO6Dc39awe1qFTE3sUR8hkJ0FcTsbuaiN3VROxuG6zi4oNIbdKkiTUOpQwYdPVITUlvpSIqdeUI/yF2VxOxu5qI3R0sPEDIO+XLl7d3EQQ7IHZXE7G7mojd1UTsbhtEtNoRpAUT1EPsriZidzURu6uJ2N02iGgVBEEQBEEQHB4RrXYEA9kE9RC7q4nYXU3E7moidrcNIlrtCHLZCuohdlcTsbuaiN3VROxuG0S02pGrV6/auwiCHRC7q4nYXU3E7moidrcNIloFQRAEQRAE153G1ZXJr+nREhMTydvb22bHFxwTsbuaiN3VROyuJra2e5RM4yrkN2fOnLF3EQQ7IHZXE7G7mojd1UTsbhtEtNqRmJgYexdBsANidzURu6uJ2F1NxO62QUSrHQkMDLR3EQQ7IHZXE7G7mojd1UTsbhtEtNqRypUr27sIgh0Qu6uJ2F1NxO5qIna3DSJa7cihQ4fsXQTBDojd1UTsriZidzURu9sGEa2CIAiCIAiCw+OyonXmzJlUvnx58vX1pWbNmtGePXvI0ShTpoy9iyDYAbG7mojd1UTsriZid9vgkqJ1yZIl9Nprr9GECRPowIEDVK9ePerSpQvdvn2bHAl3d5esfiELxO5qInZXE7G7mojdbYMnuSCff/45DRs2jIYMGcKf58yZQ6tWraLvv/+e3nrrLXsXjwjzOSTFUtj5U1SycJC9SyPkM2J3NRG7q4nYXWG7lyhB5OZm76K4FJ6uOAvF/v37afz48SZPPJ06daKdO3da/E5CQgIvxjNN2JSkWKKPQqgJ3v9r21MJjofYXU3E7moidlfY7k2uE3kH2LsoLoXLidbw8HBKSUmhEnjCMQKfT506ZfE7U6ZMoYkTJ6Zbv2/fPgoICKCGDRvSyZMnKS4ujoKCgqhChQp05MgR3qdcuXKUmppKYWFh/Ll+/fp07tw5evDgAX+3atWqdPDgQd4WGhpKHh4e/ATGDVo
QBEEQBJfk2rVrdPV2BL+3to44ceIEqYibToe+atfh+vXrVLp0adqxYwe1aNHCsH7cuHG0efNm2r17d7Y8rQiittmcvmnhASdPnaIa1atb//iCQyN2VxOxu5qI3RW2e52GNgsPiIqKouDgYNvpFAfF5TytRYsW5aeQW7dumazH55IlS1r8jo+PDy/5BhqxdwBFxSVL14GCiN3VROyuJmJ3he0u8axWx+WGt3l7e1OjRo1o/fr1hnVwu+OzsefVEfDz87N3EQQ7IHZXE7G7mojd1UTsbhtcztMKkO5q0KBB1LhxY2ratCnNmDGDYmJiDNkEHIUaNWrYuwiCHRC7q4nYXU3E7moidrcNLudpBf369aNPP/2U3nvvPQ5oxnRqa9asSTc4y94gh6ygHmJ3NRG7q4nYXU3E7rbBJT2tYOTIkbzkBm1smq1TX8H7a/P0WoLDIXZXE7G7mojd1cTWdo9KO7aLjaVXV7TmhejoaH6VadgEQRAEQXBkvRIcHEyq4HIpr6wBBm4hdRZyqbnZMF0FRDHysqmUrkJ1xO5qInZXE7G7muSH3XU6HQvWkJAQpaaMFU+rBdAAkMA3P0CDlh8z9RC7q4nYXU3E7mpia7sHK+Rh1VBHnguCIAiCIAhOi4hWQRAEQRAEweER0WonMAPXhAkT8ncmLsHuiN3VROyuJmJ3NRG72w4ZiCUIgiAIgiA4POJpFQRBEARBEBweEa2CIAiCIAiCwyOiVRAEQRAEQXB4RLQKgiAIgiAIDo+IVkEQBEEQBMHhEdEqCIIgCIIgODwiWgVBEARBEASHR0SrIAiCIAiC4PCIaBUEQRAEQRAcHhGtgiAIgiAIgsMjolUQBEEQBEFweES0CoIgCIIgCA6PiFZBEARBEATB4RHRKghChrRr144XQRCyZtOmTeTm5savgiBYHxGtgpAJ58+fp+HDh1PFihXJ19eXChQoQC1btqQvvviC4uLiDPuVL1+e/1hpS/Hixal169a0fPlyk+Nhv27dulk81759+/i7CxYsIGckNTWVfvzxR3r44YepaNGi5OXlxfXQuXNn+vbbbykhIcFkf+P6cnd3p5CQEN7X/A8+to8cOdLiOX/77bdsiQTUqfH5PD09qXTp0jR48GC6du1aht+bNWsW79+sWbMM99GO+fzzz1vc/vbbbxv2CQ8Pz7Sc77//fqb71a5d22UeIq5cuUIvvvgi3xM+Pj7cVnr16kXbt28nRwJtxLjtZLRgP0EQbIunjY8vCE7LqlWr6IknnuA/qM8++ywLhsTERNq2bRu98cYbdPz4cRZjGvXr16exY8fy++vXr9M333xDjz/+OM2ePZv/OLsyEPC9e/emtWvX0kMPPUSvv/46lShRgu7du0ebN2+ml19+mXbv3k3z5s0z+R4ELupWp9PRxYsXWSR26NCB6/7RRx+1ejknTZpEFSpUoPj4eNq1axeLWdjz2LFj/FBizs8//8yias+ePXTu3DmqXLmyxePiu7///juX39vb22TbL7/8wttxTkEPhOljjz3G7yH2a9asSTdv3mR74GEPD4WjRo0iRwAPrZ06dTJ8Rjt977336IUXXuCyalSqVIkfbnAvmLcBQRCshE4QhHRcuHBBFxgYqKtevbru+vXr6bafPXtWN2PGDMPncuXK6bp27Wqyz40bN3QBAQG6qlWrZrqfxt69e3W4JefPn69zFNq2bctLVgwfPpzLblwnxpw5c0Y3c+ZMk3XYf8SIESbrjhw5wus7d+6c6X4aS5cu5e0bN27MtHyoU+yHOjbmzTff5PVLliyx2AawbdmyZbpixYrp3n//fYvHxj69evXSubu761asWGGybfv27by9T58+/Hrnzp1MyzlhwoRM96tVq1a27OHI3Lt3T1eyZEldiRIldOfOnTPZFhsbq2vdujXXJeouP4mLi9OlpKRkuZ8j3qeCoAoSHiAIFpg2bRo9ePCAPYOlSpVKtx0et1dffTXTY5QsWZJq1KjBnhlrAu8lPJl
16tShwMBADlmAV/Lw4cMW4+t+/fVXmjx5MoWGhrLHr2PHjuw1NAdeY3iL/Pz8qGnTprR169ZslScsLIy+++47euSRRzKskypVqrC3NStwTQgtsHadZYTmKUMYiCUva6FChahr167Ut29f/pwRCDVo06YNLVq0KN0xcE3w0tuKr776imrVqkX+/v5c3saNG5uU4/Lly1z31apVY9sWKVKEexAuXbqU7lhHjhyhtm3b8n5oLx9++CHNnz+f25H5/qtXr+b6CwgIoKCgIK4n9D5kBXog4FX95JNPuL0Zg/P+8MMPfD54xY3DZrDeHHj2se2vv/4yrEO4x9ChQ9nTj14S1M33339v8d5YvHgxvfPOO2w/1F9UVBRZO6YV4Rywv1a3OA9+PxDaAtATAQ8trh02WrduXbrjZueaBEEFJDxAECzw559/chwrurpzS1JSEgs6iARrcuHCBVqxYgULD3R137p1i4UA/iCeOHGCY0ON+fjjjzlmFEI3MjKSBfnTTz/N3fUaEOfoBsX1jh49ms/Ro0cPKly4MJUpUybT8kC8pKSk0MCBA/N8bREREbxk1A1vbTQhBrFnDgQnwjvQ1fvUU09xmMfevXupSZMmFo81YMAAFu142MHDRHJyMi1dupRee+01m4UGzJ07l1555RUW1Tg3zgNxBNuiPABl3rFjB/Xv35+FKK4Z1wIxhfYCEaUJo/bt27PoGj9+PItRPIxAJJnz008/0aBBg6hLly40depUio2N5WO2atWKDh48yCEVmd1beHh68sknLW5Hm8ZxNmzYwF3tEOG4F/HwhXMas2TJErYdygFwLzRv3twQB12sWDFun8899xwLUrRtYz744AO2L+4NxFzbqlsfbRqx7LAB7lvUFd6jjaFMCB+CvSDkYUv8buBBIDfXJAgujb1dvYLgaERGRnL3X8+ePbP9HXT7o0sb3bpYDh8+rOvfvz8fZ9SoUVYND4iPj0/XjXnx4kWdj4+PbtKkSYZ16DLH8WrUqKFLSEgwrP/iiy94/dGjR/lzYmKirnjx4rr69eub7Pftt9/yfll1R48ZM4b3O3TokMl6HEurDyzh4eEm2/Gd5557jrfdvn1bt3v3bl3Hjh15/WeffWaT8IB169bx+cLCwnS//fYbd/uj3vDZmH379vH+//77L39OTU3VhYaG6l599dV0x9bKh25vb29v3U8//cTrV61apXNzc9NdunQpy27/3IYHoI1iXWagy92cnTt38nl+/PFHwzq0U5T34MGDhnV3797VFS5cmPdFGwPR0dG6ggUL6oYNG2ZyzJs3b+qCg4PTrTcH361Xr16m+7zyyit8ToSLgPHjx+u8vLy4jo3bF441dOhQwzq0p1KlSqVra7gXUTatLrR7o2LFihbrJzMyu0+14xq3R9gL6xYtWmRYd+rUKV6HMIhdu3YZ1q9duzbdsbN7TYKgAhIeIAhmaF2Emqcju/zzzz/sBcFSr1499rI988wz7ImyJvB8wXMK4OG8e/cue/bQtXjgwIF0+w8ZMsTEg6R1icObqnW/3r59m709xvthNHRwcHC26wtlMObvv/821AeWcuXKpfsuPLzYhpHj6CLFAB14Jm3lPcKAGpwP3mN4tOBNXLlyJXsgjYEHDF2x8DwCeLn69evH3cmoc0vA44cQCQy8Auiih+fa0nVbi4IFC9LVq1fZm5oR6HY29v6jvcCTje8at5c1a9ZQixYteEChBjzt8Mob8++//9L9+/fZ+4wsB9ri4eHBNty4cWOmZY6Ojs7y3tK2a20LdY+yL1u2zOR+QzmwDeD5AYPhunfvzu+NywZPLHoZzO8PeG6N68dW4N6AZ1UD9yrqH+FDxpkptPfavZmbaxIEV0bCAwTBDMSIan9ccwL+4CAGEAIHXa74g4Q/TDkF388qtRRGV2OkOmI/jUWUpVCEsmXLmnzWusLRZanFPGpxp8YgZRW6ZbNCExjoFjcGqcEgcAC6PS2lMurZsyd3eeKacRzE6kFIWrv
ONGbOnElVq1blP/aICdyyZUu67m/UJ8QpBKtxbC3s+9lnn9H69es5NZcl0MWLBxWkc0IIB0IxrI3xtb755pscA4kYZAhRlAtlQN1roIt9ypQpHJuKEAC9c1gP6kED7QCi1RzzUI2zZ8/yK7I8ZHb/ZATsnNW9pW3X2hYeAqtXr87hAOgWB3iP+GetHHfu3GERi9hs46wexuDhzDwUIT/AQ5F5G8UDoXnojfaQqN2bubkmQXBlRLQKgoU/uogLRRqknIA/oMapcSyBWD7j/K7GIC5Q2yczPvroI3r33Xd5YAZi8uANg+cV3kkIWnPgAbOEsXjJCxATAPUFcaEBj6ZWHwsXLszwj3lWdQZRmdc604C4Q4wkQE5QxE5C5J0+fdrgKUYs5Y0bN1i4YjEHXtiMRCvigFFeePAQI5lR3GZGaNeR2fUaXysejFB2DESCp1RLu4WUTBMnTuR9kDoKghXtA6IUwggCCp4/S+0lK7TvIK4Vgw3NQQ7czECZEfeK+rEULwsQl4uHJuMHKXhUMaAQXkaIWXjI4e3VzqeVC7HV5rGvGnXr1jX5nB9e1szuwazuzdxckyC4MiJaBcECGDQBz8bOnTstep9yC7qKMfjFEhAf2j6ZgVHH8AKa5zyFRwbCOTdl0jxoxt4zdMfC02gsRC2BzAX44wsxZ96VbA1QPq1ucltnlkCZ4YFEXX799df01ltv8XpcB8IV4JU1B93TmDBizpw5FgUP1kEMQ6SjXnJqD+06cF3mXjgIVgzQMRfM8ExD0GFBHmEMHoO4w2AqCFy0FwgeeIk1MGAL7cX83JaySpiv00b8o46yeuDI6N7CfYXwGUuD9zBQDJkrcGzjOsb1QYhDmCN0A6EDxl3ueEiCmIWnPDflckRc8ZoEIS9ITKsgWGDcuHEsBpD4HKN3zUGKJHTR5xQkVEcMIrqOjYHXCSO1IQQaNmyYpdgy95JCAGQ2s1NmwPOIP44QYhA9Gkj0bi5sLIHwA3h9MaIZ4s/aXl3UGSYC2L9/v8l6lA0CEzGYljx+2QEj6OF9nTFjBgs5eDghTCGsEPNqviCUAV3X8PJlBEaiT5gwgb3hOQXpyBBXjNHl5l5QPEQhI4HxpAuITzUG30WiftQ3Hjoyai9Ik2Uem4sYSYjJQ4cOmaRXM0/1hf3QGwGPv3YOY9ClnRnIUoF2jgk6tNhNDdgAMdgoL7zF5h5apA9DWAAWpKJDmjENXGefPn1Y1FrqJcmqXI6IK16TIOQF8bQKggXgTcJAGnh38MfSeEYspA+CSMzNtI2YRQexlEh7A6HXoEEDFh74I4w/SpgGNau0OxBUyGGJP+4Y6HP06FEWFtmJP7UEumERiwsxAU8rrhkeVnQpZ/eYEH34Drqi0aWOgSMQJujKRSwr0hxh8ElugAcU9Q2BgjIiHAEzjkFUoxsf5cwLEE+wB46HeF+IUnTzWwKphyDwUd/aACBz4JnOyjudEagziDXkDsX1ohyIj0abwwAveFlRtxr4DMGOGFZ4H0+ePMkPDsiZqsWDor2gKx9hARC0EKaIgzWPf8aDGjzEmKUMdtRSXuGhBOJVi8mEYIWoRuwuHrDg7USdII4XM5mhLBk9vACcF95flBHfN58RC55dPBBaSjeHOkf9wIOM2FZtQKJxejcMBEP88bBhw/i4KDsGK+Ga8d7ZcMVrEoRcY+/0BYLgyGAmJ6TwKV++PKczCgoK0rVs2VL31Vdfceqp7KSyMiciIoLTRFWoUIHT+BQoUEDXvn173erVq7P1fZx37NixnAbHz8+Py4MURuazV2npd5AWyhikLrKUsmfWrFlcJqSAaty4sW7Lli3ZnhELJCcn8zE7dOjAaZI8PT11RYsW5TRWc+bM4RmHjMkslZU5V69e1T3//PO60qVL83Fx/G7dupmkC8rNjFgA6cMqVarEC47p6+uri4mJyfBYgwcPZrtpKYiycx3ZTXmlsXDhQl3z5s15RjXYAzOzTZw40aTNgW+++UbXpk0bXZE
iRXg/XMMbb7zBaduM29uQIUPYFpjlrUuXLpxyCW120KBBJsdDuivMSIVjIcXXlClTdF9++SWXHSmtjEH7wrGQdgl1hnOjbpAuLDugHeLeKlu2LNcnytejRw/d1q1bM/wOZqJDWbBs27bN4j63bt1ie5QpU4aPi9m30AaRwi2re8NWKa8spSXL6DfDUnvKzjUJggq44b/cS15BEATBlcEALkxegewQGQ0cEgRByA8kplUQBEGwmLUAoSsILUCWBRGsgiDYG4lpFQRBEBhkysDgNMRxYwAiMlRglH5uBpUJgiBYGxGtgiAIgiFTAwZJIVMBBl5hoBSEq/EofUEQBHshMa2CIAiCIAiCwyMxrYIgCIIgCILDI6JVEARBEARBcHhEtAqCIAiCIAgOjwzEsgCmT8SMO5hRRpsFRhAEQRAEwRHQ6XQ8e19ISEi6meFcGRGtFoBgLVOmjL2LIQiCIAiCkCFhYWEUGhpKqiCi1QLanN1oDJhn21YgDyLmCxfUQuyuJmJ3NRG7q4mt7R4VFcXONU2vqIKIVgtoIQEQrLYUrTdu3LDp8QXHROyuJmJ3NRG7q0l+2d1NsRBGdQIhHBDVGpugR+yuJmJ3NRG7q4nY3TbI5AIZuN2Dg4MpMjJSnpAFQRAEQXAoohTVKRIeYEfOnj1LVapUsXcxhHxG7G6ZyLgkuhoRS9ci4uhqRBxdux9HNyPjqULRAOrVoDRVLh5IzozYXU3E7moidrcNIlpzCRzUycnJlJKSkutj4Pvx8fFWLZfgmHh4eJCnpyd3GSGlmor3y72YRBaixqIUIpXfR8RRdEJyht//euM5qhsaTL0blKbu9UKoaKAPORsq2l0Qu6uK2N02iGjNBYmJiRxkHRsbm6fjQPBevHjRauUSHBt/f38qVaqUS472TE3VUfiDBLrKQlQTprFpwlT/OS4p6we8IgHeVLqQH4UW8qPSBf2oeJAv7bxwlzafuUNHrkby8uGqk9S2ajEWsA/XLEG+Xh7kDLii3YWsEburidjdNkhMaw5jRfD0BLc/PGfFihUjb2/vXAdcQ7TiOLYWE6k6HcHIekvr+JUXsrA+zSumrddaR7r1eGfYriM/bw8K9PUidwk+TwfqDQ86d+7codj4RLqY4E++vr7k4eZGyAmNOvNwd0v7rH+Pde5uZHjP243eY5ul9dox0SaTU1IpMSWVklN0lJSSyktiso6SU/97j1d8Nn6flKzj7/HnlP/em3+OT0rl7nv2nt6Po8TkrD0LxYN89IK0kL9BmEKklinkRyEF/cjf2/JzNATxn4ev0/KD11i4agT5eNKjdUrS4w1DqWn5wlx/jgoecvHgIqiF2F1NbG33KEVjWkW05rAxoDsf3tFy5crluUHiWBAv1gZiBfGBEbFJFJuYcZertfF0d6dC/l5UKMDbabxf+UFSmj3u3o+mK1cu07vr79DNmNyHlTgq0IslC/hSaCF/E2+p9rlUsK9V2sW52w9o+cGrtOLgdRbLGjhXz/oh9HhDxL86npfj9OnTVK1aNXsXQ8hnxO5qYmu7RykqWiU8IJc42rRp8KZGpwnV6PjkNB+qKfC+candzN7jH3vu9CvwHm/hvdPeY3/9a9p7o/Xw5t6PS2JxdudBAi8BPp5UOMCbguF9dWDvl61ISdVRdLzeHg/S7KFLTuW6rlTEm0KK+FCKTu8Jx76wHxb9e/33seCZMoXX03/b0/bHeoRN6bdbfvaEnbw83Mnbw508PdwM77083MiTX/FZe+9mcV/tPX/2dCdP97Rtnu5UooBvmjD1o5LBvrze1mBA1htdqtPYh6vRnkv3aPmBa/T30RssYGdtOs9LndL6+Nce9Z0z/lUQBEFIj3hac+lprVChQp69pHkND4DpYhNTKCI2kT15xsLFz8uDCvp7U0F/LxYZts4bx/MgxyfzYBuINa0k6LIu5O/NAtbVva+ogwcJyXQ/NontAWGp4e/tQQEeOrp78yqVKlmCihQpYvXzswBOE784NQQ
k6l8F4pNSaN3JWyxgEf+anHYv4PrbVClKvRuGUmc7x79innCJc1MPsbua2NruUeJpFfIbxMfmRrQmJEGoJtH92ESOL9SASIFIhUjM7z/OEMQF/Lx4QWwjhDQELLyviEfEgnhFiNeCfq7jfYVQxQAjCFUsiAnVgCcSDw6F/LzIx8uDH3juu7tRXNx/XdrWBHXqzv5v9UB771Y3hJe7RvGvh69G0sbTd3gJRPxr7ZLUu2Fpal6hSL63QcS4iXhRD7G7mojdbYOIVjsCT6uXl1e241TRBX/fLE4VA28gFBFLii55R5iFQ+s2xqAbpDG69wDe12QuN5YbkW4s5gr7e/MALnsxb948WrJkCf3zzz85/m5Csl6ozpk9hzasW0NfzV9siOsN9vdiYQ7vqiV74AlcsB1FAn1ocMsKvJy/84BWHLzGAhZZDJbuv8pLSLAv9WxQmh5vUJqqlMifPyz379+XOegVROyuJmJ32yCi1YFBd29UvF6oGsepIi4y0NeThWqBHMSM3rx5kyZPnkyrVq2ia9euUfHixal+/fo0evRo6tixI+9Tvnx5unz5Mr/HQDMEko8fP56eeOIJXjd48GC+GVesWGFy7E2bNlH79u0pIiKCChYsqC8nBLWvvozwuHp7/idQAwKDqHzFyjTytXH0ZN/eFOznbdKVPWXKFHrnnXfo448/pjfeeMPkXAsWLKAhQ4ZQ9erV6eTJkybbli5dSk8++SQPlLt06VKGdQGv57vvvsv7a/z77780YsQIrqeePXuyqEV2CIAumCZNmtCSFX9RUNEQw4NDtycG0Kzp0+jMob3UuWM7totkUHAcKhULpLGdq9GYTlVp3+UIWnbgKq06eoOuR8bT7E3nealdugA9XKMkVS0RSFVKBFK5IgH5EpsrWK+3IyEZ2SxSuNcjLlH/yp8TU/Xr8DltvbYPtqOnSovi0cJq/su28l+mFW0dh/yY7JN+f+042jqP5Dh62q0wtaxcVJlwHUGwFSJa7YilmFj8yMUkwotnOU4VXf/w5OX0jyoEXMuWLVlQfvLJJ1SnTh1KSkqitWvXslA7deqUYd9JkybRsGHDOGbms88+o379+lHp0qXpoYceyvW1auX9/vvvqXX7ThR26y7Nm/sNjXr+GSoWsomq1azNoQ0IH0AYAfYbN24cv5qLVhAQEEC3b9+mnTt3UosWLQzrITTLli2bZXl+++03jgNCnWihGgMGDGCB3qVLF+rbty99++239PLLI/jBYeTosdSz/yDyKFCcBSv+9MCzXaiQPw0cOICW/PAt9e32cLbqAvHQQv6CB7umFQrz8n6PWrT+5G3OQLDp9B06di2KFw3EgJcvGkCViwXyoC8IWYhfLHnpGZAR5OkzapgsabHg2oJ4fb3w/E9sap8N29IWRx+ZsfbsHipRwIdnduvTMJSq5pN3X7Afcr/bBhGtVoxrzCnx8Qnk66sf2Ywf4MjYZIqMsxCn6ufFQlWLU9VyZkLEZjcc4OWXX+Z99+zZw4JPo1atWjR06FCTfRGHU7JkSV5mzpxJCxcupD///DNPolWjUKFCVLl8GV7qVfuYfp43hw7s3k5VatTiGFgsR/fupJjYOJrw/vv0448/0o4dO9KdG7NLQWRC1Gqi9erVq+zxHTNmDP3yyy+ZlmPx4sXUvXt3w+fw8HBeUE8+Pj70yGNdae/Bo3TiRhQd2LuLDu7fT2Pfn2oywE0T4j179KCHH36YY1X9/PyyrIMrV65Q1apVc1mDQl7BfdS1bile0N7geT105T6du/OAzt2K5odGpNXCQsf/+x5uNWRJqFI8iMWs8YLehKw4f/48VapUiVwFhCxFxeM3Sx9fr4nNqLRXbUCi8YJtCHOC6LQF8GT6e3mQr7cH36t+hvfu+s/eHmx/hO74enqQl6e7aaaUtCwpxplTtJ4sbZ27UcaV//ZPy8iSlmLFeJ9dp8Jo86UYuhWVQN9svsALslsgNVuPeiEcziK4Hq52vzsKIlqtAARrzff
W5vt5T0zqkmEydmPu3btHa9as4dAAY8GqoXXnWwLiEHG3SI5vTTCF7Y8L5vP70CJBVLFoIAuIyPgkWvzzD9S5e286Fx5P3Xr3pW++ncvC1FygQ2y3a9eOvvjiCw5lQNjAI488YhJHZDpRgj6dFNZs27aN+j01gGISEHZB5BtUkEqWLEWLl/1JdZq1po2bt1D3vk9RQmIiffT26/TFrG+oRkhBiwPcGjduzNeze/duLk9W5GXqX8G6wLP/TPNyvAC0kZtR8XT2ll60nr39gM7zazQPfgy7F8fLhlO3TY4DL5q5mK1SPNBEkKCNOBroydFEpvECYWlYH2t5PTJl5BVMDoEH8mC/9At6Mv4TnWmLtzvfg5oANRWmHg4Z1tEwOI6mDWhOG0/dod8PXKWNp27T0WuRvExedZLaVStOfRqWpg41ipOPUQiV4Nw44v3uCohoVYBz587xH2PEgOYECFWEByCes0OHDlYpy1NPPcUZE+CVRJc8YmgRfoBYUCz3Iu7T+r9X0i9/ruPYsI7d+9KQPo/R6Pc+oqIFg3ldeHQCi88CoVWodNny9PW8n6jnE0/R3Hnf07gJH1HY5UvsicYfBUsZ3aIiI/maUnwL8kAdjSkz59GnE9+miIjXqU2HzvTc0Odo3szp1KVTBwotGkwd27Vhb+yoUaNo5MiRhu9BMCP1iBYLnBUyO47jggejUsGYCMGP2lQtZrINWQkgYjUvrF7URrMHTVu2nQs3+Q7iziFmK8Eb65FEhW+e1892xrOXpc14ZjwbWkazpGkzoaXNhmY+e5q2L+4PhLNY9HBa8HwiVj6vICsDRCYGhHKvkLakidF069OWIF9Pzg/s6gQGBrIYfaR2SV7wcL7y0DValja7G1K1YUGddK9Ximd3a1CmoEMMqrUVyDCDMCs8+MQkpKS96gfqoj0j24qvp7v+1cudveI+aa94aPHxdHf4DDSwu2B9RLRaATzhw+uZEy7cieEbFB6CQn7eVMDPM8deApw3O+Q0Fe+bb77Jg6AwWAk3HgZDde3alazB9OnTqVOnTnThwgXuxv/yyy+pcOHChu1Lf13CXSo9OrTgLsRCTRpRSGgZ+mPZb/R4/2d4H30oho5/5Ho8+TQt/WUhFSlZmlOMtGjXiS4vmJvhdaPLLikhnt/7+/txpgOtG69Vq9bUYcM27vqHgD539iwtXrSQDh48SG3atKFXX32VHn30UapduzZ/rlu3ruG4CAvA+bMDBK7gfMBriqV5RdMcuxCJ5028snpBGxYRy95ZTICARc8NckQCvD0MAlMTlbgPjEWm6TZv/TpFhGdeMP59488B3obsFmdvRdPvB65xhgt4+BfuusJLxaIBHD6A/MKYvMMRQP5thIPgdzfGTGzGGASofv1/69LeJ5huNw6Byy2Y+MTHSNjivSZo8Wr83vDK+3nwA55+chZtkhb9BC14jz8bKRbW6/fVD7QznhRGv/2/KdO19UnJKTS9f0GqUDR976aQe0S0WgE8EWenm96YisUCKDkxkQoE2t7rVqVKFS6j8WCrzMDAJ2QJgGBFV7vxEz8GL1nyKCKjADyolsIPjEGcbOXKlXmZP38+PfbYY3TixAnOZKANpDp+/LhJKjB4ZFf/vohGDB/GZYH3Cq9lC/vTi0MH0RcfTaAFX31CAwcOpOohBalYkA//Ia1eskBa3Blx/lItBi2xuB+/FvRI4n0yYvjw4expxvkhXJFBAV7Stm3b0ubNm01EK0IwihUz9cxlxI0bNzINyRCcC8SzNihbiBdjMGDoQvh/XtnTYbcoMKhA2h83/Uxmlv748Sxo2VyvzZ6m/SHl8pgJTBaZZp5Pc6+nI3aruwqIYc9oUA7Srb31aHV6o0s12nE+nJYduEZrjt2kC+Ex9Ok/Z3hpUbEIC9hH65Rir7YtgbC8FB5DF8NjDK8X7+rf4wHM2sBpgGsK8PGgAG+8enJbjk9K5bSCCWmv+IxxH9qkIQDCFwvSKjoqEOmCdRHRaicgcuN
Tk/PtSR8j4jGo6pVXXkknLCE4jUVU0aJFWVRaAj++GMSUkJDAA5Y0Dhw4wKPis5t3FjRt2pQaNWrEsbaISz169Cjt27ePB1MZeycgCBErevfaJQ5xCPT14sEP+gFRJahHjx7066+/0txvv+F6xR9gt7QfREsgjVXNmjVZLHfu3NniPhDPKAOOjTReANkWtFfjuFQE3MMr3aBBg2xfu+D6IOayVkgwL+D0aRlRLFgG4R2tqxTj5YNeybT66A0WsDsv3DUs7/1xnMMLkH2gRaUiuU6fhYepS2lCVBOkLE7DY3kSmKy8mywwITRZZOrf64Wn9mq8HevSPhvtB88+XnP6sITBf0hvpqU4016N3xtejQSv+SuEsRZaow+vIdMwnEzW61//W2+8HY4RLVTn5o3rVKawhIJZGxGtdiQnAi+vQLAivROEIlJawUuIQHHkJp09e3a6fKcZ8fTTT/P3n332WU5Jha7uLVu20IwZM2jatGk5LhdyxPbu3ZuPBaGI8qHr3RzkSMV2pOsyBwOwZs2alaOpUSHiMRgL5zcHqbQ+/PBD2r59uyHjQY0aNfgaIXLXr19Pb7/9tmH/rVu3UsWKFbM9UjS7HlnBtShVqpS9iyA4gd0h7J5oXIaXqxGxHDqAEAIIS0ySgaVUsG9a+qzSVLl4+vRZEGdX7sbqPaZ39YJUE6cIQ8iMooHeVL5IAKd9q5C24HO5Iv4sNO0JetCwBDhBwoWocgHcqyFYFxGtTjiNa26AqII3FF7NsWPHchc1xBM8nRCt2QUeWYi0t956i72QGNAEr+znn39Ozz33XI7LhdH+8NCiXPCWIp7WEn369OGu+o8++ijdNsSTZifVlDEoK0b9o/zmMaaIXUUdhYSEmAjjQYMGcQwuwicgojWQXgt5bbOLtTMxCM4BeicE9ciL3UML+dPIDlVoRPvKdDDsPk+O8efhG3TDaHKMuqHB1LF6CfaS6gVqDF27H5dp7lqEhECUInZWL1D99eK0aEC20rcJWSP3u21w0+V0lI4CIKk+hAwEDWI4jUE38MWLF1loWZocICfgWHk9hpB7EKPasGFDnlAgtyD+FpkVzpw5k+UAK63tAHhuBbU4ffq0hAcoiLXtDi/qhpO3OX0WJscwjvM099hCjEKUGjymeC0SQIUC9DP9Cc57v0dlolNcGfG0CsqCUANMmpAX4LHGBAiSEUAQhPwAo98xKAsLvKt/Hr5Oh8PuU6mCfixIIUwhVosF+rh02ixBTcTTakdPK6peflTUQWs75cqVk1ytioYDuSMJq6AUYnc1sbXdoxT1tMqdZEcktlFNrl+/bu8iCHYgu5NPCK6F2F1NxO62QUSrHREnt5poqbMEtZCHVDURu6uJ2N02iGi1I9JlpCYy+E5NJCRETcTuaiJ2tw1Op5owVz3iQM2XESNG8HYkoTff9uKLL5Ij4ukp4+BUJCf5ZAXXQZv1TVALsbuaiN1tg9OJ1r179/KIbW1BcnwtfZEGcmYa75ObpPf5gXQfqMm1a9fsXQTBDly6dMneRRDsgNhdTcTutsHpXH3mswl9/PHHPBMR5oM3dstjjntBEARBEATBNXA6T6u5p3LhwoU0dOhQk9RRP//8MxUtWpRq167NieNjY2OznLkC6SOMl/xAwgPURMID1KREiRL2LoJgB8TuaiJ2tw1OrZpWrFhB9+/fp8GDBxvWDRgwgPNgYgrOI0eO8LSgmJli2bJlGR5nypQpNHHixHTrz549S4GBgTxNaVhYGItbLy8vHvWvTdGmCc/k5GR+9fb25tHhWg5W7K+FAZjviylcsQ353LAvvqsdF9uwzvi4eI99gY+Pj8m+GNSljUo33xcDf5Aj1NK+KB/2S0lJSbcv9sP+2d0X16ddq/m+KC+2oV7M9zWvF+N9s6pD8/o2rkNL+2r1ou37zTff0G+//UarV6/OdR1+//33tGrVKvr9998z3RflwvHu3btHERERXIaqVavSuXPnuJ6CgoKoUKFCdOXKFcO
c5TgO9gXY98KFC1wutEuIXy2tCnoWUEc4NkCbxXGwDj0PiK/SuqvwHuUIDw83TPGLNFzaDG24d3AerWcD5bx9+zZ/Rn7imzdvUlxcHNupTJkyXH6A8qDOb926ZYg/v3PnDsXExLAN8Rn3FMB14lwI3wFly5bl64yOjuZjoPcE9602dTCuQUsVFhoaani4RN1WqVKFj4trQr5CLFevXuV9cS14aMXvBMAMNefPn+c6zKq+cVzUGWwXEBDAdaHVIf4g4Rh379411Lf2G4EphWEPbfYz1DfaJ2yF68isvvGwjWvS6ht1hve4BrRJ1JNW34ULF+Z1sAfA7x7K8+DBA65DnAcztWVV32ijKD/2RTmR+xHtSwtjQX1jP+SD1NqsVt+oQ9gH155VfeOYKLNxfaO+tDaL+kYdWWqzqG/cI1qbRfuAjbX6xrGM2yxA29PaN65ba7O4HpRJq29cv3GbNa5v1KnWZlF2fN+4DlF2rb4zarM4FuoBdYJ61NqscX2jHrU2W7p0aT6mcX3Lb4Tz/Uag7Dn9jTBus1n9RlxI+6wcOiemc+fOum7dumW6z/r165FXSnfu3LkM94mPj9dFRkYalrCwMP4O3psTFxenO3HiBL/mFWscIyfcuHFD98orr+gqVaqk8/Hx0RUvXlz30EMP6WbNmqWLiYkx7FeuXDm+fiz+/v66Bg0a6H799VfD9kGDBul69uyZ7vgbN27k70RERGRYBu24WIKCgnSNGzfWrVixwuK+H330kc7d3V03bdq0dNvmz5/Px6hevXq6bSgrtuE6sqr/UqVK6bZt22ZY988//+iqVKnCZRs4cKAuISHBsO3+/fu87dKlSybHwT4hISG6LVu2ZHk+tB0sgnqcOnXK3kUQ7IDYXU1sbffIyMgMdYor47ThAXh6XLduHT3//POZ7tesWTN+1Z70LIGnQe3pS1tcDTyVNWjQgP755x/66KOP6ODBg7Rz504aN24c/fXXX1yXxkyaNImfcLFfkyZNqF+/frRjxw6rlGX+/Pl87H379lHLli2pb9++dPTo0XT7wYOJ8uHVEniyxdM9rsOYefPm8VN5VsDDClujDABP4fDUI9sEjonyffvtt4b933rrLd4G74sx8Mjge19++WW260AQBEEQhJzhtKIVwgcu9a5du2a636FDhwxufZsBB2JiTI4XH7fkXH3PsORgcoKXX36Zu1QgxJ588kmqUaMGd0H07NmTu7a7d+9usj+6RdB9ge6mmTNncnfGn3/+aZXqQleOduwPPviAu1A2btxoss/mzZu5ewniGV08lgQzrgdi0VjUostn06ZNvD4rFi9ebHLd6ArDgrqqVasW9ejRg06ePMnbcH5krnj11VctHgvHWblyJZc5K9BdJqgHujMF9RC7q4nY3TY4ZUwrPGIQrYMGDTIZzIQ4lEWLFtFjjz3GsTOIaR0zZgy1adOG6tata7sCJcUSfRSS46/9N3Qsl/zvOpF3QJa7IaZG87DCO2mxLEYD2cxBHRvHlVoLiFV4RTVvpTFY/9RTT/F58YrPDz30ULpjYBAecvN+8cUXHNO0YMECeuSRR7IVBL9t2zZ65plnDJ8Rj4SHG9RVp06daOvWrdzGEK/00ksvsThG/JslGjduzNeze/duLk9mIJ4L8WaCWuCBCjF7glqI3dVE7G4bnNLTiq5sBEZDsBgD4YNtnTt3purVq9PYsWOpT58+VvMQOisIjUA4KQLMjUFAN8QTFgxYswSEKgaqYVBAhw4drFIeiFCcE2EZeKjAjQ3vrwY8q+i6HzhwIH/G66+//sqDE8xByAM8xtgf1wjRat4uLIGge1wTAtyNhTvOA+8vPK04No6FtGrt27fngHiEEqAev/76a5PjQTBjQEV25puW/Lxqog0QFNRC7K4mYnfb4JSeVohS/Zie9N2u6FbOd7z89V7PXDRqCLc8nTcP7Nmzh73WTz/9dLobDCL2nXfe4dGLEJgQblmFYmSX6dOnsycTcbYQrYgFxch
cjV9++YW7VurVq8ef69evz3GkS5Ysoeeeey7d8SAs4XlHHCtGocLTbi4qzdG68c2nVG3VqhWHAWhghO+PP/7Isb3w2CM84NFHH+V0auYefIRQZJVeDeTJ5oLTgvYhqIfYXU3E7rbBKUWrw4Gu9Wx005vj5emHXFFka5BqA15ELT2IBjyUGd1cb7zxBqcSg2BFV7tx+AAGL1nyKMJ7ie7zjEIQNBDPijJhgdiEyDxx4oRh2juEAhw/ftwk9APiGt3zlkQrRDcGbL3//vvc3Z+d/LcIH8E1aalLMmL48OH02Wef8fkhXDHzGryqmMwCD0jGohWpZMwnv7BEdvYRXA+bxtULDovYXU3E7rbBKcMDXIX86iaGQHv44YfZ+whPZHZA6ABEJQSmebwruschKs29swcOHOAcfYhDzS5NmzalRo0a0eTJk/kzsghgsBgGU2EQnbbgM0b0nzp1Kt0x4KXFoCmIyOyEBmihJDVr1mSxnBEQz9qxtXyzWm5WvGrrtHhqeKURUpAVWn5AQS2UzauoOGJ3NRG72wYRrYowa9YsHiiEAUPoZseoeHheMaMYhGBGA4wsAc8mhOyzzz5L+/fv55hZeEFnzJjBccQ5ZfTo0ZzkH8nMIRQhZNH1ji54bcFnpN7SBm6Zg1hWjPxHLHN26dKlCw/GsgRSaX344Yf01Vdf8WckmEbGBVwjxPP69esNqbIABm3Bcy0jRgVBEATBNohotSP5OY0rxBS6txFLiqltES8KAQtR9vrrr/Pgo5ykrIJIg7cRXkjEnCIu9fPPP+fu9JyC0f7w0MLbChGNwXOWwHrEl2reTmMQ4pDT6VERavD333/zgCxzELsKAW48UAvCGGmyunXrxuETENHGcbjDhg3L1nkhgAX1kLAQNRG7q4nY3Ta4YYYBGx3bacHodYwEh5gxn2gAXcCYeg0iy3wQT06B5zM/hauQHsSoNmzYkIV8bkGoBDIrYNAW2k1GaG0HohVhF4JaIObZeMChoAZidzWxtd2jMtEprky+KCYM0Fm+fDl75zCAByOs8RSC+D900VrKv6kCIlrtzyeffJLnlGiY3Qse4MwEqzEY/CWiVT0wr7iIF/UQu6uJ2N0JwwOuX7/O06xiFB3iA5FmCF3JHTt2pNDQUJ4FCQOEMCAGcZaCkN8gR+yoUaPydAyEXODhSxAEQRAE22FTNx88qZhRCIN1IEwtASG7YsUKHuASFhbG8ZWqYD4LlKAGeGAT1ENLMSeohdhdTcTuTihakU4oq8ExGECDGZKwYLpRlcCAIkk0r2a3UVBQkL2LIeQzCCPBBBiCWojd1UTs7oThATkdzZ3T/e2JNcavyRg4tdDsLdP7qYk2C5ugFmJ3NRG7u0jKq+joaEO6IIzaRjwh8ms6C1ri/OxM15kV5kn7BddGazN5zTohOCfSq6ImYnc1Ebu7SMqr/v37c0gAUg2he/zbb7/lUfRr164lRyGrVBJw+yMjAqYdxZSeuRWfqHoRrq4P7AzBigkLkOMWmTMka4R6SLYQNRG7q4mt7R4lKa9sw/Tp03nGI02c7d27l/NZajMwYUrQ5s2bkzOhpSuCCMkLEO05mfJUcG4gWNF20P7R7gW1wFS/Ynf1ELuridjdSUUrDNesWTOephPZBJDiqmvXrtSrVy8WbT/99JPTpQuCAEcaL3haLc3OlF20SQoE1wcPJzmZKlcQBEEQhHwWrV9//TXt2rWLhg4dSu3bt6cpU6bwVJ3//vsvpaSkcJjAyJEjyRmBCMmLEIHolfhG9ShatKi9iyDYAbG7mojd1UTsbhvyJdAG3f8IC5g6dSq1aNGCZyH6/fffSXXE86YmYnc1EburidhdTcTuTp49AAHJb7/9Nk+ZiYkE+vbtSzdv3iSVuXXrlr2LINgBsbuaiN3VROyuJmJ3JxWthw8f5vRWSKbesmVLSk1NpfXr13Nc60MPPUSzZ8+2dREEQRAEQRAEJ8fmohWxrK1bt+bwAMSvvvjii7x+yJAhtHv
3btq+fTuHDKg6772gHmJ3NRG7q4nYXU3E7k4qWpHe5+WXX6bq1avzRAIYMa+BfJUYlDVx4kRSkbymzBKcE7G7mojd1UTsriZidycdiNWuXTt64YUXeFKBDRs2cIiAOZ07dyYVscasWoLzIXZXE7G7mojd1UTs7qSe1h9//JGna/3jjz+oYsWKEsNqhLe3t72LINgBsbuaiN3VROyuJmJ3F5nG1RnIr+nRMCjN3T3fEjgIDoLYXU3E7moidlcTW9s9StFpXG16J125ciVH+1+7do1U4uzZs/YugmAHxO5qInZXE7G7mojdnVC0ItXV8OHDOXNARuApYe7cuVS7du1sTTjw/vvv8zSqxgsGeWnEx8fTiBEjqEiRIhQYGEh9+vSRfGmCIAiCIAhOjk0HYp04cYImT55MDz/8ME9X2qhRIwoJCeH3ERERvP348eMc8zpt2jR67LHHsnXcWrVq0bp160wmLtAYM2YMrVq1ipYuXcquc0wR+/jjj3NqLUejcOHC9i6CYAfE7moidlcTsbuaiN2dULTC2/n555+zcIWQ3LZtG12+fJni4uJ4Xt6nn36aunTpwl7WnACRWrJkSYte23nz5tGiRYuoQ4cOvG7+/PlUo0YN2rVrF08n60j4+PjYuwiCHRC7q4nYXU3E7moidnfSlFfAz8+Pp23FYq1YEc1ji4kJpkyZQmXLlqX9+/dTUlISderUybAvQgewbefOnQ4nWm/cuKFUALWgR+yuJmJ3NRG7q4nY3YlFqzVp1qwZLViwgKpVq8aNAhMTYMatY8eO0c2bNznNRMGCBU2+U6JECd6WEQkJCbwYj8oTBEEQBEEQHAenE62PPvqo4X3dunVZxJYrV45+/fVX9ujmBnhqLc3KBY8uBnNVrlyZwsLCWNjiHAhN0Gb2Kl68OCFr2J07d/gzctFev36dB4TBEwyP8IULF3gbQiKQAkObKQPHwXGRhBhiGx7hc+fOGeJhsE4T27jGu3fv0oMHDzg8AufBbGOgUKFCfC6IeIDjIGY4OjqaPDw8uPzYF+VEnC+uScvUEBoayvshtAKD2qpWrcrXjXQdQUFB/ACAMgJcC8p6//59/owHh/Pnz1NycjIfE2XWMkaUKlWK6+vevXv8uUqVKhwakpiYSP7+/lxvly5dMjxUpKSkUHh4OH+uVKkSXb161VDfOJZWh5hFDRjXN64bISfojsH1oExafeP6tYF4mFYPda/VN+pUG+GJsuP7xnWIsmv1jTKdPn2at6FOcA2wMyhTpgzXCeoR9sW1Gtc36hHXA0qXLs3XhGNp9Q2b4/qxH2xpXIdoR7AlwL6oB62+EX6DOtXaEupWq2/YHMexVN94D/tq9Z1Zm0V9o5xam61QoQK3Sa2+ce1am0V5UFfG9Q07xcTEkJeXF3/W6juzNptVfcPGeLDEotW31mbh2cCi1XdmbTar+sZxUWfovQkICOC6MG6zOAbuSa2+s/qNwHFwTTn5jTBvs/Ib4Xy/EbC7VifZ/Y3AMY3rW34jnO83AmVC/eXkNyInOuJC2mfVcIk8rchSgJAADPjq2LEjNyhjbyt+eEaPHs2DtLLracWNZuv8Z/ijgB8oQS3E7moidlcTsbua2NruUZKn1TnBEymeivAEhOwEeFpbv369YTuexPCUhNjXjMDToPb0pS35VXZBPcTuaiJ2VxOxu5qI3W2D04UHvP7669S9e3f2nsJ9PmHCBO7eeeqpp/ip47nnnqPXXnuNu3IgPkeNGsWC1dEGYZmn6hLUQeyuJmJ3NRG7q4nY3TbYvFa3bNmSrf3atGmTrf0QhwKBijgRxI+0atWK01lpcUzTp0/neA9MKoAuf6TUmjVrFjkiiMkR1EPsriZidzURu6uJ2N1JY1ohIBGkDTI6FbYjyNxRyK9YEYQuIOhbUAuxu5qI3dVE7K4mtrZ7lKIxrTb3tGLkHUbgDR48mJ555hke+SYIgiAIgiAIDjUQC+kqpk6dysn969SpwzGnO3bs4CcDPCV
oi4qY55MV1EDsriZidzURu6uJ2N1JRSty3fXr14/Wrl1Lp06d4tyqI0eO5JRSb7/9NudCUxXklRPUQ+yuJmJ3NRG7q4nY3QVSXiE58HvvvUfr1q3jJMgff/yx0rNPaYmQBbUQu6uJ2F1NxO5qInZ3ctGKkfyLFi3iSQBq167Nsa2rVq3i1FSCIAiCIAiCYNeBWHv27KH58+fT4sWLeYq2IUOG8JSrIlb1038K6iF2VxOxu5qI3dVE7O6kohVJ/REW8Morr/CMVWDbtm3p9uvRowepBuY6lrgX9RC7q4nYXU3E7moidrcN+TJlA6ZR/eCDDzLc7mh5WvOL6OhoexdBsANidzURu6uJ2F1NxO5OKlpTU1NtfQqnBRMvCOohdlcTsbuaiN3VROxuG+xeqxC1f/31F6lIlSpV7F0EwQ6I3dVE7K4mYnc1Ebu7mGg9d+4c/e9//6PQ0FDq3bs3qciZM2fsXQTBDojd1UTsriZidzURu7uAaI2Li6Mff/yR2rRpw3PyYmYs5G29evUqqYhOp7N3EQQ7IHZXE7G7mojd1UTs7sQDsfbu3Uvfffcdp72qVKkSPf300yxYZ82aRTVr1iRVUXX6WtURu6uJ2F1NxO5qInZ3UtGKaVsx69WAAQNYqNaqVYvXv/XWW6Q6QUFB9i6CYAfE7moidlcTsbuaiN2dNDzg9OnTHA7Qvn17pb2qllA1LEJ1xO5qInZXE7G7mojdnVS0XrhwgeNXX3rpJR509frrr9PBgwc5N6sgCIIgCIIgOIRoLV26NL399tucLeCnn36imzdvUsuWLSk5OZkWLFig9Ag71I2gHmJ3NRG7q4nYXU3E7i6QPaBDhw60cOFCunHjBn399de0YcMGql69Ose9qsiDBw/sXQTBDojd1UTsriZidzURu7tQnlaMqnv55Zdp3759dODAAWrRogWpSGRkpL2LINgBsbuaiN3VROyuJmJ3F5wRKyEhgb2tf/zxB6mIxPWqidhdTcTuaiJ2VxOxu5OKVgjT8ePHU+PGjemhhx6iFStW8Pr58+dThQoVaPr06TRmzBhSkapVq9q7CIIdELuridhdTcTuaiJ2d1LRihmvZs+eTeXLl6dLly7RE088QS+88AKL1c8//5zXvfnmm6QiGJwmqIfYXU3E7moidlcTsbuTTi6wdOlSnrq1R48edOzYMR50hcwBhw8fVt59npKSYu8iCHZA7K4mYnc1EburidjdST2tSLDbqFEjfl+7dm3y8fHhcADVBSuQGTPUROyuJmJ3NRG7q4nY3UlFK542vL29DZ89PT0pMDAw18ebMmUKNWnShBtE8eLFqVevXjzrljHt2rVjUWy8vPjii+RoFCpUyN5FEOyA2F1NxO5qInZXE7G7k4YH6HQ6Gjx4MHtYQXx8PAvIgIAAk/2WLVuWreNt3ryZRowYwcIVYQb/+9//qHPnznTixAmTYw4bNowmTZpk+Ozv70+OxpUrV3i2MEEtxO5qInZXE7G7mojdnVS0Dho0yOTzwIED83S8NWvWmHzGrFrwuO7fv5/atGljIlJLliyZp3MJgiAIgiAIiohWpLbKjwS+hQsXNln/888/8+xbEK7du3end9991+G8raVKlbJ3EQQ7IHZXE7G7mojd1UTs7qSi1ZakpqbS6NGjqWXLljzIS2PAgAFUrlw5CgkJoSNHjnBKLcS9ZhSCgFyyWDSioqLypfwIlShQoEC+nEtwHMTuaiJ2VxOxu5qI3W2DU4tWxLYijda2bdtM1iMPrEadOnX4iadjx450/vx5qlSpksXBXRMnTky3/uzZszxorHLlyhQWFsbC1s/Pj723Fy9e5H0QmoC43Tt37vDnihUr0vXr17nB+vr6snC+cOECbytatCi5u7vT7du3+TNicnHM2NhYHqxWtmxZQ243eI6x7ubNm/wZIvzu3bs8nzEGs+E8Z86cMQR841w3btzgzzhOREQERUdHk4eHB5cf+6KcmEIX13Tt2jXeNzQ0lPeDxxo
D1pAQGdeNBwIMditYsCBfO8C1oKz379/nz4jXQZ3iOnBMlBlxPAB1jmu7d+8ef65SpQpdvnyZEhMT2eONekOOXlCiRAkesBceHs6fYSNkndDqG8fS6rBYsWL8alzfuO64uDiOm8b1oExafeP6b926xZ+RKxh1r9U36hTXqtU3vm9chyi7Vt8okzbgD3WCa4CdQZkyZbhOUI+wL67VuL5Rj7geULp0aa5P2Eerb9gc14/9YEvjOkQ7wr4A+6IetPouUqQI1ylAm0TdavUNm+M4luob72Ffrb4za7Oob5RTa7OYEARtUqtvXLvWZlEe1JVxfcNOMTEx5OXlxZ+1+s6szWZV37AxHiyxaPWttVn8kcCi1XdmbTar+sZxUWdJSUkcL4+6MG6zOAbuSa2+s/qNQBlx7Jz8Rpi3WfmNcL7fCJQZ156T3wgc07i+5TfC+X4jsB/OnZPfiJzoiAtpn1XDTYeackJGjhzJ079u2bKFb5LMwA2BGxjxsF26dMmWpxU3Gn40bPmkhBtOArXVQ+yuJmJ3NRG7q4mt7R4VFcUPPLbWKY6G03laobFHjRpFy5cvp02bNmUpWMGhQ4cyjTHB06CW3SA/kWne1ETsriZidzURu6uJ2N1J87TaIiQAA6wWLVrErnt0Q2hdEQBu/Q8++ICzCcBNv3LlSnr22Wc5swBm43IkVHXvq47YXU3E7moidlcTsbttcDpP6+zZsw0TCJhnKUA+WMQhrVu3jmbMmMFhAejm79OnD73zzjvkaCBmRlAPsbuaiN3VROyuJmJ32+B0ojWrEFyIVExA4AzkZWYwwXkRu6uJ2F1NxO5qIna3DU4XHuBKYCSloB5idzURu6uJ2F1NxO62QUSrHdFSkQhqIXZXE7G7mojd1UTsbhtEtAqCIAiCIAgOj4hWO4LkwoJ6iN3VROyuJmJ3NRG72wYRrXYEM5EI6iF2VxOxu5qI3dVE7G4bRLTaEW06PUEtxO5qInZXE7G7mojdbYOIVkEQBEEQBMHhcdNllfhUQfJrTt+UlBTy8PCw2fEFx0TsriZidzURu6uJre0elU86xdEQT6sduXLlir2LINgBsbuaiN3VROyuJmJ32yCi1Y5IoLaaiN3VROyuJmJ3NRG72wYRrXbE39/f3kUQ7IDYXU3E7moidlcTsbttENFqR4oXL27vIgh2QOyuJmJ3NRG7q4nY3TaIaLUjly5dsncRBDsgdlcTsbuaiN3VROxuG0S0CoIgCIIgCA6PiFY7It0HaiJ2VxOxu5qI3dVE7G4bRLTakdTUVHsXQbADYnc1EburidhdTcTutkFEqx0JDw+3dxEEOyB2VxOxu5qI3dVE7G4bRLQKgiAIgiAIDo9M42rH6dGSkpLIy8vLZscXHBOxu5qI3dVE7K4mtrZ7lEzjKuQ3169ft3cRBDsgdlcTsbuaiN3VROxuG0S02pH4+Hh7F0GwA2J3NRG7q4nYXU3E7rZBRKsd8fX1tXcRBDsgdlcTsbuaiN3VROxuG0S02pGQkBB7F0GwA2J3NRG7q4nYXU3E7rZBRKsduXDhgr2LINgBsbuaiN3VROyuJmJ32yCiVRAEQRAEQXB4XFa0zpw5k8qXL89xJc2aNaM9e/aQo1GsWDF7F0GwA2J3NRG7q4nYXU3E7rbBJUXrkiVL6LXXXqMJEybQgQMHqF69etSlSxe6ffs2ORJubm72LoJgB8TuaiJ2VxOxu5qI3W2DJ7kgn3/+OQ0bNoyGDBnCn+fMmUOrVq2i77//nt566y17F48I8zkkxdKd65epUIC3vUsj5DNidzURu6uJ2F1huxcsCPVq76K4FC4nWhMTE2n//v00fvx4wzp3d3fq1KkT7dy50+J3EhISeDGeacKmJMUSfRRCVW17FsFBEburidhdTcTuCtv9f9eJvAPsXRSXwuVEa3h4OKWkpFCJEiVM1uPzqVOnLH5nypQpNHHixHTrz549S4GBgVS5cmUKCwtjYevn50clS5akixcv8j7Fixc
nzIR7584d/lyxYkWeCQOJhRFPi7QX2ijCokWLsoDGE5j8kAmCIAiC6xJ+9y7djbrK762tIy4omp3ATYeaciFg6NKlS9OOHTuoRYsWhvXjxo2jzZs30+7du7PlaS1Tpozt5vRNCw9AA8Z5BLUQu6uJ2F1NxO4K271iNZuFB0RFRVFwcLDtdIqD4nKeVjyFeHh40K1bt0zW4zOebCzh4+PDS76BRuwdQLHJ+ldBLcTuaiJ2VxOxu8J2l3hWq+Ny2QO8vb2pUaNGtH79esO61NRU/mzseXUE8lUoCw6D2F1NxO5qInZXE7G7bXA5TytAuqtBgwZR48aNqWnTpjRjxgyKiYkxZBNwFKTLSE3E7moidlcTsbuaiN1tg8t5WkG/fv3o008/pffee4/q169Phw4dojVr1qQbnGVvzp07Z+8iCHZA7K4mYnc1EburidjdNrikpxWMHDmSl9ygjU2zdeqrBw8e2D69luBwiN3VROyuJmJ3NbG13aPSju1iY+nVFa15ITo6ml/FvS8IgiAIgiPrleDgYFIFl0t5ZQ0wcAups4KCgmw2FZuWVgtpMVRKV6E6Ync1EburidhdTfLD7jqdjgUrcrgib6sqiKfVAmgAoaGh+XIuNGj5MVMPsbuaiN3VROyuJra2e7BCHlYNdeS5IAiCIAiC4LSIaBUEQRAEQRAcHhGtdkw8PGHCBElArBhidzURu6uJ2F1NxO62QwZiCYIgCIIgCA6PeFoFQRAEQRAEh0dEqyAIgiAIguDwiGgVBEEQBEEQHB4RrYIgCIIgCILDI6JVEARBEARBcHhEtAqCIAiCIAgOj4hWQRAEQRAEweER0SoIgiAIgiA4PCJaBUEQBEEQBIdHRKsgCIIgCILg8IhoFQRBEARBEBweEa2CIAiCIAiCwyOiVRAEQRAEQXB4RLQKgiAIjJubG73//vv2LoYgCIJFRLQKgmATjh49Sn379qVy5cqRr68vlS5dmh5++GH66quv7F00pfn7779FmAqC4JS46XQ6nb0LIQiCa7Fjxw5q3749lS1blgYNGkQlS5aksLAw2rVrF50/f57OnTtn7yIqy8iRI2nmzJlk6ac/Pj6ePD09eREEQXA05JdJEASrM3nyZAoODqa9e/dSwYIFTbbdvn07X8sSGxtL/v7+pBK5vWZ4xAVBEBwVCQ8QBMHqwJtaq1atdIIVFC9ePN26hQsXUtOmTVloFSpUiNq0aUP//POPyT6zZs3iY/r4+FBISAiNGDGC7t+/b7JPu3btqHbt2rR//34+Bo73v//9j7clJCTQhAkTqHLlynyMMmXK0Lhx43i9Mf/++y+1atWKyx4YGEjVqlUzHCMzkpOT6YMPPqBKlSrx8cuXL8/fMz5+t27dqGLFiha/36JFC2rcuHG6emnUqBH5+flR4cKFqX///uyxzu41mzN48GD2smrxq9qSUUwr3mPdmTNnaODAgfwgUqxYMXr33XfZU4uy9OzZkwoUKMDe9M8++yzdObNb74IgCFkholUQBKuDOFaIqGPHjmW578SJE+mZZ54hLy8vmjRpEn+GsNmwYYOJeIJIhViFMOrTpw9988031LlzZ0pKSjI53t27d+nRRx+l+vXr04wZMzhMITU1lXr06EGffvopde/eneNqe/XqRdOnT6d+/foZvnv8+HEWlhBUKAvOhe9t3749y+t4/vnn6b333qOGDRvycdu2bUtTpkxhoamBc128eJE90MZcvnyZQyeM94W3+tlnn6UqVarQ559/TqNHj6b169ezMDUX65au2RLDhw/nuGLw008/GZasQLlRhx9//DE1a9aMPvzwQz4PjoVY5alTp7Ioff3112nLli2G72W33gVBELIFYloFQRCsyT///KPz8PDgpUWLFrpx48bp1q5dq0tMTDTZ7+zZszp3d3dd7969dSkpKSbbUlNT+fX27ds6b29vXefOnU32+frrrxGUqfv+++8N69q2bcvr5syZY3Ksn376ic+zdetWk/XYD/t
v376dP0+fPp0/37lzJ0fXe+jQIf7e888/b7L+9ddf5/UbNmzgz5GRkTofHx/d2LFjTfabNm2azs3NTXf58mX+fOnSJa67yZMnm+x39OhRnaenp8n6jK45I0aMGMH7WwLrJ0yYYPiM91j3wgsvGNYlJyfrQkNDubwff/yxYX1ERITOz89PN2jQoBzXuyAIQnYQT6sgCFYHHridO3eyl+3w4cM0bdo06tKlC3vlVq5cadhvxYoV7I2Dh9Ld3fTnSOu2XrduHSUmJrKn0XifYcOGcbf0qlWrTL6HLughQ4aYrFu6dCnVqFGDqlevTuHh4YalQ4cOvH3jxo38qoUz/PHHH1yunIzIB6+99prJ+rFjx/KrVkaUFx7RX3/91WQg1JIlS6h58+Y8cA0sW7aMz//kk0+alBdd8PC8auXN7JqtCbzIGh4eHhzGgPI/99xzhvWoO4RSXLhwIcf1LgiCkB1EtAqCYBOaNGnC4isiIoL27NlD48ePp+joaE6DdeLECUPsK4RozZo1MzwOus4BBJEx3t7eHB+qbdeAMMY2Y86ePctd/4jHNF6qVq1qMjgMXdYtW7ZkkVaiRAnurofAzErAogy4DnSRGwORCTFnXEacA7GgEPVaHSCUwri7HOWFKIRANS/zyZMn0w1ms3TN1kQT0xqIbcWgraJFi6ZbD3vntN4FQRCyg2QPEATBpkBMQcBigViBRxAeOAzOsQUYtGQORGedOnU4NtQSiKHVvouYTHgA4R1ds2YNe0HhGcTAMHgZM8N4UFNGILYTg6Ughh966CF+heB94oknTMqLY61evdriOTFALKtrtiaWypBRXRh7kLNb74IgCNlBRKsgCPmGNjr+xo0b/IqR9hA28LxiEFFGg7rA6dOnTUbeI2QAg5o6deqU5XlxHoQpdOzYMUthCQGJ/bBAbH300Uf09ttvs5DN6FwoI64DnkV0h2vcunWLB01p1wACAgJ4sBeEO44PUdy6dWseZGZcXoi/ChUqGLyS1iI7wtpa5KTeBUEQskLCAwRBsDoQeJaS12uxn1pXP0aSQyRipL55F7z2fQhFeGu//PJLk2POmzePIiMjqWvXrlmWB7Gh165do7lz56bbFhcXRzExMfz+3r176bZrYjqzFE2PPfYYv2JEvTGah9G8jAgFuH79On333Xcs6sxH0j/++OPsyUQmBfN6xGdkC8gtEM3APAOBLchuvQuCIGQH8bQKgmB1Ro0axQnue/fuzYNw4BXFLFnwKiJ/qTZoCDGg8GIivym8jRBrGFSElFDwPCJlFGIgEQ8LAffII4/w4C54XZG3FSEHyB+aFUiphW74F198kQU14lZTUlLo1KlTvH7t2rXsBYZ4RngARCa8o4i5xHlCQ0M5d2tG1KtXj2f++vbbb1kMIt0V4nh/+OEHFubmKaggcoOCgjhFFMQpUniZeyiRVgrXfenSJT4G9odnefny5fTCCy/wd3MD8r6CV155hQfH4fzGqbasSXbrXRAEIVtkK8eAIAhCDli9erVu6NChuurVq+sCAwM5ZVXlypV1o0aN0t26dSvd/khb1aBBA04HVahQIU7j9O+//5rsgxRXOJ6Xl5euRIkSupdeeonTLBmD79WqVctimZBua+rUqbxdO0+jRo10EydO5FRUYP369bqePXvqQkJCuMx4feqpp3RnzpzJ8pqTkpL4WBUqVOAylilTRjd+/HhdfHy8xf2ffvppTvvUqVOnDI/5+++/61q1aqULCAjgBdePlFWnT5/O1jVbAimrYIdixYpx2irjPwMZpbwyTwGGtFYojzmWypKdehcEQcgObvgve/JWEARBEARBEOyDxLQKgiAIgiAIDo+IVkEQBEEQBMHhEdEqCIIgCIIgODwiWgVBEARBEASHR0SrIAiCIAiC4PCIaBUEQRAEQRAcHplcwAKYmQez1SCZt0w9KAiCIAiCI6HT6Sg6OponYcGsgqogotUCEKxlypSxdzEEQRAEQRAyJCwsjGfsUwURrRa
Ah1VrDAUKFLDZeW7dukUlSpSw2fEFx0TsriZidzURu6uJre0eFRXFzjVNr6iCXUUr5vj+5JNPaP/+/XTjxg2eUxtzbGfGpk2b6LXXXqPjx4+zwd555x0aPHiwyT4zZ87k4968eZPnBP/qq6+oadOm2S6XFhIAwWpL0YprtuXxBcdE7K4mYnc1EburSX7Z3U2xEEa7BkLExMSwqITIzA4XL16krl27Uvv27enQoUM0evRoev7552nt2rWGfZYsWcKidsKECXTgwAE+fpcuXej27dvkaKjW2AQ9Ync1EburidhdTcTutsFNh2heBzFwVp7WN998k1atWkXHjh0zrOvfvz/dv3+f1qxZw5+bNWtGTZo0oa+//towqAoe2VGjRtFbb72Vbbd7cHAwRUZGyhOyIAiCIAgORZSiOsWpYlp37txJnTp1MlkHLyo8riAxMZFDDcaPH2/YjlF1+A6+62icPXuWqlSpYu9iqA2e2VKSiFKT9K+61P/Wky6Pr5aPc/nKZSpXtlz6slh8MnfL/X4Wy5WajbJb+C7Xi4V9DddIOfhsVD7jOsr0uyZftLw+J/tmea7cXFfmn/UxbiWJ3NzTbOj233u8ggy3GX02vCfL24ztbGw38zZg2JZqYZul9mL0Xl/YtBetTG7ZWGe0LavvZlSX2a73LGxi0V+TURvI5XaT2Eaj6zTcw27Zq6vs7G/+25PV/Zqj/c2vy6yc6T5nZx9L38mEHNkiB9syPH4G+2ZzP7Z72+eI/AtbPp/g+qIVMarmgc34jCeOuLg4ioiIoJSUFIv7nDp1KsPjJiQk8KKB42UHnCspKYlyS3JyMsXHx+f6+y5LUgJRzG398iBtiblDlBRHlJr835KSQqRLNluXTKRLMXpvtp9hHT6nwIpZl0enI6/4u+SREmeVy7MgVwUFkKE4aiJ2V9ju9TuLaFVZtNqKKVOm0MSJEy16QgMDA6ly5cqcSQDC1s/Pj0XwmTNnOPTA09PTIGABPuM9oi4Q8uDh4cHiVPP6Yp22L97jHNnZ1/i4wMvLyyCYLe2LsmHJal+cE8fM677m5Tff11D+1FRyp1Ryd9NRanIie2/wmQUkFl0KuWneTsafyL08UVB5sispiRR8eS2VPPdLmmPALe3Z2o3c3N3/e/h2cyN3dw9KSdWlOUNQh+6UmraDu0fatjQ88dmovaDOtfp2d4f3BJo51fDdVEPb0R+b61en433ZH5J2bHzWn9PNqEw4jju5e+i9c9iuI7c026Ty93FMD09PSmI7wqae/H39d93Iy8ubklNSiC/PXf8ZPRz6fT24LpKTkknnRuTt7U3JySlcRhzX28ebEhKwL/E53N3cKSlZ3368vX0oOSWVrx37+vr6UFx8gmFfD3d3/Xnc3Pi42E9fT258T+KhVX8tHlyOBC6T/rg4v9Yu/f0D9PvqdHxcXLv+gVV/XNQJt2k3N/L386e4+Hj9vh6evB2fteOi9pISk0jn5kYB/tg3gb+Psvr4+lJsbKz+uD4+/J3YmBgun7+fHyXEx7EtYW8fHy+Kw766VPL28kJQEyVx+XXk6+1NSUmJvC9s7uPlRQkJcdwuPDzcyN3NjZLT9vXy8qSU5CS2DTyvOC/Xd5pNca7EpBS+Nh8fX7Zjcoq+3fn5+VMsX5sbeXh58fUmJOpt4+Prx+0jKa2+AwOD6EHMA253uB4vT0+uUy6vjw/bJTlJX/8B/n4UG4v6TiUPD3feN8FQh16Umop9cR4d+fn6UUJCvP43Iq1twTawK/ZFc9a3F315sQ31jevCtcbHxXMbhp2wT2Lab5Off9q+qfr7BNfD9Y327O3D9aq1F7ZNYhJfA/ZF24qJjTP83uFcmoMD22B/1CPqMCAggB48eMD16+WJOvSg+IR4bntBgYFsR7zHnYr2gjEd+jr0JE8P9zQHho58fLwpJTk5bV99meLiYtPq0IN/MxIT4g1l0qEO09q3r58/xaPOuH148HZ81tqhvn0n69u3fwCfE/WCfbE
9DnWI9sHtmwx1yPtyHept4+uD9h2TZkdvLqdWL/5+vvr6TmvfuJf194K+vKjvRK0OfX0pMSmRr5f39fGh2Diz+k6zja+vL18nflNQ3/5GtvH0wm/Ef/c9jsPtG/WSdn/yvtp9j3pJa4dam01K1v+2BgYEUExsLF8/6hrl0O573Ddos9rfvYCAQC4v2qy+Dn3ZVhqJscl05/Rpfm+uI0qWLMnjdEDx4sX5d+bOnTv8uWLFipx6E2XEdSMX64ULF3hb0aJFuV60z6rhVKIVRobL3Rh8RjwHGgEaDRZL++C7GYFwAgzeMk8lga57LVakfPnyJqMC8UODhubv75/rgGvcKCivS4BfSRNPJrrck4263jXPprEgxR+XjHAncveEYiFy99K/al2hGXaluWfcpWapa84tk+42DTc3/jHBj+7tIpXJrcPbVKpUqf++bnQGYzwy+ZwYG8vtJqN9M7spMztuVmXwzGQUpvk270yOBVlljE8m+5ofxzcH+/pl8tkji33/q930x87JvubbAnKwb6DZ52Qju/tlcVyvTOo3szp0z2JfP7OyZ1Z+T7P3PplcW2Am+/rnoL4zK695vfhmcVzjfXNS3+b7Bmayr3k7tFQv+O3w8PdPt29AJp/N6zCzeyGr8ufkXshL+85tfftmsa9xO/Qwq5e87JtZm81sX8rmvrB7YX9/MvazGusIUK1aNZPPhQv/t3e5cuUy3beKoqGFTiVaW7RoQX///bfJun///ZfXa098jRo1ovXr1xsGdOHJEJ9HjhyZ4XF9fHx4ya7QxMAvCNYiRYrk6Xq0pyinJimeKOIiUTKenDMZ0+eu/UWFOPQg8vBKW7zTRGnae7zis7tH9mOd8gE8FAFkoYDt8/Kwgadt8x8gwfURu6uJ2F1NxO4uKFrRlXLu3DnDZ7jKkcoKTxtly5ZlD+i1a9foxx9/5O0vvvgiZwUYN24cDR06lDZs2EC//vorZxTQgMd00KBB1LhxY87NOmPGDO6GGTJkiFXKrHULGHvKlCbuHlGyUVwue0chOtMEqCVxCkHqhGg2RxtwGQ+5IAiCIDgJdhWt+/bt45yrGloXPUTnggULuBv+ypUrhu0VKlRggTpmzBj64osveOqy7777jjMIaPTr14/jQt577z0euFW/fn1Oh2XtmSmskYMNsTJOT1Ja/E5QCFFgsf+66F0Qa+XdQ3ySoB5idzURu6uJ2N3F87Q6S/4zdOnDIwwBndeufXjsnFq4ouncOqaPXS1alcjbPLrHtbCW7WVax/QkpaTSrah4uhGZttyP49fb0fEUWsif2lYtRo3LFyIfT+f1cIvd1UTsrib5MY1rsORpFfITxMfml2jFVLc//PADDR8+nObMmWOybcSIETRr1iyDhzvbpCTqBSviVL3MQ++zz9y5czns4/z58zzADaLwySefNMm360ogJlqlP2LICnArOsEgRG9Epr3ej6cbEKr34+jOA/2I54z4dssF8vPyoIcqFaG21YqxiC1XxLkeklSzu6BH7K4mYnfbIKJVIZARYfHixTR9+nTDwCJ4DxctWsQxxDkGeVOBp2+uwwK+//57nhziyy+/pLZt23I6kCNHjpjMemZtkDpJnxYn/4Fg23s1hu553iU/bw/y9XJnMeZrWNzJ20OfasxZBOltCFJjIWosTCPj6E40UhNlfSwvDzcqGexLpYL9qFTaa9FAbzp1M5o2n7nDx1l/6jYvoHwRvQcWIrZ5xSLk7y0/Z4IgCK6M/MrbEVtnDkCXa0RsInuw4pJSqFbdenTp4kWa//Ni6vPkU7zPsqWLKSS0DJUtV473QRetlnXh6+mf0U8L5tGd27eoYuUqNOaNt6h7r8d5+/atW6hPty605uevadyUWXTm3AXO4gBRjFnJEJ+MQXTdunXjuOOMBq6tXLmSvarPPfecYV2tWrUsitvPPvuMB+5hoF6fPn0MU/Ui7hnT9CJLBPLXPfLII/TVV18ZnnLff/99WrFiBWeQmDx5Ml2+fJm
vD0/Cr7/+Ov3xxx8sljF4D4K+Xr16VrVDbGIyrT1+k37ff422nw9P8yjeyHB/pGc1F7IscD09+BVd5PrPaevT9tN/Ry+CvT3dOQdnUmoqJSWnUnKqjhJTUnkdhGZi2ivaSFKqzmwfrNfxNj6GyT7/bUtITqW7D3IoSAv4UamCvvw+JNjP5LVIgLc+N60FEMV08oZevG4+c5v2XYqgS3dj6dLOy/TDzsss9JtWKGwQsVWKBzqc8JeRxGoidlcTsbttENFqR2yd8upWZDzdi9UnW45PTKGEpFTq1ncA/fjDD9Tqkd68/ocFC6hrn6do385tvI8mWud++SmtWr6Uxk/+jMpVqET7d++gES8MJXe/YGrcoiWLYfD+Z9/Q+x98QIm+RWncy0OoV5++FODny95bZIfo3bs3C8g333zTYhmRP3fz5s0sJM3z0mnMnj2bRfDHH39Mjz76KMfwbN++nbdBfPbs2ZMngcBxkEwa4Q4YkLdp0ybDMSB2f//9d1q2bJlh5P8TTzzBHufVq1dzbNA333xDHTt25IkjjPPl5QYk7N5z6R79vv8q/X30BsUk/jfzVrmC3uSFRPWwSXIKv+KBQRN/eMX+xt9xZCBISxSAZ9TYSwph6kchaQK1aIBPhoI0O0CA1gwpwMtL7SrRg4Rk2nEunEXsptN36Nr9ONp2LpyXyX+f5POzgK1ajB6qXJSC/ewfO47wl0qVKtm7GEI+I3ZXE7G7bRDRagXgBYLoyCkQianu+plMcgM8apl5kzTRU8DXi7y9MBuNOw0c+DR9NXUSxd67ydsO7dtN387/kQ7v3cH7FA7Qz0Izb+Z0+mXZX9SoaTPer27NqnTi4B5aueQH6typPRXw1TedD8e9TDVbtaO7CR7Uq99A+vLjSbRq20EKKl2Zyvp70eN9+tDGjRszFK0TJkygxx9/nJMuV61alb21jz32GPXt25e9pnyODz+ksWPH0quvvmr4XpMmTfgV3tWjR4/yACmEPwCkSIO3du/evYb9EBKA9cWKFePP27Ztoz179nDeVS1H76effsoe2d9++41eeOGFXNnk8t0Y+v3ANVp24Cpdjfhv2teyhf2pT8NQerxhaYq9kz5/H9oQPJhoRwlJehEbn5Sa9pqSfn1iCsUnp3Abik9O+2y0HR5TT8wq5OHGs+3AE4nP+vf6Vy9esI87eeLV3d10/7RtvA7bPN3JC8f01B8L3txiQT55FqS5IdDHkzrXKskL6u5CeAxtPg0v7B3adeEuhyYs3hvGi4e7GzUsWzBNxBanWiEF8r28QJu1SFALsbuaiN1tg4hWKwChUPO9tfl+3hOTumQYx4dpN+HFA6UL+VGAtycleXlQvSrlqGvXrrR2+RL+Y9+ta1eqW7ksC2Dsg5Hax49f5GkOn+7zf/bOAz6K4ovjL510IKGG3nuvIqIIYhcrVrD3ihUbov7F3rH3ChYEK4pIUQGRjvReAqGn9+T+n9+7zLF3uSSXy21u7+Z9YT+53dvbnZ23t/ebN2/enOV0TAi/3r178z4NYkIdYrZBUl1qZAuhDi2b8ZSJzVu24i5xLKExdWnP3n8oM6+I4upg6k5nsYDZpRYtWsQxrAsWLKCFCxfygDCEFCBV2aFDh3g6O3hA3bF+/XoWq0qwgi5dulDdunX5PSVa4cVVghWsWrWKPcGuE0RgOkq0kKtDVn4Re1O/WbaH/t1x1LE9PiqczujRhM7v24z6taznaGCk5sW59SRGhtuFIFnAKxhIoO7aNojj5erjW7N4/2f7kTIRe4C2Hsxhu2B5/rdNHIZwQpkXdmj7ZEqK82xikZqC3gBBP8TueiJ2NwcRrUFKXpF9ulS7p8x5kBQmZlAzhE2ZMqXcZ3n+bCLOiZuSkuL0nmPmMJ4BC0kD4nkQFhxXsezRjaBOTRIoPbeIQwggKDAH9I7DOeydS4yJpHoxEeW8xN26dePl5ptv5kkkhg4dyt39iDP1BZgX3PUaIZiNIQQKCN6qKCm10d9bDtG
3y/dwvCq8mwD1MKRdMl3Qtxmd0qUxx5y6UtPQA6FyEN+rQgOIutDuI7m0YPNBFrGw2eGcQvpuRSovuAW7pyTScW2TqW/LeuyRNUvEBoPd0SDYfiiH6xi9MuhxsVrssNUIBrsL1Ufsbg4iWn0ABBi8ntUlP7+A6tSJqtF5KwLdxSDGjWjCQCV4TfFjY5yYweiphDjFACeM6K9w+lbgJtUVRDK6jbHghw3ds+hyxgAfDNzBgsFEEK91YyLt3kWX8wPMZBYfH8+hAwgDME5EoejcuTNPl4dFeVvXrVvHg6zUcdzRp08fnnwCKbZc54OuDAxAQvqlL5buo7Sy+F/QrmEcd/+f2zuFYzgrA/UqQfq1R/P6MXTZwJa8FBaX0vJdR+0DujYepHX7Mmn1ngxeFC2TYqhPC7uA7d2iHnVqHM/3b00JNLsjLhthFyt3p9MqLHvSaf2+TA5jUaAhiu8wvNf1YiMoKTaK/9aPieTvfr1Y+1+11IuJZMFrBdDThMGHaIDyXwxOLC09tu74ax+kiEGJzu/ZByq67sfrhu256Qfp0pN6SnYLzQi073ugIN8iHwDx580DKbS0mOqY9CBTotWdsMVAJHSdq9euQChiVD1mHsNAp+OPP94x+AlJjNF9TyV2TyuFV56fVXl6OzeO58EzR3OKKDO/iEMX0jJL6NZbbmZv7ikjTqaObVvRgf1pHMOKrnzEt6rR//C+NmzYkAdiZWVlcVmQMWDEiBHUvXt3uuyyy3jKXsQRwVsLsV2Zlxafw/FHjx5Nzz77LMfTIgwB3mUMHjN+FmI7I6+IDmXk0P7MApr27wFKyyqhujERdHbPpixWezRLFI9TAIAGEtJjYbn/1E50IDOfFmw+RP9uP0LLdh2lLQeyaefhXF7giVUNP9jX7omtx0IWAizYwEQOK3fZxSmE6urdGZRVUD4uD95VCDLEzEOUHcou4MVTYiPDWMzahW6kW4ELcYteC2SogHcXf/HMwGBS4+t8123467RdfdbwGcfx7L0jtcGLfx+kUV0bc6MWuYZ90QgSBB0R0epHzJxYIK/I/mPjrnsaVDWDxhNPPMHCcfLkybRt2zbuMod38sEHH7TPhFUWHkCRnk0qAEEXXyeCF3gkMvKKOXxg0NATaca0z+jzD9+j9PQjVL9+Eg0aNIh+//13R7wpRDIyLSAdFcR0cnIyD9RSx0XKKgjYE044wSnlVVXl+fnnn+mhhx6iq666iqf+RSYDHAOpskptNsrOt5cxM7+YvTK24hJMo0CD2yTRiO7N6KRODb2aoQlhCYI1aJhQh0M5sICM3CJasfsoLd+VTit2HWURB+G2eNsRXhStk2Opd4u6ZR7ZetSxcTz3KASK3XMKimlNagZ7UJUndW/GsZ4DBVKoIXyiZ7O61KtFXf7brF40f38g/vD9OJJTyI3RwzkFdDTHvo6sJce2FZWtF7LItWfGyHMaqGglYEYMPOQeotAQCuOBiCFl6/YBio73sO54r+xvmPP2DfsyaPfRfEdICnqg0NiFgMWgQGnsBidW+r4HEzKNaxBO4wrPILo9QZcmCb5v1RfnEx2ApzaEqEkPrycWAIXFJWXxr3bvqwLeWXgy0fVYWRiEr8krhFAt4jKh+0/BA9XCbXQkbQ+1a9umRraHQDYOChOsCzyK8L4ipGD5TojZozywy533sJdBxELQ4t61gt3xPNi0P9vuQS3zpG7an1Uuvy60U4eG8dSreV3qyUsidWzkm9AIgJ8aNAAhXhFX7CxwDdvK1gEahVERoRQVHsphBfjL2/A3wvhavefyOtyeu9h4HKfPh4U5idKwkBCfZ5ZAhpLdeRE0Y0Uq/bB6Lz9bFMgnfG6fFDqnVwql1PV+VkHBepj9fc/UdBpXEa1+FK1m5WnNzi/iWDR0hXZqbMLNnHuEKH0nUUQMUYOOPk0bBsGYkWv3yCjwY2WPhQvlH1r2euIv/tnseU3Va/Ue5KZjP5utbB/nz9qPZTyOPQ5NAa8KhDPODY+
1r2y/ceNGiXUKYNJzC2nF7nRawSLW7qlE6IsrbRrEOkRsn5Z1qfToXurSuZOpZcP9jZy1jjjU3RnsUXWXkq9pYp0ycVqXhWq3lEROJSb4FuP3HTHViKeGgJ29fj+vKwa2rs8p8U7t1sQSeYWFmmH2cz5TU9EqT6ggJLfsB8o0D6WavjXS/SxXNYkLxoLE8Fn5xSwO4JlBNySmA60NUA7E7EGoxsvIaMEN8KCe1LEhLwANnc0Hsmj5znRatvMohxWg0bjtoH1BKjRFaMhWvqfgzAtBT0UIOV7jVkNKOL7j1GvDNrwOMbw+tt2+H5acghL2XrqC9Gs9mifavajo6m9el0MjhNoFjoSRXRrxgjj5Wf/t45ABhJ4gTRuWR2aupZGdG9Ho3imcAcN1oKrgjHJKOA2iK3EeMIe/GECo1tGJVlLmpLCV/cU6trPzgl/bj4v3lEMDf+2v7fthH94XTo9S533S9h+lm1MKKLmWUurpgnha/ehpRdWbIYqQ4B4PRIxibxhvwg/Toc1EhdlEdVsQxTjnOfU1ahAUFjwQHD/QLADUj7X9x5t//B3vqx984w+9EgvGH3rnz0aUxaO5w1e2x+A2NXGCEJyge5tjY3emc0gBvJ61NcMZuroxcxjEqfKitkmO9cuECoJn33d4x2euTKXvlqfS5gP2lIMAGVbO7NGUBSyyWdRGIxrOAqQ1w2BE/EW6QkyZnHo0l4VgucaS4Vms8nA7GlXuGmKG/cnpOW5/zQLSJUuDswh1zt5g7JWzGj/edjz3YJhBpnhahdoGaacceU99iCPdlRmeVrRxinLtrxEeYDKIp0PezNpKAF8bYMpaCF8heMFI+OGdGvEC8CO7cv0WTstmD1Oxe2qcQlacwlZc3uPpfY+FsShXQ6nL+5jJDOnXrJJWSvDs+4541ptPbEc3DWtLa/dmcvjAzFV76WBWAX26eCcvSMU2ulcKD+Bqleycd7q6YMDh9sMQpjl2YXooh7Zz1owcp5jbQMfuhLAPoHMaLBcKoVwWx8yvieOZ8RrCGW0MFd+Mdftru6hW+9j/Om/D/mHYFhpCudnZEuZhAiJa/YgZTm54JjGFJ6hTQeaAmp2ggMhWah98FS7di942VgS9wI9aXHipdMlrSHW+7xBS8MxheeC0TrRw62EOH5j1Xxp7Pl+Zs5kXDPSDeIUXtqL0a+id2qE8pYdy+a/dg5rDYwcqo1FCFLVKirUvybHUOjmGZ0LEADZjA0o1osilIXasYWUYO+AYW2BvZRnHIhgbcBB/GIjrLDLL1p0yNxzb7k6c+rtnATGtyBEt+BYRrX7EjC5iNeACI2TxBfY5ysuK/KwS7+kVMTHyINMRsbueeGt39DJhumEsT44uptnr9tP0Fan01+aDtIJTsqXT4z+soxM7NuD0e4ezC8s8pvaufXexzUYaxkfZBWlSLLVMjuG/WIdHVyZCqDnyfTcHuTP9CGZj8jW5alIBM7ysRtHqw0FYuoFJEgT9ELvriS/sHhsVznGtWDAJxA+rMIBrD/2Xmkm/rz/AS4XClMVoDAvSY95TEaZmI993c5C71s/dRr5OeVXZTFg+obD24lmDlR07dkjKKw0Ru+uJr+2OwbXXHN+al837s2jGylRObYasK/aufLu3FOIUYlfwD/J9Nwe5o4MMFR6AqSd9Ds+EVZZ6SkSrIAiCX2nfKJ7uHWVu7l9BsBKSdyeIwgOKSkp5AaaMHsZMWI5BWMEzmr+2wTSxgn6I3fVE7K4nYndzENEaRKjQgDrhYRXOg75o0SIKCwujM844o/onMKa68mAQFnKaXnrppdS0aVMOg2jWrBmdc845tGHDBtKZkpLaydcpWAuxu56I3fVE7G4OIlr9SHFx+akffREaUNkgrPfff59uu+02WrBgAe3du7d6J6hGftaioiIaOXIkJz6ePn06p/+YNm0ade/endLT06t33uoUscj6OQYPHTrk7yIIfkDsrididz0RuwexaJ0yZQq1atWKvXEDBw6
kJUuWVLjviSeeWDYTh/Ni9BxeeeWV5d4/9dRTKdhxDMKqQLRmZ2ezcLzpppu4vj766KNy+/zwww/Uv39/tkVycjKde+65jvcKstLp/v+9Qs27DuBJEdq1a8ci2B1r166lrVu30htvvEGDBg2ili1b0pAhQ+jJJ5/kdcWePXvokksuofr161NsbCz169eP/vnnH8f7b775JrVt25YiIyM5qP3TTz91Og9si33OPvts/vz//vc/3j5z5kzq06cPX0ebNm1o0qRJPm8kCIIgCIKgkWiFiBo/fjxNnDiRli9fTj179qRRo0bRgQPuU3jAa7dv3z7H8t9//3F394UXXui0H0Sqcb8vv/ySrIYvZ8NCUmZHuqsK4lm/+uor6tSpE4u/yy+/nD744AOnCQ5++uknFqmnn346rVixgubMmUMDBgwoO0Epjb35Hvpyxq/06ksv0vr16+ntt9+muLg4t+dq0KAB56H95ptvKuwmgYgeNmwYpaam0vfff0+rVq2i++67j6c9BN999x3dcccddPfdd7Odb7jhBrrqqqto7ty5Tsd57LHHuNxr1qyhq6++mv78808aO3Ysf3bdunVcTgh0JWj9DUS4oB9idz0Ru+uJ2N0kbH5mwIABtltuucWxXlJSYmvatKlt8uTJHn3+pZdessXHx9uys7Md28aNG2c755xzvC5TRkYGT86Bv67k5eXZ1q1bx38dlJbabAXZ1V7yMw979TnHgvOWUVBUYlu1+6ht9e50W0nJse1GjjvuONvLL7/Mr4uKimzJycm2uXPnOt4fPHiw7bLLLnP72Y3/reQ6mT31bafzVsbrr79ui4mJYfucdNJJtscff9y2detWx/tvv/02v3f48OEKy3vdddc5bbvwwgttp59+umMdZbrzzjud9jn55JNtTz31lNO2Tz/91NakSRNbTXBrey/Yvn17jT4vBCZidz0Ru+uJ2XbPqESnBDPh/s5TumzZMpowYYJjG7xzI0aM4AFDnoDu6Ysvvpi7ho3MmzePk/vWq1ePhg8fzt3SSUlJbo9RUFDAiyIzM7P6sZ5PNa2+p5VqyIN7iSJjnWfCigh1O30dYkoRdgHvpcpcMGbMGK4/hFyAlStX0nXXXef2VCuX/cse7WEnHO/xTFi33HILezxhi8WLF9PXX39NTz31FHtVEe+K8/Xu3ZtDA9wBb+7111/vtA0hBq+88orTNoQUGIHH9u+//3byrMLbm5+fT7m5uX6fqcR4rwn6IHbXE7G7nojdzSHc34HKEBOuqSGw7skIc4gwdBu7xlUiNOC8886j1q1bc1zlgw8+SKeddppj5LwrkydP5phHVzZv3szd34jd3L17N9+EERER3KWubkhOW1Vc7JeKhAij0jC+ppwC+wCkqFDi7nXEb6pudsR1oosc2zCSX4HrQIjCSy+9xNcZHR197LhlDQgcG4ObVJhsSVgdKil7H8c17ou6UPNso55wfvyFQD3zzDPpkUce4b9PPPEEnXzyyY59VKyp+osyqePg3NhHratQA3VegHhXrCO+Fa8RdvDwww/T+eef7/QZnA/lNO6r7IjrxHtqIBfec61D7Iv3cd8mJiY6BrI1b96cB5dlZWXxMdq3b0+bNm3i+sV+8fHxHLsLUlJS+FrQiEAZOnToQFu2bOEyYj80snbt2sX7NmnShMt69OhRXse+27Zt43LBXmiE7dy5k99r3LgxH/fIkSO8jnsWx8E2iHQ04JDsGuA1rksNFEDML64F58J14h7BeVSYB8qpwnXwnUpLS6O8vDy2E64d5QcoD+6B/fv38zri1A8ePEg5OTlc91jHdwrgOnEuhO6AFi1a8HWiDnEMdK2hjkDdunX5GlR9IwsFGpZYVH3juLimhIQEXlR941rQUFGD/xAag2cC6rCq+sZxUWewORrFqAtVh3hG4RiHDx921Ld6RuB7BHsge4aqb9wLGJSIa6qsvhFHjmtS9Y06w2tcA+5J1JOqbzT2sA32AIgbR3lw/6MOcR7
ch1XVN+59lN94z+L+QtiOqm/sh/Kre1bVN+oQ9sG1V1XfOCbKbKxv1Je6Z1HfuJ/d3bOob3xH1D2L+wM2VvWNYxnvWYB7T93fuG51z+J6UCZV37h+4z1rrG/UqbpnUXZ83liHKLuq74ruWdSbqhNPnxE4prG+5RkReM8I2Bb1V51nhPGereoZsa1sXTdC4G7118lhFHxBFy5cSIMHD3ZsR1zj/PnznQbkuAMxjhCiq1evrnQ/GBc3+O+//85iyRNPK75oeGjg5jaCmwg3Gr6YjtmsUIVqZH01wBcIN5/XGFJPbTuYTdkFxZRSN5qS4px9uPjy4UuMej3llFOc3hs9ejTdc889dOONN9JJJ53E9vjss8/KnWrHsjnUpv9I+u2Hb2nEGccGZ1WX22+/ne29dOlS+vjjj3kd9enO2wqvateuXemdd95xbLvooov4AfPjjz/yOh6W8B7jOoyfQ+xuRYPEvMWt7b0ADzg8oAW9ELvridhdT8y2e2ZmJjd43OmUYMavnlbXVq4C62iFVAZaZlOnTqXHH3+8yvOg1YJzobXnTrSiNVijQVEQjmXd9NWhEK2oyJpP44p2R2XpriDw0DK85ppr+CY3Am8kxB1EKwbDoX4g8BFyAbH7888/0/333UutmiTRuAvPpKtvup1efTWEB8yhBY/WNYSkK+j6x/GuuOIK6tKlC3st0BDB4K/777+f90HWAIQLQHDC241WLAaAoVWJRsy9997Lx0YIAUJGkNkAA/HQ+KiMRx99lD26aJlfcMEF3DBAyAC88ggT8TdoRMn0fvohdtcTsbueiN2DMHsAhEzfvn15lLrR+4h1o+fVHYiPhHcUo+CrAt0AcNFDFAUjmAWrpNTGXkd3M2FBlEL0uQpWJVrh9YS3GrGtqFfEnPbq1YtjgTn9WBG64m305tOP0AXnX0A333wzezIR/4rGgzvg2UVXD8IukMYM6acQi4r1hx56yGH/3377jbtGkLEAOVyffvppRwgHxCw+8/zzz7PHFSEOH374oSMGtyKQfQJCHcdG+i6k2EIIBLr6BEEQBEEITPwaHqBSXo0bN44FCdIrvfzyy5yaCTGtiGPCQB50WcMTZ2To0KG8Hd5WI4gFgjCCGIO3FrEp6BZHHAxSInniUa3M7e6rLmIAT6YvpnJNzy2kXUdyOdUV5qL2OTmHiDJ2E0XGEyW3I13xle0RU1bR4DMheBG764nYXU/MtnumhAf4B4xgR/AxunQRvA0P36xZsxyDsxAA7Rr3ieDrv/76iz1prsBLB68h4iURWI2uZsRxYvCPL/OiWglPZsKqESpeN9K/o+4FQRAEQdAXv4tWcOutt/LiDqRLcgVxIhU5iDEq79dff6VAwFeeVjUTVoxZorXQ8+lbhapBI008L/ohdtcTsbueiN2DdEYswYeDsCqYCatGIOVTcVl6qQh7SixBEARBEITaRkSrH8FApJpSWHxsEFaUGaK1OI8HYVFoOFFYzcsr2LNZCPohdtcTsbueiN3NQUSrH1GJ7GuC0csa6uFMVV7Fs8LLasbxNUQlyxb0QuyuJ2J3PRG7m4OIVi/xRdIFXxwjt7CWBmFJPKtP7AUwU4ygH2J3PRG764nY3RxEtFYTNcMFZmWqKejSrymmxrOCwrIvnohWh81rOstJsGaxECpH7K4nYnc9EbsHcfaAQAIptTDPsZpnGfMdeys+4blD7k9vwedzcvP4b1hpBOXnl5LPB2Hll4nzklAkKiUdQf1CsMLmsL2a/MBbMPGCoB9idz0Ru+uJ2N0cRLR6gZpiVglXf81NjJmw9mcWUGgIUURutO9DTosLiLIPEoWEEeWkku5AsFY1vbAnYMILmd5PP8TueiJ21xOxuzmIaPUCeFYxJSymH63JYCo1u5K3/LY2jZ6Zu5u6pSTSKxd3IZ+zahrR388RtTyeqNfLpDNoXNTUwyoIgiAIgveIaK0BEDE1ETIQvTWZDnR5ag6lZpXQqKTEGk8p65a9i4mydxMltyA
y4/iakpyc7O8iCH5A7K4nYnc9EbtbcCBWYWEhT6mKmZ2E6lNTz93qPen8t0ezRDKFvSvsf5v2Nuf4miIeWz0Ru+uJ2F1PxO4WEq0YlHLNNdfwIKSuXbvSrl27ePttt91GTz/9tK/LGLTs37/f688Wl5TS2r2Z/Lq7GaK1IJvo0Eb76ya9fH98jamJ3YXAReyuJ2J3PRG7W0i0TpgwgVatWkXz5s1z6pYeMWIETZs2zZflEypg84FsKigupbiocGqdFOv7E6StIbKVEsU3IUpo4vvjC4IgCIIgmB3TOmPGDBangwYNckr3BK8rRswJntGqVSuvP7tmTwb/7ZaSQKFIH+Br9q20/5XQAEvZXQhcxO56InbXE7G7hTytBw8e5EFEruTk5PgkYb4u1CRl1upUFc9al0xB4llNo6ap0oTAROyuJ2J3PRG7W0i09uvXj3766SfHuhKq7733Hg0ePNh3pQtyajKrlvK0dk8xeRCWxLP6HF/MpiYEHmJ3PRG764nY3ULhAU899RSddtpptG7dOs4c8Morr/DrhQsX0vz5831fyiAlMjLSq88VFpfS+n1Z5mUOyM8kOrTZ/rqpiFar2F0IbMTueiJ21xOxu4U8rccffzwPxIJg7d69O/32228cLrBo0SLq27ev70sZpLRs2dKrz23an0WFJaWUUCecWtSP8Xm5KG01Ji8lSmhGFFc+DETwj92FwEbsrididz0Ru1tEtGIGqKuvvppDAt59911asmQJe1k/++wzFrCC52zeXObNrCary0IDEM9qSgyxI55VvKxWsrsQ2Ijd9UTsridid4uIVkxn+e2335pTGsEj1jgGYZkVzyqZAwRBEARBCILwgNGjR3PaK6Fm1K9f36vPrdqtPK0yE5ZOdhcCG7G7nojd9UTsbqGBWO3bt6fHH3+c/v77b45hjY11Tm5/++23+6p8QU1UVFS1P5NfVMIxraC7Gemu8tKJjpTl2hXRahm7C4GP2F1PxO56Ina3kGh9//33qW7durRs2TJejCDGUkSrZ+zbt48SEhKq9Zn1+zKpuNRGSbGR1DSxjgmFWmX/W7cFUYy0FK1idyHwEbvridhdT8TuFgoP2L59e4XLtm3bqn28KVOm8OwRmBJ24MCBPLirIj766CMWxsbFOJUssNls9Oijj1KTJk0oOjqap5cNlqDoNall+VmbJZo8CEu8rIIgCIIgBLhodRWIWLwF08GOHz+eJk6cSMuXL6eePXvSqFGjKp1NAq0XtGLUsnPnTqf3n332WXr11Vfprbfeon/++YfDF3DM/Px8shItWrTwPnOA2ZMKiGi1lN2FwEfsrididz0Ru1tMtH7yySec4gqeTCw9evSgTz/9tNrHefHFF+m6666jq666irp06cJCMyYmhj744IMKPwMPY+PGjR1Lo0aNHO9BQL/88sv08MMP0znnnMPlQln37t1rucFjR44c8X4mLLOmb90nmQOsaHch8BG764nYXU/E7hYSrRCaN910E51++un01Vdf8XLqqafSjTfeSC+99JLHxyksLOSYWHTfOwoUGsrrmKigIrKzszlxb/PmzVmYrl271vEeQhTS0tKcjpmYmMhhB5Ud0x/gOqpDbmExbT5g4kxYuUeIju6wv27S0/fHF7yyuxAciN31ROyuJ2J3Cw3Eeu211+jNN9+ksWPHOradffbZ1LVrV3rsscforrvu8ug4hw4dopKSEidPKcD6hg0b3H6mY8eO7IWFBzUjI4Oef/55Ou6441i4NmvWjAWrOobrMdV7rhQUFPCiyMzMpNogPLx61b9ubyaV2ogaxkdRo4Q65nlZ67Umiq7n++MLXtldCA7E7noidtcTsbs5eFWriCOFUHQF2/CemQwePJgX4zk7d+5Mb7/9Nj3xxBNeHXPy5Mk0adKkctsxeCsuLo7atWtHu3fvZmGLUAiEJMCjCzB9LUISDh48yOtt2rThUATEz2KAWNOmTR2D05KTk9mTrOJ1MfgMx83NzeV5ihE
Ds2XLFkeON2xTQhue5T/X7uLXbetF8Dk3bdrE6/Xq1eNzqbrHcY4ePUpZWVkUFhbG5ce++Ay8zrim1NRU3hdCH/uhAVB/3WxqQERZCe1o78aNFB8fz1kiUEaAa0FZ09PTHQ2IrVu38nS+OCbKvGuXvYwYBIf6Ul0kSJOG2GN41xH+gXrbsWOHo0GBxgsaMXx9bdvSnj17HPWNY6k6bNAAJSSn+sZ15+XlcYoRXA/KpOob179//35HfaPuVX2jTtUAPZQdnzfWIcqO1jIePijTxo0b+T3UCa4Bdgbw+KNOUI+wL67VWN+oR1wPSElJ4RhrHAthLh06dGCb4/qxH2xprEPcR7AlwL6oB1XfSUlJjnhu3JOoW1XfsDmO466+8bq0tNRR35Xds6hvlFPds61bt+Z7UtU3rl3dsygP6spY37BTTk4OT0qCdVXfld2zVdU3bIyGJRZV3zgurgnx7lhUfVd2z1ZV3zgu6gyzAMJmqAvjPYtjHD582FHfVT0jsD+uqbrPCOM9W9UzAuVR9yzO4+tnhLpnVX3LM8KzZ4SqE0+fETimsb7lGRGYzwjUX3WeEdXREdu8GPQeDITYvBhF1a1bN7r00kvpwQcfdNr+5JNP8sCqNWvWeHQc9WX55ptveMICxbhx4/gGmjlzpkfHufDCC/km/vLLL9mQuJlXrFhBvXodm4Z02LBhvP7KK6945GnFFw0PDTNTVuALhy+Ip9w1bSV9tyKV7hrRge4Y0d73BZp2OdH6H4hGPkE0RNKWWcXuQnAgdtcTsbuemG33zMxMbvCYrVOCwtMKr+SYMWNowYIFNGTIEN6GiQbmzJnD8a2egpYsJifA55RoRUsI67feeqtHx0DrEyIZ8bWqlYcWDI6hRCuMiywCiMN1B1qDgZAIePUemb5VEARBEAQ98Uq0nn/++SwCMehKjchHFz3yq/buXT3Bg3RX8Kz269ePBgwYwCP/0VWAbAIAcbPoLkEXPsBMXIMGDWJXO7yxzz33HHd/XHvttfw+uinuvPNO9vrCdQ8R+8gjj7B73ejNtQLo0vCUrPwi2nYox5Gj1efkHCLKsHfxySAs69hdCB7E7noidtcTsbs5eB0pDA/pZ599VuMCwGOLOA5MBoA4GHhHZ82a5RhIhVgSxG8oEEuCFFnYF7EmKMfChQs5XZbivvvuY+F7/fXXs7A9/vjj+ZiukxD4G4RGeMravZmEQI6UutGUHBdlnpc1qT1RHX26GqxudyF4ELvridhdT8TuFopp/fnnnzl4HQn7jfz666/cvX/aaaf5soy1Tm3FilQn5uWdBVvpqZ830KldG9NbV/T1fWHmP0c090mi7hcSnf+e748vOJAYNz0Ru+uJ2F1PJKbVQnlaH3jgAY4ldQX6F+8JvkfNhGVKaACQmbAEQRAEQQg20YrUEcbueEWnTp0cqS2EqkGGAk9Zk1o2fauIVq3sLgQPYnc9EbvridjdQqIVLml3OcIgWJGrTPAMlReuKjJyi2jn4Vx+3T3FBNGatZ8oC/ntQoga9/D98QWv7C4EF2J3PRG764nY3UKiFVOnYoS+StCsBOvdd9/NM2MJnoEkydXxsraoH0N1YyLNmwmrQUeiqDjfH1/wyu5CcCF21xOxu56I3S0kWp999ln2qCIcACmlsOA1ZrvAtKqCZxizIlTG6lR7i03iWfWyuxBciN31ROyuJ2J3C6W8QngA0kzNnj2bVq1axVOS9ezZk4YOHer7EgYxyCPrCWvKBmH1MCM0wChamxybQUzwv92F4ELsrididz0Ru5tDtZoCixYtoh9//NGRxP+UU07hOXPhXcWEA8iLapwOVagcNS+4XzMHIOOZeFotaXchuBC764nYXU/E7hYQrZiNau3atY51TJ+KRP8jR47kVFc//PCDY+YqoWo8SZF7OLuAUtPz+HU3UwZh7SPK3k8UEkrUuLvvjy+Uw4vUyEIQIHbXE7G7nojdLSBaV65cSSeffLJjferUqTz16rvvvsvTsb7
66qv01VdfmVHOoARhFp4OwmqTHEsJdSJ8XwjlZW3QmShSZvCwit2F4EPsrididz0Ru1tAtGIKVTW9Kpg/f77T7Ff9+/en3bvL5q8XqiQ+Pt7jeFbzBmGVZQ6Q0ABL2V0IPsTueiJ21xOxuwVEKwTr9u3b+XVhYSEtX76cBg0a5JTiISLCBG9gkLJnz54q91ntmFSgrjmFcMSzyiAsK9ldCD7E7noidtcTsbsFROvpp5/Osat//vknTZgwgWJiYpwyBqxevZratm1rRjm1ZfWedPNmwpJBWIIgCIIgBGPKqyeeeILOO+88GjZsGMXFxdHHH39MkZHHkt1/8MEHnFFA8IyUlJRK39+fmU/7MwsoNISoS5ME3xcgYw9R7iGi0HCiRl19f3zBK7sLwYnYXU/E7noidreAaE1OTqYFCxZQRkYGi9awsDCn97/++mveLnhGdnZ2pfWl4lnbNYyj2CivUupWjvKyNuxMFBHt++MLXtldCE7E7noidtcTsbs5hHo7Ks5VsIL69es7eV6FyoH49ySetXuK2fGsEhpgJbsLwYnYXU/E7noidjcHmWfMj2CChspYY2Y8K9gnmQOsaHchOBG764nYXU/E7uYgotWPdOjQodLExCpHq8yEpY/dheBF7K4nYnc9Ebubg4hWP7Jly5YK39uXkU+HsgspLDTEnEFY6TuJ8o4ShUYQNezi++MLXtldCF7E7noidtcTsbs5iGj1IyUlJRW+t7psEFaHRvFUJ6J8/HCNUV5WZA0Ij/L98QWv7C4EL2J3PRG764nY3RxEtFp0xow1qWXxrClmzYQloQH+QmZK0ROxu56I3fVE7G4OIlr9SL169ar0tJo3fauIVivaXQhexO56InbXE7G7OYho9SO7du2qchCWeTNhrbK/FtFqGbsLwY3YXU/E7noidg9i0TplyhRq1aoV1alThwYOHEhLliypcN93332Xp45FKwbLiBEjyu1/5ZVXcroJ43LqqadSoLDnaB6l5xZRZFgodWxsQhfDkW1EBRlEYVH2iQUEQRAEQRAsjt9F67Rp02j8+PE0ceJEWr58OfXs2ZNGjRpFBw4ccLv/vHnz6JJLLqG5c+fSokWLqHnz5jx1bGpqqtN+EKn79u1zLF9++SVZjSZNmlQaGtCpSTxFhZs4CKtxN6KwCN8fX/DK7kJwI3bXE7G7nojdg1S0vvjii3TdddfRVVddRV26dKG33nqLYmJi6IMPPnC7/+eff04333wz9erVizp16kTvvfcelZaW0pw5c5z2i4qKosaNGzsWK8aX5Ofnu92+umxSge4yCCsoqcjuQnAjdtcTsbueiN2DULQWFhbSsmXLuIvfUaDQUF6HF9UTcnNzqaioiKeQdfXINmzYkDp27Eg33XQTHT58mKzG0aNHK/W0mjYT1l6ZCcuKdheCG7G7nojd9UTsbg7h5EcOHTrEucwaNWrktB3rGzZs8OgY999/PzVt2tRJ+CI04LzzzqPWrVvT1q1b6cEHH6TTTjuNhXBYWPnu9oKCAl4UmZmZ5C9KS230n5oJK6WuGScg2ieDsARBEARBCCz8KlprytNPP01Tp05lryoGcSkuvvhix+vu3btTjx49qG3btrzfySefXO44kydPpkmTJpXbvnnzZoqLi6N27drR7t27WdhGR0dzuMH27dt5H3hzMdr/4MGDvN6mTRvau3cvdw2gTBDU27Zt4/eSk5PZk6zidVu2bMnHhbc4MjKSWrRoQfOWraOsgmKKCg+hhnVKaOPGjY594S3Ozs6m8PBwPs+mTZv4PYQ+4FyI3QU4Dlp5WVlZLNJRfuyLcta3HaYGhVlUGhZFm48QNYvL4f0yMjJ4wBqmnsN1I+QCeebq1q3LZQS4FpQ1Pd0evgAvNhoFxcXFXE/wdqsRk4jnQX0dOXKE19u3b087d+5k7zrCP1BvO3bscDRS0HhBIwbAVnv27HHUN46l6rBBgwb811jfuO68vDwOCWnWrBmXSdU3rn///v28jsF
+qHtV36hTXCtA2fF5Yx2i7Kq+USZlC9QJrgF2BoirRp2gHmFfXKuq78TERK5HXA9ISUnhbTiWqm/MnILrx36wpbEOcR+pFjv2RT2o+k5KSuI6BbgnUbeqvmFzHMddfeM17Kvqu7J7FvWNcqp7Fg3BtLQ0R33j2tXMLygP6spY37BTTk4ORURE8Lqq78ru2arqGzZGwxKLqm91zyYkJPCi6ruye7aq+sZxUWfoyYmNjeW6MN6zOIbqwfHkGYEF11SdZ4TrPYt6UvWNexbbYA9fPiNwf+L+UuMEUN/yjPD+GYE6VXXi6TMCxzTWtzwjAvMZgfqrzjOiOjpiW9m6boTYUFN+Qn1ZvvnmGxo9erRj+7hx4/gGmjlzZoWfff755+nJJ5+k33//nfr161fluXAzYf8bbrjBI08rvmh4aODmNgt8MfDFMzJzZSrdMXUl9W5Rl767eYjvT7r6K6Lp1xE1H0h0zW++P77gld2F4Efsrididz0x2+6ZmZnc4DFbp1gNv8a0oiXbt29fp0FUalDV4MGDK/zcs88+S0888QTNmjXLI8GKFhVaOxWN5kNrULW+1FIboCVXYTyr2YOwmvQy5/iCV3YXgh+xu56I3fVE7B6k2QOQ7gq5Vz/++GNav349D5pCVwGyCYCxY8fShAkTHPs/88wz9Mgjj3B2AXQloBsCC7pTAP7ee++9tHjxYnbTQwCfc8457JpHKi0rge4bV9Y4ZsIyIZ4VSOYAS9pdCH7E7noidtcTsXuQxrSOGTOG4zgeffRRFp9IZQUPqhqchVgSxG8o3nzzTQ4ruOCCC5yOgzyvjz32GMcnrV69mkUwQgwQC4I8rvDMwqNqJRDfY6QEg7D2mpg5oLREBmFZ0O6CHojd9UTsridi9yCMabUqtRUrgiByBH0rNu/PopEvLaDoiDD6b9IoCgsN8e0JD6wnemMQUUQs0YTdRKEmTFwgVNvugh6I3fVE7K4nZts9U2JaBX+j4lm7pST4XrAa87M26SmCVRAEQRCEgEJEqx9Bygsja8rys/aQeFat7C7ogdhdT8TueiJ2NwcRrX4Esbnupm81byYsJVolc4CV7C7ogdhdT8TueiJ2NwcRrX5EJXkGxSWltHavfSau7makuyopJkpbbX8tnlbL2F3QB7G7nojd9UTsbg4iWi3Cpv3ZVFBcSvFR4dQqKdb3Jzi4gag4nygynqi+JLoWBEEQBCGwENHqR5A7VrEm1R4a0C0lkUJNGYRlCA0wpBAT/Gt3QR/E7noidtcTsbs5iHrxI2o+Y6eZsMyKZ91XljlA4lktZXdBH8TueiJ21xOxuzmIaLVIoLbKHNDd9EFYEs/qbyRAX0/E7noidtcTsbs5iGj1IzExMfy3oLiE1u+zD8LqkWJCuqviQqK0/+yvm4in1Sp2F/RC7K4nYnc9Ebubg4hWP9KwYUP+uyktm4pKbJQYHUHN60f7/kQH1xOVFBBFJRLVb+P74wte2V3QC7G7nojd9UTsbg4iWv3Ijh07+O/qskFYiGcNCTF5EJYZxxe8srugF2J3PRG764nY3RxEtFqANWWDsEzJzwoknlUQBEEQhABHRKsFug9MzxywV2UOENFqBaTbSE/E7noidtcTsbs5iGj1I6WlpZRfVEKb9mfxeo9mZgzCKiDav9b+WkSrZewu6IfYXU/E7noidjcHEa1+5NChQ5w1oLjURslxkdQksY7vTwLBWlpEFF2PqG4L3x9f8Mrugn6I3fVE7K4nYndzENHqZxz5WVPMHoTVWwZhCYIgCIIQsIho9SNt2rRxxLN2NyM0AMggLEvaXdAPsbueiN31ROxuDiJa/cjevXtp9Z6ydFemZQ6QQVhWtLugH2J3PRG764nY3RxEtPqRo1m5tOVAtnnTtxbl2ScWACJaLUN+fr6/iyD4AbG7nojd9UTsbg4iWv3I7mwbldqIGiVEUaMEswZhFRPFNiBKSPH98QWvqFPHBFsLlkfsrididz0Ru5uDiFY/sq8gkv9
2TzEhnrUon2jTr/bXTWQmLCvRtGlTfxdB8ANidz0Ru+uJ2N0cwk06ruABizem+m5SgewDRLsWE+3+x74glhWprkBKn5ofX/AZ27Zto44dO/q7GEItI3bXE7G7nojdzUFEqx/ZdLjAu3hWJC0+uIFoN0TqErtYPbq9/H6xDYlaHU/U9yoflVgQBEEQBEHj8IApU6ZQq1atOAZk4MCBtGTJkkr3//rrr6lTp068f/fu3ennn392et9ms9Gjjz5KTZo0oejoaBoxYgRt3ryZrERWfhHtyShy5GitlMIcou0LiOY/R/TZ+UTPtCJ6czDRj3cRrfqyTLCGEDXsStTvaqJz3ya6fSXRPZuILvyQKKFJ7VyU4BENGjTwdxEEPyB21xOxu56I3YPU0zpt2jQaP348vfXWWyxYX375ZRo1ahRt3LjR7dy9CxcupEsuuYQmT55MZ555Jn3xxRc0evRoWr58OXXr1o33efbZZ+nVV1+ljz/+mFq3bk2PPPIIH3PdunWWCY7+LzWT/6bUjabkuCjnNzNS7V7UXWVd/WlriGwlzvtExBI160vUfBBRi4FEKf2Iok3K9Sr4FFMmkRAsj9hdT8TueiJ2N4cQG9ySfgRCtX///vT666875utt3rw53XbbbfTAAw+U23/MmDGUk5NDP/74o2PboEGDqFevXix8cTkIgL777rvpnnvu4fczMjKoUaNG9NFHH9HFF19cZZkyMzMpMTGRP5eQkEBm8M6CrfTUzxvo9C4N6I0RkfZufiVUM/eU/0BCM7s4bV62NOpGFOb3NofgBWiQSayTfojd9UTsridm2z2zFnSKFfGr6iksLKRly5bRhAkTHNtCQ0O5O3/RokVuP4Pt8MwagRd1xowZ/Hr79u2UlpbGx1DAsBDH+Kw70VpQUMCL8WYwFZuNmq18hT6LWEoDdm4neifX+f2QMKLG3Y55USFSE5uZWyZBEARBEAQL41fReujQISopKWEvqBGsb9iwwe1nIEjd7Y/t6n21raJ9XEGowaRJk8ptRxxsXFwctWvXjnbv3s3CFjGyjRs3ZnEMEMIA7+7BgwcdU7dhJgwkFkYoAry+GEUIkpOTWZQfOHCAOh75g9qG7SYqISqJiKWCBj2pTvthlBrajPKSulK9Rs0oMjLSXua0HGoZlU+HDx+m7OxsCg8P5/Ns2rSJj1uvXj0+1759+3i9RYsWdPToUcrKyqKwsDAuP/ZFOSHgcU2pqfbMBc2aNeP90FpDd0aHDh34uuHxjo+Pp7p16/K1A1xLbm4upafbZ/FCK3Lr1q1UXFzMx6xfvz7t2rWL30M8MerryJEjvN6+fXvauXMnN1RiYmK43nbs2OGwDe4D3A+gbdu2tGfPHkd941iqDlWckLG+cd15eXkUFRXF14MyqfrG9e/fv5/XETeNusc1oG5btmzpiHVG2fF5Yx2i7Kq+USa0nAHqBNegZjxBzwDqBPUI++JajfWNesT1gJSUFIqNjeVjqfresmULXz/2gy2NdYj7CLYE2Bf1oOo7KSmJ6xTgnkTdqvqGzXEcd/WN17Cvqu/K7lnUN8qJegMIt8E9qeob147yA5QHdWWsb9gJPSMRERG8ruq7snu2qvqGjdGwxKLqW92z8DhgUfVd2T1bVX3juKizoqIithnqwnjP4hj4Tqr6ruoZARvjmjx9Rri7Z1FPqr5xzzqeEUR8P8szwnrPCJRb1Ymnzwgc01jf8owIvGcE7hfUX3WeEdXREdvK1nXDr+EBMAq+oIhTHTx4sGP7fffdR/Pnz6d//vmn3GfwIEGsKuJaFW+88QaLTnwRcKwhQ4bwsXGDKS666CL+YiGG1hNPK75oZrndUeX//fQmbd+zj0467QKKb96NKDTM5+cRrAkegHgAC3ohdtcTsbuemG33TAkPqH1cW7kKrKMV4g5sr2x/9RfbjKIV64h7dQdag1hqC4jn7mfeTJEbN1J8S4l10g14IAT9ELvridhdT8TuQZjyCl7Tvn370pw5cxz
b4L7HutHzagTbjfuD2bNnO/ZH1wSEq3EftEjgta3omP6iNoWyYB3E7noidtcTsbueiN3Nwe/DzzGoaty4cdSvXz8aMGAAp7xCfMtVV9kT4o8dO5ZDCBB3Cu644w4aNmwYvfDCC3TGGWfQ1KlTaenSpfTOO+84vJh33nknPfnkkxxvolJeISYEqbGsBEIQBP0Qu+uJ2F1PxO56InYP0skFkMLq+eef58kA0H2/cuVKmjVrlmMgFeJCVCA2OO644zg3K0Rqz5496ZtvvuHMASpHq4qJRcqs66+/ntNpIagdx7RKjlaFCk4X9ELsrididz0Ru+uJ2D1I87RaEQQ2qxGxZgY4YzQjvMGCXojd9UTsrididz0x2+6ZZQPGkfUAA7J0we/hAVYEKTWAuPcFQRAEQbCyXknUSLSKp9UNGAyGlFnI0WbWVGyqlWS2N1ewFmJ3PRG764nYXU9qw+42m40FK8brIG+rLoin1Q24AZCcuDZQiY4FvRC764nYXU/E7npitt0TNfKwKvSR54IgCIIgCELAIqJVEARBEARBsDwiWv2YeHjixImSgFgzxO56InbXE7G7nojdzUMGYgmCIAiCIAiWRzytgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIgiAIgiBYHhGtgiAIgiAIguUR0SoIQq1z5ZVXUqtWrXx+3JCQEHrsscc82hfnRzkEz9mxYwfX8UcffeTvogiCoCEiWgVBqBYQLBAuaqlTpw516NCBbr31Vtq/fz9ZiYULF7KITU9P93dRAoovvviCXn75ZX8XQxAEwYkQm81mc94kCIJQuWi96qqr6PHHH6fWrVtTfn4+/fXXX/Tpp59Sy5Yt6b///qOYmJhKj1FUVESlpaUUFRXl07KhLOHh4byA559/nu69917avn17Oc9uQUEBhYaGUkREhE/LEAyceeaZbEd4Vo3g5wL1hjoLCwvzW/kEQdAT+5NdEAShmpx22mnUr18/fn3ttddSUlISvfjiizRz5ky65JJL3H4mJyeHYmNjfSoUIX4LCwvZ44vFU3wtmAMJiE8I/Ojo6Gp9TnnWBUEQ/IGEBwiC4BOGDx/Of+HVBIgXjYuLo61bt9Lpp59O8fHxdNlllznec/V8QtDefffd1Lx5cxaUHTt2ZE+pa2cQhBNCET7//HPq2rUr7ztr1qxyMa34Cy8rgEdYhTMo76G7mFaEEdx5552OMrRr146eeeYZFsZGpk6dSn379uVrSkhIoO7du9Mrr7xSZR15co3dunWjk046qdxnUYaUlBS64IILnLahGx/1ADHZqFEjuuGGG+jo0aNOn8W1wnv666+/ckMDYvXtt992W8YTTzyRfvrpJ9q5c6ejzpSt3MW0Kjvv2rWLz4HXKOeUKVP4/TVr1vC9gcYKPPEIPXDF03oXBEFvxNMqCIJPgDgF8LgqiouLadSoUXT88cezOKsobACi7eyzz6a5c+fSNddcQ7169WKBBdGZmppKL730ktP+f/zxB3311VcsXpOTk90O6jrvvPNo06ZN9OWXX/LnsR9o0KCB2zLk5ubSsGHD+HwQfi1atOCY2AkTJtC+ffscMZ6zZ89mT/LJJ5/MwgqsX7+e/v77b7rjjjsqrB9Pr3HMmDEsuNPS0qhx48aOzyMEY+/evXTxxRc7tqGcKlzj9ttv5wbD66+/TitWrODyGD3aGzdu5HLjM9dddx0LZnc89NBDlJGRQXv27HGUCUK0MkpKStjzfsIJJ9Czzz7LDQrYBkIVx0NjBfZ46623aOzYsTR48GBuSFSn3gVBEPAgFQRB8JgPP/wQbkHb77//bjt48KBt9+7dtqlTp9qSkpJs0dHRtj179vB+48aN4/0eeOCBcsfAey1btnSsz5gxg/d
98sknnfa74IILbCEhIbYtW7Y4tmG/0NBQ29q1a8sdF+9NnDjRsf7cc8/xtu3bt5fbF+dHORRPPPGELTY21rZp0yan/VD+sLAw265du3j9jjvusCUkJNiKi4s9rrPqXOPGjRt5v9dee81pv5tvvtkWFxdny83N5fU///yT9/v888+d9ps1a1a57bhWbMN7nnDGGWc42UeBesRxcA8olJ2feuopx7ajR4/yvYDrwr2h2LBhQzkbeVrvgiAIEh4gCIJXjBgxgr2W6NKF9w/euO+++467ho3cdNNNVR7r559/5oE98BYaQVc6tOgvv/zitB2euS5dupAv+frrr2no0KFUr149OnTokGPBdcKTuGDBAt6vbt263M0Pj2t18PQakYkBXthp06Y59sH5v/nmGzrrrLMccagob2JiIo0cOdKpvAhbgC3g0TUCzya83maBuGYF6gieXHhaL7roIsd2bMN727Ztq3a9C4IgSHiAIAhegZhFCCyM1EcsJQQJRuMbwXvNmjWr8liIn2zatCnHiBrp3Lmz430jqmvZl2zevJlWr15dYfjAgQMH+O/NN9/MoQnoDodAP+WUU1iYnXrqqT67RoQIPPjgg9xljnPMmzePz4/txvKiG79hw4aVltfMOlMgnta13iCoYXvEwLpuN8bcelrvgiAIIloFQfCKAQMGOLIHVAQG1bgKWV9Q3VHvnoBBP/Ba3nfffW7fh0AHEIkrV67keFR4R7F8+OGHHKv58ccf+6QsEKeI6YQXEgOUIJIh9ozCGOVFWRA/6g5XEWhGnSkqSn9V0XbjwDNP610QBEFEqyAIfgejyn///XfKyspy8kRu2LDB8b43uHr5KqNt27aUnZ3N3dJVERkZyV31WCC64H3FaPxHHnmER77X9BrhFUWjACECGNA0ffp0Gj16tFOaLpQXxxsyZIjPBWl16q2mVKfeBUHQG4lpFQTB7yAlFuIXMfLdCEavQ0ChK94bEFMJPJkRC138ixYtYg+qK/g8MiGAw4cPO70HT3KPHj34NRLv++oa4W1dvHgxffDBBxzjaQwNUOXF8Z544oly50JZazILGOoNoQe1gaf1LgiCIJ5WQRD8DjyWyE2K9EjIBdqzZ0/67bffeKICdI/DG+cNGJQEcFwMFkMKKJxLiVkjSD31/fffc65R5B7FZzHgCnlGMQgK5ULaLAw4OnLkCOceRcwmYlFfe+01Hjyl4lN9cY0Qc/fccw8v9evXL+eJxGA0pIiaPHkyhysgthbXhxhRhBUgb6wxp2t16w1e3vHjx1P//v15YBfKbwae1rsgCIKIVkEQ/A68lRAujz76KIslxIgi9+pzzz3Ho+u9BYILnkjkB8UEBOjKRy5Td6IVOWTnz59PTz31FIu+Tz75hCcOQEzlpEmTOKYUXH755fTOO+/QG2+8wZ5A5FJVuVUri9+t7jVCEB933HGcbxVC2d0sYrguiDyEJmDgFga+4ZgoI8IGvAXhDhDCKCM8wQhdMEu0elrvgiAIIch75e9CCIIgCIIgCEJlSEyrIAiCIAiCYHlEtAqCIAiCIAiWR0SrIIoHGRcAAJ8pSURBVAiCIAiCYHlEtAqCIAiCIAiWR0SrIAiCIAiCYHlEtAqCIAiCIAiWR/K0ugG5HPfu3ctTLdbmdIaCIAiCIAhVYbPZeEropk2bVpofOtgQ0eoGCNbmzZv7uxiCIAiCIAgVsnv3bp6IRBdEtLoBHlZ1M2BmFrPYv38/NWrUyLTjC9ZE7K4nYnc9Ebvridl2z8zMZOea0iu6EBCidcqUKTzVYVpaGs/XjXm+BwwYUOH+mFoR83tPnz6d5wjHFIQvv/wynX766R6dT4UEQLCaKVr37dtn6vEFayJ21xOxu56I3fWktuweolkIo+VFK+boHj9+PM+xPXDgQBafo0aNoo0bN1LDhg3L7V9YWEgjR47k97755htKSUmhnTt3Ut26dclq6HazCXbE7noidtcTsbueiN3NIcSGaF4LA6Hav39/ev3
11x2DpOASv+222+iBBx4otz/ELbyyGzZsoIiICK/d7omJiZSRkSEtZEEQBEEQLEWmpjrF0p5WeE2XLVtGEyZMcGzDKLkRI0bQokWL3H7m+++/p8GDB9Mtt9xCM2fOpAYNGtCll15K999/P4WFhZGV2Lx5M7Vv397fxRBqGbG7uTi1w92/xE7lt9uc/9qOvSi3T7m2vtM+9leuu2zbupXatG1LIaEhxD6YkDJvDP/FnxBOQmh/L8S+zWRvDV+HKrvNZi8z1vEaL0sNdaDed4GLGHLsmo5tL7s2x4aya+T9HZvKfdbdNTvq2109O9aP/TXazt0+FfpqypvVtSCVrbpF2d3jujLWk2Pd+XNV3Rfl6qvMnnajurm3y+zrtE19rpJrdBSj7IVTqVyK6Ciz8x/Duhu7l7eABzZyWfXESFUd036gylbd2r1T1w4UGqbPyH7SXbQeOnSISkpKygUzYx2eVHds27aN/vjjD7rsssvo559/pi1bttDNN99MRUVFNHHiRLefKSgo4MXYgvEElA3H9Zbi4mLKz8/3+vM6UFxYQrlZhZSXWUjFRaVUWmKj0lIblZaU8g9qaSnxNlvpsfdsJXaPvH277dh2w2v757Gfzenz/Ew3ipeyJ7rjeW7YwbGbUaS47H/sl+LYcXHPrIw47NH1e65Z3O3ofF6H6DBei/phcLrmYz9wTmLGZd9j13jsPEV5Niot+0q4iosKBYTr+d191oMfp0BgLu2t/oeUzisTNkq0sMXLBLBDDIXYq8whRh1/j9Wzuoet3ccWXHhld08puy0MjxrBIjR4MIUatNBroJTWotUbIFYQz/rOO++wZ7Vv376UmprKIQMVidbJkyfTpEmT3HrE4uLiqF27dpxJAMI2OjqaRfOmTZv4XOHh4Q4BC7CO1/ixwA8LygBxqrzE2Kb2xWucw5N9jccFCH1QgtndvizaoOiq2BfnxDFruq9r+V33dVcvRUXF/OMZQqHHhCT/oIbw5yAqPXoI44kNJ7qLI121bz3zrxvcP6YTRcEITFVaXEppG/Np74pjjUChhpRpemePkagTgcp7RgVLgUHhR/LsDRZXHdG4cWPavn07vwfNgu/3wYMHeb1NmzacehNOrTp16nAuVjjkQHJyMv82q3XdsLRohXEgbpA6wgjWYXB3NGnShMWUMRSgc+fOnHkA4QaRkZHlPoPwAwz2ck0lgS5cFSvSqlUrp1GBEGG40WJiYrzuwoOIs1rIgi9gserwepZSCQRpsd3TWVJSJk5LiGxRnj1q2asUVvbXtQvR2HVm7G5z6V5z6lpzbDPuYuzHPPa5Ki1ble3dOkBt1XGh+hznU1ddjnJ7hFRs97z8PEpocJB6nRxH9RMblL/MCrqA7atlXYxGu5Ure8XldXrL9V6oouwVdnO6duEar8H1mlzvOad9Qig3J5eiY6KdPaCGLngnD2iZZ5QbbsZufKPX2/E55279YyEHZd8JQ3ey6zbH67KyO0ISjH+N3dfKrWvsbfCky9lpu0v4hOtnXR4LrveDu++qW/t50j3t7h4r98FyO1T6tutNn5ubSzHR0TXvnnf1ljt2ObZyzM7u66um4QmqTOpSqgqrcbefu/Adp54V12dDhStVPB8qs5M3dq/qmPYCONk9NtZZHxh1BOjYsaPTev369R2vkfWosn3baxpiZmnRCoEJT+mcOXNo9OjRvA0eOKzfeuutbj8zZMgQ+uKLL3g/NUsEvKIQs+4EK4iKiuLFU6GJ1hMEa1JSEtUE1YoKZCBMczMLqYS77kuppNguSstjf4ranaJhDvcn4vvCwkIpNCyEQsPLXoeHcBxQGG+Dtze4RmIGg90rIj4hjm144MABiqufEpSNMm9J3bun3A9P4BI830ezSU0NJrsDsb2edrcGlhatAB7QcePGUb9+/Tg3K1Je5eTk0FVXXcXvjx07ltNaoYsf3HTTTZxp4I477uAMA+h+f+qpp+j222/3SXlUdzg8rAJRfnYR5Wa47wpmIWo
Qn1g/Jkrt74WGygMw2FDfDXxXRLQKgiAI2ojWMWPGcJzHo48+yl38vXr1olmzZjkGZ+3atctp3l106//666901113UY8ePVjQQsAie4Av8YXnz9uUXFaiqMAe7xoVE05RMREUVuYlhSgNJu+oLwkGu1eG2N09iEsT9EPsridid03ztFot/xm6dhE83bp16xp38cITFegC5nBqNpUUl1JiwxiKirZ8G8gSBIPdK8OX35FgQqbz1BOxu57UxjSuiRrmaZUEYn5Ejcq3MggcR0iGO+wxrGWZBCIrv5VOPPFEuvPOOymY+Oijj7yaaa0quz/22GPcoyAEF4iFF/RD7K4nYndzENGqCVdeeWXZaOEQHpCG9BuPP/64I0VVRfz77790/fXXu32vuNAuWMM4XrXyW2n69On0xBNPeCSGvb0uLBgcd+qpp9Lq1aurdZzaFIqY9GLQoEHcSo6Pj6euXbs6Cfp77rmHBxsKgiAIgnAMEa1+pLa7TiHmkK4Lg9PuvvtuFmrIX+sOpAcDmFGsokFnRYVleWEjKx5so46DVB4QaGZeFxaIPaQjO/PMM8mKoHxXXHEFnX/++bRkyRKe8e1///uf0yQVyA1c08wUgvWQkcR6InbXE7G7OYho9SO1PRsW0nohvy3yvyHLAqbDxbS3ymOJtGIQUAggV184V48oBr6dc845LKyaNGtA190yjg6n2xMiGz2W7733nlNMozE8AK937tzJg+WUhxQZIRCX88033ziVecaMGRQbG0tZWVlVXhcWnPuBBx7gJM4qUTPAQLwOHTqwAEfi5kceecQhFNHNj8klVq1a5SgPtqkunhtuuIFjk3At3bp1ox9//NHp/Bj4h1zAqBMloCvihx9+4GmG7733Xq5jlAn1PmXKlHJ1qDB6ktVizPf333//0WmnncbnRzkhijGbnGAttm7d6u8iCH5A7K4nYndzkJEzPoCTqhfnVftzBcUFVFrkLqepZ0SHR9dopDZm5Th8+LCTFxDCcfbs2W73R+5bJVjnz59Ph/dm0n0PjaerrrmC5i+Y79gPU+d+++23HBLgLuURtvfs2ZPDDq677jreBmF68cUX04cffkgXXHCBY1+17qmXNjs7mz777DMOfzB6K/F5CFEI8jVr1vB5se2+++7jDBUQfshK8fvvv/P+6LrH9UIMQjDjmG3btqV169Y5XRMSSD///PP06aefchaLyy+/nLv3P//8c7flg7Bev349nw8C2BOMIhjiHsIYwleJ6uHDh9O1115LL730EuXl5bFAv+iii3g6Y8E6VBWKIwQnYnc9Ebubg4hWHwDBOvCLgbV+3n8u/YdiImK8EtkQqPAQIpetAsIRHtKKJmHAZyD4MDI8pWkKHUrOptdefJtOGDmQY1/79+/vCAn45JNPOLTAHQgVgPCDaDTObAbhddxxx7FIw2QQSFD/888/O4RkRcDzCSGtRB0+i23GVGgPP/yw4zW8lBCWU6dOZdEK8Y7PI6zAWJ7ffvuNu/AhMuERBfDSGoG39q233mJBCzDpBWKFKwL1DcHfvXt39ngjtvWUU06hyy67rMIJLlSZYDeEFUBQv/3227wNOYl79+7NuYgVH3zwAad+w6QaqtyC/1H3qKAXYnc9Ebubg4QHaIQSd+jmhgcRHkZ0RSsgpCoSrADiDWIIixqE1aVzFx5Bj/cUEGMVCdbKwOQRGJT08ccf8zq8mzjWCSecUOnnTjrpJFq5ciUvEJmjRo3i60MIgmLatGk8WxoEIOoAIhahDpWB4zVr1qxS4YdwAyVYgRLbFYGGwU8//cTeaJQBZUF8Ma4dXtvKePDBB2nRokU8kAtCGyCkYe7cuXwctXTq1Infk+4pa2GcolHQB7G7nojdzUE8rT4A3fTwelaXgoICj6ePrei81QHi7s0332Rhim5yeBZdBZWnVDYIqzrHcQXeVsR3Ii4VoQGY+ayqEAicD+EACniL4Y1899136cknn2ShB08m4lYhaPEevKwvvPBCpcd
VwrAyXPOtoqxVpT6GJxpCFwuu96GHHmJhDGGtZnpzBQIe3f/z5s3jCTOM4RBnnXUWPfPMM+U+AwEtWAc0kmRwhn6I3fVE7G4OIlp9AISKN930oSWhVCei9jIIuIq76oLBRhjghCWhjj1edMv2jRxX2aVLl2odC8LZXb5SxISiy/7VV1/l+FFM4euNPRAagPhOsHDhQvbYQhwqjF7YisqDGdX27Nljejc7whXgsUVogzsguiFuERKAcAIjffr04fhhHMO1ESIIgiAIwYSEB/iRQJsVCdkGEEIAr+XSpctp+cpldP1N19CwYcOoX79+1ToWRNaCBQsoNTXVaaR7vXr16LzzzuPR9Yj1RPe8Jx5rTPGLBWEKiBtVHkjQvn17bvXCu4oucwji7777rlx5EKuLkACUB8fEdSE0AXGkGJyG93/55RcesOUtCMdAWAA8pjjeihUr6Oqrr+bY2JEjR5bbH9d07rnn8iA1eInVdarMCLfccgsdOXKELrnkEo4rxvUhVhke20CYvEInxPOtJ2J3PRG7m4OIVj+C0emBBDyYiKdEDOvZF5xKF15+DrVp05a7tasLBivt2LGDu8hd41+vueYa7kKHmPMEiEg8ILAMHGgfFPb1119zai1w9tlnc3otDJJCKil4XpHyygiEKUblI4QC5fnyyy95O7yYGGAGUQhvMrzANRGDEMLbtm2jsWPHcuwpYm8hQjHoy11X0oYNG3g6QMT5qmvEoga9Iczj77//5jJB5KNRgdRisJFxIJrgf9AQEvRD7K4nYndzCLFVFYCnIZXN6evLedVxrECcm70gr5gyDuTyTFhJKb4fIYn0URCZe/furXRgWKASqHb3FF9+R4KJjRs3Soybhojd9cRsu2dWolOCGQmCE6pNcdkgrIioimfC8gaMnke6q6effpoT+gejYBUEQRAEwTuk/9CP1CRzgD8pLqh6+lZvePbZZ7nLHGmpJkyYQMFKoNpdqBmIrRb0Q+yuJ2J3cxDR6kcQtxmIFJXlaA2P9O3tg0FKGJCESQyCOTFzoNpdqBmuGSsEPRC764nY3RxEtPqRQAwnLikupdKSUlM8rboQiHYXao40VvRE7K4nYndzENHqRwJxdLeKZw2LCKXQ0MqT/gvBY3eh5iAXr6AfYnc9Ebubg/x6+pFATAavpm+NEC+rVnYXak7Dhg39XQTBD4jd9UTsbg4iWv1IIHYfVDZ9qxC8dhdqDvISC/ohdtcTsbs5iGgVqhWLeSzdldw6giAIgiDUHqI8/EigdROXlth4AWER4mnVxe6Cb2jUqJG/iyD4AbG7nojdzUFEq1AprVq1opdffplfKy9reERYtQdhYUpVTC+qa90JQk2m/xUCF7G7nojdzUFEqx8pLi6utXNdeeWVFBISwgtmmmrXrh09/vjjVZbh33//peuvv77G+VmnT59OTzzxhM8FHa5r9OjRZEWMdWem3VGXyrYYsdq9e3d67733qn0cfH7GjBk+LZtwjEOHDvm7CIIfELvridjdHES0asSpp57K06Ru3ryZ7r77bk7m/9xzz1U6WKhBgwaO1B2OmbCqMX2rOk79+vUpPj6eggFMgOAJxrozGzRAYNv//vuPLr/8crruuuvol19+qZVzC4IgCEJtIKJVo+k8cT5MkdqyZUu66aabaMSIEfT99987eSz/97//UdOmTaljx45OHlE1CGtP6m4ac+kFPGNVQkICXXTRRbR//37HOSCEe/XqxZ6+1q1bU506dcqFB+A1Zgu56667HB7CnJwcPt4333zjVGZ4/mJjYykrK8ura4aIO+2007i8iDG64oornFrAs2bNouOPP57q1q1LSUlJdOaZZ9LWrVudRoCifNOmTaNhw4bx9Xz++eeO+nr++eepSZMm/NlbbrnFSdC6epNxHNTLJZdcwmIW0/yp+ldgHdtxnpNOOok+/vhj/lx6enql14kGAWzbpk0buv/++7mRMHv2bCev78iRIyk5OZkSExP5WpYvX+5UVnD
uuefy+dQ6mDlzJvXp04fLhONPmjSpVnsJgoW2bdv6uwiCHxC764nY3RxEtPoACLqigpJqLzlZeV59Ti01nVkpOjraKf0Spk/duHEji50ff/zRaV8MwCouLqFx111C6elHaf78+bzftm3baMyYMU77btmyhb799lsOCVi5cmW582J7s2bNHN5BLBCmF198MX344YdO+2L9ggsu8MpLC6E3fPhw6t27Ny1dupQFKgQ2hLYCYnn8+PH8Pq4fif8h3EpL7aEQigceeIDuuOMOWr9+PY0aNYq3zZ07lwUu/kJcfvTRR7xUBgQfjr969Wo6/fTT6bLLLqMjR47we9u3b+drhRhetWoV3XDDDfTQQw9V65pRbtT90aNHOQxEAdE/btw4+uuvv2jx4sUsjHF+1RiAqFX1DXuo9T///JPGjh3L175u3Tp6++23+RrRuBGqx549e/xdBMEPiN31ROxuDjKM2UcJ99+5Y36tn/f6V4ZRRDW66hUQuxBov/76K912222O7RCO8AQaxY4xP+uff8+j9RvXsbhq3rw5b//kk0+oa9euLHL69+/P2yCEsR3d4+6AFzAsLMzhHVRce+21dNxxx7FogvfywIED9PPPP9Pvv/9O3vD666+zYH3qqacc2z744AMu+6ZNm6hDhw50/vnnO30G76PcEGjdunVzbIeX+LzzznPat169enwOXEunTp3ojDPO4HpF13xFwEML0QyvJcr16quv0pIlSzh0A4IQHm4VsoHX8BR7IhDhXX344YepoKCAvaCoY9SnAuLdyDvvvMPeZTQ+4F1WtsI2o00gsiHYIXgBPK2ITb7vvvto4sSJVZZLOAZsI+iH2F1PxO7mIJ5WjYD3FN3kEEzoMoeHFN35CgzgcSdYVTzrpi2bKCWlmUOwgi5durDQgQdSgfCDigRrZQwYMIAFMLyW4LPPPuNjnXDCCeQN8FbCC4prVgvEJVAhAIjvRXc9xBjCE1S3+K5du5yO1a9fv3LHR1khWBVKaFdGjx49uPtdNRJwTvUZeLmV8DfWiSfce++97NX+448/aODAgfTSSy/xYDsFPMwQ0/CwIjwA583Ozi53ne7qEB5xYx3iOGhY5ObmelQ24VjPhqAfYnc9Ebubg3hafQBG08PrWV3QlVuTeeirO4ofMZJvvvkmC1PErbrmC4WIqmr61jK9VSmVHacq4B2cMmUKe/fQVX3VVVc5RF51gSg766yz6Jlnnin3HgQmwPsQxu+++y7XCWwCD6vrrFXurikiIsJpHeV0DStw9xnj5zz5jCcgVhUiFcvXX3/NDRAIbTQqADylhw8fpldeeYWvF/HNgwcPrnJ2LtQhvK2uXmag4pUFz1D3nKAXYnc9Ebubg4hWHwDh4U03fX5+EUVEOQsfM4HwMnrfqhWzW1hCHdp14Did3bt3O7yt6EZH7KgSR54C4ewujx1GvqPrGd3mOLbqlvYGDB5CfCe8p+4S+kPEwbsJwTp06FDehphPs4FQdCf4EA6AcAgjKra0OsA28KJPmDCBB1GBv//+m9544w2OYwWwoWtKFohpV5ugDlFH3tw3gjOI/1YDHAV9ELvridjdHCQ8QKgSWykWG50w9CT24GHwEEaeIxYTg3QwEt1d93llQEguWLCAUlNTncQT4kTh1UN39ymnnMIDtqoiIyODu8aNC0QZRvNjkBO6/yH+EBKAOF54byHOcC6M+kd8JwaPoWsdg7L8BQZebdiwgeNTEXP71VdfOQZ2VdfbjIFTP/zwAw8wAwgL+PTTTzmM459//mEbunZfwSaIyU1LS+OBXODRRx/l+GR4W9euXcufnzp1KsfPCoIgCEJtIqLVjwTKdJ4lJfbu64jIcPbcQewhzhQpsxALinRQ1QVxkkgnhbQgrvGv11xzDXsjr776ao+ONW/ePB5wZVwgstDdDw8jBCoEMAQ3BlQhBhdhGVggwJYtW8YhAUjBVVHe2tqwO1KEIeUXsisg9hWhHCp7QHXTo8HzjWuG6ATvv/8
+C1F4TpH26/bbb6eGDRs6feaFF17gjBDw1KIOATIlIBb6t99+43jbQYMGcbwsQgyE6uFNnLcQ+Ijd9UTsbg4htprmTQpCMjMzebAKPHgYsGIkPz+fR88bc5B6C0Z5B4JwzT6aT7mZhRQdF0nxSebHMcIjCAG5d+/eCgeGBTLVsTsyB7z11lvsOQ4UfPkdCSbg9UdWB0EvxO56YrbdMyvRKcGMeFr9SKAkaHdM3xpl7u2C0ejown/66ae5qzwYBWtVdkfcKUIZEA8F8Q7Pb03iegXrcPDgQX8XQfADYnc9EbtrLFoxmhzxdvDaIJ0PYik9AV2/iAW06tz0gYCaCQuER1Z/sFl1ePbZZzklFfKEYhCRjiAF1znnnMPd+8iHqqbbFQRBEATdsXx4AOIlMdgHXaQQrJgWEyl9MKLZNSbPCOIlMT0nYi7hosd0oFYLD6hpyqvaoLiohI7szeFcVw2ax3mdfkoILLvXBAkPcA+m+HVNkyYEP2J3PTHb7pkSHmBNXnzxRU5mjhHf8D5BvGLedsxcVBEYeIPR0RiMA9FqVYzz1FsVlZ81PCJUBKtGdhd8DyZkEPRD7K4nYncNRStGkGNkN0apK+ChwvqiRYsqHZkOLyxGoVsZizu5HTNhgQiTQwN0IhDsLvievLw8fxdB8ANidz0Ru5uDpYeuI38nvKaNGjVy2o515LN0B5LDI70PcnVWZ45g4zzBcLtXhS9mMQoEzyUmFaiNQVg6EQh2rwm++G4EI9VNWyYEB2J3PRG7B5hozcnJqdF0nt6QlZXFOSgxwxGmtfSUyZMncyiBu0ExmGsdswEh5RCErYrRw5ztOIdaVzMJIYYFo8PhTYM4QWoj1R2s5qlX++I9HFPti8+qaTXhUcY2d8cFGFlv3BeLGpXuui++PEqUu+6LMkBkKKFh3Bfnz8st4uPkFZRSaUhRpfsar9X1uCgv3vOkXqqzr2t9G+uwOvvWtA5xjursi7hPb+pQldddveD83tR3de7ZquoQnztw4ABvwz6YQQ3PAuyHwZT4TgHk+sV3R3WhtWjRgvPI4juM8yN/L+LWAfLqIiQIKdAAJpxAwxILzomJE3BcXDtiu7DgvAC5epGVArO2AcxQgwwVuIb4+HguB77LatpF2EVNrIDjIjYe14RnGfIuYl01nHEMzKoGjM8ITNqAwYSI6wXo9eHvUF4eXxNClnAtOBfqAGVEtgiAZwquCXUIUGd4jWuATVFPmAgDIFYf2zAZBEDuXJQHU++iDnEeTFJRVX3Dxig/9kU5ESeH5x4m/lD1jf0QOwebd+jQwVHfqEPYR6Vkq6y+cUyU2VjfqC+kBlL1vXPnTr53YG/Um7G+cQ+qiUhwf8DGqr5xLFWHKj+mGr2NesB1o/7xfcP1oEyqvnH9+/fvd1vfqFN1z6Ls+LyxDlF2Vd8V3bOoJ1UnqEd1zxrrG/Wo7tmUlBQ+prG+YXNcf1X3LPZFPaj6xuQpqFOAexJ1q+obNsdx3NU3XqPcqr4ru2dR3yinumcRy457UtU38j6rexblQV0Z6xt2CsZnBO4t1F91nhHGe7aqZ8S2snXdMG0gFr4wF110ESeIx4Aob1BfJiRcN2YAQAog3GBqikoFvKtIiq5+aIH6EYeRcYPjRvfE04ovWkUBzigXvki40YM5QL+0pJRy0guJQoji6kUFvYewtrC63WsKvrN4uAdryjJvwfNHpnXUD7G7npht90xNB2KZ5mn97LPPeArK4cOHc+sJ4hVZANBi8BT86PXt25enllSiFSIU67feemu5/ZEuac2aNU7bMN0kWmavvPIKC1F3oDVYHVe+8nigJeY6V3t1UCOsrcr2VQdpzQ9bqUHzeDrlWusOaAs0rG73moAGI7wgvmrglJSWUGiIDAIUBEEQTBStEJlY4O5GknQI2EceeYSnhYSAPfvssz2aFQhzwcOzirntBwwYwCmv0JWAbAIAQhjdKejihxsd03EaQbcBcN1eU1TXaE08ZugSsHJKoMM
78yk/o5Tq9Ym3dDkDDavb3Qy4e7w4j9IL0ulowVHKyM/g11gyCip+nV2UTfWi6lHH+h2pU/1O9r/1OlGrxFYUHmrpkPxyVCdkSQgexO56InY3B9Of+ojxgPDE8tprr9G9995LP//8Mxv0xhtvpAceeIC7EytizJgxLHwxhzriZHr16kWzZs1yDM5CrEmg5rw0hjFYkQO77APSGraM93dRgoLswmxacWAFbT6wmeIPx1NkaCRFhEZQRFiE02t321xfQ7CZ4X2EuCyxldiXUpe/Lq+LS4spszCT0vOrFp/4W1TqXaoviNzF+xbzoogKi6J2ddsdE7L1O1GHeh0oNqJ24+iD6fsumIPYXU/E7gE6uQACrj/++GP2tCIg/Nxzz+VUVAiCfuaZZzhc4LfffiMrUVuxImbHvMzdNZee/fdZKiy1D5SpFrYQOnvePRReGkmzB71FWbH2YHJ3oPu2fd321K9xP+rbqC91SerCAkt3sgqzWKT+m/YvLU1bSuuPrGex5ysgXFnMuhG9eA/nQjiNEpiltlIqttn/QnwaXyshinUzQTnrRtWlxDqJ/JdfR1X8OiEygdJy02jjkY204cgGx9/cYvfx5C3iWzhELAvaeh2pYUxDS4QXSGyjnojd9URiWgPM0zp9+nT68MMP6ddff+VJAW6++Wa6/PLLHd314LjjjqPOnTubVQTtee+/92hPtn2EZHWpl9uYBWthaD5tow1ky628bZOWk0Z/pv7Jr6PDo6lng57Ur5FdxHZv0J09Y8EOvI7L9y+3i9T9S1lcuYrAZnHNqGlEU4qNi2XPY1FJEf8tLCm0r7u8Vu9jcRW8EKJYyJ6YwHTCQsLsS+ixv/ER8U4iFF35lYlQ3BvVFZBJ0UnUNamrYx11mpqVShuObuA6VsuB3AO0K2sXL7N3znbsr8ILOtfv7BC0LRNaBlx4gSAIgu6Y5mlFC+Diiy+ma6+9lvr37+92H6TEwHzzEydOJCtRWy0YZCwwK5fbobxDNPyr4WQjG71/yvsUH1m9Lv59y3Np/bcZlNgqkvpel1TpvgUlBbTm0Bpatn8ZL+gKdvWuQbgqEQtBGxNRcUhIoIBub4hTeFHxF15A1LcRiCNcN7zQ+Ns4trHXdodHtCphq7ZDzMIDDmGGvxCZ7l6Hh4RzeI3r+0qU8vshoQExGOpI/hG2AXtjj9q9stsztrv1bqMRhd4BJWIRaoA42aQ6SaZdp5nf99oE9xfuC6vfD1YhWOwuWMvumZp6Wk0TrUgHVVmsqpWprZsBOdsqymhQU6Zvnk4TF06kbknd6Mszv6z25xdM3URr5u2hnic3p+MvbO/x5+AF25a+zS7m9i9lEQsBbQQ/eF2SuzhEbJ+GfSguMo6sztH8o3w9ypO6+ejmciK1VUIrFqj9G/Xnv+iark27C87kF+fT1vStDm/sxqN2UVtReEFcRBw3NLDAlvw6sSW1jG9Z43s0kOyeU5RDuzLtXuvdWbv59c7Mnfz6YN5BbsTEhsdy4xN1hlhiLKijmPAYp794v9x+hm1oQPhKAKuYbDTaVOPN9a/raw6RsdnzLKsQGdd1tU299vQzWIpyiujEDidS56TOEjalEWZ/3zM1Fa2m9Y8hKS9ymWKktBEk2sW2mqSKChZqmue1Mubunst/T2x+olefP7DTu0FY+DFrV68dLxd3upgf4vixg9hTQhahBKsPrublg/8+4M8g9lDFxPZt2Jfq1jkWRuIvDucddhKpW9LtCbKNtElsQ/0b93d4U5Ojk/1qd8GZOuF1qGtyV14UEBJ7svY4hRbAI7s3Zy9nK1h7eC0vrsC2TmK27HWz+GYUGRYZcHZHzDWL0szd9rCKMpGKv4fzK45hV3WYVZTFy36yJ4r3Fnj1XYWsGlDnKjorEqJGEerakLQC72551xE21adRH35edE/uzvenEJxY7fseLJgmWity4MJlLknH7ZhVD0gttHjvYq9FKyYVOLQnm183bFmzFhw8KOh2xXJ+h/N5W2p
2ql3Eptk9sfihxCAlLJ+u+5T3QXcte2Ib9+W/nohBT7wv6q9ajOv44dueud3e3Z+2lLZm2GfNMaLKBaEKgY14y+oi979/QSOpRUILXk5pdYpjO0Ir4EnckbmDG1pYdmTYX0PEoccAC+5Z1+M1jW3KHllXQYtwELzvL7sjhEV5So3CFNsQTlEZ9evUp+bxzXlwW/ME+19cV9O4phyqAm+sWiD2ja9zi3KdtrnbD/tAYOI7iHhwLGagQl/UAEX1Vy2h+FcWIhNCIfzXaT00zB4iY9hPrVf4XtmC9e2HttPG7I1sC2MWDJwbPWHc29SoD/Vu2LvaYVyCdZHnfICEB7z66qv896677qInnniCZ8ZSwLu6YMECnuZsxYoVZFVqy+2Okd1mpOuat3se3fbHbfxDOuv8WdXueoNgnfbkEoqsE0bXvngChYSaG7u2P2c/LT+w3BEbui2j/PR0EAAQrk5C02YQn6XO2xwitWyUvLcgjZLypOLHpV6depa1u2CyVzJzl0PQGoUtBFhFoOsbwg/3L0JFVGyw41/Za/v/stdljT3X18bPVbQ/MoVAkCrvqWt8uSuI4YV4RxkhSJVAxToyN5gJvLVoYJcTuoU5lFNsr1N3QtN1m7u/xv1Vo8Ff8KyMIcRhKhioqWL/D+TZpz1VwH6IsebeprKwKW8axYJ7eJprFbZBpY51NJzUdmxT6xVux+eo/HbjZ9hRUlpC7eu3Zw+7GWRqGh7gc9GqZvpBeivM/2vMVYaWB2bHevzxx2ngwIFkVQI95dVjCx+jbzd/S5d2upQmDJxQ7c+v+3svzf10A6V0rEuj7+pD/uiWh4hV3thNRzeZ0uXHP2oh9h82NegIwgI/GByqYFKYgqTACR7w+IQXVnlkjYIWopGzO/iZBtEN7J7l+BZOAhV/rZzXNlhw933HfYPMLkrAQszifnEFjR0lYrHAy20GEFpo4BzMPcjP30P5h/i16l3AAs+8yobi1IAqW3fd7rbh5dooMzS+1DbXuGElMlX6PvbOl1YQR1zZfn4IG/n6rK95oKcZZGoqWsPNmKISnHTSSZz2ql69mnumBM/BFxSe1prEsx7cmcV/G7TwzxcB3oWRLUfyAtCthvhXDJ5xJzSVR0V1A5bbp+w9eF+MI+Jl9LNQU3APoQcACxo6RiBY9+Xsc4jZjXs2Uv369fnHk39A+b/N4a1RP6rKj+D0ns35/Yr2x32eEpfiEKVYgiFTRzDeN8o+o9vZpyiHSFx2YBktS1vGfzHQEw0gLHBCAISbOERsw77UOrF1pc8xZHaB4FRiFIPojEJUrR/JO1KjHqlgBAJa/U5w+Iehl8SxHX/VumE7/pUUl/D3UQiwyQUCkdpqwWCmL8wY5ktWHVxFl/98OQ9mWDBmASebry5fT/6XDuzMolOu7Urt+9lnHhOsbXfB+ojd9cRbu6OxjslJlCd23eF15YQl4o4RC4uwAoSwGAXpodxDPFCuOiCncXJMMiXXSaYGMQ3YgQBPPRplOBdEmNvGlbFhpRpjLo0t9ZlyDTWX/Y3xwBCAKk7Y3boxfti4rdy+oc7xykYRqmKS4fU1brf69z1TPK01B1O1Io41NjaWX1fGiy++SLpjRg435WU9PuV4rwRrSXEpHUpVg7BkUIAZSM5GPRG764m3dsdkHOgtUz1miPldfWi1I6QAvU/osp+zaw4vFYE82a4CVC1qHe9hkZRcvkO+7wEgWjG4qqjIPr/48uXLK2ytSLesHaQE83ULSYnWk5qf5NXnj+zNodJiG0XFhFNCsjkB5Lpjht0F6yN21xNf2R1hHoOaDOJFZbuA9xUCFinb4AktJ0xjknnWOvnNrX3k+x4AonXuXHtuUDBvnl08CbUHRgwjlyhiNoekDKlRftYGLeRBJwiCYFWQG7hXw168CIIumJILBN7W8PBw+u+//8w4fNDQokULnx5v3h57QwFB+uha8oYDu7J8kp9VqD27C4GB2F1PxO56InYPINEaERHBBpNZryr
nyJHKk3tXl5pmDXDOHCDxrIFidyEwELvridhdT8Tu5mBa1uWHHnqIHnzwQTFcJWRn2wc8+QKMNFUz9QxrPsyrYxQXldBhGYQVUHYXAgexu56I3fVE7B5g07i+/vrrtGXLFmratCm1bNmSMwoYwUAt3UEIha/4M/VPTqCMaUaR+88bDqfmUGmJjerERlB8ksyJHQh2FwIHsbueiN31ROxuDqbV6ujR9oTJQsW0bdvWMlkDwEFHPKsMwgoUuwuBg9hdT8TueiJ2DzDROnHiRLMOHTT4ajrPopIi+iv1rxrHszoyB0hogKnINK56InbXE7G7nojdzcF0//WyZcto/fr1/Lpr167Uu3dvs0+pHf/u/5dyinI4L1+35G5eHwezYAHJHCAIgiAIgjai9cCBA3TxxRdzvta6devytvT0dDrppJNo6tSpMp0hkaNefBUaMKzZMJ6CzhuKC0t4YgEgmQMCw+5CYCF21xOxu56I3QMse8Btt91GWVlZtHbtWs4ggAV5WzFf7u23327WaQOKmJiYGh8DczX7Ip710J5sspXaKDo+guLqyfRzVre7EHiI3fVE7K4nYvcAE62zZs2iN954gzp37uzY1qVLF5oyZQr98ssvZp02oNi7d2+Nj7Hp6Cbal7OP6oTVoYFNBvokNEAGYVnf7kLgIXbXE7G7nojdA0y0lpaW8iQDrmAb3hN8w9zd9qlzBzcdTHXCvU9TdXCXDMISBEEQBEFD0Tp8+HC64447nFobqampdNddd9HJJ59s1mkDiubNvcun6k601iQ0AMggrMCyuxB4iN31ROyuJ2L3ABOtmFwA8autWrXifGVYWrduzdtee+01s04bUGBgWk1Iy0mjdYfXUQiF0NBmQ70+TlFBCR3dZx+E1VAGYVne7kJgInbXE7G7nojdAyx7AFoZmPXq999/pw0bNvA2xLeOGDHCrFMGHBioVhMW7FnAf3s06MHprrzl0O4sstmIYhIjKbauDMKyut2FwETsrididz0RuwdgnlYM6Bk5ciQvQnlCQ0N9EhpQkwkFgIQGBJbdhcBE7K4nYnc9EbsHgGh99dVX6frrr6c6derw68qQtFdE7du39/qzuUW59M++f/j18ObDa1SOA2WDsDB9q2BtuwuBi9hdT8TueiJ2DwDR+tJLL9Fll13GohWvK/PAimgl2rRpE3Xo0MGrzy7cu5CKSouoRXwLap3YukblOFjmaZVJBaxvdyFwEbvridhdT8TuASBat2/f7va1UPHEAL4IDahJXtXC/GI6uj+XX0t4gPXtLgQuYnc9EbvridjdHEwJuigqKuJsAevXrzfj8EFDYmKiV58rLi12DMKqaTwrBmGRjXgWrJiEyBodSzDX7kJgI3bXE7G7nojdA0i0YgKB/Px8Mw4dVMTHe9cdv+rgKkovSKeEyATq3bC3TwZhSWiA9e0uBDZidz0Ru+uJ2N0cTBvedsstt9AzzzxDxcXFZp0i4NmzZ49Xn5u3ex7/PaHZCRQeWrMID8kcEDh2FwIbsbueiN31ROweYCmv/v33X5ozZw799ttv1L17d4qNjXV6f/r06WadOuhRorWmoQHgwE7JHCAIgiAIgsae1rp169L5559Po0aNoqZNm3J8h3GpDlOmTOGZtZCVYODAgbRkyZIK93333Xdp6NChVK9ePV4wmUFl+/uTlJSUan9me8Z22pG5gz2sQ5oOqdH5C/KKKeNAHr9uIKLV0nYXAh+xu56I3fVE7B4gntbS0lJ67rnnON1DYWEhDR8+nB577DGKjo726njTpk2j8ePH01tvvcWC9eWXX2YhvHHjRmrYsGG5/efNm0eXXHIJHXfccSxyEaJwyimn0Nq1ay13E2VnZ1NcXJxXXtYBjQdQXGT1PuvKwV320ID4pDoUHSeDsKxsdyHwEbvridhdT8TuAeJp/d///kcPPvggGwsiEZMMIL7VW1588UW67rrr6KqrrqIuXbqweI2JiaEPPvjA7f6ff/453XzzzdSrVy/
q1KkTvffeeyykEapgNTIyMqr9GSVaT2p+Uo3PL6EBgWN3IfARu+uJ2F1PxO4BIlo/+eQTeuONN+jXX3+lGTNm0A8//MBCEsKxusBTu2zZMu7iN06NhvVFixZ5dIzc3FxOwVW/fv0K9ykoKKDMzEynpTaobn7VI/lHaOXBlT6LZ5VJBfxDTfLqCoGL2F1PxO56InYPkPCAXbt20emnn+5Yh8CE8fbu3UvNmjWr1rEOHTpEJSUl1KhRI6ftWN+wYYNHx7j//vs5ptYofF2ZPHkyTZo0qdz2zZs3s8e4Xbt2tHv3bha3CHNo3LixY/IEhCggifDBgwd5vU2bNnytSPmF8ASce9u2bfxecnIyi+4DBw7wOuJ0cVwI68jISGrRogVt2bKF34PIxra0tDReb9myJc1cM5NKbaXUJrYNNYppxCESALG7ONe+fft4Hcc5evQoZWVlUVhYGJcf4RooJ+KJcU2pqamUuuUw7x8eX8zHgp0wgweuG40MpOxAbDLKCHAtKGt6ejqvd+zYkbZu3coZInBMlBn2B02aNOH6OnLkiGNKu507d3JDBJ5y1NuOHTsc9oSdYW+AHL8YeanqG8dSddigQQP+a6xvXHdeXh5FRUXxPYYyqfrG9e/fv99R36h7Vd+oU1yrqm983liHKDu6eMLDw7lMqr5RJ7gG2Bk0b96c6wT1DfviWo31jXpUI0nR+5CQkOBU37A5rh/7wZbGOsR9BFsC7It6UPWdlJTEdQpwT6JuVX3D5jiOu/rGa9hX1Xdl9yzqG+VU92zr1q35nlT1jWtX9yzKg7oy1jfslJOTw2nwsK7qu7J7tqr6ho1V41LVt7pnUbdYVH1Xds9WVd84LuoMjV4MJEVdGO9ZHOPwYft3yJNnBBZcU3WfEcZ7tqpnBMqj7lmcB/dhTZ4Rqr6xHzxH8ozw7hmh6sTTZwSOaaxveUYE5jMC9VedZ0R1dMS2snXdCLH5eNoGPABww6oHB4DhV69ezTdzdYDR8AVeuHAhDR482LH9vvvuo/nz59M///xT6eeffvppevbZZznOtUePHhXuh5sIiwI3Or5oeGjg5jYLfJFxI3vKnXPvpDm75tBNPW+im3vdXKNz5+cU0ft3/8mvr3lhKNWJjajR8QTz7C4EB2J3PRG764nZds/MzOQGj9k6Jeg9rdDAV155JbewFGgt3HjjjU5przxJeeXaClZgHa2Uynj++edZtP7++++VClaAshrLW1ug5ewpBSUFtHDvQt+FBpQNwkpoEC2C1cJ2F4IHsbueiN31ROweIKJ13Lhx5bZdfvnlXh0L3TN9+/blQVSjR4/mbWpQ1a233lrh5+BdxYAwxNX269ePgmHGjH/2/UN5xXnUMKYhda7fucbnlkFY/kNmStETsbueiN31ROweIKL1ww8/9OnxkO4KQhjic8CAAZzyCvEvyCYAxo4dyyEEiEsFSHH16KOP0hdffMHxMSreC/E9Vks/gVgZb7IG+CLAWw3CathCn26FQLS7EDyI3fVE7K4nYvcAm1zAV4wZM4a7+iFEkcZq5cqVNGvWLMfgLARIq0Bt8Oabb3Jg+QUXXMBB02rBMayGCu6uCgy+mr97vs9CA4zTt8qkAta1uxBciN31ROyuJ2L3AJvG1ZcgFKCicAAMsjKiRu4FE+sPr6cDeQcoJjyGJxWoKXlZhZR1JJ9fS7orQRAEQRACAct7WoMZeIA94Y/df/DfISlDKDKs5jNXHSgbhFW3UQxFRQdEu0VLuwvBhdhdT8TueiJ2NwcRrX4EWRVqexYsIJMKBIbdheBC7K4nYnc9Ebubg4hWP6ISFFdGanYqbTq6iUJDQmloylCfnFcyB1jf7kLwIXbXE7G7nojdzUFEq8VRXtbeDXtT3Tp1fXJMlaO1YUvJHCAIgiAIQmAgotWPYMq92g4NyMkooOyjBUQhRMnNrZUCTBc8sbsQfIjd9UTsridid3MQ0epHqpo7OKswi5amLfVpqivlZa3XKIYi68ggLH+
g65zRuiN21xOxu56I3c1BRKsfKS4urvT9v1P/pmJbMbVObE0tE1r6ND+rhAZY1+5CcCJ21xOxu56I3c1BRKsfqWqGrrm75/rUy2r0tMqkAv7DajOzCbWD2F1PxO56InY3BxGtfiQpKanC94pKi+jP1D99Gs/qnDlAPK1WtLsQvIjd9UTsridid3MQ0epHdu7cWeF7y/cv55jW+nXqU4/kHj45X056AeVmFFKIDMKyrN2F4EXsrididz0Ru5uDiFaLorIGnNDsBAoLDfOpl7Vek1iKiPTNMQVBEARBEGoDEa1+pHHjxm6322w2U+JZjw3CknhWK9pdCG7E7noidtcTsbs5iGj1I4WFhW63b0nfwjNhRYZG0uAmg312PskcYG27C8GN2F1PxO56InY3BxGtfuTIkSOVhgYMajqIYiJifHIueG8P7rKHB0jmAGvaXQhuxO56InbXE7G7OYhotSBKtPoyNACzYOVlFVFoaAglp8ggLEEQBEEQAgsRrX6kXbt25bYdzD1Iqw+t5tfDmg3z2bkOloUG1GsaS+EyCMtydheCH7G7nojd9UTsbg4iWv3Irl27ym2bv2c+/+2W1I0axjQ0IT+rhAZY0e5C8CN21xOxu56I3c1BRKvFArXNCA0AB8pmwpJBWP5HAvT1ROyuJ2J3PRG7m4OIVj8SE+M8yCq3KJcW71vMr09q4btZsDAISzyt1rW7oAdidz0Ru+uJ2N0cRLT6kYYNnbv/IVgLSgooJS6F2tdt77PzZB3Op4KcYgoNC6GkpjIIy2p2F/RA7K4nYnc9Ebubg4hWP7Jjx44KQwNCMNeqj/OzJqXEUViEmNxqdhf0QOyuJ2J3PRG7m4MoGItQUlriGITl63hWyc8qCIIgCEKgI6LVIt0Haw6toSP5Ryg+Ip76Nurr0/M4ZsJqIaLVCki3kZ6I3fVE7K4nYndzENHqR0pLSx2v5+6ey3+PTzmeIkIjfDwISzIHWNXugj6I3fVE7K4nYndzENHqRw4dOmR6qquMg3lUmFdMYeGhVL9prE+PLdTc7oI+iN31ROyuJ2J3cxDRagF2Zu6kbRnbKDwknIakDPHpsQ+W5WdNahbHwlUQBEEQBCEQERXjR9q0aePkZUUsa2JUok/PcSw0QOJZrWZ3QS/E7noidtcTsbs5iGj1I3v37nUSrb6cUEBxsGxSgQYyCMtydhf0QuyuJ2J3PRG7m4OIVj+Sn59P6fnptOLACl4f1myYT49vK7XJ9K0WtbugH2J3PRG764nY3RxEtPqROnXq0J+pf1KJrYTa12tPzeKb+fT46QdyqSi/hCcUqN9EppSzkt0F/RC764nYXU/E7uYgotWPNG3a1JHq6sRmvs0aYIxnbdA8jkLDxNRWsrugH2J3PRG764nY3RxEyfiRjVs20t+pf/Prk5rXPJ61uKiE9m1Jp+W/7qSf31xNf361ibc3kNAAS7Ft2zZ/F0HwA2J3PRG764nY3RzCTTqu4AFrM9dSbnEuJUcnU9fkrtX+fG5mIaVty6B9WzMobWs6x6+WFtuc9omICqN2fWVmDkEQBEEQApuAEK1Tpkyh5557jtLS0qhnz5702muv0YABAyrc/+uvv6ZHHnmEduzYQe3bt6dnnnmGTj/9dLIa/+X/5xiAFRoSWuWgqiNpOZTGAtUuVDFxgCvR8RHUpG1datw2kZq0TaQGzeM5plWwDg0aNPB3EQQ/IHbXE7G7nojdNRWt06ZNo/Hjx9Nbb71FAwcOpJdffplGjRpFGzdudDu378KFC+mSSy6hyZMn05lnnklffPEFjR49mpYvX07dunUjq4DpVf9Oqzg0oKiwhA5sz7R7UbfZl4LcYuedQojqN4llcapEakJyNIWEhNTWZQheIPbRE7G7nojd9UTsbg4hNqgnCwOh2r9/f3r99dcd8/k2b96cbrvtNnrggQfK7T9mzBjKycmhH3/80bFt0KBB1KtXLxa+npCZmUmJiYmUkZFBCQnmxIOuP7yeLvrxIoo
Oj6YFYxZQcVZIWVd/OntSD+3OptJSZ9OER4ZSo9YJDk9q49YJFBUTYUr5BPNAg6tjx47+LoZQy4jd9UTsridm2z2zFnSKFbG0p7WwsJCWLVtGEyZMcGwLDQ2lESNG0KJFi9x+BtvhmTUCz+yMGTPIKqCdMHfFIuqadjz1LBlEXz26nLKOlM/pFls3ysmLylOxShYAQRAEQRA0xNKi9dChQ1RSUkKNGjVy2o71DRs2uP0M4l7d7Y/tFVFQUMCLsQVjNvk/NKChBRfy6yzKJ/QkQJTavah2b2p8fcnzFoy0bt3a30UQ/IDYXU/E7noidtdQtNYWiH+dNGlSue2bN2+muLg4ateuHe3evZuFbXR0NDVu3Ji2b9/O+yCuFp7TgwcPOuYbxvRtmA0DyYWRq02lvkhOTmZP8b79+6iw0RFKz0qjAV26U1LjaEpuHkdt2reiLVu2kI0yKL8kjEozChxiu2XLlnT48GHKzs6m8PBwPs+mTfaUVvXq1eNz7du3j9dbtGhBR48epaysLAoLC+PyY1+UE90JuKbU1FTet1mzZrwfuhgQg9OhQwe+boRhxMfHU926dfnaAa4lNzeX0tPTeR1dH1u3bqXi4mI+Zv369WnXrl38XpMmTbi+jhw5wusYELdz5072nsfExHC9YaCcalSgcYJGCmjbti3t2bPHUd84lqpDFdxurG9cd15eHkVFRfH1oEyqvnH9+/fv5/VWrVrRgQMH+BoiIyO5TnGtAGXH5411iLKr+kaZ0N0DUCe4BjVNH8JVUCeoR9gX12qsb9QjrgekpKTwvYPzq/qGzXH92A+2NNYh7iPYEmBf1IOq76SkJK5TgHsSdavqGzbHcdzVN17Dvqq+K7tnUd8oJ+pNPYhxT6r6xrWj/ADlQV0Z6xt2QrhOREQEr6v6ruyeraq+YWM0LLGo+lb3LLrJsKj6ruyeraq+cVzUWVFREcXGxnJdGO9ZHAPfSVXfVT0jUE+4Hzx5Rqj6dr1nUU+qvnHPYps8I6z9jEC9YTxFdZ4ROKaxvuUZEXjPCNgW91d1nhHV0RHbNE2pZemYVvVl+uabb3gwlWLcuHF8g82cObPcZ3BjIzzgzjvvdGybOHEihwesWrXKY08rvmhmx4rAW9ypUyfTji9YE4lx0xOxu56I3fVEYlrNwdIBkmjp9u3bl+bMmePYhpYS1gcPHuz2M9hu3B/Mnj27wv0BWoOq9aWW2kCmedMT3G+Cfojd9UTsridid03DA+A1hWe1X79+nJsVKa/QlXDVVVfx+2PHjuXuFHTxgzvuuIOGDRtGL7zwAp1xxhk0depUWrp0Kb3zzjtkNeDNFfRD7K4nYnc9EbvridhdQ0+rSmH1/PPP06OPPsppq1auXEmzZs1yDLZCrImKeQHHHXcc52aFSMVEBAgtQGiAlXK0KlScj6AXYnc9EbvridhdT8TuGsa0+gvEiKjBBWaGCiAwHIHcgl6I3fVE7K4nYnc9MdvumWVjbzC+B7GtumD58AB/gNGJQNz7giAIgiBYWa8kaiRaxdPqBgz2QroJpLswayo21Uoy25srWAuxu56I3fVE7K4ntWF3m83GghXpsJACSxfE0+oG3ADI81Yb1Ga2AsE6iN31ROyuJ2J3PTHb7okaeVgV+shzQRAEQRAEIWAR0SoIgiAIgiBYHhGtfkw8jJm6JAGxXojd9UTsrididz0Ru5uHDMQSBEEQBEEQLI94WgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEARBEARBsDwiWgVBEARBEATLI6JVEIQqufLKK6lVq1Zeffaxxx6jkJAQn5dJ0IOa3HuCIAQXIloFIYCBGPRkmTdvHukqeIz1kJCQQD179qQXXniBCgoKKNj48ccf6dR
TT6WkpCSqU6cOdejQge655x46fPgwWQm5bwVB8IYQm81m8+qTgiD4nc8++8xp/ZNPPqHZs2fTp59+6rR95MiR1KhRI6/PU1RURKWlpRQVFVXtzxYXF/MCEeUP0Tp16lR67733eD09PZ2+/fZbFkNjxozh94IFiFOIcYjySy+9lOrXr0/Lly+nDz74gJKTk2nOnDnUsWNHCrT7Ftfh7b0nCEJwIaJVEIKIW2+9laZMmUJVfa1zc3MpJiaGgh2I1m+++Yays7Md2yCABg4cSEuXLqXU1FRq2rSp18fPz8+nyMhICg31b6fVl19+yUIVQvzzzz+nsLAwx3tLliyhk046idq2bcsiNjw8vNbKlZOTQ7GxsT67bwVB0BsJDxCEIOfEE0+kbt260bJly+iEE05gsfrggw/yezNnzqQzzjiDhRs8WRA2TzzxBJWUlFQaV7hjxw7uvn3++efpnXfe4c/h8/3796d///23yphWrEOozJgxg8uGz3bt2pVmzZpVrvzwivbr1489tTjP22+/XaM4WQhM1Im6jiNHjrCXsnv37hQXF8chBKeddhqtWrWqXDlwTnhnH374YUpJSeG6zMzMrPYxvvrqK5o0aRIfIz4+ni644ALKyMjgkIU777yTGjZsyMe56qqrPApjwLHq1avHtjAKVjBgwAC6//77ac2aNSzgAeoex0fjxZVLLrmEGjdu7HQP/PLLLzR06FAWoCgv7pm1a9eWu0dwzK1bt9Lpp5/O+1122WVUUyq79yB027Rpw3Y45ZRTaPfu3Sx8cQ83a9aMoqOj6ZxzzmH7uOLJNQmCYC1qr8ktCILfQEwjRNTFF19Ml19+uSNU4KOPPmKhMX78eP77xx9/0KOPPspC7LnnnqvyuF988QVlZWXRDTfcwELi2WefpfPOO4+2bdtGERERlX72r7/+ounTp9PNN9/MouHVV1+l888/n3bt2sUxmWDFihUco9mkSRMWZhBSjz/+ODVo0KBG9QFhBXAelBXi+cILL6TWrVvT/v37WRgPGzaM1q1bV84TC0EE7ypEKgQlXmO/6hxj8uTJLKgeeOAB2rJlC7322mtcXxDUR48eZVG+ePFitg+OB5tUxObNm2njxo0s7iCW3TF27FiaOHEix7ziHoBHFoLvp59+4jIrIGJ/+OEHPpYSv+iyHzduHI0aNYqeeeYZ3ufNN9+k448/nu1jFJQIA8F+eA+i0kxvPjzKhYWFdNttt7Eoxb130UUX0fDhw7lxAKGu6ha2QpiEojrXJAiChUB4gCAIwcEtt9yC/lWnbcOGDeNtb731Vrn9c3Nzy2274YYbbDExMbb8/HzHtnHjxtlatmzpWN++fTsfMykpyXbkyBHH9pkzZ/L2H374wbFt4sSJ5cqE9cjISNuWLVsc21atWsXbX3vtNce2s846i8uSmprq2LZ582ZbeHh4uWO6A+WOjY21HTx4kBec76mnnrKFhITYevTowfvgOktKSpw+h+uLioqyPf74445tc+fO5XO2adOmXL1V9xjdunWzFRYWOrZfcsklXKbTTjvN6RiDBw92qnd3zJgxg4/50ksvVbpfQkKCrU+fPvy6tLTUlpKSYjv//POd9vnqq6/4WAsWLOD1rKwsW926dW3XXXed035paWm2xMREp+2oa3z2gQcesPnivq3q3mvQoIEtPT3dsX3ChAm8vWfPnraioiKnusW9pu7n6lyTIAjWQsIDBEED0P2OrmZX4O1TwGN66NAh7jKF52nDhg1VHhceO3RLK/BZAO9lVYwYMYK7+xU9evRgT6H6LLyqv//+O40ePdrJU9muXTv2GlcnrhKeWSz4LEIjBg8eTN99952jblRMKs4JrzS8zhi0hBhQV+ChM9abN8eA59PoiUaMLbT81Vdf7bQftqPLGx7MioDdALzVlYH34UEH8IrDw/rzzz87xftOmzaNQxbgcQQYHIXBawgZwL2hFnhhUba5c+eWO89NN91EtQHKn5iY6FhHeQB6Eoxxu9g
Ojyzil729JkEQrIGEBwiCBkCIoBvbFcTwIT4TYQFK0CgQY1kVLVq0cFpXAhZd3NX9rPq8+uyBAwcoLy+PhaYr7rZVBGJh0eWtxCW62xHvaByY9corr9Abb7xB27dvd4rlVGEKRvB5V6p7DNdrV+KrefPm5bbj2LCFu+MYxaoSrxWB9xEra2xwvPzyy/T999/zIC6IV4hYFeqhQg8Autzd4RqOALForFszqU4dAnVfVfeaBEGwDiJaBUEDXD2DAN4mxFziRxpxovB6QuDBM4h4QIilqnAd9KPwZBR4TT5bHXAeeHUr4qmnnqJHHnmEvZyIV0WKJXhNMSDKXR24q8vqHqOia/emTjp37sx/V69eXeE+O3fu5EZJly5dHNsGDRrEsZsYFAbRCmGPRgLErEKVHTGgGJzlimsmAqPH2Wy8rcPqXpMgCNZBvp2CoCkYrIJubAyGQlYBBTyFVgBeQYhoDKZxxd02b8GIeqSEev/998uJeuQ3ra1jeAsmEMCCgWDw9roLE0AeVHDmmWc6bcfAJXwGghahARCxELMKFb4BW1Qm/AOJYLwmQdAFiWkVBE1RHimjFw+xf+jitgLKQwoxtnfvXifBinRFvjyPqyfz66+/dsRA1tYxagKyC6D7+8YbbyyXrgypzjBCHqnFkJ3BCLyqyIDw8ccfc7oxiFgjGF0PTzw8yZhgwpWDBw9SoBGM1yQIuiCeVkHQlOOOO45jSDGw6Pbbb+c4RnSZWinBO1I//fbbbzRkyBAe4ANB9vrrr7MAW7lypU/OAe8jwiMwUA11gnymSKeE/J+1eYyagHyoyI8LrylSbGEdtlUzYiEeFt5g1zRkffr04fjghx56iMWrMTQAQNwhFdQVV1zB+yJdFga0IS0Z0mXBLrBHIBGM1yQIuiCiVRA0BUIGeTvvvvtuHowFkYOR1yeffDJ7o6xA37592auKPJuIGcUgG4jD9evXe5TdwBOQTQAZBpBzFl3kEDIQL8ihWpvHqCkYVIUQBeRfhRcRGSBQX7fccguXo6IwBQjV//3vfyxeUW5XEO+K7A1PP/005+6FuMXAPmSKcJeRIhAIxmsSBB2QaVwFQQg4kAYLmQ/USHBBEAQh+JGYVkEQLA1GtBuBUEVqJjUVqyAIgqAH4mkVBMHSYApXTCuK+FCkbkI8IrpzMd1m+/bt/V08QRAEoZaQmFZBECzNqaeeSl9++SWlpaVxHlDMZoWYTRGsgiAIehFw4QEYTYxRzsalU6dOjvfz8/N54AEGmWAaRaR42b9/v1/LLAiC93z44Ye0Y8cO/m5jZiikZnI3YEgQBEGomAULFtBZZ53FgxChnZBO0JN83njewmGAwZofffQR+ZOAE62ga9eutG/fPsfy119/Od676667eGYX5EicP38+53c877zz/FpeQRAEQRAEf5KTk0M9e/bkDCOegIlmzjjjDM5KghSDmOHv2muvpV9//ZX8RUCGB2CaPXfT78ELgxlpkHZGzSsNLw2mOVy8eLHTTC+CIAiCIAi6cNppp/HiKW+99Ra1bt2aXnjhBV6HloKT8KWXXvJbWsSA9LRi9DDc2xiYgSTaSAqtZn7BDCfGqfkQOtCiRQtatGiRH0ssCIIgCIIQOEA3uU51DLHqTz0VcJ7WgQMHckxFx44dOTRg0qRJnBD6v//+44EakZGRVLduXafPNGrUiN+rCIxExqIoLi7m5OVIzB0aGpC6XhAEQRCEIKW0tJQddl26dOHeZwViT7H4Augm6CcjWM/MzORUhNHR0VTbBJxoNbq2e/TowSK2ZcuW9NVXX3ldgZMnT2bxKwiCIAiCEKhMnDiRB6wHKwEnWl2BV7VDhw60ZcsWGjlyJBUWFlJ6erqTtxXZA9zFwComTJhA48ePd6zv3r2b5zZfsmQJ54g0C+SchOAW9ELsridid/9xNC2P/p2xi47szeX1Bi1jqf/ZLSihQR3Tzy121xOz7b5v3z4aMGAA9zKjV1jhKy8rgG5yzb6E9YSEBL9
4WYNCtGZnZ9PWrVvpiiuu4HnKIyIiaM6cOZzqCmzcuJFd6MjtWBGu7vTExET+C8HarFkz08qemppq6vEFayJ21xOxe+1TVFhC//6wnVbO2UO20hB+th93XlvqMqQphYSG1EoZxO56Ult2T0xMZBFpBtBNmH3QyOzZsyvVU2YTcKL1nnvu4TxjaMEgnRVc4WFhYXTJJZew8a655hr2mtavX58Nedttt3EFWzFzgMTL6onYXU/E7rXLrnWHaf4XGynzUD6vt+vbkI6/qD3FJvrOE+UJYnc9saLds7OzuVfamNIKqayglzBgHb3OENuffPIJv3/jjTfS66+/Tvfddx9dffXV9Mcff3Ao5k8//eS3awg40bpnzx4WqIcPH6YGDRrQ8ccfz+ms8BogFQNuFnhaMbgKI93eeOMNsiL9+/f3dxEEPyB21xOxe+2Qm1lIf3+zmTYtsXdrxtWLomGXdKRWPZL9Uh6xu55Y0e5Lly7lnKsKFRY5btw4HuCOkAOVjQkg3RUEKvLfv/LKK+w5fu+99/yW7gqE2Gw2m9/ObmFhjBgRxLaa6d7HDdSvXz/Tji9YE7G7nojdzQU/ZRsWpdHf326mgpxiCgkh6nFScxpwdmuKrOM//4zYXU/MtvueWtIpViPgPK1WekAiNVZJSYnXx8BnMTWlYD0QcoI0IpjqztfU5J4RAhexu3mk78+leV9spNSNR3k9qVkcnXR5J2rUypxYv+rYXJ7zeuILu2OMDn6LhGOIaPUCZCiAGz031z4S1Vsw+g4xJYI1iYmJ4cF4yP3rS5KSknx6PCEwELv7npKSUlrx2y5a+tMOKikupfCIUOp/VmvqeXJzCgsL9Xv8ILxh8pzXE1/YHU4TeFHj4uJ8Vq5AR0SrFwl9cSOi9YNZuSBovPXGoSUmrShretHRMDl48CDbun379j4Nqm/YsKHPjiUEDmJ335K2LYPmfraBjuzN4fXmnevRsEs7UWID/6TicX22Q7Ci4QvRYUz+LuhBTX/f8TuE3yDcR/gNEq1gR75J1QRiBsIVsSR4INW0JV6njvl5AgXvWsnomkGuPdjcl3bCbGuYFEPQC7G7byjMK6bFM7bSmgWpRDaiOnERdPyF7anDgEamhPN4A6YTh+jAAGGIF3nO64cvft9x/+zYsYPvJxGtdkS0BlE6C8G3iI0FwVpsW3mQFkzdRDnp9mm3Ow1uTEPOb8/C1YpYRUQLgYncP+UR0epHpPWtJ+jqEfRD7O492UcL6M9pm1i0goQG0XTiZR2peaf6ZHXkOa8nYndzEFeSH9FlNDHyvxmn1fUVmF+5V69eFGhkZmb6uwiCHxC7Vx9bqY3WzNtDX05azII1NDSE+pzaki55ZEBACFadnvNm06pVK3r55ZcpUBC7m4OIVj+COJXa4sorr+SuBrVgJPOpp55Kq1evtqxQ/O6773gmM8x0Fh8fT127dqU777zTaXY0TNkbaLjO5Szogdi9ehzem03Tn1/G4QCF+SXUqHUCXfhgfxo8ui2FRwZOfF9tPud98TsxevRosiL//vsvXX/99bUijtXvJMatdO/enRPqe+NpnTFjhill1BkRrRoBkYpUXVgg9jCi9cwzzyQrgvKNGTOGZzZbsmQJLVu2jP73v/85/QAgDYikERKE4KK0pJT++X4bffW/fyltWyZF1AmjoWM60Hn39qXkZpL6J9jwVNRjUFJNBz97yuOPP86/k//99x9dfvnldN1119Evv/xSK+cWKkdEqx+p7dxrUVFR1LhxY17gLX3ggQd4Ng2k1VDcf//91KFDB344tGnThh555BHHQwXd/JMmTaJVq1Y5WqLYBtLT0+mGG26gRo0acQuzW7du9OOPPzqd/9dff6XOnTvzdSsBXRE//PADDRkyhO69917q2LEjlwkegClTplTo9TV6ktWCVrMCD6DTTjuNz49yXnHFFXTo0CGqbWQEuZ6I3aumML+YfnpjNS39eQeVlth46tVLJw6kHic149CAQCSYcmx
W9QydNWsWT62OcDA4FOAU2bp1q+N9jITHc3natGk0bNgw/q34/PPPHR7e559/nnNj47O33HKLk6B1DQ/AceABPffcc/n3CjHj33//vVN5sY7tOA+mL/3444/5c/i9qgz07OF3Er+B+E2sX78+zZ4928nrO3LkSEpOTuaeQFzL8uXLncoKUDbX36GZM2dSnz59uEw4Pn5TMVGR4BkiWn0AUpvkFuVWezmUccirz6mlJjPwIh3HZ599Ru3atXPyVuLLCiG6bt06nmv43XffpZdeeonfg+fz7rvv5m565bHFNqQAw4Ps77//5mPis08//bRTig5MxIAH0qeffkoLFizg+Y3RvV8ReGCsXbuWH5KeosqEZcuWLXxtJ5xwAr+Hh9Tw4cOpd+/ePL0eHq7orr3ooouotjE+3AR9ELtXPdhq+vPLadfaIzxJwCnXdKXTb+pOcfUCe0BLTk4OP6uLCkr8svhqpnZPnqG4Vsxnj/fRW4YMLBBu+I0wAofJHXfcwWng1Dz2c+fOZYGLvxCX+B1STpGKgODD+RHmdvrpp9Nll11GR44c4feQY/uCCy5gMQxHC5wqDz30ULWuGeX+9ttv6ejRo06TzGRlZdG4cePor7/+osWLF7MwxvmxXYla8OGHH/LvkVr/888/aezYsXzt+J18++23+RrRiyh4hmQP8AF5xXk08Iva96L8c+k/FBPheXcJPJ+q1Y+HC1q02GZM7fTwww87XqN1CGE5depUuu+++zh3KT6PsAKISsVvv/3GXfh4AMEjCtCCNIIW81tvvUVt27bl9VtvvZW7YCritttu4y844olatmzJsa2nnHIKP5TgMXaHKhMe0ggrQAsYDwXw+uuv88P2qaeecuz/wQcfcL7dTZs2OcpdGwRSjJvgO8TuFXNoTzb9+PoqTmUVHR9BZ9zS0+9TsPp0yu/CUnrnjvl+Of/1rwyjiKiaxwB78gzFc9cI3ke3PgQaet8UGJtw3nnnOe1br149PgecHZ06daIzzjiDhS+65isCHtpLLrmEX6Ncr776Kv8WoScPz3700j333HP8Pl7DCeKJQIR3Fb+FBQUF7AWFp/Xaa691vA/xbuSdd95h7/L8+fPZu4xrBthm/K2EyIZgh+BVv5NPPPEE/75OnDixynIJ4mnVCnSPrFy5khd8sdHChYcUCfQV6LZBtzy+aBCo+OLCK1oZOB5mfalM+KH7RglWAMF84MCBCvePjY2ln376iT2mKAPKAi/vgAEDqpw+98EHH6RFixZxNwyENkBLGy14HEcteDACY/dVbYCHs6AfYnf37Fp7mAdcQbDWaxxDF9zfL2gEKwiW2bA8eYZu3ryZRSTEWEJCgqNb3PU3pF+/fuWOjx48Y+9cVb8RoEePHk6/GTin+szGjRupf//+Tvvj98MTEJaG37U//viDw3rQ24ieOwU8zBDT8LDCOYLzoveyqt9K1CGcNcY6xHF8MS28LgTHt8nPRIdHs9ezuqDroSYJ7HHe6oAvtfGLh3ggfOEQAvDkk0+y0IMnE61BCFq8By/rCy+8UHk5yoRhZWB2KSOI8/Gk2wpCFwtauejagTCGsL7qqqvc7o/wBDxg5s2bRykpKY7teKCcddZZ9Mwzz5T7DB6OtQmm/xX0Q+xennV/7aV5X2zk1FYpHerSqTd0pzqx1pwowFvw7MNzHh5PfxAe6RvflCfPULyPnjH8puB+x28cPKyYVdD1t8iT3wjXsAJffMYTEKuK30osX3/9Nff4QWh36dKF34en9PDhwxxCh+tF79/gwYPLXae7OsTvq6uXGUheV88Q0eoDODVGNbrpjTdwTFztjIasqNx4mObl5fH6woUL+QtojPsxemEB4npc88+htYv5kc3uZkerHR5bhDa4A6Ib4hbdQggnMILAd8Qm4Rj+9nwgVlcG5eiH2P0YEKnIELBslv350nFgYzrpik4UFh58nX94vsKj5osuen9S1TMUIg7eTQjWoUOH8jbEfPoLhAP8/PPPTtt
UbGl1QPgDxm5MmDCBe+8Axm+88cYbHMcKMKDZdVAvBLXrbyXqEHVkdB4J1SP4nhBChSA+Jy0tjRfEnyJuVLWeAbo60L0B7yq6exAfhFypRvDAQoA7uk7wJcUxMXISA54Qz4QRlngf6UEQqO8tyAyAOB94THG8FStW0NVXX81xgRi16QquCQH/F198MXuJ1XWqzAgYiYoAfXRd4cGF60M2A3hsJQm0INQeJUWlNPvDdQ7B2v+MVnTylZ2DUrAGIhkZGY4wMrVAlFX1DEX4Cwb1Ir4TYV3oWsegLH+BgVcbNmzg+FQ4VL766ivHwK7qTo+KgVPIaIMBZuq3EoOK8Tv6zz//cA+la49jixYtOCYXv0MYyAUeffRR+uSTT9jbikYsPo/fW+NYEqFy5CnhRyoaUGQWEJHoxsECjw8ePOj6OPHEE/n9s88+m+666y4eJIVUUvC8IuWVEQhTBLkjPhbB5l9++SVvRwsc8UN4oKELBYKzJmIQQnjbtm080hJxU4i9xZcfg77QgnYFDyfEGWHUqbpGLCqmCV1VaB2jTBjQhe4eDAZAoHxNQjS8wRjbK+iD2J0oP7uIZr6ygjb/u59TWJ08rjMNOKtNUM+xXtvP+ZoCRwEGXBkXiKyqnqFYIMCQUxshAfgtUYOg/EHr1q3pm2++oenTp3Nv4JtvvunoRayuTfCbhmuG6ATvv/8+C1F4TpH26/bbb6eGDRs6fQbXDicOPLWoQwCHCgY/43cMv03oEUQ4G3o4Bc8IsfkqH0YQga5u3GhoXWKAkZH8/Hz2/OELUdMYFHgpA+2BphO+tLUReLPRChf0Qne7ZxzMpR9fX03p+3MpMjqcTr2hW8BMxVqTZwcEuTznrQEyByCLDX7bzcYXv++V/QbtqUSnBDPiafUjkgJHTyqbVEEIXnS2e9q2DPrmmWUsWOPqR9F59/YJWsHqijzn/QfiTtGjiF47dOfD+6nSTZmN2N0cAlq0IoE9WrHG+ejRMkHsDWJrEPyO7myZ81sQBME/bF1+gGa8tIJDAxq0iOeUVklNg2eWKMG6IAXXOeecw937yIeKtIkYL6EzU6ZM4bEp8NwiTBDpLysDs5AhJA8xu/DsIuwDOstfBGz2ALSeMErcmKcNoEKR3xOxmkjZhPhMpJdALI7VcJf2Qwh+3OUoFIIf3eyOyLNVc3bT399uIbIRteqeRCOv6UqRdQL2Z8cr5DnvPxAvqmZ0rG2saPdp06bx4DiESECwQpAizhYZDVxjcsEXX3zBkyFgkojjjjuOB7RhQgc4C1988UW/XENAelox4h2j9ZBaw5iwG6MeESCNysSMFX379uVp1DCgCFOtWQ2VakrQi+pMTSsEDzrZvbSklP6cuon+/sYuWLsPS6HTbuqhnWAF8pzXEyva/cUXX+TJDJDxAd5niFekkYQodQe0EyYbuvTSS9k7i8FoGGxdlXfWTAJStKL7H1O8jRgxwmk7Ri0ijsS4HSPPMfgBOTwrC5jOzMx0LGr+YLPxRRJkIfDwZ9eK4D90sXthfjH98tYaWjM/lSiEaMgF7WjoxR04W4COyHNeT2rL7llZWU76BXrGHZj4ABrJqI+Q8QHrFekjeFfxGSVSERuM3LcqP60/CLhmL1JqLF++3G2SYKREQvJ7pOAw0qhRI36vIiZPnswpPVxBbtDU1FROa4F8amg5weUPQyPBPeYkxuhAdIOpmTDQasFNg7QgmJIO76vp2VA2uNXVTaUS+2NfvEbMiEqc77ov3oMgxzld90USY5xL/Sga98UxUGbsi3K67ou4Fpwf+7vuiwTS2F+1GI37AsQMV7QvrhtfWrUvjov3sA3nx/UZ93WtQ5RP7etah8BdfaNeUMaK6hv1gs+5q281a41xX5QB68inB6898vGp+wnT9iFeCnTu3JmnDkRybZQX3cC4P1F+pAXDvNXofgGYfAHbcSyUDdMK4qGg5rfGsXGvASSgRq+CunexL3Im4hpwj2PEqPLeYdpElHfv3r28jvK
i3NimplNcvXo1v4f0KqgDjD4FSMeCbh/UBWyK8+I8ADFMqBc1yQTCcTCaFQ9K1BGuHd9HgBnIUOd4HyAlDka2pqensw3xWfW9xTTBuCfU9I9o9eM6kQcStsB3TtU3uq0Q6qPqGw1R5AjGgrIhdYyqb8xkgwUp0FQ+RfTAqKkd0SWG8uK+RH2jHJgXXaWkQh2o+sZxUWe4B1DfqIs1a9bwexjNCzvg+QCMz4j4+Hh+31jfKBvqDNeEdHLIYwnbog5wT+BZA2BT3EPG+t6xYwf/GOHexlSXuF8AUhBhG35IANIMwaaob9gB51E/NmpaZpxX3bOI9Ud947uL+wX74nuIexY9WLgnAOLZsB9yHqv6Rr5K3EMYOwD7qHu2WaNW9MeHmylzfyGFhBGNurYbZdj20JIl+/iYKDPuS1Xf+K6qAWr43uB+xj0Le8PZoOob9y++I+qeRX3Dxvg8rgvHwhSZQGVoUFNq9uzZk+8znswlJobvH3XPor5x/ahjdc/ic7hnULeoU5WbEyn08Hl1z8IW+L4h9ZHrPYt7EDbH/aSSzNfGM9n4nHX3TDY+Z12fycbnbE1+16r7TK7sd831mexah7Cp2te1DrGfu/pGvWBxV981+V1z3RfnRf158rtWUX3jfXUMdX83K3tGqGeGmqVLMXHiRLdxu3he4jrxG2ME6+p56Qo8rPjc8ccfz+VDfd544408Vbq/CKiUV/gBxIMNuc9ULCtyjOLhjNgMxF/A7e3a0sCPPfKKupt+DmB/42fwQ4QbwV0qCRgdD3M8qPHArgnqASBYE4hQiB2ICl/aCQ8iPJQEvQh2ux/em00/vr6Kso8UUHR8BJ1+Uw9q3CaRdMSYqkgJKkEvfPH77knKq3Xr1jlNWQ4B7C7VFhpY2A9d/phyVoGc6vPnz3c0uFxz9mLCHkzzjgY/Gr2YaAEhBq453GuLgPK0wsMAEYEWrfHGWLBgAb3++us8OwdaJPA0GL2t8CjA01ARrkaGV6MicBPi2Mpzgx8hbxNjozVnxWBt3UE7DgIDNoatff2DA++RTOepH8Fs990bjtCst9ZQYX4J1W0UQ2fe2oMSGwSvQPdmGldBL2rL7vHx8dybVhXw/uO3zDWbUmX6CMIUkydgenTVEwHdcv311/NEDbU9MU/AidaTTz7Z0V2kgGcV3T2Yqg2tDrRqMXUaUl0BdMuiu8fYsqgpysBKuHqLTC5gbSBYK2vsCIJAtH7hPpr32QYqLbVRk3aJ7GGtExvh72IJgmAAoRkIA4I+Gj16NG9DqALWkWXJHXDeuApT5cTxVyd9QIlWtCgQY2QEnkp006vt11xzDad0QLwaWh+33XYbC1ZMl+Yr4FlFfBNCBGqSQBixImj9CNbDzC49xOcJ+hFsdseP1pIft9PSn+zxoO37N6KTx3amsIiAHN9rGuKYqJqPPvqI862jlzRYsKLdx48fz5MrIMwSYZMIq4TnFM4/gGnTEUKAcT7grLPO4owDGPegwgPgfcV2f4W8BJRo9QTkZEPLAJ5WeDKRgwyzYpgBjFYTw0H8+nJ6UCEwQDC7oB/BZPeS4lKa++kG2viPfdBav9Nb0YCz7FOWCs4E0LARzsH58ccfOxruGNQGIYOBNxhoZBZjxozx64h0Xew+ZswYHlD56KOP8oBTjAeaNWuWY3AWeqWNntWHH36Yv9P4i7E+GKQJwYrpcP1FQA3Eqi1qa05fBD4Ha4ybUDFidz0JFrvn5xTRrLfXUOqmdAoJDaETL+tIXYY09XexLIVxAA0aK4ES0wrRihhH5DeH0wfpjZBiEiJlwoQJTvti/IjK5iKUB5kNamp3TwZi7TZZp1gN6ccRBEEQPCJ9fy5Nf24ZC9aIOmE84EoEa3CBbm3E8iNV20033cR5PL///nsWtIiFhIBF6jKkQgPwxM2YMaPceAB0+QOkE8M+06dP5yw+GLyMNGTG3KDY1zh4Gimb4AX89NNPOawGKdAwit2YQx2vMckQQgQRrodeVmQTMk7
rLgQfQRceEEgYsyAI+iB215NAtjs65DYsSqMF0zZRcUEJxdWLojNv7UlJKYHhQfQnEGmoP5ufZkgKiY6uUdgG8qMi/R/AoB2MFUHayeqC0ebPP/88507Ga8yshBjJisIOkA8XYvjHH3/kXLgXXXQRPf30046uacRnYnp2CGp0b6PLG/l3IXatQDCnt/MnIlr9CBL6IoWEoBdidz0JVLsX5BXT/M830Oal9mwpKR3q0siru1JsXesNNLEi6OJFx+7GPn39cv6Oy5dRiBcCCkIbIhWpJDGgGbGQ8Gq+9957XoUF3HPPPTyTJcBkPpicAaIV2X/cgZHt8MBiADZA6iWUB6IVXlbE3iI3O7IKAYQ0wANsJbuLcPU9Ilr9iJoNQ9ALsbueBKLd07Zl0G/vr6Wsw/kcvzrw7NbU+5SW2k7J6vV0nn7IZ+kt8GwiFhOZcVB2zIqE7nrEtqLR5W0cq5oQCKA7X6WNrEi0IixACVb1GZVmErPAoXwYAa9ACIEKWbACMn2vOYho9SOBEpwv+Baxu54Ekt2Rc3X5rB205McdZCu1UUJyHfau6jrDVU1AhpmQOnXY4+mv8IDqgLjTN998k8UpPJfG7nt3k+Eg9MB1PLe7VJDIRmD8TFXCzri/+kwgCUGZBc0cRLT6EcyXLeiH2F1PAsXu2UfzafYH62jv5nRH/tVhl3akqGj5ufB2YBMElzdd9P4AwrRdu3Ye7480SPv27XOsb9682fRehTZt2rCo/ffffzktF8jIyOAp1k844QSyAlbM0xoMBE6fRRCyatUqfxdB8ANidz0JBLtvW3GQpj6xhAVrRFQYnXxlZxp5dRcRrJqFhVSH4cOH8zTqK1asoKVLl9KNN95YzkvqaxA2gCT59957L82dO5fWrl3LEwshx6hVcgUHu939hTyJBEEQNKeosIT+/mYLrV2QyusNW8bTyGu6Ut2GgeEdFPzHCy+8wDMqDR06lMMJXnnlFVq2zPxQCMzUBIF85plnckaD++67j3OWyoQ9wY1MLuCG2kraiy4VFZAu6IPYXU+savfDqdn063tr6ei+HF7vfUoLGnh2GwoLl444XySFh/dPkvCbD6YjxRSkENHwuvobX0y+IJMLlEc8rYIgCBoCf8Waeam08NstPC1rTEIkjbiqCzXvXN/fRROEKkE4AtLIIYMA4lkff/xx3n7OOef4u2iCiYho9SOY59eKnhfBXMTuemIlu+dlF9Ifn2ygHasP8XrL7kl08tjOFB0vHkFfI9OdmgcmK9i4cSPXb9++fenPP/+k5ORksgJid3MQ0SoIgqARuzccod8/XEe5GYUcAnDc+W2p+4nNLDOARRA8oXfv3rUSOytYCxGtfgTzLwv6IXbXE3/bvaSklJZ8v42W/7aLyEZUr3EMnXJtN0puFjj5YwMRmRVJT8Tu5iCR9n4EcysL+iF21xN/2j3jYC5Nf3YZLf/VLli7DG1KFz7YXwRrLVBQUODvIgh+QOxuDuJp9SPZ2dn+LoLgB8TueuIvu29cvI/mf7mJigpKKComnE66ohO17d3QL2XRcbBbSUmJv4sh+AFf2F2SO5VHRKufWL5/OX1y4BPqV9qPwkJlujedkG4jPaltuxfmFdP8LzfSpiX7eb1p+7qcHSC+vuSxrK0pPDEYx+xE+4I1QaqzmoL7B8iUsMcQ0eoHcoty6bY/bqPMwkz6YdsPNLrdaH8XSahFOnXq5O8iCEFu97TtGTT7/bWUeSifQkJDqP8Zrajvaa0oNFQGW9UG4eHh3Eg5ePAgNW7cmPNtCvpRE7uXlpby/YP7CPeTYKdWagLJbzEyVSXAXbJkCX3xxRfUpUsXuv7660k3YiJi6Jru19BLy16iKSun0GmtT6OoMJmnWBeWL19OAwcO9HcxhCC0u63URst/20lLvt9OpaU29qpiZqsmbRNNPa/gDH7vkN4MieG3bNki89BrGtNaU7vDW9uiRQvJ7FHbovXSSy9lcXrFFVdQWloajRw5krp27Uq
ff/45rz/66KMeH+vNN9/kZceOHbyO4+Dzp512mqNlc/fdd9PUqVP5phk1ahS98cYb1KhRI7ISl3a6lD5a9RGl5aTR1A1TaVzXcf4ukiAIAUz20Xz6/aP1lLrxKK+369eQTry0I0XFSPe0P0COzvbt29O///4rvSsasmrVqhrbHfeQL8IMjEyZMoWee+451l7IaPLaa6/xBA0VkZ6eTg899BBNnz6djhw5Qi1btqSXX36ZTj/9dApa0frff/85KuWrr76ibt260d9//02//fYbzx1cHdEKb+3TTz/NDwMEKX/88cc8AwZmx4CAveuuu+inn36ir7/+mhITE+nWW2+l8847j89nJeqE16Gx7cfSK+tfoXfXvEvntT+P4iPj/V0soRbQaco9wXy7YzarVX/spn9/2kHFBSUUHhVGJ4xpT50GNxEPjZ+B4IDdXafgFIIfK9p92rRpNH78eHrrrbe41wfiE449TNDQsGFDtzG1cDLivW+++Yanyd25cyfVrVuX/EWtiNaioiKHm/z333+ns88+m1+jFYL5uKvDWWed5bT+v//9jz2vixcv5pvk/fff59CD4cOH8/sffvghde7cmd8fNGgQWYnTm59O3+/9nrZnbKcP//uQbu9zu7+LJNQCEp+kJ2bYfc/Go7Tgy410NC2X1xu3SaThYztRvcaxPj+X4B3yfdcTK9r9xRdfpOuuu46uuuoqXod4hZPvgw8+oAceeKDc/tgO7+rChQsdAwpbtWpVrXMWFxfTvHnzOOUfet3j4+Np7969lJCQQHFxcdbM0woPKCoHU6zNnj2bTj31VN6OgiclJdUopQTCAHJycmjw4ME8OwYE8ogRIxz7QBgjJmTRokUVHgdhBJmZmY4lKyuLaoPdu3bT7b3tQvWz9Z/RwdyDtXJewb+o0BZBL3xp95z0Avrt/bU086UVLFij4yPo5HGd6bx7+ohgtRjyfdeT2rJ7VlaWk36pKD8svKbQSEZ9hJ4ArFekj77//nvWVrfccguHWKKX/KmnnvI4nRe8st27d+fecBwDA8vAM888Q/fcc49X11srTQEU8Nxzz+U4inHjxjlmhkGFVBZLURFr1qzhikT8KpT6d999x4O6Vq5cyTEgrq5rVDbiNypi8uTJNGnSpHLbEXKQmppKffr0ofXr11NeXh63Elq3bk2rV6/mfRDfgVF+GGwGevXqxYH3yMkYGxtLHTp04OMAeIKRugKGBDB805ym1KZOG9qWv43eXPkmjQobxe81bdqUuxa2bdvG67hZ9uzZw/EluEacBwPaAEanoh5wXgDP8v79+7mFhNYe5mTGvginaNCgAdWrV482bdrE+3bs2JH3w82EG7h///60dOlSLhsaFOgWwLUDhGTgS4FjA3QvYHAJGgo4Jsq8du1afq9t27aUm5vr8KT369ePw0RgM4RtoCEBO6qWG1pjuD6A+t6wYQN/HteFYyE+COBzah53gHsJLTjUN0ZZopGCMqn6xvWrhwe+PPhcRkYG1y3qFNcKMGgCn1cJ4NHQQqPq6NGj3MJEmf755x/H/YRW4ubNmx31feDAATp8+DDbF9eKODbcF6jv+vXrc/cLwP2ARhaOha5b3P94kOD6sR+Oreq7Xbt2fF3q3sW+uMfx8ME9jutDnYI2bdpw3aLMADaHLbANZUUdG+9Z2FfVN6ZDxP2AcqG+cV6cBzRv3pzvC3XP9ujRgweX4EEZHR3N167qG11HuDfxvqpvfC9wz6KnBZ9Fvah7Ft8PVd/4/uI6cS+61jfuQdwzqr5h40OHDvGi7llV35h3HAvuH3XPwt6wj+s9i/pGOdatW+e4Z1EHqr5xXNQZfgRQ36gLdc/iGQA74Pmg7tmqnhG4l3BN1XlGoM5w/+J7h3u2U6fONOuzJbR3RSGVFmPAD1Fyx3BK6RNBzXsk0KbNm+QZYbFnBN5HnVTnGYHPoL7lGRG4zwhsQ/1V5xlRHR2xomwd9WJk4sSJ9Nhjj5ErqAvY1HV8D9ZVXbg
C/fHHH3/QZZddRj///DOX6eabb+a6wXmq4o477uB7Hd9No4MSehAeX28IsdVS9lpUFr64eHAp8KDAQ8BdLEVl4AupHiyIs3jvvfdo/vz5/CWC29u1pYEv8kknncTi2R3Y3/gZ3GS4EXADmRl/iAcurv/ftH/p6l+vpvCQcJo5eia1SLA/dIXgRNld0Iua2n3v5qM8ScCRvTm83qh1Ag27pCM1aCGx8FZGvu96Yrbd9+zZwyIZghqNAQXEv7usBWisYD909cPpp7jvvvtYP6kGgBGIZTRq0MhQuWIRYgAHpCehnRCqOB8avhDqEK9oPEH7QWOhjiw7jSu0MVqLb7/9tqP7HS0ub4yKz6Glh5YivKRoSb/yyivcIoKgRavNCFr9eK8iYGC0NNWCyq0NlCegf+P+NCRlCBXbium1Fa/VyrkF/6HsLuiFt3bPySig2R+upe9eWMGCtU5cBM9qdf69fUWwBgDyfdeT2rJ7fHy8k36pKM0WvMsQnqoXxBN9hN4FCFfj5AbwnMPTrCY+qAx4j92FEkBwe6uzakW0mhHX4Fox8JRCxKLbYM6cOY730OWCm8fYsrAK8BQr7uxzJ/+dtWMWrT1s7z4TghOj3QV9qK7dS0vsWQG+mLiYNv2znyiEqOsJKXTZpEHUZUhTnjRAsD7yfdcTq9k9MjKSNZJRH0E7Yb0ifTRkyBAOCcB+CoSJQMzieFVxyimncIYCBcJdEPKA0AJvU2bVimhVcQ2I00GMizGuwViBnjBhwgRasGABu5cRO4J1jExDzAViWq655hpO6TB37lz27CJcAAaxWuYAYEyH0al+Jzq9td2Iry5/1Y+lEszGamlQBOvZfd+WdPrqqaX011ebqTC/hBq2jKcL7u/HeVfrxEre1UBCvu96YkW7jx8/nt59911OFYr42ptuuoljdFU2gbFjx7KmUuB9xBBDw0GsItMABmLB+egJL7zwAqcbRSgAwgyQPQCx0wjBrChc0xIDsZA1AHENrspcFb46IFgaFYt4CohUBG7/+uuvnEsMvPTSSxx4ff755ztNLmBFEORv5Nbet9JvO3+jhXsX0j/7/qGBTWTWpGDE1e6CHnhi99zMQlo0fQttWGwf6BEVG06DR7elzkOayhSsAYp83/XEinYfM2YM93QjNz66+DHga9asWY7BWeiVNk5mgJhZ6Cvkv4fWQkwsBOz999/v0fkwJghxrMjyhEFn8LLCsQgno9GBabmBWBh8pdS2MRj3r7/+YnHpGmPhb1SAs9kDsRD47Dqt41P/PEVfbviSuiV1oy/O+EKSgwch7uwu6G13TLm6dkEqLZ65jQrzinlbl+Ob0qDRbSg6rupuOMG6yPddT8y2+55a0ilWo1Y8rSqu4Z133vFZXEOwcn2P62nGlhn03+H/aPbO2XRKq1P8XSRBEEwkbVsGzf9yIx3anc3rGFx1wiUdqHHrRH8XTRAEwWs++eSTSt9Hr7klRSviGtBNb4xrQD41jGb78ssvSVcQzOxKcnQyjes6jt5a9RZnEhjeYjiFh1pvZg3Bt3YX9LN7XlYhLZqxldb/bU8dExUTToPOaUNdhqZIKEAQId93PRG7E4cSGEF+V6S5UpmjLCtazYhrCAYqSvc1rss4mrZhGu3I3EHfbfmOLuxwYa2XTTAPydmot90RCrDur720eMZWKsi1hwJ0Oq4Jx67GJEgoQLAh33c9EbsTD753BQ5LDPC69957vTpmrbnwMOvI5ZdfXlunCwgw0we8za7ERcZxmMAz/z7Ds2Sd2eZMig7XV9zrYnch+O1ekh1JC77cSAd22nNVJzWL4wkCmrSVUIBgRb7veiJ2dw9mIHv66adZD1Y0E5ffRasZcQ3BzkUdL6LP1n9Gqdmp9Pn6z+na7tf6u0iCIHhJfnYR7fi7gJZuXkpkI4qsE0YDz2lD3U5IodCwWpvjRRAEwe/AiammE7Zs9oDK4hqQB8xK1NaoPIRJYB7nivhh6w/04F8PUnxEPP1y/i+UGCX
emGCgKrsLwUN+ThGtmrObJwkoyrfPDNNxUGM67rx2EgqgCfJ91xOz7b4nALIHfP/9907rkJtIV/r6669z2X/55RdrelrNiGsIBtDSwBRpFYHJBj5c+yFtPrqZ3l/zPo3vN75Wyyf4x+5C8IjV1X/s5skBQHyDCBoxtjs1bV/X38UTahH5vuuJ2J1o9OjRTuvIHNWgQQMaPnw4D9D3hvBAjWsIBtyJeSNhoWE8vestc26hLzZ8QZd2vpQax7qfI1gIHrsLgUtBbhGthFidc0ysJqXEUf8zW9HBgm0iWDVEvu96InYnp+lffUV4oMY1BAMREVVPxzg0ZSj1adiHlh9YTm+uepMmHTepVsom+NfuQuCJVXsYwB7H5AD1m8bSgDNbU5teDSgkNIQylu/xdzEFPyDfdz0Ru5tDrcS0mhHXYCZWixVZeWAlXfHLFRQaEkrfnf0dtanbxt9FEgQBYjWv2C5W4Vk1iNX+Z7Smtr3tYlUQBCHYdYpi/HjPwxhffPFFsqSn1Yy4Bp2meevVsBcNbz6c/tj9B7264lV6+aSXa6V8gjnItI7BIVYRrwqxqnKtViVWxe56InbXE13tvmLFCo/283aK+vBAjWvQjTv63EHz9syjObvm0KqDq6hng57+LpIgaAe8qavn7qaVvx8Tq/WaQKy2onZ9GopnVRAErZk7d66px5f5Qf1Io0aNPN4XIQHntD2HZ8h6ednL9MGoD7xuqQiBY3fBSmJ1D638fdcxsdo4hvqf2Zra9mno0bSrYnc9Ebvridg9wESr2XENwUBCQkK19r+5183007afaOn+pfRX6l80tNlQ08omWMfugv8ozDeI1RyDWEUYQF/PxKpC7K4nYnc9EbvbWbp0KX311Ve0a9cuKiwsJCPTp08ny4hWs+MaggHkqq1OzAvSXSHt1UdrP6JXlr9CQ1KG8OAsIbjtLvhHrK6Zt4dWzD4mVus2glhtRe36NaqWWFWI3fVE7K4nYneiqVOn8oyno0aNot9++41OOeUU2rRpE+3fv5/OPfdcr44ZHqhxDbqC6Vy/3fQtbTy6kX7e/jOd2eZMfxdJEIJOrK6cvZsnCFBitd/prah9f+/EqiAIgo489dRT9NJLL9Ett9xC8fHx9Morr1Dr1q3phhtuoCZNmnh1THHT+ZHOnTtX+zOYyvXq7lfz69dXvE5FJfYfViG47S6YL1aX/7qTPn1oES2esY0Fa2LDaBpxVRe6ZOJA6jiwcY0Fq9hdT8TueiJ2J9q6dSudccYZ/DoyMpJycnK4d/2uu+6id955x9oDsXwd1xAMHDhwwKu4l8s6X0ZfrP+CUrNT6atNX/G6EPx2F3xPXnYhrftrL2cDyM+2NwAhVvsrz2qY79r1Ync9EbvrididqF69epSVlcWvU1JS6L///qPu3btTeno65ebmWtfTiriG4447jtavX0/fffcdFRUV0dq1a+mPP/6gxMTEah1r8uTJ1L9/f3Y1N2zYkHPAbty40Wmf/Px8dkcnJSVRXFwcnX/++RxDYTUOHz7s1eeiw6Ppxp438ut3Vr9DOUU5Pi6ZYEW7C75j//ZM+v2jdfTxAwvtntXsIkpsEE0nX9mZLoVndVATnwpWIHbXE7G7nljV7lOmTKFWrVpRnTp1OOZ2yZIlHus4eEld8+67A+IUnHDCCTR79mx+feGFF9Idd9xB1113HV1yySV08sknW1e0qriGH374gV3EiGvYsGEDXXTRRdSiRYtqHWv+/PksSBcvXsyVAQGM4F64nRVwPeNcX3/9Ne+PqWLPO+88shphYWFef/bc9udSy4SWdCT/CH2y9hOflkuwrt0F7ykuLKH1C/fR15P/pW+eWUobF6dRSXEpNWgRbxerjw2kTiaIVYXYXU/E7npiRbtPmzaNMztNnDiRli9fTj179uRBUvAKV8aOHTvonnvuoaFDPctY1KNHDxbE8KpCrIKHHnqIzw0HIhyJ77//vnWncY2NjWXPKtQ9vJ/z5s3ji4HnFbN
iYUpXbzl48CB7XCFOoeozMjJ4tq0vvviCLrjgAt4HAhnxJYsWLaJBgwYF7PRorvy641e6Z/49FBMeQz+f9zMlRSf5u0iCYDkyD+XRfwtSad3fex2ZAELDQ6h930bU7cQUatQqQessJoIgBB57vNApEJLoqX799dcdEz/hGLfddhs98MADbj9TUlLC2urqq6+mP//8k7v2Z8yYUel5sN+HH35I33zzDZ8DIvXaa6/1WPT63dPqLq4B1CSuQQGRCurXr89/ly1bxt7XESNGOPbp1KkTe3QhWq3Ev//+W6PPn9LyFOqa1JVyi3Pp3TXv+qxcgrXtLlSNrdRGO9cepp+mrKJPH1lEK36zp66Kqx9Fg0a3oSsnD+FBVo1bJ9aaYBW764nYXU+sZvfCwkLWR0ZtFBoayuuVaaPHH3+cHYPXXHONx+eCOP3ggw/YIfnaa6+xp3bYsGHUoUMHeuaZZygtLc2aotXMuAYABX/nnXfSkCFDqFu3brwNlYEQhLp165abnaKiiiooKKDMzEzHogS21ae3xY/tnX3v5NfTNk6jPVl7fFQywUxkWmPzwKh/TATw+cTF9ONrq2jHmsNENqLmnevRaTd2pyuePI76ntqKouMja71sYnc9EbvrSW3ZPSsry0m/QM+449ChQ+w1dZ2pqzJt9Ndff3E3/rvvvut1L/tVV13FPeHIzwr9h5haOBHPPvts62UPQFwDXNEI3DXGNURERNDChQvZZfzwww97fXzEtkIYo2JrAgZ3TZo0ye0ECampqdSnTx8OZcjLy+MBYMgztnr1at6nZcuWfHPCRQ969epFW7ZsoezsbDYYWhZqogW48BHnsnPnTl6HsMZxcaMhKLpr167cEgJNmzblbdu2beN1iHJ0B8A7DVGO8yCAOoRCqHe93rTi6Aqa9PskurbptRwKgbiRI0eOUHh4OPXt25f3RSQIQifg+cYNBDp27Mj7IcwCrS7YC5kecHMjlAMtLJQRtG/fnsuqBrWhqwFxMfBs45goM8JAQNu2bdmLrkI/+vXrx7bCIDkMvsNNu2bNGn4PYSPFxcV8fQD1jZAOfB4D6XCsVatW8XsqBhpZKABicpBWA/UdExPDXnWUSdU3rh+tPICQFHwO3nnULeoU1wqQMw6fx7EAbIFY6KNHj/L9ijL9888/ji85RoUieTRAfSMmCIH3sC+uFa1s3Beob/QCqMGCuB9QzzgWGh0DBgxgm+P6sR+Oreq7Xbt2fF3qgYJ9V65cyS1m3Du4PtUwbNOmDdctygxgc9gC21BW1LHxnoV9VX337t2b7wfEhaO+cV6cB6DrCOVV9yy+09u3b+cHZXR0NF+7qm/0ouDexPuqvvG9wD0bFRXFn1Xeh8aNG/P3Q9V3ly5d+DpxL7rWN+5B3DOqvmFjPICxqHt23i+Laf+6QjqyvYRKi+0RT2GRRG37JVPjLlGUW5JOBwu2UZvQBo57FvWNcqxbt85xz6IOVH3juKgz/AigvlEX6p7FMwB2wPNB3bNVPSNwbFxTdZ4RqDPcvzV5Rqj6hm1xXnXPyjOidp4RsDvqpDrPCBwT9S3PCN89I1R9Jycn84L7R92zsLeK6zTeszV5RuD+Qf1V5xlRHR2xomwd9WIE8aqPPfYY1RTY74orrmDBivqqKbhnHnzwQb7WCRMm0E8//WS9mFYz4xpuvfVWmjlzJi1YsICNr0BGAnhv8aU3eltRUfDKYpCWK7jhjK0T3GS4EcyOacUX1dUj7A3rDq+jMT+OYQH79VlfU8f6HX1SPsHadtedkqJS2rriAE8GkLYt07E9KSWWup/YjDoMaEwRUdYZDCF21xOxu56Ybfc9ZTGtENRoDCgg/rG4goYMGl3QY8YMAOPGjeOyQk8ZQaMEjRXjgDLlPUZDAI0siHhPgE5DuMC3337Ln8UgfIQbeDLGqFbDA8yIa4DGhmBF6iwIVKNgVa1HtMLmzJnj2IbKRet58ODBbo8JA6OlqRa0gmoD11Rd3tIlqQud2up
UspGNp3cVrI2v7K4rWUfyafHMrfTxg3/T7A/WsWBF4v92/RrSuXf/v717AY6quv8A/strN6/NG/KCkCABJEBQwktAijg8bJmCOAXGlkD91xGBkVKGaiU8pjKgpZWhUhjbKvZveZjOH1RacYQKogQwQKCRJBBJCSHkRZ6bd7L7n99JdtklCY+wd+/dPd/PzHX37m6yh/xyzXfPPefcx2n+2rGUNDlWU4GVoe5yQt3l5Ky6GwwGu/zSXWBl3MPN+cg2G3EI5f3ushH3VnOvMYdXy8an9KdOnSruc2C+G+7R55WjOO/94Ac/ED3H27dvF49z721vAqvTLi5gGdfAGzece195XENaWhrNnDmTPvnkkwcaEsArA/CnAi6WJfjyqQE+FcG3nOB5aQXu2uci8sw4Lkpvf0iuYMVjK+jItSN04sYJyizJpJSoFLWbBOAw/GG1KK+Kso/doIIL5WQ5PxQQrKOkJ2Np2KQYCgju/n/WAABAIhdxzyoPT+GhJNu2bRPDHTibsUWLFoleWx4yaRkeY8vSc3zn43eaNWsWHTlyRAwr4O/JKw/wMCNHcNoVsRw1rmHnzp3ilpO7LQ7CixcvFvd5TVjugubhCHzan9ch+9Of/kRaw59AHCUuKI7mDZ4nJmS9fe5t+nDWh1jGR6McWXd319LYRrmnblL28RtUVXJ7pZGYxBAxBCBhVAR5KbSuqqOh7nJC3eWkxbrPnz9fjJVet26d6PDjsbOHDx+2Ts7iM9KcnR4Wn+3mYQg/+tGPHL5erVPWaVViXIOSnLVOK0+g4MHxjlLRWEHP/N8z1NjWSNumbqNpcb1fmQHIZerubnjB/8JLlXTlTAkVXKygtpaOcVR8un/IuCgaPiWWwmMDydWg7nJC3eWkdN2LXGQ9eUdTvKeVxy/s3r1bbDw0gC/nyuMaOLDysAGZ8SceR/5SR/hF0E8f/alYs3X7ue00pd8U8vZ0emc6OLnu7sBkMlPxlWoRVL8/X07NDR0XAWChUf40fEo/Gjo+inR+rvv7jLrLCXWXE+quDEX/Aig5rsEdKHH6fsnwJZR+OZ2u1lylT7//VFzuFbQFwzY68Emesv/W0ZVvS+nK2VJqqGmxPucfpBMTqwaPiaK+8Qa3+Jm5w78BHhzqLifUXRmKDg/gmWZ8+l+JcQ1KcvVu9w+++4C2Zm6lSP9IOjT3EPl6+6rdJACryuJ6upJZSpe/LaXa8kbr43p/b3rksT6UOCaSYgaHihUBAADA/XKKJntaH2RVABnxgtG8BIWjLRi6gD7M+ZBK6ktoX+4+Wjy8Y4IauHfdtaz2ViPlZ5aJoHqryGh93FvnSQkjI0RQjRsWTl4+rjGpqjdkrDug7rJC3ZXhugPE3ABf4UQJei89LRu1jNK+SRPjW/19/HGqQkMKKgro6uWOqxi5M1ODBzVd1lFzrp5ai31uP+FpJl18K/kObSbdwBaq0ZVTJuVQZsdFctyWLHUHe6i7vHUf2DSQQn1D1W6KW0FoVRGvI6uU2QNni2EC+dX59NtTv1XsfaCXHvy6Gi7Bp01PCZUjKbFiNMXWDCZP6hgWZCYTFQd9T/kRZ+lq2AVq9mkgquLuCJKLm9Yd7gF1l9Ls+tkIrQ6G0Koiy9poSvDy9KJNkzbRe9nvUWt7q2LvA73rYefrnbsLjzZPCrgZSYbCfuLW03R7/HpTWBXV9i8iY/9iavNvomjyo2jS1hJ3zuJudYf7g7rLW/dAnestzad1OJJUlJOTQ+PGjVPs+/PlXbdO2arY94feOX36tKJ1d4b2VhMVXa4SM/+vZpVTa1O73RJVPEY1MSWSQiL9VW2nlrhD3eHBoe7y1r2/4e6XOoUHh9AKAPfFWNVE17Jvia0ot4pam28H1cAwvQipg8dGioX/MYYaAAAcDaFVRXxJW5CPq9Td1G6ikoJaa1C1nfVvWUvVskRV1MBg8sASVW5Rd3As1F1OqLsyEFpVZDQaKTw8XO1mgJNpue6NdS1U+F1HSOVLqdp
emYo8iKISgmjA8HAaMDyCIvoFIqi6Sd1BOai7nFB3ZSC0qqikpIQGDBigdjNA4rqbTWYqv15n7U0t/W8tT/W3W/A/LolDajjFJYWRX6BOzea6NC3VHZwHdZcT6q4MhFYAyTQ3ttH1S5V07btbVJh9ixpqb18+lUX0D6QBnUE1MiGIPL3cd8F/AABwHYpextVVOevyaPyjx4QV+Ti77vx+lTfrO075Z9+im/k1ZDLdPuy99V7Uf2goxY+IEL2qgaF6p7VNJjje5YS6y0npuhfhMq7gbFlZWfTYY4+p3Qxww7q3trTTjdwq62n/usomu+d5KSoxNnVEOMU8EuLWl0/VChzvckLd5YS6KwOhVUUtLfanZUEOStSdQ2rp1RoqvlJNxfnVVPJ9LbW3mazPe3l7UuyQkM5JVOEU3Afrpzobjnc5oe5yQt2VgdCqopCQELWbAC5adx6XejO/WmwcVMv+W2d3yt+ydirP8o8fHk6xQ0LJR3/7SlXgfDje5YS6ywl1VwZCq4pkGocCD1d3XoqKe1BFT+qVaqrgNVPvGI0eEKKnmMQQ68ZXpsJYOu3A8S4n1F1OqLsyEFpVlJ2djcv7Seh+6s5Xn7IEVN6qShq6vCa4j59dSDWE+yKkahiOdzmh7nJC3ZXhcrMvvvrqK5o9ezbFxMSIP9AHDx7sMmNv3bp1FB0dTX5+fvT000/TlStXVGsvwL3w72x1WQNd+qaYju6+RP+79iR98NpJ+uK9S/TdiWJrYA2LCaDhU2Jp+v8k0eItE+mnv51ATy16lIZOiKagCD8EVgAAuKsdO3ZQfHw8+fr6ilB95syZHl/75z//mSZPnkyhoaFi4zx1t9c7g8v1tNbX11NycjL9/Oc/p2effbbL82+99RZt376dPvjgA0pISKC0tDSaMWMGXbp0SRRJSwYOHKh2E0AFCfEJdOuG0Tppim8bauwH7fOVpvr0D7T2okY/EkK+gT6qtRkeHo53OaHuctJi3ffv30+rVq2iXbt2icC6bds2kY/y8vKob9++XV5/7NgxWrhwIT3xxBMiP7355ps0ffp0+u677yg2NlaVf4NLr9PKPUsHDhygOXPmiH3+p3AP7K9+9StavXq1eKympoYiIyNp9+7dtGDBAk2sf8btNDc2ivfBuBf3V1/TQuWFtVReZKSKwjq6ebWaWptuz+xnnt4e1DfOQNGDOKAGi0X9dXqX+0wJd4HjXU6ou5xEjkhMVOwMWFEvcgoH1TFjxtA777wj9k0mk/geK1asoFdfffWeX9/e3i56XPnrFy1aRGpwq7+KBQUF4tJp3IVtERwcLAqVkZHRY2htbm4Wm0VdXZ2i7eTAmvf4aHE/T9F3Ai0J6tzu9fmbBwMUOKlN4Fw43uWEusvJfO4sefgru7xgXV0d1dbWWvf1er3YuluC6+zZs/Taa69ZH/P09BR5ifPR/WhoaKDW1lYKCwsjtbhVaOXAyrhn1RbvW57rzubNm2njxo1dHj9//jzduHGDHn/8ccrJyaHGxkYyGAxi2MHFixfFa/jawvxphT/tsFGjRlF+fj4ZjUYKCAigwYMHi+/D+NOQl5cXXbt8WYQXAAAAcE+cH4orK8V9R+eI8537w4YNs3vP9evX04YNG7q0paKiQvSUdpePcnNz7+vf8+tf/1qczbbtGHQ2twqtvcWfPHich+0vGv8i8NUsLN3uI0eOtPuaO2cFciEtkpKS7vpa/iXhT2BtbW3k7Y0SuILG+haqKDRSOW/Xa6niupGM1bd75y34TFBoVABF9AukPnG8BYkJVD6622ukou5yQt3lhLrLW3cfg4H62wwPcGSOeKzzals8X8d2fGl3vayOsGXLFtq3b58Y56rm/CC3OpKioqLEbWlpqVg9wIL3+ZNLT+7sTrftalcCj3HhUwY5Fy6ISWWgLU31rVR+rY7KeBwq316r63IZVMFbT6GR/tRngIH6xgWJWw6rOt+7H1aou5xQdzmh7nJyVt0NBgMFBd373G1ERIT
ooeU8ZIv3LdmpJ1u3bhWh9ciRI12Ct7O5VWjl7nb+4R89etQaUjmAnj59mpYuXUpa09TUTRACp+HLnFaXNlDlzXqqLK4XtxXX66i2ovu6hHBAjTNQXw6pHFD7G+4ZULuDussJdZcT6i4nrdVdp9PR6NGjRT6yTF7nIQm8v3z58h6/jldk2rRpE33++eeUkpJCanO50MpjPHish+3kq6ysLDEwOC4ujlauXElvvPEGJSYmWpe84i53S5G05H4+HcHDa283UU1pY2c4NVpDak1ZY5dLn1oE9fHrCKedPagcVvV+jjlcUHc5oe5yQt3lpMW6r1q1ilJTU0X4HDt2rFjyipcRXbJkiXieVwTgoQY8z4fxEle87v2ePXvE2q6WuUGBgYFiU4PLhdbMzEyaOnWqdd8yFpULwctarVmzRhThxRdfpOrqapo0aRIdPnxYc2u0Mv4lAMcxcTgtb7TrOeVb7k01tXcfTn18vSgsOkCMO+XbcB6L2t9AvgHKrYmKussJdZcT6i4nLdZ9/vz5VF5eLoIoB1A+I835yDI5q7CwUKwoYLFz506x6sBzzz13X5O9nMGl12lVitLrtFrwsAVc5u3Bce9obTfhtKq0nkxtPYRTvReF2oRTy21gqN7pV5JC3eWEussJdZeT0nUvclJO0RqX62kFebQ0tomeU974MqdVJZ3htKSB2lvtF+e38NZ5itn7duE0JoAMob7iKlMAAADgmhBaVcRrs8mOZ+rz2NKa8oaOgGpzv7Gutcev8/LhcOpvE04DxW1QuPbDKeouJ9RdTqi7nFB3ZSC0qogX+nV3PPqEw2dNWWcoFcH09v3mhra7fr2fwYeC+/hTcB8/CuGQ2tl7GhThR54aD6cy1x26Qt3lhLrLCXVXBkKrinhMiu2iwK7KbDJTfU1zZy9pZ08p36/o6Dltbb77wRsQohehVGx9/awhlTedg2bsa4m71B0eDOouJ9RdTqi7MtwvEYDDtba0k7GyiYyVzVRXxbdNVFfV3HHb+TivedojDxJjSjsCaWco7bzPS0vZXi0KAAAAoDsIrSqyXIZN7V7ShtoWET5FALUNo1XN4rbJ2PPYUgs+VW8I97XvKbUE03A/MQYVtFN3cD7UXU6ou5xQd2UgtKqEZ7/nXsqjYUnDlH2fNjMZRe9os7i19Ix2hNKOYNrTGqZ3LhnFoTQw1JcMYfrbt2EdjwWG6cnLC8H0fly+fJmGDx+udjPAyVB3OaHuckLdlYHQqpL0LZl064aRTtBxtZsiZtsHhOjI0BlAO271HbdhHeGUx5Y6ez1Td8UXvwD5oO5yQt3lhLorA6FVAnp/787w6UuG0M7e0TC9GGfK9wOCdeSJXlKnUevyd6Au1F1OqLucUHdlILSqZN6a0dTU1ER6vV7R9+Gxpt6Y6KQpgwYNUrsJoALUXU6ou5xQd2Wge00lPEb0Um426Xy9Fd0QWLUnKytL7SaAClB3OaHuckLdlYHQCgAAAACah9Cqov79+6vdBFAB6i4n1F1OqLucUHdlILSqyNMTP34Zoe5yQt3lhLrLCXVXBn6qKrp27ZraTQAVoO5yQt3lhLrLCXVXBkIrAAAAAGgeQquKRo4cqXYTQAWou5xQdzmh7nJC3ZWB0KqigoICtZsAKkDd5YS6ywl1lxPqrgy3Da07duyg+Ph48vX1pXHjxtGZM2dIa+rq6tRuAqgAdZcT6i4n1F1OWq37jgfMRunp6TR06FDx+hEjRtC//vUvUpNbhtb9+/fTqlWraP369XTu3DlKTk6mGTNmUFlZGWmJn5+f2k0AFaDuckLd5YS6y0mLdd//gNno5MmTtHDhQnrhhRfo/PnzNGfOHLFlZ2eTWjzMZrOZ3Ax/ehgzZgy98847Yt9kMok101asWEGvvvrqPb++qKhIvP769evUr18/xdrZ2tpKPj4+in1/0CbUXU6ou5xQdzkpXfeiXuSUB81G8+fPp/r6ejp06JD1sfHjx9OoUaNo165dpAa362ltaWm
hs2fP0tNPP223XhrvZ2RkkJbwJx2QD+ouJ9RdTqi7nLRW95ZeZCN+3Pb1jHtm1cxS3uRmKioqqL29nSIjI+0e5/3c3Nxuv6a5uVlsFjU1NeL25s2biraVu+T50xLIBXWXE+ouJ9RdTkrX/WZnPuG8EhQUZH1cr9eLzRHZqKSkpNvX8+NqcbvQ2hubN2+mjRs3dnl87NixqrQHAAAA4F6GDx9ut8/jVTds2EDuyu1Ca0REBHl5eVFpaand47wfFRXV7de89tprYnCyRVtbG+Xk5IixHkpdio1nFg4bNowuXbpEBoNBkfcA7UHd5YS6ywl1l5Mz6m4ymaiwsFC8j7f37SjXXS9rb7MRP/4gr3cGtwutOp2ORo8eTUePHhWz3CzF5f3ly5d3+zXddadPnDhR0XbW1taK29jYWLuufXBvqLucUHc5oe5yclbd4+LiFM1GEyZMEM+vXLnS+tgXX3whHleL24VWxr2mqamplJKSIk7xb9u2TcyAW7JkidpNAwAAANBcNlq0aJEI2jxkkr3yyis0ZcoU+v3vf08//OEPad++fZSZmUnvvvuuav8GtwytvExDeXk5rVu3TgwY5uUZDh8+3GVAMQAAAIAM5t8jG/FwA9shkU888QTt2bOH1q5dS7/5zW8oMTGRDh482GUcrTO55TqtroBXK+BPMzyetqcxKOB+UHc5oe5yQt3lhLorB6EVAAAAADTP7S4uAAAAAADuB6EVAAAAADQPoRUAAAAANA+hVSU7duyg+Ph48vX1pXHjxtGZM2fUbhIoiK9Q4uHhYbcNHTpU7WaBg3311Vc0e/ZsiomJETXmmba2eAoBz9yNjo4mPz8/cV3vK1euqNZecE7dFy9e3OX4nzlzpmrthYfHE63GjBkjLh7Qt29fsfZpXl6e3Wuamppo2bJlFB4eToGBgTRv3rwui/XDg0FoVcH+/fvFeml8ubVz585RcnIyzZgxQ1yrGNxXUlKSuF60Zfv666/VbhI4GK95yMczfyjtzltvvUXbt2+nXbt20enTpykgIEAc+/zHDdy37oxDqu3xv3fvXqe2ERzr+PHjIpCeOnVKLLjf2tpK06dPF78LFr/85S/p008/pfT0dPH64uJievbZZ1Vtt8vj1QPAucaOHWtetmyZdb+9vd0cExNj3rx5s6rtAuWsX7/enJycrHYzwIn4f68HDhyw7ptMJnNUVJT5d7/7nfWx6upqs16vN+/du1elVoLSdWepqanmH//4x6q1CZRXVlYman/8+HHrse3j42NOT0+3viYnJ0e8JiMjQ8WWujb0tDpZS0sLnT17VpwWtODFfHk/IyND1baBsvg0MJ8+HDhwID3//PNiIWeQR0FBgVjQ2/bYDw4OFsODcOy7v2PHjonTyEOGDKGlS5fSrVu31G4SOFBNTY24DQsLE7f8d557X22Pdx4SxpdexfHeewitTlZRUUHt7e1drs7F+/wHDdwTB5Pdu3eLq4/s3LlTBJjJkydTXV2d2k0DJ7Ec3zj25cNDA/72t7+J67i/+eab4lTxrFmzxN8CcH0mk4lWrlxJEydOtF4tio9pnU5HISEhdq/F8f5w3PIyrgBaw3+gLEaOHClC7IABA+ijjz6iF154QdW2AYCyFixYYL0/YsQI8f+ARx55RPS+Tps2TdW2wcPjsa3Z2dmYp+AE6Gl1soiICPLy8uoyg5D3o6KiVGsXOBd/+h48eDDl5+er3RRwEsvxjWMfeIgQ/y3A8e/6li9fTocOHaIvv/yS+vXrZ32cj2keDlhdXW33ehzvDweh1cn4dMHo0aPFaSLbUwu8P2HCBFXbBs5jNBrp+++/F0sfgRwSEhLEHyvbY7+2tlasIoBjXy5FRUViTCuOf9fFc+44sB44cID+/e9/i+PbFv+d9/HxsTveeUksnsuA4733MDxABbzcVWpqKqWkpNDYsWNp27ZtYpmMJUuWqN00UMjq1avFOo48JICXPeHlzrjHfeHChWo3DRz8YcS294zHLmd
lZYnJGTwBg8e9vfHGG5SYmCj+yKWlpYnJebzGI7hn3XnbuHGjWKOTP7Twh9U1a9bQoEGDxHJn4LpDAvbs2UMff/yxWKvVMk6VJ1fyGsx8y0O/+O89/w4EBQXRihUrRGAdP3682s13XWovXyCrP/7xj+a4uDizTqcTS2CdOnVK7SaBgubPn2+Ojo4W9Y6NjRX7+fn5ajcLHOzLL78US9rcufGSR5Zlr9LS0syRkZFiqatp06aZ8/Ly1G42KFj3hoYG8/Tp0819+vQRSyANGDDA/Itf/MJcUlKidrPhIXRXb97ef/9962saGxvNL7/8sjk0NNTs7+9vnjt3rvnmzZuqttvVefB/1A7OAAAAAAB3gzGtAAAAAKB5CK0AAAAAoHkIrQAAAACgeQitAAAAAKB5CK0AAAAAoHkIrQAAAACgeQitAAAAAKB5CK0AAAAAoHkIrQAAKvLw8KCDBw+q3QwAAM1DaAUAKS1evFgExju3mTNnkiv59ttvKSYmRtwvLi4W1z1vaWlRu1kAAA7n7fhvCQDgGjigvv/++3aP6fV6ciUZGRk0ceJEcf/EiROUkpJCOp1O7WYBADgceloBQFocUKOiouy20NBQ6/Pc87pz506aNWuW6MEcOHAg/eMf/7D7Hv/5z3/oqaeeEs+Hh4fTiy++SEaj0e417733HiUlJYn3i46OpuXLl9s9X1FRQXPnziV/f39KTEykTz755L7/DSdPnrSG1q+//tp6HwDA3SC0AgDcRVpaGs2bN48uXLhAzz//PC1YsIBycnLEc/X19TRjxgwRdPk0fXp6Oh05csQulHLoXbZsmQizHHA5kA4aNMjuPTZu3Eg/+clP6OLFi/TMM8+I96msrOyxTRxOQ0JCxMYh+vXXXxf3d+3aRdu3bxf3t2zZouBPBQBABWYAAAmlpqaavby8zAEBAXbbpk2brK/h/0W+9NJLdl83btw489KlS8X9d9991xwaGmo2Go3W5//5z3+aPT09zSUlJWI/JibG/Prrr/fYDn6PtWvXWvf5e/Fjn332WY9f09jYaC4oKBCv4fe/evWqOTMz06zT6cw5OTniuaqqql7+ZAAAtAljWgFAWlOnThU9obbCwsLs9idMmNBlPysrS9znHtfk5GQKCAiwPs+n500mE+Xl5YnhBTw5atq0aXdtx8iRI633+XsFBQVRWVlZj6/39fWl+Ph4+uijj8TQhYSEBDFMYPLkyTR06ND7/NcDALgWhFYAkBYHxDtP1TsSj3O9Hz4+Pnb7HHY5+PYkMDBQ3DY3N5Onpyd9/PHHYsUA7rjl5zi8fvbZZw/ZegAAbcGYVgCAuzh16lSX/UcffVTc51se68pjWy2++eYbESSHDBlCBoNB9IgePXrUoW3int7MzEzy8vIS35v3eRIY97zy/b/85S8OfT8AAC1ATysASIt7KktKSuwe8/b2poiICOs+T67iZaQmTZpEf//73+nMmTP017/+VTzHE6bWr19PqamptGHDBiovL6cVK1bQz372M4qMjBSv4cdfeukl6tu3rziVX1dXJ4Itv663uHeYwzO/B7ersLBQfN/Zs2eL9gMAuCP83w0ApHX48GGxBJUt7iHNzc21m9m/b98+evnll8Vr9+7dS8OGDRPP8RJVn3/+Ob3yyis0ZswYsc8rDfzhD3+wfj0H2qamJnr77bdp9erVIhA/99xzD932Y8eO0ZNPPinuHz9+XIy1RWAFAHfmwbOx1G4EAIAW8djSAwcO0Jw5c9RuCgCA9DCmFQAAAAA0D6EVAAAAADQPA6AAAHqA0VMAANqBnlYAAAAA0DyEVgAAAADQPIRWAAAAANA8hFYAAAAA0DyEVgAAAADQPIRWAAAAANA8hFYAAAAA0DyEVgAAAADQPIRWAAAAACCt+3+6hIdlL9h0EQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Visualize resource usage during training\n", "log_usage_plot(log_file)\n" @@ -220,7 +243,7 @@ "widgets": {} }, "kernelspec": { - "display_name": "venv", + "display_name": "venv_edgetrain", "language": "python", "name": "python3" }, diff --git a/tests/__init__.py b/tests/__init__.py index efd313d..0ea31d7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,13 +1,5 @@ import sys import os -# Debug: Print the path being added (for troubleshooting) -edgetrain_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'edgetrain')) - -# Ensure edgetrain is in the path BEFORE importing anything -if edgetrain_path not in sys.path: - sys.path.insert(0, edgetrain_path) - -# Import pytest (only needed here) -import pytest +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) diff --git a/tests/test_calculate_priorities.py b/tests/test_calculate_priorities.py index 4c5586b..816264f 100644 --- a/tests/test_calculate_priorities.py +++ b/tests/test_calculate_priorities.py @@ -10,9 +10,9 @@ def test_define_priorities_with_default_priorities(): priority_value = define_priorities(normalized_scores) - # Default priorities: batch_size: 0.35, accuracy_improvement: 0.65 - assert priority_value["batch_size"] == pytest.approx(0.28, rel=1e-3), "Batch size priority calculation failed." - assert priority_value["learning_rate"] == pytest.approx(0.26, rel=1e-3), "Learning rate priority calculation failed." + # Default priorities: batch_size: 0.4, accuracy_improvement: 0.6 + assert priority_value["batch_size"] == pytest.approx(0.32, rel=1e-3), "Batch size priority calculation failed." + assert priority_value["learning_rate"] == pytest.approx(0.24, rel=1e-3), "Learning rate priority calculation failed." 
def test_define_priorities_with_custom_priorities(): @@ -55,5 +55,5 @@ def test_define_priorities_with_extreme_scores(): priority_value = define_priorities(normalized_scores) - assert priority_value["batch_size"] == 0.35, "Batch size priority with extreme scores failed." - assert priority_value["learning_rate"] == 0.65, "Learning rate priority with extreme scores failed." + assert priority_value["batch_size"] == 0.40, "Batch size priority with extreme scores failed." + assert priority_value["learning_rate"] == 0.60, "Learning rate priority with extreme scores failed." diff --git a/tests/test_calculate_scores.py b/tests/test_calculate_scores.py index 7c3ec04..95f78e6 100644 --- a/tests/test_calculate_scores.py +++ b/tests/test_calculate_scores.py @@ -23,7 +23,7 @@ def test_compute_scores(): current_accuracy = 0.6 scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) assert scores["memory_score"] == 0.5, "Memory score calculation failed with GPUs." - assert scores["accuracy_score"] == pytest.approx(0.2, rel=1e-3), "Accuracy score calculation failed." + assert scores["accuracy_score"] == pytest.approx(1, rel=1e-3), "Accuracy score calculation failed." # Test with no GPUs mock_resources = { @@ -37,13 +37,13 @@ def test_compute_scores(): previous_accuracy = 0.7 current_accuracy = 0.7 scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == 0, "Accuracy score should be 0 for stagnation." + assert scores["accuracy_score"] == pytest.approx(1, rel=1e-3), "Accuracy score should be 1 for stagnation." # Test edge case where current accuracy is higher (clamped at 0) previous_accuracy = 0.6 current_accuracy = 0.8 scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == 0, "Accuracy score should not be negative." 
+ assert scores["accuracy_score"] == pytest.approx(0.8, rel=1e-3), "Accuracy score should be 1 for decreasing accuracy." def test_compute_scores_with_custom_ranges(): @@ -64,7 +64,7 @@ def test_compute_scores_with_custom_ranges(): current_accuracy=0.6 scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=score_ranges, resources=mock_resources) assert scores["memory_score"] == pytest.approx(0.35, rel=1e-3), "Memory score normalization with custom range failed." - assert scores["accuracy_score"] == pytest.approx(0.4, rel=1e-3), "Accuracy score normalization with custom range failed." + assert scores["accuracy_score"] == pytest.approx(2.0, rel=1e-3), "Accuracy score normalization with custom range failed." def test_compute_scores_with_acc_improvement(): @@ -78,4 +78,4 @@ def test_compute_scores_with_acc_improvement(): previous_accuracy=0.6 current_accuracy=0.8 scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == pytest.approx(0, rel=1e-3), "Accuracy score normalization with custom range failed." + assert scores["accuracy_score"] == pytest.approx(0.80, rel=1e-3), "Accuracy score normalization with custom range failed." 
From 0652b0ea5211925dc9777ad1bc076bb7faf4e95d Mon Sep 17 00:00:00 2001 From: BradleyEdelman Date: Tue, 11 Feb 2025 14:44:34 +0100 Subject: [PATCH 6/7] notebook working, linted and ready for v0.2.0 release --- .flake8 | 3 + .github/workflows/lint.yml | 36 +++++ .gitignore | 1 + .isort.cfg | 2 + .pre-commit-config.yaml | 15 ++ README.md | 93 +++++++----- edgetrain/__init__.py | 23 ++- edgetrain/adjust_train_parameters.py | 20 ++- edgetrain/calculate_priorities.py | 19 ++- edgetrain/calculate_scores.py | 40 ++--- edgetrain/create_model.py | 36 +++-- edgetrain/dynamic_train.py | 71 ++++++--- edgetrain/edgetrain_folder.py | 17 ++- edgetrain/resource_monitor.py | 90 +++++++---- edgetrain/train_visualize.py | 207 +++++++++++++++++--------- notebooks/EdgeTrain_example.ipynb | 34 +++-- requirements-dev.txt | 25 ++-- requirements.txt | 7 +- setup.py | 10 +- tests/__init__.py | 5 +- tests/test_adjust_train_parameters.py | 47 ++++-- tests/test_calculate_priorities.py | 75 +++++----- tests/test_calculate_scores.py | 97 ++++++------ tests/test_create_model_tf.py | 9 +- tests/test_log_usage_once.py | 87 +++++++---- tests/test_sys_resources.py | 46 +++--- 26 files changed, 723 insertions(+), 392 deletions(-) create mode 100644 .flake8 create mode 100644 .github/workflows/lint.yml create mode 100644 .isort.cfg create mode 100644 .pre-commit-config.yaml diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..cb50e3a --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 88 +extend-ignore = E501 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..26b6683 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,36 @@ +name: Lint + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - 
name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install black flake8 isort + + - name: Run Black + run: black --check . + + - name: Run Flake8 + run: flake8 . + + - name: Run isort + run: isort --check-only . diff --git a/.gitignore b/.gitignore index d48cdb9..7361dcb 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ venv*/ models/ logs/ images/ +results/ # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 0000000..f238bf7 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,2 @@ +[settings] +profile = black diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..e046832 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,15 @@ +repos: + - repo: https://github.com/psf/black + rev: 23.3.0 + hooks: + - id: black + + - repo: https://github.com/pycqa/flake8 + rev: 6.1.0 + hooks: + - id: flake8 + + - repo: https://github.com/timothycrosley/isort + rev: 5.12.0 + hooks: + - id: isort diff --git a/README.md b/README.md index 352390c..d7d17b3 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,44 @@ # EdgeTrain: Automated Resource Adjustment for Efficient Edge AI Training -**Version: 0.1.1-alpha** +**Version: 0.2.0** EdgeTrain is a Python package designed to dynamically adjust deep learning training parameters and strategies based on CPU and GPU performance. It optimizes the training process by adjusting batch size and learning rate to ensure efficient training without overutilizing or underutilizing available resources. This package is specifically designed to reduce memory usage for model training on edge AI devices, laptops or other setups that have limited memory. ## Features -### Automated Resource Adjustment -EdgeTrain currently adjusts the following hyperparameters based on CPU/GPU usage: -- **Batch Size**: Automatically adjusts batch size for better memory optimization based on resource usage. 
-- **Learning Rate**: Dynamically adjusts the learning rate to improve training efficiency. - -These adjustments optimize resource utilization throughout training, enabling efficient use of available resources on edge AI devices. +### Dynamic Resource-Based Training Adjustments +EdgeTrain monitors CPU and GPU usage in real-time and automatically adjusts hyperparameters during training: +- **Batch Size**: Increases or decreases to optimize memory usage. +- **Learning Rate**: Adjusts based on model performance to improve training efficiency. ### Resource Logging & Visualization -EdgeTrain logs critical system metrics (e.g., CPU and GPU usage) and training parameters (batch size, learning rate) for each epoch. The logs enable post-hoc visualization and analysis of: +EdgeTrain logs system performance and training parameters, allowing post-hoc visualization of: - Resource utilization over time. - Training parameter adjustments across epochs. - Correlations between resource usage and model performance. -The built-in **visualization tools** help you understand how system resources are being utilized and how training parameters evolve during training. +The provided visualization tools help you understand how system resources are being utilized and how training parameters evolve during training. -### Customizable +### Customization and control EdgeTrain is highly customizable. You can easily modify: - **Resource Adjustment Thresholds**: Set CPU/GPU usage ranges to trigger adjustments. - **Training Configuration Settings**: Adjust batch size increment, learning rate adjustments, and more. -- Tailor the optimization process to fit various setups, especially on edge devices with limited resources. +- **Fixed Pruning Strategy**: Pruning is applied with a constant ratio and stripped at the end to improve deployment efficiency. + +## Release Notes for v0.2.0 +This version introduces a **refined adaptive training strategy with a constant pruning ratio**. 
Key updates: -## Release Notes -Version: 0.1.1-alpha -- Fixed circular import issue in `create_model.py`. Now users should not encounter import errors during initialization. +- **Score Calculation**: This version now computes an **accuracy score** and a **memory score** based on resource usage and model performance +- **Parameter Prioritization**: Accuracy and memory scores are weighted according to default or user-defined priority weighting to idenfity a priority list for parameter adjustment. Now, only the top priority paramater is adjusted in each epoch. + - **Batch size priority** is weighted by memory usage. + - **Learning rate priority** is inversely weighted by accuracy improvement (i.e. increases if accuracy stagnates). +- **Fixed Pruning Ratio**: Pruning is constant and is stripped at the end. +- **Code Quality Improvements**: Added pre-commit hooks and CI linting for consistency. ## Installation You can install the latest version of EdgeTrain via pip: ```bash -pip install https://github.com/BradleyEdelman/EdgeTrain/releases/download/v0.1.1-alpha/edgetrain-0.1.1a0.tar.gz +pip install https://github.com/BradleyEdelman/EdgeTrain/releases/download/v0.2.0-alpha/edgetrain-0.2.0.tar.gz ``` Alternatively, clone the repository and install manually: @@ -45,21 +49,30 @@ git clone https://github.com/BradleyEdelman/edgetrain.git # Checkout the desired version cd edgetrain -git checkout tags/v0.1.1-alpha +git checkout tags/v0.2.0 # Install the package pip install . ``` -## Usage +## Usage Example To use EdgeTrain, simply import the package and configure your training environment. 
Below is an example of using EdgeTrain with a TensorFlow model: ``` -# Import library import edgetrain # Example of resource monitoring and training with dynamic adjustments train_dataset = {'images': train_images, 'labels': train_labels} -history = edgetrain.dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e-3, log_file="resource_log.csv", dynamic_adjustments=True) +final_model, history = edgetrain.dynamic_train( + train_dataset, + epochs=10, + batch_size=32, + lr=1e-3, + log_file="resource_log.csv", + dynamic_adjustments=True +) + +# Plot resource usage, parameter scoring and prioritization, and parameter values over time +edgetrain.log_usage_plot("resource_log.csv") ``` ## File Tree @@ -67,23 +80,33 @@ history = edgetrain.dynamic_train(train_dataset, epochs=10, batch_size=32, lr=1e EdgeTrain/ ├── edgetrain/ │ ├── __init__.py +│ ├── adjust_train_parameters.py +│ ├── calculate_priorities.py +│ ├── calculate_scores.py │ ├── create_model.py │ ├── dynamic_train.py -│ ├── edgetrain_folder -│ ├── resource_adjust.py +│ ├── edgetrain_folder.py │ ├── resource_monitor.py -│ ├── train_visualize.py +│ └── train_visualize.py +│ +├── notebooks/ +│ └── EdgeTrain_example.ipynb +│ ├── tests/ │ ├── __init__.py -│ ├── test_adjust_batch_size.py -│ ├── test_adjust_learning_rate.py +│ ├── test_adjust_train_parameters.py +│ ├── test_calculate_priorities.py +│ ├── test_calculate_scores.py │ ├── test_create_model_tf.py │ ├── test_log_usage_once.py -│ ├── test_sys_resources.py -│ ├── test_dynamic_train.py -├── example_notebooks/ -│ ├── EdgeTrain_example.ipynb +│ └── test_sys_resources.py +│ +├── .github/workflows/ +│ ├── ci.yml +│ └──lint.yml +│ ├── .gitignore +├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE ├── README.md @@ -93,16 +116,16 @@ EdgeTrain/ ``` ## Contributions -You can contribute by: +Contributions are welcomed: - Reporting bugs or requesting features: [GitHub Issues](https://github.com/BradleyEdelman/edgetrain/issues) +- Improve documentation: Help refine 
explanations and add examples +- Testing: Test EdgeTrain using more complex models and datasets in heavily resource-constrained environments -## Reporting bugs or requesting features. -Improving the documentation. ## License This project is licensed under the MIT License - see the LICENSE file for details. -## Known Limitations (Alpha) -- The package currently supports TensorFlow only. Support for other frameworks, especially lightweight ones is planned for future releases. -- Model pruning and quantization are future features. -- Resource usage thresholds for dynamic adjustments are in the initial phase and may require tuning based on the training setup. +## Known Limitations (v0.2.0) +- Currently supports **TensorFlow only**. Future updates will expand framework support. +- **Gradient accumulation**: Planned for a future release to further optimize memory usage +- **Resource usage thresholds** are still in an experimental phase and may require fine-tuning. diff --git a/edgetrain/__init__.py b/edgetrain/__init__.py index 0a87bd6..c93069d 100644 --- a/edgetrain/__init__.py +++ b/edgetrain/__init__.py @@ -1,8 +1,15 @@ -from .resource_monitor import sys_resources, log_usage_once -from .calculate_scores import compute_scores, normalize_scores -from .calculate_priorities import define_priorities -from .adjust_train_parameters import adjust_training_parameters -from .edgetrain_folder import get_edgetrain_folder -from .train_visualize import log_usage_plot, log_train_time, training_history_plot -from .create_model import create_model_tf, check_sparsity -from .dynamic_train import dynamic_train \ No newline at end of file +__all__ = [
    "adjust_training_parameters",
    "define_priorities",
    "compute_scores",
    "normalize_scores",
    "check_sparsity",
    "create_model_tf",
    "dynamic_train",
    "get_edgetrain_folder",
    "log_usage_once",
    "sys_resources",
    "log_train_time",
    "log_usage_plot",
    "training_history_plot",
+] diff --git a/edgetrain/adjust_train_parameters.py 
b/edgetrain/adjust_train_parameters.py index 224453c..a67d36c 100644 --- a/edgetrain/adjust_train_parameters.py +++ b/edgetrain/adjust_train_parameters.py @@ -1,16 +1,19 @@ from edgetrain import sys_resources -def adjust_training_parameters(priority_values, batch_size, lr, accuracy_score, resources=None): + +def adjust_training_parameters( + priority_values, batch_size, lr, accuracy_score, resources=None +): """ Adjust the training parameters (batch size, learning rate) based on the highest priority score, moving parameters in the opposite direction if resource usage or accuracy trends improve. - + Parameters: - priority_values (dict): Dictionary containing priority scores for batch size, pruning, and learning rate. - batch_size (int): Current batch size. - lr (float): Current learning rate. - accuracy_score (float): Current accuracy score from the latest epoch (0-1). - + Returns: - adjusted_batch_size (int): Adjusted batch size. - adjusted_lr (float): Adjusted learning rate. @@ -22,18 +25,21 @@ def adjust_training_parameters(priority_values, batch_size, lr, accuracy_score, # Determine which parameter has the highest priority score highest_priority = max(priority_values, key=priority_values.get) - + # Adjust the parameter based on system resources and highest priority score if highest_priority == "batch_size": # Adjust batch size based on memory usage if resources["cpu_memory_percent"] > 75 or resources["gpu_memory_percent"] > 75: adjusted_batch_size = max(16, batch_size // 2) # Halve batch size - elif resources["cpu_memory_percent"] < 50 and resources["gpu_memory_percent"] < 50: + elif ( + resources["cpu_memory_percent"] < 50 + and resources["gpu_memory_percent"] < 50 + ): adjusted_batch_size = min(128, batch_size * 2) # Double batch size else: adjusted_batch_size = batch_size adjusted_lr = lr - + elif highest_priority == "learning_rate": # Adjust learning rate based on accuracy score if accuracy_score < 0.05: # Example threshold for low accuracy @@ -43,5 +49,5 @@ 
def adjust_training_parameters(priority_values, batch_size, lr, accuracy_score, else: adjusted_lr = lr adjusted_batch_size = batch_size - + return adjusted_batch_size, adjusted_lr diff --git a/edgetrain/calculate_priorities.py b/edgetrain/calculate_priorities.py index 9fb9922..ca9ecea 100644 --- a/edgetrain/calculate_priorities.py +++ b/edgetrain/calculate_priorities.py @@ -1,30 +1,33 @@ def define_priorities(normalized_scores, user_priorities=None): """ Calculate priority scores for adjustments based on resource usage and accuracy. - + Parameters: - normalized_scores (dict): Dictionary containing normalized scores for memory usage and accuracy. - memory_score (float): Score indicating memory usage pressure (0-100). - accuracy_score (float): Score indicating stagnation in accuracy improvement (0-1). - user_priorities (dict, optional): Optional user-defined priorities for resource conservation and accuracy improvement. - + Returns: - priority_value (dict): A dictionary of priority scores for batch size and learning rate. 
""" - + # Default weights if user priorities are not provided default_priorities = { "batch_size_adjustment": 0.4, "accuracy_improvement": 0.6, } - + # Use user-defined priorities if available priorities = user_priorities if user_priorities else default_priorities # Calculate weighted priority scores priority_value = { - "batch_size": priorities["batch_size_adjustment"] * normalized_scores.get('memory_score'), - "learning_rate": (priorities["accuracy_improvement"] * normalized_scores.get('accuracy_score')), + "batch_size": priorities["batch_size_adjustment"] + * normalized_scores.get("memory_score"), + "learning_rate": ( + priorities["accuracy_improvement"] * normalized_scores.get("accuracy_score") + ), } - - return priority_value \ No newline at end of file + + return priority_value diff --git a/edgetrain/calculate_scores.py b/edgetrain/calculate_scores.py index 1eda006..c0429ab 100644 --- a/edgetrain/calculate_scores.py +++ b/edgetrain/calculate_scores.py @@ -1,15 +1,18 @@ from edgetrain import sys_resources -def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=None): + +def compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=None +): """ Compute memory and accuracy scores, and normalize them. - + Parameters: - previous_accuracy (float): Accuracy from the previous epoch. - current_accuracy (float): Current accuracy. - score_ranges (dict, optional): Dictionary of maximum possible improvements for each score. - resources (dict, optional): Dictionary containing system resource usage metrics. If None, system resources will be fetched. - + Returns: - normalized_scores (dict): Dictionary of normalized scores. 
""" @@ -17,29 +20,28 @@ def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resou # Get system resources if resources is None: resources = sys_resources() - + # Default score ranges if score_ranges is None: score_ranges = { "memory_score_range": 100, # Default 0-100 range for memory score "accuracy_score_range": 1, # Default 0-1 range for accuracy score } - + # Calculate memory score # If there is a GPU, average GPU and CPU for memory score, otherwise, just use CPU - if resources.get('num_gpus') > 0: - memory_score = (resources.get('cpu_memory_percent') + resources.get('gpu_memory_percent')) / 2 + if resources.get("num_gpus") > 0: + memory_score = ( + resources.get("cpu_memory_percent") + resources.get("gpu_memory_percent") + ) / 2 else: - memory_score = resources.get('cpu_memory_percent') + memory_score = resources.get("cpu_memory_percent") # Calculate accuracy score accuracy_score = 1 - max(0, current_accuracy - previous_accuracy) # Store all scores in a dictionary - raw_scores = { - "memory_score": memory_score, - "accuracy_score": accuracy_score - } + raw_scores = {"memory_score": memory_score, "accuracy_score": accuracy_score} # Normalize the scores normalized_scores = normalize_scores(raw_scores, score_ranges) @@ -50,19 +52,21 @@ def compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resou def normalize_scores(raw_scores, score_ranges): """ Normalize raw scores based on predefined score ranges. - + Parameters: - raw_scores (dict): Dictionary of raw scores. - score_ranges (dict): Dictionary of maximum possible improvements for each score. - + Returns: - normalized_scores (dict): Dictionary of normalized scores. 
""" normalized_scores = {} - + for score_name, score_value in raw_scores.items(): - score_range = score_ranges.get(f'{score_name}_range', 1) # Default range is 1 if not specified + score_range = score_ranges.get( + f"{score_name}_range", 1 + ) # Default range is 1 if not specified normalized_score = score_value / score_range normalized_scores[score_name] = normalized_score - - return normalized_scores \ No newline at end of file + + return normalized_scores diff --git a/edgetrain/create_model.py b/edgetrain/create_model.py index f3a4786..c0d1793 100644 --- a/edgetrain/create_model.py +++ b/edgetrain/create_model.py @@ -1,6 +1,7 @@ +import numpy as np import tensorflow as tf from tensorflow.keras import layers, models -import numpy as np + def create_model_tf(input_shape, model_path=None): """ @@ -13,7 +14,7 @@ def create_model_tf(input_shape, model_path=None): Returns: - model: A compiled tensorflow model. """ - + # Ensure that the input shape is provided if input_shape is None: raise ValueError("Input shape must be defined.") @@ -22,20 +23,23 @@ def create_model_tf(input_shape, model_path=None): model = tf.keras.models.load_model(model_path) else: # Define a Sequential model with input layer, Conv2D, MaxPooling2D, Flatten, and Dense layers - model = models.Sequential([ - layers.Input(shape=input_shape), - layers.Conv2D(32, (3, 3), activation='relu'), - layers.MaxPooling2D((2, 2)), - layers.Conv2D(64, (3, 3), activation='relu'), - layers.MaxPooling2D((2, 2)), - layers.Conv2D(64, (3, 3), activation='relu'), - layers.Flatten(), - layers.Dense(64, activation='relu'), - layers.Dense(10, activation='softmax') - ]) - + model = models.Sequential( + [ + layers.Input(shape=input_shape), + layers.Conv2D(32, (3, 3), activation="relu"), + layers.MaxPooling2D((2, 2)), + layers.Conv2D(64, (3, 3), activation="relu"), + layers.MaxPooling2D((2, 2)), + layers.Conv2D(64, (3, 3), activation="relu"), + layers.Flatten(), + layers.Dense(64, activation="relu"), + layers.Dense(10, 
activation="softmax"), + ] + ) + return model + def check_sparsity(model): """ Calculate the sparsity of a given model. @@ -50,10 +54,10 @@ def check_sparsity(model): total_params = 0 zero_params = 0 for layer in model.layers: - if hasattr(layer, 'weights'): + if hasattr(layer, "weights"): for weight in layer.weights: weight_values = weight.numpy() total_params += np.prod(weight_values.shape) zero_params += np.sum(np.isclose(weight_values, 0)) sparsity = (zero_params / total_params) if total_params > 0 else 0 - return sparsity \ No newline at end of file + return sparsity diff --git a/edgetrain/dynamic_train.py b/edgetrain/dynamic_train.py index cdd100c..8df878e 100644 --- a/edgetrain/dynamic_train.py +++ b/edgetrain/dynamic_train.py @@ -1,19 +1,24 @@ import tensorflow as tf -from tensorflow import keras import tensorflow_model_optimization as tfmot +from tensorflow import keras + from edgetrain import ( - log_usage_once, create_model_tf, compute_scores, - define_priorities, adjust_training_parameters + adjust_training_parameters, + compute_scores, + create_model_tf, + define_priorities, + log_usage_once, ) + def dynamic_train( - train_dataset, - epochs=10, - batch_size=32, - lr=1e-3, - pruning=0.2, - log_file="resource_log.csv", - dynamic_adjustments=True + train_dataset, + epochs=10, + batch_size=32, + lr=1e-3, + pruning=0.2, + log_file="resource_log.csv", + dynamic_adjustments=True, ): """ Train the model with optional dynamic resource adjustment. 
@@ -35,7 +40,16 @@ def dynamic_train( # Log initial resource usage normalized_scores = {"memory_score": 0, "accuracy_score": 0} priority_value = {"batch_size": 0, "learning_rate": 0} - log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch=0, resources=None) + log_usage_once( + log_file, + pruning, + batch_size, + lr, + normalized_scores, + priority_value, + num_epoch=0, + resources=None, + ) # Create MirroredStrategy for distributed training strategy = tf.distribute.MirroredStrategy() @@ -45,7 +59,7 @@ def dynamic_train( prev_accuracy = 0.0 # Prepare training data - train_images, train_labels = train_dataset['images'], train_dataset['labels'] + train_images, train_labels = train_dataset["images"], train_dataset["labels"] # Create model within scope and apply initial pruning with strategy.scope(): @@ -53,8 +67,14 @@ def dynamic_train( optimizer = keras.optimizers.Adam(learning_rate=lr) pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(pruning, begin_step=0) - model = tfmot.sparsity.keras.prune_low_magnitude(base_model, pruning_schedule=pruning_schedule) - model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']) + model = tfmot.sparsity.keras.prune_low_magnitude( + base_model, pruning_schedule=pruning_schedule + ) + model.compile( + optimizer=optimizer, + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) # Training model one epoch at a time for epoch in range(epochs): @@ -68,14 +88,14 @@ def dynamic_train( train_labels, batch_size=batch_size, epochs=1, - callbacks=callbacks - ) + callbacks=callbacks, + ) # Save training history history_list.append(history.history) # Update "current" accuracy - curr_accuracy = history.history['accuracy'][-1] + curr_accuracy = history.history["accuracy"][-1] # If dynamic adjustments are enabled if dynamic_adjustments: @@ -88,16 +108,27 @@ def dynamic_train( priority_values=priority_value, batch_size=batch_size, lr=lr, - 
accuracy_score=curr_accuracy + accuracy_score=curr_accuracy, ) batch_size = adjusted_batch_size lr = adjusted_lr - print(f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning}, learning_rate={lr}") + print( + f"Adjusted parameters for next epoch: batch_size={batch_size}, pruning_ratio={pruning}, learning_rate={lr}" + ) # Log resource usage - log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch=epoch + 1, resources=None) + log_usage_once( + log_file, + pruning, + batch_size, + lr, + normalized_scores, + priority_value, + num_epoch=epoch + 1, + resources=None, + ) # Update previous accuracy prev_accuracy = curr_accuracy diff --git a/edgetrain/edgetrain_folder.py b/edgetrain/edgetrain_folder.py index c96ac4f..b4a372d 100644 --- a/edgetrain/edgetrain_folder.py +++ b/edgetrain/edgetrain_folder.py @@ -1,5 +1,6 @@ import os + def get_edgetrain_folder(): """ Create the necessary folder structure for the EdgeTrain project. 
@@ -14,20 +15,20 @@ def get_edgetrain_folder(): # Get the current working directory (assumed to be within the 'notebooks' folder) cwd = os.getcwd() - + # Navigate two levels back from the notebooks folder to the root directory root_dir = os.path.dirname(os.path.dirname(cwd)) - + # Define the base "EdgeTrain" directory path edgetrain_dir = os.path.join(root_dir, "EdgeTrain") - + # Check if the EdgeTrain folder exists, and create it if not if not os.path.exists(edgetrain_dir): os.makedirs(edgetrain_dir) - - # Define the subfolders (models, logs, images) - subfolders = ["models", "logs", "images"] - + + # Define the subfolders + subfolders = ["models", "logs", "images", "results"] + # Create subfolders under EdgeTrain if they don't exist for subfolder in subfolders: subfolder_path = os.path.join(edgetrain_dir, subfolder) @@ -35,4 +36,4 @@ def get_edgetrain_folder(): os.makedirs(subfolder_path) # Return the path to the "EdgeTrain" folder - return edgetrain_dir \ No newline at end of file + return edgetrain_dir diff --git a/edgetrain/resource_monitor.py b/edgetrain/resource_monitor.py index 4e5a2a4..ae20bd7 100644 --- a/edgetrain/resource_monitor.py +++ b/edgetrain/resource_monitor.py @@ -1,6 +1,15 @@ -import psutil, GPUtil, csv +import csv from datetime import datetime -from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetUtilizationRates, nvmlShutdown + +import GPUtil +import psutil +from pynvml import ( + nvmlDeviceGetHandleByIndex, + nvmlDeviceGetUtilizationRates, + nvmlInit, + nvmlShutdown, +) + def sys_resources(): """ @@ -21,26 +30,32 @@ def sys_resources(): # Check CPU usage (compute and RAM) cpu_compute_percent = psutil.cpu_percent(interval=1) cpu_cores = psutil.cpu_count(logical=True) - + # Check GPU usage (memory and compute) gpus = GPUtil.getGPUs() num_gpus = len(gpus) gpu_memory_usage = sum(gpu.memoryUsed for gpu in gpus) gpu_memory_total = sum(gpu.memoryTotal for gpu in gpus) gpu_memory_percent = sum(gpu.memoryUtil for gpu in gpus) / 
num_gpus if gpus else 0 - + # GPU compute utilization gpu_compute_percent = 0 if num_gpus > 0: nvmlInit() try: - gpu_compute_percent = sum(nvmlDeviceGetUtilizationRates(nvmlDeviceGetHandleByIndex(i)).gpu for i in range(num_gpus)) / num_gpus + gpu_compute_percent = ( + sum( + nvmlDeviceGetUtilizationRates(nvmlDeviceGetHandleByIndex(i)).gpu + for i in range(num_gpus) + ) + / num_gpus + ) finally: nvmlShutdown() - + # Check system memory usage (RAM) cpu_memory_percent = psutil.virtual_memory().percent - + return { "cpu_cores": cpu_cores, "cpu_compute_percent": cpu_compute_percent, @@ -49,14 +64,23 @@ def sys_resources(): "gpu_memory_usage": gpu_memory_usage, "gpu_memory_total": gpu_memory_total, "gpu_memory_percent": gpu_memory_percent, - "num_gpus": num_gpus + "num_gpus": num_gpus, } -def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority_value, num_epoch=0, resources=None): +def log_usage_once( + log_file, + pruning, + batch_size, + lr, + normalize_scores, + priority_value, + num_epoch=0, + resources=None, +): """ Log GPU and CPU resource usage once. - + Parameters: - log_file (str): Path to the log file. - pruning (bool): Whether pruning is enabled. @@ -67,35 +91,43 @@ def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority - num_epoch (int, optional): Current epoch number. Default is 0. - resources (dict, optional): Dictionary containing system resource usage metrics. If None, system resources will be fetched. 
""" - + # Create CSV header if the file doesn't exist try: - with open(log_file, 'r') as f: + with open(log_file, "r") as f: pass except FileNotFoundError: - with open(log_file, 'w', newline='') as f: + with open(log_file, "w", newline="") as f: writer = csv.writer(f) header = [ - 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', - 'GPU RAM (%)', 'GPU Usage (%)', - 'Mem Score', 'Acc Score', - 'Priority Batch Size', 'Priority Learning Rate', - 'Pruning', 'Batch Size', 'Learning Rate', + "Timestamp", + "Epoch #", + "CPU Usage (%)", + "CPU RAM (%)", + "GPU RAM (%)", + "GPU Usage (%)", + "Mem Score", + "Acc Score", + "Priority Batch Size", + "Priority Learning Rate", + "Pruning", + "Batch Size", + "Learning Rate", ] writer.writerow(header) # Get resource usage if resources is None: resources = sys_resources() - cpu_compute_percent = resources.get('cpu_compute_percent') - cpu_memory_percent = resources.get('cpu_memory_percent') - gpu_compute_percent = resources.get('gpu_compute_percent') - gpu_memory_percent = resources.get('gpu_memory_percent') + cpu_compute_percent = resources.get("cpu_compute_percent") + cpu_memory_percent = resources.get("cpu_memory_percent") + gpu_compute_percent = resources.get("gpu_compute_percent") + gpu_memory_percent = resources.get("gpu_memory_percent") - memory_score = normalize_scores.get('memory_score') - accuracy_score = normalize_scores.get('accuracy_score') - batch_size_priority_value = priority_value.get('batch_size') - learning_rate_priority_value = priority_value.get('learning_rate') + memory_score = normalize_scores.get("memory_score") + accuracy_score = normalize_scores.get("accuracy_score") + batch_size_priority_value = priority_value.get("batch_size") + learning_rate_priority_value = priority_value.get("learning_rate") # Prepare log entry log_entry = [ @@ -111,10 +143,10 @@ def log_usage_once(log_file, pruning, batch_size, lr, normalize_scores, priority learning_rate_priority_value, pruning, batch_size, - lr + lr, ] # 
Append log entry to the file - with open(log_file, 'a', newline='') as f: + with open(log_file, "a", newline="") as f: writer = csv.writer(f) - writer.writerow(log_entry) \ No newline at end of file + writer.writerow(log_entry) diff --git a/edgetrain/train_visualize.py b/edgetrain/train_visualize.py index 4aaaca4..8b9f5ca 100644 --- a/edgetrain/train_visualize.py +++ b/edgetrain/train_visualize.py @@ -1,16 +1,18 @@ import matplotlib.pyplot as plt -import pandas as pd import numpy as np +import pandas as pd + from edgetrain import get_edgetrain_folder + def log_usage_plot(log_file): """ - Load the resource usage log from the CSV file and plot CPU and GPU usage, + Load the resource usage log from the CSV file and plot CPU and GPU usage, as well as batch size and learning rate over time (epochs). - + Parameters: - log_file (str): The path to the log file (CSV format) that contains the resource usage data. - + Returns: - None """ @@ -21,61 +23,123 @@ def log_usage_plot(log_file): except FileNotFoundError: print(f"Log file '{log_file}' not found.") return - + # Plot CPU and GPU usage over time on the same plot with workers on a separate y axis fig, ax1 = plt.subplots(5, 1, figsize=(7, 10), sharex=True) - ax1[0].plot(df['Epoch #'], df['CPU Usage (%)'], label='CPU Usage (%)', color='tab:blue', linewidth=1.5) - ax1[0].plot(df['Epoch #'], df['GPU Usage (%)'], label='GPU Usage (%)', color='tab:orange', linewidth=1.5) - ax1[0].set_ylabel('Compute (%)') + ax1[0].plot( + df["Epoch #"], + df["CPU Usage (%)"], + label="CPU Usage (%)", + color="tab:blue", + linewidth=1.5, + ) + ax1[0].plot( + df["Epoch #"], + df["GPU Usage (%)"], + label="GPU Usage (%)", + color="tab:orange", + linewidth=1.5, + ) + ax1[0].set_ylabel("Compute (%)") ax1[0].set_ylim(-5, 100) - ax1[0].set_title('CPU and GPU Usage Over Time') - ax1[0].legend(loc='upper left') - ax1[0].grid(True, which='both', linestyle='--', linewidth=0.5) + ax1[0].set_title("CPU and GPU Usage Over Time") + ax1[0].legend(loc="upper 
left") + ax1[0].grid(True, which="both", linestyle="--", linewidth=0.5) # Plot CPU and GPU RAM usage over time on the same plot with batch size on a separate y axis - ax1[1].plot(df['Epoch #'], df['CPU RAM (%)'], label='CPU RAM (%)', color='tab:blue', linewidth=1.5) - ax1[1].plot(df['Epoch #'], df['GPU RAM (%)'], label='GPU RAM (%)', color='tab:orange', linewidth=1.5) - ax1[1].set_ylabel('RAM (%)') - ax1[1].set_title('CPU and GPU RAM Usage Over Time') - ax1[1].legend(loc='upper left') - ax1[1].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) + ax1[1].plot( + df["Epoch #"], + df["CPU RAM (%)"], + label="CPU RAM (%)", + color="tab:blue", + linewidth=1.5, + ) + ax1[1].plot( + df["Epoch #"], + df["GPU RAM (%)"], + label="GPU RAM (%)", + color="tab:orange", + linewidth=1.5, + ) + ax1[1].set_ylabel("RAM (%)") + ax1[1].set_title("CPU and GPU RAM Usage Over Time") + ax1[1].legend(loc="upper left") + ax1[1].grid(True, which="both", linestyle="--", linewidth=0.5, alpha=0.7) # Plot memory and accuracy scores - ax1[2].plot(df['Epoch #'], df['Mem Score'], label='Mem Score', color='tab:blue', linewidth=1.5) - ax1[2].plot(df['Epoch #'], df['Acc Score'], label='Acc Score', color='tab:orange', linewidth=1.5) - ax1[2].set_ylabel('Score') - ax1[2].set_title('Scores over time') - ax1[2].legend(loc='upper left') - ax1[2].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) + ax1[2].plot( + df["Epoch #"], + df["Mem Score"], + label="Mem Score", + color="tab:green", + linewidth=1.5, + ) + ax1[2].plot( + df["Epoch #"], + df["Acc Score"], + label="Acc Score", + color="tab:purple", + linewidth=1.5, + ) + ax1[2].set_ylabel("Score") + ax1[2].set_title("Scores over time") + ax1[2].legend(loc="upper left") + ax1[2].grid(True, which="both", linestyle="--", linewidth=0.5, alpha=0.7) # Plot memory and accuracy scores - ax1[3].plot(df['Epoch #'], df['Priority Batch Size'], label='Priority Batch Size', color='tab:green', linewidth=1.5) - ax1[3].plot(df['Epoch #'], 
df['Priority Learning Rate'], label='Priority Learning Rate', color='tab:purple', linewidth=1.5) - ax1[3].set_ylabel('Priority') - ax1[3].set_title('Priorities over time') - ax1[3].legend(loc='upper left') - ax1[3].grid(True, which='both', linestyle='--', linewidth=0.5, alpha=0.7) - - # Plot Batch Size, Learning Rate, and Grad Accum over time on the same plot + ax1[3].plot( + df["Epoch #"], + df["Priority Batch Size"], + label="Priority Batch Size", + color="tab:green", + linewidth=1.5, + ) + ax1[3].plot( + df["Epoch #"], + df["Priority Learning Rate"], + label="Priority Learning Rate", + color="tab:purple", + linewidth=1.5, + ) + ax1[3].set_ylabel("Priority") + ax1[3].set_title("Priorities over time") + ax1[3].legend(loc="upper left") + ax1[3].grid(True, which="both", linestyle="--", linewidth=0.5, alpha=0.7) + + # Plot Batch Size, Learning Rate, and Pruning over time on the same plot ax2 = ax1[4].twinx() - ax1[4].plot(df['Epoch #'], df['Batch Size'], label='Batch Size', color='tab:green', linewidth=1.5) - ax1[4].set_ylim(0, 50) - ax2.plot(df['Epoch #'], df['Learning Rate']*100, label='Learning Rate', color='tab:purple', linewidth=1.5) - ax2.plot(df['Epoch #'], df['Pruning'], label='Pruning', color='tab:red', linewidth=1.5) - ax2.set_ylabel('Value') - ax2.set_ylim(0, 1) - ax1[4].set_xlabel('Epoch #') - ax1[4].set_ylabel('Values') - ax1[4].set_title('Training Param Over Time') - ax1[4].legend(loc='upper left') - ax2.legend(loc='upper right') - ax1[4].grid(True, which='both', linestyle='--', linewidth=0.5) + ax1[4].plot( + df["Epoch #"], + df["Batch Size"], + label="Batch Size", + color="tab:green", + linewidth=1.5, + ) + ax1[4].set_ylim(0, 75) + ax2.plot( + df["Epoch #"], + df["Learning Rate"] * 100, + label="Learning Rate", + color="tab:purple", + linewidth=1.5, + ) + ax2.plot( + df["Epoch #"], df["Pruning"], label="Pruning", color="tab:red", linewidth=1.5 + ) + ax2.set_ylabel("Value") + ax2.set_ylim(0, 2) + ax1[4].set_xlabel("Epoch #") + 
ax1[4].set_ylabel("Values") + ax1[4].set_title("Training Param Over Time") + ax1[4].legend(loc="upper left") + ax2.legend(loc="upper right") + ax1[4].grid(True, which="both", linestyle="--", linewidth=0.5) # Set x tick marks as integers every 5 for ax in ax1: ax.xaxis.set_major_locator(plt.MultipleLocator(5)) - ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{int(x):d}')) + ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f"{int(x):d}")) plt.tight_layout() plt.show() @@ -83,18 +147,18 @@ def log_usage_plot(log_file): # Save the figure to the images folder edgetrain_folder = get_edgetrain_folder() - img_dir = f'{edgetrain_folder}/images/' - timestamp = '_'.join(log_file.split('/')[-1].split('_')[:2]) - fig.savefig(f'{img_dir}/{timestamp}_resource_usage_plot.png') + img_dir = f"{edgetrain_folder}/images/" + timestamp = "_".join(log_file.split("/")[-1].split("_")[:2]) + fig.savefig(f"{img_dir}/{timestamp}_resource_usage_plot.png") def log_train_time(log_file): """ Calculate and print the total training time from the log file based on timestamps. - + Parameters: - log_file (str): The path to the log file (CSV format) containing the timestamps. - + Returns: - total_training_time (timedelta): The total training time. 
""" @@ -104,13 +168,13 @@ def log_train_time(log_file): except FileNotFoundError: print(f"Log file '{log_file}' not found.") return - + # Convert the 'Timestamp' column to datetime format - df['Timestamp'] = pd.to_datetime(df['Timestamp']) + df["Timestamp"] = pd.to_datetime(df["Timestamp"]) # Get the first and last timestamps from the log - start_time = df['Timestamp'].iloc[0] - end_time = df['Timestamp'].iloc[-1] + start_time = df["Timestamp"].iloc[0] + end_time = df["Timestamp"].iloc[-1] # Calculate the total training time total_training_time = end_time - start_time @@ -131,31 +195,36 @@ def training_history_plot(history_list, log_file): Returns: - None """ - - accuracy_values = np.array([epoch['accuracy'] for epoch in history_list]) - loss_values = np.array([epoch['loss'] for epoch in history_list]) + + accuracy_values = np.array([epoch["accuracy"][0] for epoch in history_list]) + loss_values = np.array([epoch["loss"][0] for epoch in history_list]) fig, ax1 = plt.subplots(figsize=(6, 4)) - ax1.set_xlabel('Epochs') - ax1.set_ylabel('Loss', color='red') - ax1.plot(range(1, len(loss_values) + 1), loss_values, 'r', label='Loss') - ax1.tick_params(axis='y', labelcolor='red') - ax1.set_ylim(0, 15) + ax1.set_xlabel("Epochs") + ax1.set_ylabel("Loss", color="red") + ax1.plot(range(1, len(loss_values) + 1), loss_values, "r", label="Loss") + ax1.tick_params(axis="y", labelcolor="red") + ax1.set_ylim(-1, 6) ax2 = ax1.twinx() - ax2.set_ylabel('Accuracy (%)', color='black') - ax2.plot(range(1, len(accuracy_values) + 1), accuracy_values * 100, 'k', label='Accuracy (%)') - ax2.tick_params(axis='y', labelcolor='black') - ax2.set_ylim(0, 100) + ax2.set_ylabel("Accuracy (%)", color="black") + ax2.plot( + range(1, len(accuracy_values) + 1), + accuracy_values * 100, + "k", + label="Accuracy (%)", + ) + ax2.tick_params(axis="y", labelcolor="black") + ax2.set_ylim(45, 105) fig.tight_layout() - plt.title('Training Loss and Accuracy') + plt.title("Training Loss and Accuracy") plt.show() 
fig.canvas.draw() # Save the figure to the images folder edgetrain_folder = get_edgetrain_folder() - img_dir = f'{edgetrain_folder}/images/' - timestamp = '_'.join(log_file.split('/')[-1].split('_')[:2]) - fig.savefig(f'{img_dir}/{timestamp}_training_history_plot.png') \ No newline at end of file + img_dir = f"{edgetrain_folder}/images/" + timestamp = "_".join(log_file.split("/")[-1].split("_")[:2]) + fig.savefig(f"{img_dir}/{timestamp}_training_history_plot.png") diff --git a/notebooks/EdgeTrain_example.ipynb b/notebooks/EdgeTrain_example.ipynb index dfd2098..8c3ce5d 100644 --- a/notebooks/EdgeTrain_example.ipynb +++ b/notebooks/EdgeTrain_example.ipynb @@ -28,16 +28,22 @@ }, "outputs": [], "source": [ - "\n", "# Import libraries\n", "from tensorflow.keras.datasets import mnist\n", "from datetime import datetime\n", "\n", "import sys\n", - "sys.path.append('C:/Users/bedelman/Documents/GitHub/EdgeTrain')\n", - "from edgetrain import dynamic_train, log_usage_plot, training_history_plot, get_edgetrain_folder\n", + "\n", + "sys.path.append(\"C:/Users/bedelman/Documents/GitHub/EdgeTrain\")\n", + "from edgetrain import (\n", + " dynamic_train,\n", + " log_usage_plot,\n", + " training_history_plot,\n", + " get_edgetrain_folder,\n", + ")\n", "\n", "from IPython.display import clear_output\n", + "\n", "clear_output(wait=False)" ] }, @@ -61,9 +67,9 @@ "source": [ "# Load the MNIST dataset from tensorflow\n", "(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n", - "train_images = train_images.reshape(-1, 28, 28, 1).astype('float32') / 255.0\n", - "test_images = test_images.reshape(-1, 28, 28, 1).astype('float32') / 255.0\n", - "train_dataset = {'images': train_images, 'labels': train_labels}" + "train_images = train_images.reshape(-1, 28, 28, 1).astype(\"float32\") / 255.0\n", + "test_images = test_images.reshape(-1, 28, 28, 1).astype(\"float32\") / 255.0\n", + "train_dataset = {\"images\": train_images, \"labels\": train_labels}" ] }, { @@ -125,13 
+131,13 @@ "source": [ "# Perform dynamic training with edgetrain and log resource usage\n", "history_list = dynamic_train(\n", - " train_dataset, \n", - " epochs=20, \n", - " batch_size=32, \n", - " lr=1e-3, \n", - " pruning=0.2, \n", - " log_file=log_file, \n", - " dynamic_adjustments=True\n", + " train_dataset,\n", + " epochs=20,\n", + " batch_size=32,\n", + " lr=1e-3,\n", + " pruning=0.2,\n", + " log_file=log_file,\n", + " dynamic_adjustments=True,\n", ")\n", "clear_output(wait=False)" ] @@ -223,7 +229,7 @@ ], "source": [ "# Visualize resource usage during training\n", - "log_usage_plot(log_file)\n" + "log_usage_plot(log_file)" ] } ], diff --git a/requirements-dev.txt b/requirements-dev.txt index 22ad2b7..6eadadd 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,10 +1,15 @@ -pytest>=6.0 -pytest-cov>=2.10 -tensorflow>=2.0.0 -psutil>=5.0.0 -GPUtil>=1.4.0 -matplotlib>=3.7.0 -pandas>=1.5.0 -numpy>=1.24.0 -pynvml>=8.0.0 -torch>=2.5.1 +# Main project dependencies +-r requirements.txt + +# Linting +black +flake8 +isort + +# Pre-commit hooks +pre-commit + +# Testing +pytest +pytest-cov +pytest-mock \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index fa92b3c..bd656ab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,16 @@ +# General libraries psutil>=5.0.0 GPUtil>=1.4.0 matplotlib>=3.7.0 pandas>=1.5.0 -# numpy>=1.24.0 pynvml>=8.0.0 -# Last stable combination of TF + Keras + TFMOT + +# Last stable combination of TF + Keras + TFMOT for me tensorflow==2.12.0 keras==2.12.0 tensorflow-model-optimization==0.7.3 + +# Notebooks jupyter diff --git a/setup.py b/setup.py index fddb64e..5db1747 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup, find_packages +from setuptools import find_packages, setup setup( name="edgetrain", @@ -12,12 +12,12 @@ "pandas>=1.5.0", "numpy>=1.24.0", "pynvml>=8.0.0", - "torch>=2.5.1", + "torch>=2.5.1", ], extras_require={ - 'dev': [ - 'pytest', # for testing 
- ] + "dev": [ + "pytest", # for testing + ] }, description="A utility for machine learning training with limited resources.", author="Bradley Edelman", diff --git a/tests/__init__.py b/tests/__init__.py index 0ea31d7..1406b77 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,5 +1,4 @@ -import sys import os +import sys -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) - +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) diff --git a/tests/test_adjust_train_parameters.py b/tests/test_adjust_train_parameters.py index c89b45f..8556cdf 100644 --- a/tests/test_adjust_train_parameters.py +++ b/tests/test_adjust_train_parameters.py @@ -1,14 +1,15 @@ import pytest -from unittest.mock import patch + from edgetrain import adjust_training_parameters + @pytest.fixture def default_parameters(): return { "priority_values": {"batch_size": 0.6, "learning_rate": 0.4}, "batch_size": 32, "lr": 0.001, - "accuracy_score": 0.8 + "accuracy_score": 0.8, } @@ -21,7 +22,9 @@ def test_adjust_batch_size_high_memory(default_parameters): adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) - assert adjusted_batch_size == 16, "Batch size adjustment for high memory usage failed." + assert ( + adjusted_batch_size == 16 + ), "Batch size adjustment for high memory usage failed." assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." @@ -33,8 +36,10 @@ def test_adjust_batch_size_low_memory(default_parameters): params["resources"] = sys_resources adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) - - assert adjusted_batch_size == 64, "Batch size adjustment for low memory usage failed." + + assert ( + adjusted_batch_size == 64 + ), "Batch size adjustment for low memory usage failed." assert adjusted_lr == params["lr"], "Learning rate should remain unchanged." 
@@ -47,9 +52,13 @@ def test_adjust_learning_rate_low_accuracy(default_parameters): params["resources"] = sys_resources adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) - assert adjusted_lr == pytest.approx(0.0005, rel=1e-2), "Learning rate adjustment for low accuracy failed." - assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." - + assert adjusted_lr == pytest.approx( + 0.0005, rel=1e-2 + ), "Learning rate adjustment for low accuracy failed." + assert ( + adjusted_batch_size == params["batch_size"] + ), "Batch size should remain unchanged." + def test_adjust_learning_rate_high_accuracy(default_parameters): # Simulate high accuracy @@ -61,17 +70,29 @@ def test_adjust_learning_rate_high_accuracy(default_parameters): adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) - assert adjusted_lr == pytest.approx(0.0012, rel=1e-2), "Learning rate adjustment for high accuracy failed." - assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged." + assert adjusted_lr == pytest.approx( + 0.0012, rel=1e-2 + ), "Learning rate adjustment for high accuracy failed." + assert ( + adjusted_batch_size == params["batch_size"] + ), "Batch size should remain unchanged." def test_no_adjustments_when_no_priorities(default_parameters): # Simulate balanced memory and low priorities sys_resources = {"cpu_memory_percent": 60, "gpu_memory_percent": 60} params = default_parameters.copy() - params["priority_values"] = {"batch_size": 0.0, "pruning": 0.0, "learning_rate": 0.0} + params["priority_values"] = { + "batch_size": 0.0, + "pruning": 0.0, + "learning_rate": 0.0, + } params["resources"] = sys_resources adjusted_batch_size, adjusted_lr = adjust_training_parameters(**params) - assert adjusted_batch_size == params["batch_size"], "Batch size should remain unchanged with no priorities." - assert adjusted_lr == params["lr"], "Learning rate should remain unchanged with no priorities." 
+ assert ( + adjusted_batch_size == params["batch_size"] + ), "Batch size should remain unchanged with no priorities." + assert ( + adjusted_lr == params["lr"] + ), "Learning rate should remain unchanged with no priorities." diff --git a/tests/test_calculate_priorities.py b/tests/test_calculate_priorities.py index 816264f..cc0ba97 100644 --- a/tests/test_calculate_priorities.py +++ b/tests/test_calculate_priorities.py @@ -1,59 +1,62 @@ import pytest + from edgetrain import define_priorities + def test_define_priorities_with_default_priorities(): # Test default priorities with normalized scores - normalized_scores = { - "memory_score": 0.8, - "accuracy_score": 0.4 - } - + normalized_scores = {"memory_score": 0.8, "accuracy_score": 0.4} + priority_value = define_priorities(normalized_scores) - + # Default priorities: batch_size: 0.4, accuracy_improvement: 0.6 - assert priority_value["batch_size"] == pytest.approx(0.32, rel=1e-3), "Batch size priority calculation failed." - assert priority_value["learning_rate"] == pytest.approx(0.24, rel=1e-3), "Learning rate priority calculation failed." + assert priority_value["batch_size"] == pytest.approx( + 0.32, rel=1e-3 + ), "Batch size priority calculation failed." + assert priority_value["learning_rate"] == pytest.approx( + 0.24, rel=1e-3 + ), "Learning rate priority calculation failed." 
def test_define_priorities_with_custom_priorities(): # Test user-defined priorities with normalized scores - normalized_scores = { - "memory_score": 0.5, - "accuracy_score": 0.7 - } - user_priorities = { - "batch_size_adjustment": 0.8, - "accuracy_improvement": 0.2 - } - + normalized_scores = {"memory_score": 0.5, "accuracy_score": 0.7} + user_priorities = {"batch_size_adjustment": 0.8, "accuracy_improvement": 0.2} + priority_value = define_priorities(normalized_scores, user_priorities) - + # Custom priorities: batch_size: 0.8, accuracy_improvement: 0.2 - assert priority_value["batch_size"] == pytest.approx(0.4, rel=1e-3), "Batch size priority with custom priorities failed." - assert priority_value["learning_rate"] == pytest.approx(0.14, rel=1e-3), "Learning rate priority with custom priorities failed." + assert priority_value["batch_size"] == pytest.approx( + 0.4, rel=1e-3 + ), "Batch size priority with custom priorities failed." + assert priority_value["learning_rate"] == pytest.approx( + 0.14, rel=1e-3 + ), "Learning rate priority with custom priorities failed." def test_define_priorities_with_zero_scores(): # Test edge case where all normalized scores are zero - normalized_scores = { - "memory_score": 0.0, - "accuracy_score": 0.0 - } - + normalized_scores = {"memory_score": 0.0, "accuracy_score": 0.0} + priority_value = define_priorities(normalized_scores) - - assert priority_value["batch_size"] == 0.0, "Batch size priority with zero scores failed." - assert priority_value["learning_rate"] == 0.0, "Learning rate priority with zero scores failed." + + assert ( + priority_value["batch_size"] == 0.0 + ), "Batch size priority with zero scores failed." + assert ( + priority_value["learning_rate"] == 0.0 + ), "Learning rate priority with zero scores failed." 
def test_define_priorities_with_extreme_scores(): # Test edge case with extreme normalized scores - normalized_scores = { - "memory_score": 1.0, - "accuracy_score": 1.0 - } - + normalized_scores = {"memory_score": 1.0, "accuracy_score": 1.0} + priority_value = define_priorities(normalized_scores) - - assert priority_value["batch_size"] == 0.40, "Batch size priority with extreme scores failed." - assert priority_value["learning_rate"] == 0.60, "Learning rate priority with extreme scores failed." + + assert ( + priority_value["batch_size"] == 0.40 + ), "Batch size priority with extreme scores failed." + assert ( + priority_value["learning_rate"] == 0.60 + ), "Learning rate priority with extreme scores failed." diff --git a/tests/test_calculate_scores.py b/tests/test_calculate_scores.py index 95f78e6..07fab62 100644 --- a/tests/test_calculate_scores.py +++ b/tests/test_calculate_scores.py @@ -1,6 +1,8 @@ import pytest + from edgetrain import compute_scores, normalize_scores + def test_normalize_scores(): # Test normalization of raw scores with specified ranges raw_scores = {"memory_score": 50, "accuracy_score": 0.5} @@ -12,70 +14,81 @@ def test_normalize_scores(): def test_compute_scores(): # Mock system resource usage - mock_resources = { - "num_gpus": 1, - "cpu_memory_percent": 60, - "gpu_memory_percent": 40 - } + mock_resources = {"num_gpus": 1, "cpu_memory_percent": 60, "gpu_memory_percent": 40} # Test with a decrease in accuracy previous_accuracy = 0.8 current_accuracy = 0.6 - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) + scores = compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources + ) assert scores["memory_score"] == 0.5, "Memory score calculation failed with GPUs." - assert scores["accuracy_score"] == pytest.approx(1, rel=1e-3), "Accuracy score calculation failed." 
- + assert scores["accuracy_score"] == pytest.approx( + 1, rel=1e-3 + ), "Accuracy score calculation failed." + # Test with no GPUs - mock_resources = { - "num_gpus": 0, - "cpu_memory_percent": 75 - } - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["memory_score"] == 0.75, "Memory score calculation failed without GPUs." + mock_resources = {"num_gpus": 0, "cpu_memory_percent": 75} + scores = compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources + ) + assert ( + scores["memory_score"] == 0.75 + ), "Memory score calculation failed without GPUs." # Test edge case with accuracy stagnation previous_accuracy = 0.7 current_accuracy = 0.7 - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == pytest.approx(1, rel=1e-3), "Accuracy score should be 1 for stagnation." + scores = compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources + ) + assert scores["accuracy_score"] == pytest.approx( + 1, rel=1e-3 + ), "Accuracy score should be 1 for stagnation." # Test edge case where current accuracy is higher (clamped at 0) previous_accuracy = 0.6 current_accuracy = 0.8 - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == pytest.approx(0.8, rel=1e-3), "Accuracy score should be 1 for decreasing accuracy." + scores = compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources + ) + assert scores["accuracy_score"] == pytest.approx( + 0.8, rel=1e-3 + ), "Accuracy score should be 1 for decreasing accuracy." 
def test_compute_scores_with_custom_ranges(): # Mock resources - mock_resources = { - "num_gpus": 1, - "cpu_memory_percent": 80, - "gpu_memory_percent": 60 - } + mock_resources = {"num_gpus": 1, "cpu_memory_percent": 80, "gpu_memory_percent": 60} # Test custom ranges - score_ranges = { - "memory_score_range": 200, - "accuracy_score_range": 0.5 - } + score_ranges = {"memory_score_range": 200, "accuracy_score_range": 0.5} - previous_accuracy=0.8 - current_accuracy=0.6 - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=score_ranges, resources=mock_resources) - assert scores["memory_score"] == pytest.approx(0.35, rel=1e-3), "Memory score normalization with custom range failed." - assert scores["accuracy_score"] == pytest.approx(2.0, rel=1e-3), "Accuracy score normalization with custom range failed." + previous_accuracy = 0.8 + current_accuracy = 0.6 + scores = compute_scores( + previous_accuracy, + current_accuracy, + score_ranges=score_ranges, + resources=mock_resources, + ) + assert scores["memory_score"] == pytest.approx( + 0.35, rel=1e-3 + ), "Memory score normalization with custom range failed." + assert scores["accuracy_score"] == pytest.approx( + 2.0, rel=1e-3 + ), "Accuracy score normalization with custom range failed." def test_compute_scores_with_acc_improvement(): # Mock resources - mock_resources = { - "num_gpus": 1, - "cpu_memory_percent": 80, - "gpu_memory_percent": 60 - } - - previous_accuracy=0.6 - current_accuracy=0.8 - scores = compute_scores(previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources) - assert scores["accuracy_score"] == pytest.approx(0.80, rel=1e-3), "Accuracy score normalization with custom range failed." 
+ mock_resources = {"num_gpus": 1, "cpu_memory_percent": 80, "gpu_memory_percent": 60} + + previous_accuracy = 0.6 + current_accuracy = 0.8 + scores = compute_scores( + previous_accuracy, current_accuracy, score_ranges=None, resources=mock_resources + ) + assert scores["accuracy_score"] == pytest.approx( + 0.80, rel=1e-3 + ), "Accuracy score normalization with custom range failed." diff --git a/tests/test_create_model_tf.py b/tests/test_create_model_tf.py index 7259744..bad7b02 100644 --- a/tests/test_create_model_tf.py +++ b/tests/test_create_model_tf.py @@ -1,15 +1,19 @@ import pytest -import tensorflow as tf from tensorflow.keras import models + from edgetrain import create_model_tf + def test_create_model_without_path(): # Create a model without providing a preloaded model path. input_shape = (28, 28, 1) model = create_model_tf(input_shape) - assert isinstance(model, models.Sequential), "Model should be an instance of Sequential" + assert isinstance( + model, models.Sequential + ), "Model should be an instance of Sequential" assert model.input_shape[1:] == input_shape, "Input shape does not match" + # def test_create_model_with_valid_path(tmp_path): # # Create a model by loading from a valid path. # input_shape = (28, 28, 1) @@ -20,6 +24,7 @@ def test_create_model_without_path(): # loaded_model = create_model_tf(input_shape, model_path=str(model_path)) # assert isinstance(loaded_model, models.Sequential), "Loaded model should be an instance of Sequential" + def test_create_model_invalid_input_shape(): # Test that a ValueError is raised when input_shape is None. 
with pytest.raises(ValueError, match="Input shape must be defined."): diff --git a/tests/test_log_usage_once.py b/tests/test_log_usage_once.py index 46066fc..21f77f3 100644 --- a/tests/test_log_usage_once.py +++ b/tests/test_log_usage_once.py @@ -1,19 +1,22 @@ -import os, csv +import csv +import os from datetime import datetime -from edgetrain import log_usage_once, compute_scores, define_priorities + +from edgetrain import compute_scores, define_priorities, log_usage_once + def test_log_usage_once(tmpdir): # Mock resource usage mock_resources = { - 'num_gpus': 0, - 'cpu_compute_percent': 30.0, - 'cpu_memory_percent': 40.0, - 'gpu_compute_percent': 45.0, - 'gpu_memory_percent': 50.0 + "num_gpus": 0, + "cpu_compute_percent": 30.0, + "cpu_memory_percent": 40.0, + "gpu_compute_percent": 45.0, + "gpu_memory_percent": 50.0, } - + # Create a temporary log file - log_file = os.path.join(tmpdir, 'test_log.csv') + log_file = os.path.join(tmpdir, "test_log.csv") # Call the function to log usage lr = 0.001 @@ -24,44 +27,70 @@ def test_log_usage_once(tmpdir): curr_accuracy = 0.6 # Calculate performance and resource usage scores - normalized_scores = compute_scores(prev_accuracy, curr_accuracy, score_ranges=None, resources=mock_resources) + normalized_scores = compute_scores( + prev_accuracy, curr_accuracy, score_ranges=None, resources=mock_resources + ) priority_value = define_priorities(normalized_scores) - log_usage_once(log_file, pruning, batch_size, lr, normalized_scores, priority_value, num_epoch, resources=mock_resources) + log_usage_once( + log_file, + pruning, + batch_size, + lr, + normalized_scores, + priority_value, + num_epoch, + resources=mock_resources, + ) # Verify the log file is created assert os.path.exists(log_file), "Log file was not created." 
# Read the log file and verify contents - with open(log_file, 'r') as f: + with open(log_file, "r") as f: reader = csv.DictReader(f) rows = list(reader) - + # Check if the header is correct expected_header = [ - 'Timestamp', 'Epoch #', 'CPU Usage (%)', 'CPU RAM (%)', - 'GPU RAM (%)', 'GPU Usage (%)', - 'Mem Score', 'Acc Score', - 'Priority Batch Size', 'Priority Learning Rate', - 'Pruning', 'Batch Size', 'Learning Rate', - ] + "Timestamp", + "Epoch #", + "CPU Usage (%)", + "CPU RAM (%)", + "GPU RAM (%)", + "GPU Usage (%)", + "Mem Score", + "Acc Score", + "Priority Batch Size", + "Priority Learning Rate", + "Pruning", + "Batch Size", + "Learning Rate", + ] assert reader.fieldnames == expected_header, "Log file header is incorrect." # Check if the log entry contains expected values assert len(rows) == 1, "Log file should contain one entry." log_entry = rows[0] - assert log_entry['Epoch #'] == str(num_epoch), "Epoch number mismatch." - assert log_entry['Mem Score'] == str(normalized_scores.get('memory_score')), "Mem score mismatch." - assert log_entry['Acc Score'] == str(normalized_scores.get('accuracy_score')), "Acc score mismatch." - assert log_entry['Priority Batch Size'] == str(priority_value.get('batch_size')), "Priority batch size mismatch." - assert log_entry['Priority Learning Rate'] == str(priority_value.get('learning_rate')), "Priority learning rate mismatch." - assert log_entry['Pruning'] == str(pruning), "Pruning ratio mismatch." - assert log_entry['Batch Size'] == str(batch_size), "Batch size mismatch." - assert log_entry['Learning Rate'] == str(lr), "Learning rate mismatch." + assert log_entry["Epoch #"] == str(num_epoch), "Epoch number mismatch." + assert log_entry["Mem Score"] == str( + normalized_scores.get("memory_score") + ), "Mem score mismatch." + assert log_entry["Acc Score"] == str( + normalized_scores.get("accuracy_score") + ), "Acc score mismatch." 
+ assert log_entry["Priority Batch Size"] == str( + priority_value.get("batch_size") + ), "Priority batch size mismatch." + assert log_entry["Priority Learning Rate"] == str( + priority_value.get("learning_rate") + ), "Priority learning rate mismatch." + assert log_entry["Pruning"] == str(pruning), "Pruning ratio mismatch." + assert log_entry["Batch Size"] == str(batch_size), "Batch size mismatch." + assert log_entry["Learning Rate"] == str(lr), "Learning rate mismatch." # Validate timestamp format try: - datetime.strptime(log_entry['Timestamp'], "%Y-%m-%d %H:%M:%S") + datetime.strptime(log_entry["Timestamp"], "%Y-%m-%d %H:%M:%S") except ValueError: assert False, "Timestamp format is incorrect." - diff --git a/tests/test_sys_resources.py b/tests/test_sys_resources.py index ecea886..01ec86c 100644 --- a/tests/test_sys_resources.py +++ b/tests/test_sys_resources.py @@ -1,40 +1,51 @@ -import pytest from unittest import mock + +import pytest + from edgetrain.resource_monitor import sys_resources + # Mock the psutil and GPUtil modules @pytest.fixture def mock_psutil_and_gputil(): - with mock.patch("edgetrain.resource_monitor.psutil.cpu_percent") as mock_cpu_percent, \ - mock.patch("edgetrain.resource_monitor.psutil.cpu_count") as mock_cpu_count, \ - mock.patch("edgetrain.resource_monitor.psutil.virtual_memory") as mock_virtual_memory, \ - mock.patch("edgetrain.resource_monitor.GPUtil.getGPUs") as mock_get_gpus, \ - mock.patch("edgetrain.resource_monitor.nvmlInit") as mock_nvml_init, \ - mock.patch("edgetrain.resource_monitor.nvmlShutdown") as mock_nvml_shutdown, \ - mock.patch("edgetrain.resource_monitor.nvmlDeviceGetUtilizationRates") as mock_nvml_device_utilization, \ - mock.patch("edgetrain.resource_monitor.nvmlDeviceGetHandleByIndex") as mock_nvml_device_handle: - + with mock.patch( + "edgetrain.resource_monitor.psutil.cpu_percent" + ) as mock_cpu_percent, mock.patch( + "edgetrain.resource_monitor.psutil.cpu_count" + ) as mock_cpu_count, mock.patch( + 
"edgetrain.resource_monitor.psutil.virtual_memory" + ) as mock_virtual_memory, mock.patch( + "edgetrain.resource_monitor.GPUtil.getGPUs" + ) as mock_get_gpus, mock.patch( + "edgetrain.resource_monitor.nvmlInit" + ) as mock_nvml_init, mock.patch( + "edgetrain.resource_monitor.nvmlShutdown" + ) as mock_nvml_shutdown, mock.patch( + "edgetrain.resource_monitor.nvmlDeviceGetUtilizationRates" + ) as mock_nvml_device_utilization, mock.patch( + "edgetrain.resource_monitor.nvmlDeviceGetHandleByIndex" + ) as mock_nvml_device_handle: # Setup mock return values for psutil mock_cpu_percent.return_value = 50.0 mock_cpu_count.return_value = 8 mock_virtual_memory.return_value.percent = 75.0 - + # Setup mock return values for GPUtil mock_get_gpus.return_value = [ mock.Mock(memoryUsed=1000, memoryTotal=8000, memoryUtil=0.12), mock.Mock(memoryUsed=1200, memoryTotal=8000, memoryUtil=0.15), ] - - # Mock nvmlInit and nvmlShutdown (no-op) + + # Mock nvmlInit and nvmlShutdown mock_nvml_init.return_value = None mock_nvml_shutdown.return_value = None - + # Mock nvmlDeviceGetHandleByIndex to return a dummy handle mock_nvml_device_handle.side_effect = lambda i: f"handle_{i}" - + # Mock nvmlDeviceGetUtilizationRates to return a mock object with a gpu attribute mock_nvml_device_utilization.return_value = mock.Mock(gpu=60) - + yield { "mock_cpu_percent": mock_cpu_percent, "mock_cpu_count": mock_cpu_count, @@ -48,7 +59,6 @@ def mock_psutil_and_gputil(): def test_sys_resources(mock_psutil_and_gputil): - result = sys_resources() # Check if the function returns a dictionary with the expected keys @@ -68,6 +78,6 @@ def test_sys_resources(mock_psutil_and_gputil): assert result["gpu_compute_percent"] == 60.0 assert result["gpu_memory_usage"] == 2200 assert result["gpu_memory_total"] == 16000 - + # Compare the fractional value instead of percentage assert result["gpu_memory_percent"] == 0.135 From 86fa4eae63abd116740708d0d27ab08d79e40ec4 Mon Sep 17 00:00:00 2001 From: Bradley Edelman 
<63209487+BradleyEdelman@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:54:02 +0100 Subject: [PATCH 7/7] Update README.md --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d7d17b3..7d90f92 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # EdgeTrain: Automated Resource Adjustment for Efficient Edge AI Training **Version: 0.2.0** -EdgeTrain is a Python package designed to dynamically adjust deep learning training parameters and strategies based on CPU and GPU performance. It optimizes the training process by adjusting batch size and learning rate to ensure efficient training without overutilizing or underutilizing available resources. This package is specifically designed to reduce memory usage for model training on edge AI devices, laptops or other setups that have limited memory. +EdgeTrain is a Python package designed to dynamically adjust deep learning training parameters and strategies based on CPU and GPU performance. It optimizes the training process by adjusting batch size and learning rate to ensure efficient training without overutilizing or underutilizing available resources. This package is specifically designed to balance model training performance and memory usage on edge AI devices, laptops or other setups that have limited memory. ## Features @@ -16,7 +16,7 @@ EdgeTrain logs system performance and training parameters, allowing post-hoc vis - Training parameter adjustments across epochs. - Correlations between resource usage and model performance. -The provided visualization tools help you understand how system resources are being utilized and how training parameters evolve during training. +The provided visualization tools illustrate how system resources are being utilized and how training parameters evolve during training. ### Customization and control EdgeTrain is highly customizable. You can easily modify: @@ -27,8 +27,8 @@ EdgeTrain is highly customizable. 
You can easily modify: ## Release Notes for v0.2.0 This version introduces a **refined adaptive training strategy with a constant pruning ratio**. Key updates: -- **Score Calculation**: This version now computes an **accuracy score** and a **memory score** based on resource usage and model performance -- **Parameter Prioritization**: Accuracy and memory scores are weighted according to default or user-defined priority weighting to idenfity a priority list for parameter adjustment. Now, only the top priority paramater is adjusted in each epoch. +- **Score Calculation**: This version now computes an **accuracy score** and a **memory score** based on resource usage and model performance. +- **Parameter Prioritization**: Accuracy and memory scores are weighted according to default or user-defined priority weighting schemes to identify a priority order for parameter adjustment. Only the top priority parameter is adjusted in each epoch. - **Batch size priority** is weighted by memory usage. - **Learning rate priority** is inversely weighted by accuracy improvement (i.e. increases if accuracy stagnates). - **Fixed Pruning Ratio**: Pruning is constant and is stripped at the end. @@ -105,7 +105,9 @@ EdgeTrain/ │ ├── ci.yml │ └──lint.yml │ +├── .flake8 ├── .gitignore +├── .isort.cfg ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE