diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1 @@ + diff --git a/1_simple_rag.ipynb b/01_simple_rag.ipynb similarity index 99% rename from 1_simple_rag.ipynb rename to 01_simple_rag.ipynb index c4f9325..611959f 100644 --- a/1_simple_rag.ipynb +++ b/01_simple_rag.ipynb @@ -176,7 +176,7 @@ ], "source": [ "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/2_semantic_chunking.ipynb b/02_semantic_chunking.ipynb similarity index 99% rename from 2_semantic_chunking.ipynb rename to 02_semantic_chunking.ipynb index 61ff12e..f7c43a9 100644 --- a/2_semantic_chunking.ipynb +++ b/02_semantic_chunking.ipynb @@ -92,7 +92,7 @@ " return all_text.strip()\n", "\n", "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/3_chunk_size_selector.ipynb b/03_chunk_size_selector.ipynb similarity index 99% rename from 3_chunk_size_selector.ipynb rename to 03_chunk_size_selector.ipynb index 7f00868..b30cc86 100644 --- a/3_chunk_size_selector.ipynb +++ b/03_chunk_size_selector.ipynb @@ -70,7 +70,7 @@ "metadata": {}, "source": [ "## Extracting Text from the PDF\n", - "First, we will extract text from the `AI_information.pdf` file." + "First, we will extract text from the `AI_Information.pdf` file." 
] }, { @@ -116,7 +116,7 @@ " return all_text.strip()\n", "\n", "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/4_context_enriched_rag.ipynb b/04_context_enriched_rag.ipynb similarity index 99% rename from 4_context_enriched_rag.ipynb rename to 04_context_enriched_rag.ipynb index 33c274f..6ad52bf 100644 --- a/4_context_enriched_rag.ipynb +++ b/04_context_enriched_rag.ipynb @@ -175,7 +175,7 @@ ], "source": [ "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/5_contextual_chunk_headers_rag.ipynb b/05_contextual_chunk_headers_rag.ipynb similarity index 99% rename from 5_contextual_chunk_headers_rag.ipynb rename to 05_contextual_chunk_headers_rag.ipynb index b681ce1..e65951f 100644 --- a/5_contextual_chunk_headers_rag.ipynb +++ b/05_contextual_chunk_headers_rag.ipynb @@ -212,7 +212,7 @@ ], "source": [ "# Define the PDF file path\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/6_doc_augmentation_rag.ipynb b/06_doc_augmentation_rag.ipynb similarity index 99% rename from 6_doc_augmentation_rag.ipynb rename to 06_doc_augmentation_rag.ipynb index a626655..a3107d9 100644 --- a/6_doc_augmentation_rag.ipynb +++ b/06_doc_augmentation_rag.ipynb @@ -428,7 +428,7 @@ ], "source": [ "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Process the document (extract text, create chunks, generate questions, build vector store)\n", "text_chunks, vector_store = 
process_document(\n", @@ -846,7 +846,7 @@ ], "source": [ "# Define the path to the PDF file\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Extract text from the PDF file\n", "extracted_text = extract_text_from_pdf(pdf_path)\n", diff --git a/7_query_transform.ipynb b/07_query_transform.ipynb similarity index 99% rename from 7_query_transform.ipynb rename to 07_query_transform.ipynb index 85e4a64..fb61415 100644 --- a/7_query_transform.ipynb +++ b/07_query_transform.ipynb @@ -1033,7 +1033,7 @@ "reference_answer = data[0]['ideal_answer']\n", "\n", "# pdf_path\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Run evaluation\n", "evaluation_results = evaluate_transformations(pdf_path, query, reference_answer)" diff --git a/8_reranker.ipynb b/08_reranker.ipynb similarity index 99% rename from 8_reranker.ipynb rename to 08_reranker.ipynb index d4e449f..2496cd1 100644 --- a/8_reranker.ipynb +++ b/08_reranker.ipynb @@ -609,7 +609,7 @@ "reference_answer = data[0]['ideal_answer']\n", "\n", "# pdf_path\n", - "pdf_path = \"data/AI_information.pdf\"" + "pdf_path = \"data/AI_Information.pdf\"" ] }, { diff --git a/9_rse.ipynb b/09_rse.ipynb similarity index 99% rename from 9_rse.ipynb rename to 09_rse.ipynb index f638d28..ae9f263 100644 --- a/9_rse.ipynb +++ b/09_rse.ipynb @@ -871,7 +871,7 @@ "reference_answer = data[0]['ideal_answer']\n", "\n", "# pdf_path\n", - "pdf_path = \"data/AI_information.pdf\"\n", + "pdf_path = \"data/AI_Information.pdf\"\n", "\n", "# Run evaluation\n", "results = evaluate_methods(pdf_path, query, reference_answer)" diff --git a/11_feedback_loop_rag.ipynb b/11_feedback_loop_rag.ipynb new file mode 100644 index 0000000..bf208b5 --- /dev/null +++ b/11_feedback_loop_rag.ipynb @@ -0,0 +1,1313 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Feedback Loop in 
RAG\n", + "\n", + "In this notebook, I implement a RAG system with a feedback loop mechanism that continuously improves over time. By collecting and incorporating user feedback, our system learns to provide more relevant and higher-quality responses with each interaction.\n", + "\n", + "Traditional RAG systems are static - they retrieve information based solely on embedding similarity. With a feedback loop, we create a dynamic system that:\n", + "\n", + "- Remembers what worked (and what didn't)\n", + "- Adjusts document relevance scores over time\n", + "- Incorporates successful Q&A pairs into its knowledge base\n", + "- Gets smarter with each user interaction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import fitz\n", + "import os\n", + "import numpy as np\n", + "import json\n", + "from openai import OpenAI\n", + "from datetime import datetime" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extracting Text from a PDF File\n", + "To implement RAG, we first need a source of textual data. In this case, we extract text from a PDF file using the PyMuPDF library." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extracts text from a PDF file and prints the first `num_chars` characters.\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file.\n", + "\n", + " Returns:\n", + " str: Extracted text from the PDF.\n", + " \"\"\"\n", + " # Open the PDF file\n", + " mypdf = fitz.open(pdf_path)\n", + " all_text = \"\" # Initialize an empty string to store the extracted text\n", + "\n", + " # Iterate through each page in the PDF\n", + " for page_num in range(mypdf.page_count):\n", + " page = mypdf[page_num] # Get the page\n", + " text = page.get_text(\"text\") # Extract text from the page\n", + " all_text += text # Append the extracted text to the all_text string\n", + "\n", + " return all_text # Return the extracted text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chunking the Extracted Text\n", + "Once we have the extracted text, we divide it into smaller, overlapping chunks to improve retrieval accuracy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, n, overlap):\n", + " \"\"\"\n", + " Chunks the given text into segments of n characters with overlap.\n", + "\n", + " Args:\n", + " text (str): The text to be chunked.\n", + " n (int): The number of characters in each chunk.\n", + " overlap (int): The number of overlapping characters between chunks.\n", + "\n", + " Returns:\n", + " List[str]: A list of text chunks.\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Loop through the text with a step size of (n - overlap)\n", + " for i in range(0, len(text), n - overlap):\n", + " # Append a chunk of text from index i to i + n to the chunks list\n", + " chunks.append(text[i:i + n])\n", + "\n", + " return chunks # Return the list of text chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation\n", + "We'll create a basic vector store to manage document chunks and their embeddings." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \n", + " This class provides an in-memory storage and retrieval system for \n", + " embedding vectors and their corresponding text chunks and metadata.\n", + " It supports basic similarity search functionality using cosine similarity.\n", + " \"\"\"\n", + " def __init__(self):\n", + " \"\"\"\n", + " Initialize the vector store with empty lists for vectors, texts, and metadata.\n", + " \n", + " The vector store maintains three parallel lists:\n", + " - vectors: NumPy arrays of embedding vectors\n", + " - texts: Original text chunks corresponding to each vector\n", + " - metadata: Optional metadata dictionaries for each item\n", + " \"\"\"\n", + " self.vectors = [] # List to store embedding vectors\n", + " self.texts = [] # List to store original text chunks\n", + " self.metadata = [] # List to store metadata for each text chunk\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + "\n", + " Args:\n", + " text (str): The original text chunk to store.\n", + " embedding (List[float]): The embedding vector representing the text.\n", + " metadata (dict, optional): Additional metadata for the text chunk,\n", + " such as source, timestamp, or relevance scores.\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Convert and store the embedding\n", + " self.texts.append(text) # Store the original text\n", + " self.metadata.append(metadata or {}) # Store metadata (empty dict if None)\n", + " \n", + " def similarity_search(self, query_embedding, k=5, filter_func=None):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding using cosine similarity.\n", + "\n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector to compare against stored 
vectors.\n", + " k (int): Number of most similar results to return.\n", + " filter_func (callable, optional): Function to filter results based on metadata.\n", + " Takes metadata dict as input and returns boolean.\n", + "\n", + " Returns:\n", + " List[Dict]: Top k most similar items, each containing:\n", + " - text: The original text\n", + " - metadata: Associated metadata\n", + " - similarity: Raw cosine similarity score\n", + " - relevance_score: Either metadata-based relevance or calculated similarity\n", + " \n", + " Note: Returns empty list if no vectors are stored or none pass the filter.\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return empty list if vector store is empty\n", + " \n", + " # Convert query embedding to numpy array for vector operations\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate cosine similarity between query and each stored vector\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " # Skip items that don't pass the filter criteria\n", + " if filter_func and not filter_func(self.metadata[i]):\n", + " continue\n", + " \n", + " # Calculate cosine similarity: dot product / (norm1 * norm2)\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity)) # Store index and similarity score\n", + " \n", + " # Sort results by similarity score in descending order\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Construct result dictionaries for the top k matches\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx],\n", + " \"metadata\": self.metadata[idx],\n", + " \"similarity\": score,\n", + " # Use pre-existing relevance score from metadata if available, otherwise use similarity\n", + " \"relevance_score\": 
self.metadata[idx].get(\"relevance_score\", score)\n", + " })\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(text, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Creates embeddings for the given text.\n", + "\n", + " Args:\n", + " text (str or List[str]): The input text(s) for which embeddings are to be created.\n", + " model (str): The model to be used for creating embeddings.\n", + "\n", + " Returns:\n", + " List[float] or List[List[float]]: The embedding vector(s).\n", + " \"\"\"\n", + " # Convert single string to list for uniform processing\n", + " input_text = text if isinstance(text, list) else [text]\n", + " \n", + " # Call the OpenAI API to generate embeddings for all input texts\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=input_text\n", + " )\n", + " \n", + " # For single string input, return just the first embedding vector\n", + " if isinstance(text, str):\n", + " return response.data[0].embedding\n", + " \n", + " # For list input, return a list of all embedding vectors\n", + " return [item.embedding for item in response.data]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Feedback System Functions\n", + "Now we'll implement the core feedback system components." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def get_user_feedback(query, response, relevance, quality, comments=\"\"):\n", + " \"\"\"\n", + " Format user feedback in a dictionary.\n", + " \n", + " Args:\n", + " query (str): User's query\n", + " response (str): System's response\n", + " relevance (int): Relevance score (1-5)\n", + " quality (int): Quality score (1-5)\n", + " comments (str): Optional feedback comments\n", + " \n", + " Returns:\n", + " Dict: Formatted feedback\n", + " \"\"\"\n", + " return {\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"relevance\": int(relevance),\n", + " \"quality\": int(quality),\n", + " \"comments\": comments,\n", + " \"timestamp\": datetime.now().isoformat()\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def store_feedback(feedback, feedback_file=\"feedback_data.json\"):\n", + " \"\"\"\n", + " Store feedback in a JSON file.\n", + " \n", + " Args:\n", + " feedback (Dict): Feedback data\n", + " feedback_file (str): Path to feedback file\n", + " \"\"\"\n", + " with open(feedback_file, \"a\") as f:\n", + " json.dump(feedback, f)\n", + " f.write(\"\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def load_feedback_data(feedback_file=\"feedback_data.json\"):\n", + " \"\"\"\n", + " Load feedback data from file.\n", + " \n", + " Args:\n", + " feedback_file (str): Path to feedback file\n", + " \n", + " Returns:\n", + " List[Dict]: List of feedback entries\n", + " \"\"\"\n", + " feedback_data = []\n", + " try:\n", + " with open(feedback_file, \"r\") as f:\n", + " for line in f:\n", + " if line.strip():\n", + " feedback_data.append(json.loads(line.strip()))\n", + " except FileNotFoundError:\n", + " print(\"No feedback data file found. 
Starting with empty feedback.\")\n", + " \n", + " return feedback_data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing with Feedback Awareness" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for RAG (Retrieval Augmented Generation) with feedback loop.\n", + " This function handles the complete document processing pipeline:\n", + " 1. Text extraction from PDF\n", + " 2. Text chunking with overlap\n", + " 3. Embedding creation for chunks\n", + " 4. Storage in vector database with metadata\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file to process.\n", + " chunk_size (int): Size of each text chunk in characters.\n", + " chunk_overlap (int): Number of overlapping characters between consecutive chunks.\n", + "\n", + " Returns:\n", + " Tuple[List[str], SimpleVectorStore]: A tuple containing:\n", + " - List of document chunks\n", + " - Populated vector store with embeddings and metadata\n", + " \"\"\"\n", + " # Step 1: Extract raw text content from the PDF document\n", + " print(\"Extracting text from PDF...\")\n", + " extracted_text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Step 2: Split text into manageable, overlapping chunks for better context preservation\n", + " print(\"Chunking text...\")\n", + " chunks = chunk_text(extracted_text, chunk_size, chunk_overlap)\n", + " print(f\"Created {len(chunks)} text chunks\")\n", + " \n", + " # Step 3: Generate vector embeddings for each text chunk\n", + " print(\"Creating embeddings for chunks...\")\n", + " chunk_embeddings = create_embeddings(chunks)\n", + " \n", + " # Step 4: Initialize the vector database to store chunks and their embeddings\n", + " store = SimpleVectorStore()\n", + " \n", + " # Step 5: Add each chunk with its embedding to the vector store\n", + " # Include 
metadata for feedback-based improvements\n", + " for i, (chunk, embedding) in enumerate(zip(chunks, chunk_embeddings)):\n", + " store.add_item(\n", + " text=chunk,\n", + " embedding=embedding,\n", + " metadata={\n", + " \"index\": i, # Position in original document\n", + " \"source\": pdf_path, # Source document path\n", + " \"relevance_score\": 1.0, # Initial relevance score (will be updated with feedback)\n", + " \"feedback_count\": 0 # Counter for feedback received on this chunk\n", + " }\n", + " )\n", + " \n", + " print(f\"Added {len(chunks)} chunks to the vector store\")\n", + " return chunks, store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Relevance Adjustment Based on Feedback" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def assess_feedback_relevance(query, doc_text, feedback):\n", + " \"\"\"\n", + " Use LLM to assess if a past feedback entry is relevant to the current query and document.\n", + " \n", + " This function helps determine which past feedback should influence the current retrieval\n", + " by sending the current query, past query+feedback, and document content to an LLM\n", + " for relevance assessment.\n", + " \n", + " Args:\n", + " query (str): Current user query that needs information retrieval\n", + " doc_text (str): Text content of the document being evaluated\n", + " feedback (Dict): Previous feedback data containing 'query' and 'response' keys\n", + " \n", + " Returns:\n", + " bool: True if the feedback is deemed relevant to current query/document, False otherwise\n", + " \"\"\"\n", + " # Define system prompt instructing the LLM to make binary relevance judgments only\n", + " system_prompt = \"\"\"You are an AI system that determines if a past feedback is relevant to a current query and document.\n", + " Answer with ONLY 'yes' or 'no'. 
Your job is strictly to determine relevance, not to provide explanations.\"\"\"\n", + "\n", + " # Construct user prompt with current query, past feedback data, and truncated document content\n", + " user_prompt = f\"\"\"\n", + " Current query: {query}\n", + " Past query that received feedback: {feedback['query']}\n", + " Document content: {doc_text[:500]}... [truncated]\n", + " Past response that received feedback: {feedback['response'][:500]}... [truncated]\n", + "\n", + " Is this past feedback relevant to the current query and document? (yes/no)\n", + " \"\"\"\n", + "\n", + " # Call the LLM API with zero temperature for deterministic output\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0 # Use temperature=0 for consistent, deterministic responses\n", + " )\n", + " \n", + " # Extract and normalize the response to determine relevance\n", + " answer = response.choices[0].message.content.strip().lower()\n", + " return 'yes' in answer # Return True if the answer contains 'yes'" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def adjust_relevance_scores(query, results, feedback_data):\n", + " \"\"\"\n", + " Adjust document relevance scores based on historical feedback to improve retrieval quality.\n", + " \n", + " This function analyzes past user feedback to dynamically adjust the relevance scores of \n", + " retrieved documents. 
It identifies feedback that is relevant to the current query context,\n", + " calculates score modifiers based on relevance ratings, and re-ranks the results accordingly.\n", + " \n", + " Args:\n", + " query (str): Current user query\n", + " results (List[Dict]): Retrieved documents with their original similarity scores\n", + " feedback_data (List[Dict]): Historical feedback containing user ratings\n", + " \n", + " Returns:\n", + " List[Dict]: Results with adjusted relevance scores, sorted by the new scores\n", + " \"\"\"\n", + " # If no feedback data available, return original results unchanged\n", + " if not feedback_data:\n", + " return results\n", + " \n", + " print(\"Adjusting relevance scores based on feedback history...\")\n", + " \n", + " # Process each retrieved document\n", + " for i, result in enumerate(results):\n", + " document_text = result[\"text\"]\n", + " relevant_feedback = []\n", + " \n", + " # Find relevant feedback for this specific document and query combination\n", + " # by querying the LLM to assess relevance of each historical feedback item\n", + " for feedback in feedback_data:\n", + " is_relevant = assess_feedback_relevance(query, document_text, feedback)\n", + " if is_relevant:\n", + " relevant_feedback.append(feedback)\n", + " \n", + " # Apply score adjustments if relevant feedback exists\n", + " if relevant_feedback:\n", + " # Calculate average relevance rating from all applicable feedback entries\n", + " # Feedback relevance is on a 1-5 scale (1=not relevant, 5=highly relevant)\n", + " avg_relevance = sum(f['relevance'] for f in relevant_feedback) / len(relevant_feedback)\n", + " \n", + " # Convert the average relevance to a score modifier in range 0.5-1.5\n", + " # - Scores below 3/5 will reduce the original similarity (modifier < 1.0)\n", + " # - Scores above 3/5 will increase the original similarity (modifier > 1.0)\n", + " modifier = 0.5 + (avg_relevance / 5.0)\n", + " \n", + " # Apply the modifier to the original similarity 
score\n", + " original_score = result[\"similarity\"]\n", + " adjusted_score = original_score * modifier\n", + " \n", + " # Update the result dictionary with new scores and feedback metadata\n", + " result[\"original_similarity\"] = original_score # Preserve the original score\n", + " result[\"similarity\"] = adjusted_score # Update the primary score\n", + " result[\"relevance_score\"] = adjusted_score # Update the relevance score\n", + " result[\"feedback_applied\"] = True # Flag that feedback was applied\n", + " result[\"feedback_count\"] = len(relevant_feedback) # Number of feedback entries used\n", + " \n", + " # Log the adjustment details\n", + " print(f\" Document {i+1}: Adjusted score from {original_score:.4f} to {adjusted_score:.4f} based on {len(relevant_feedback)} feedback(s)\")\n", + " \n", + " # Re-sort results by adjusted scores to ensure higher quality matches appear first\n", + " results.sort(key=lambda x: x[\"similarity\"], reverse=True)\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fine-tuning Our Index with Feedback" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def fine_tune_index(current_store, chunks, feedback_data):\n", + " \"\"\"\n", + " Enhance vector store with high-quality feedback to improve retrieval quality over time.\n", + " \n", + " This function implements a continuous learning process by:\n", + " 1. Identifying high-quality feedback (highly rated Q&A pairs)\n", + " 2. Creating new retrieval items from successful interactions\n", + " 3. 
Adding these to the vector store with boosted relevance weights\n", + " \n", + " Args:\n", + " current_store (SimpleVectorStore): Current vector store containing original document chunks\n", + " chunks (List[str]): Original document text chunks \n", + " feedback_data (List[Dict]): Historical user feedback with relevance and quality ratings\n", + " \n", + " Returns:\n", + " SimpleVectorStore: Enhanced vector store containing both original chunks and feedback-derived content\n", + " \"\"\"\n", + " print(\"Fine-tuning index with high-quality feedback...\")\n", + " \n", + " # Filter for only high-quality responses (both relevance and quality rated 4 or 5)\n", + " # This ensures we only learn from the most successful interactions\n", + " good_feedback = [f for f in feedback_data if f['relevance'] >= 4 and f['quality'] >= 4]\n", + " \n", + " if not good_feedback:\n", + " print(\"No high-quality feedback found for fine-tuning.\")\n", + " return current_store # Return original store unchanged if no good feedback exists\n", + " \n", + " # Initialize new store that will contain both original and enhanced content\n", + " new_store = SimpleVectorStore()\n", + " \n", + " # First transfer all original document chunks with their existing metadata\n", + " for i in range(len(current_store.texts)):\n", + " new_store.add_item(\n", + " text=current_store.texts[i],\n", + " embedding=current_store.vectors[i],\n", + " metadata=current_store.metadata[i].copy() # Use copy to prevent reference issues\n", + " )\n", + " \n", + " # Create and add enhanced content from good feedback\n", + " for feedback in good_feedback:\n", + " # Format a new document that combines the question and its high-quality answer\n", + " # This creates retrievable content that directly addresses user queries\n", + " enhanced_text = f\"Question: {feedback['query']}\\nAnswer: {feedback['response']}\"\n", + " \n", + " # Generate embedding vector for this new synthetic document\n", + " embedding = 
create_embeddings(enhanced_text)\n", + " \n", + " # Add to vector store with special metadata that identifies its origin and importance\n", + " new_store.add_item(\n", + " text=enhanced_text,\n", + " embedding=embedding,\n", + " metadata={\n", + " \"type\": \"feedback_enhanced\", # Mark as derived from feedback\n", + " \"query\": feedback[\"query\"], # Store original query for reference\n", + " \"relevance_score\": 1.2, # Boost initial relevance to prioritize these items\n", + " \"feedback_count\": 1, # Track feedback incorporation\n", + " \"original_feedback\": feedback # Preserve complete feedback record\n", + " }\n", + " )\n", + " \n", + " print(f\"Added enhanced content from feedback: {feedback['query'][:50]}...\")\n", + " \n", + " # Log summary statistics about the enhancement\n", + " print(f\"Fine-tuned index now has {len(new_store.texts)} items (original: {len(chunks)})\")\n", + " return new_store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete RAG Pipeline with Feedback Loop" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, context, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Generate a response based on the query and context.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " context (str): Context text from retrieved documents\n", + " model (str): LLM model to use\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Define the system prompt to guide the AI's behavior\n", + " system_prompt = \"\"\"You are a helpful AI assistant. Answer the user's question based only on the provided context. 
If you cannot find the answer in the context, state that you don't have enough information.\"\"\"\n", + " \n", + " # Create the user prompt by combining the context and the query\n", + " user_prompt = f\"\"\"\n", + " Context:\n", + " {context}\n", + "\n", + " Question: {query}\n", + "\n", + " Please provide a comprehensive answer based only on the context above.\n", + " \"\"\"\n", + " \n", + " # Call the OpenAI API to generate a response based on the system and user prompts\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0 # Use temperature=0 for consistent, deterministic responses\n", + " )\n", + " \n", + " # Return the generated response content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def rag_with_feedback_loop(query, vector_store, feedback_data, k=5, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Complete RAG pipeline incorporating feedback loop.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " feedback_data (List[Dict]): History of feedback\n", + " k (int): Number of documents to retrieve\n", + " model (str): LLM model for response generation\n", + " \n", + " Returns:\n", + " Dict: Results including query, retrieved documents, and response\n", + " \"\"\"\n", + " print(f\"\\n=== Processing query with feedback-enhanced RAG ===\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Step 1: Create query embedding\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Step 2: Perform initial retrieval based on query embedding\n", + " results = vector_store.similarity_search(query_embedding, k=k)\n", + " \n", + " # Step 3: Adjust 
relevance scores of retrieved documents based on feedback\n", + " adjusted_results = adjust_relevance_scores(query, results, feedback_data)\n", + " \n", + " # Step 4: Extract texts from adjusted results for context building\n", + " retrieved_texts = [result[\"text\"] for result in adjusted_results]\n", + " \n", + " # Step 5: Build context for response generation by concatenating retrieved texts\n", + " context = \"\\n\\n---\\n\\n\".join(retrieved_texts)\n", + " \n", + " # Step 6: Generate response using the context and query\n", + " print(\"Generating response...\")\n", + " response = generate_response(query, context, model)\n", + " \n", + " # Step 7: Compile the final result\n", + " result = {\n", + " \"query\": query,\n", + " \"retrieved_documents\": adjusted_results,\n", + " \"response\": response\n", + " }\n", + " \n", + " print(\"\\n=== Response ===\")\n", + " print(response)\n", + " \n", + " return result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Workflow: From Initial Setup to Feedback Collection" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def full_rag_workflow(pdf_path, query, feedback_data=None, feedback_file=\"feedback_data.json\", fine_tune=False):\n", + " \"\"\"\n", + " Execute a complete RAG workflow with feedback integration for continuous improvement.\n", + " \n", + " This function orchestrates the entire Retrieval-Augmented Generation process:\n", + " 1. Load historical feedback data\n", + " 2. Process and chunk the document\n", + " 3. Optionally fine-tune the vector index with prior feedback\n", + " 4. Perform retrieval and generation with feedback-adjusted relevance scores\n", + " 5. Collect new user feedback for future improvement\n", + " 6. 
Store feedback to enable system learning over time\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document to be processed\n", + " query (str): User's natural language query\n", + " feedback_data (List[Dict], optional): Pre-loaded feedback data, loads from file if None\n", + " feedback_file (str): Path to the JSON file storing feedback history\n", + " fine_tune (bool): Whether to enhance the index with successful past Q&A pairs\n", + " \n", + " Returns:\n", + " Dict: Results containing the response and retrieval metadata\n", + " \"\"\"\n", + " # Step 1: Load historical feedback for relevance adjustment if not explicitly provided\n", + " if feedback_data is None:\n", + " feedback_data = load_feedback_data(feedback_file)\n", + " print(f\"Loaded {len(feedback_data)} feedback entries from {feedback_file}\")\n", + " \n", + " # Step 2: Process document through extraction, chunking and embedding pipeline\n", + " chunks, vector_store = process_document(pdf_path)\n", + " \n", + " # Step 3: Fine-tune the vector index by incorporating high-quality past interactions\n", + " # This creates enhanced retrievable content from successful Q&A pairs\n", + " if fine_tune and feedback_data:\n", + " vector_store = fine_tune_index(vector_store, chunks, feedback_data)\n", + " \n", + " # Step 4: Execute core RAG with feedback-aware retrieval\n", + " # Note: This depends on the rag_with_feedback_loop function which should be defined elsewhere\n", + " result = rag_with_feedback_loop(query, vector_store, feedback_data)\n", + " \n", + " # Step 5: Collect user feedback to improve future performance\n", + " print(\"\\n=== Would you like to provide feedback on this response? ===\")\n", + " print(\"Rate relevance (1-5, with 5 being most relevant):\")\n", + " relevance = input()\n", + " \n", + " print(\"Rate quality (1-5, with 5 being highest quality):\")\n", + " quality = input()\n", + " \n", + " print(\"Any comments? 
(optional, press Enter to skip)\")\n", + " comments = input()\n", + " \n", + " # Step 6: Format feedback into structured data\n", + " feedback = get_user_feedback(\n", + " query=query,\n", + " response=result[\"response\"],\n", + " relevance=int(relevance),\n", + " quality=int(quality),\n", + " comments=comments\n", + " )\n", + " \n", + " # Step 7: Persist feedback to enable continuous system learning\n", + " store_feedback(feedback, feedback_file)\n", + " print(\"Feedback recorded. Thank you!\")\n", + " \n", + " return result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating Our Feedback Loop" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_feedback_loop(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Evaluate the impact of feedback loop on RAG quality by comparing performance before and after feedback integration.\n", + " \n", + " This function runs a controlled experiment to measure how incorporating feedback affects retrieval and generation:\n", + " 1. First round: Run all test queries with no feedback\n", + " 2. Generate synthetic feedback based on reference answers (if provided)\n", + " 3. Second round: Run the same queries with feedback-enhanced retrieval\n", + " 4. 
Compare results between rounds to quantify feedback impact\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document used as the knowledge base\n", + " test_queries (List[str]): List of test queries to evaluate system performance\n", + " reference_answers (List[str], optional): Reference/gold standard answers for evaluation\n", + " and synthetic feedback generation\n", + " \n", + " Returns:\n", + " Dict: Evaluation results containing:\n", + " - round1_results: Results without feedback\n", + " - round2_results: Results with feedback\n", + " - comparison: Quantitative comparison metrics between rounds\n", + " \"\"\"\n", + " print(\"=== Evaluating Feedback Loop Impact ===\")\n", + " \n", + " # Create a temporary feedback file for this evaluation session only\n", + " temp_feedback_file = \"temp_evaluation_feedback.json\"\n", + " \n", + " # Initialize feedback collection (empty at the start)\n", + " feedback_data = []\n", + " \n", + " # ----------------------- FIRST EVALUATION ROUND -----------------------\n", + " # Run all queries without any feedback influence to establish baseline performance\n", + " print(\"\\n=== ROUND 1: NO FEEDBACK ===\")\n", + " round1_results = []\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\nQuery {i+1}: {query}\")\n", + " \n", + " # Process document to create initial vector store\n", + " chunks, vector_store = process_document(pdf_path)\n", + " \n", + " # Execute RAG without feedback influence (empty feedback list)\n", + " result = rag_with_feedback_loop(query, vector_store, [])\n", + " round1_results.append(result)\n", + " \n", + " # Generate synthetic feedback if reference answers are available\n", + " # This simulates user feedback for training the system\n", + " if reference_answers and i < len(reference_answers):\n", + " # Calculate synthetic feedback scores based on similarity to reference answer\n", + " similarity_to_ref = calculate_similarity(result[\"response\"], reference_answers[i])\n", 
+ " # Convert similarity (0-1) to rating scale (1-5)\n", + " relevance = max(1, min(5, int(similarity_to_ref * 5)))\n", + " quality = max(1, min(5, int(similarity_to_ref * 5)))\n", + " \n", + " # Create structured feedback entry\n", + " feedback = get_user_feedback(\n", + " query=query,\n", + " response=result[\"response\"],\n", + " relevance=relevance,\n", + " quality=quality,\n", + " comments=f\"Synthetic feedback based on reference similarity: {similarity_to_ref:.2f}\"\n", + " )\n", + " \n", + " # Add to in-memory collection and persist to temporary file\n", + " feedback_data.append(feedback)\n", + " store_feedback(feedback, temp_feedback_file)\n", + " \n", + " # ----------------------- SECOND EVALUATION ROUND -----------------------\n", + " # Run the same queries with feedback incorporation to measure improvement\n", + " print(\"\\n=== ROUND 2: WITH FEEDBACK ===\")\n", + " round2_results = []\n", + " \n", + " # Process document and enhance with feedback-derived content\n", + " chunks, vector_store = process_document(pdf_path)\n", + " vector_store = fine_tune_index(vector_store, chunks, feedback_data)\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\nQuery {i+1}: {query}\")\n", + " \n", + " # Execute RAG with feedback influence\n", + " result = rag_with_feedback_loop(query, vector_store, feedback_data)\n", + " round2_results.append(result)\n", + " \n", + " # ----------------------- RESULTS ANALYSIS -----------------------\n", + " # Compare performance metrics between the two rounds\n", + " comparison = compare_results(test_queries, round1_results, round2_results, reference_answers)\n", + " \n", + " # Clean up temporary evaluation artifacts\n", + " if os.path.exists(temp_feedback_file):\n", + " os.remove(temp_feedback_file)\n", + " \n", + " return {\n", + " \"round1_results\": round1_results,\n", + " \"round2_results\": round2_results,\n", + " \"comparison\": comparison\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": 
{}, + "source": [ + "## Helper Functions for Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "def calculate_similarity(text1, text2):\n", + " \"\"\"\n", + " Calculate semantic similarity between two texts using embeddings.\n", + " \n", + " Args:\n", + " text1 (str): First text\n", + " text2 (str): Second text\n", + " \n", + " Returns:\n", + " float: Similarity score between 0 and 1\n", + " \"\"\"\n", + " # Generate embeddings for both texts\n", + " embedding1 = create_embeddings(text1)\n", + " embedding2 = create_embeddings(text2)\n", + " \n", + " # Convert embeddings to numpy arrays\n", + " vec1 = np.array(embedding1)\n", + " vec2 = np.array(embedding2)\n", + " \n", + " # Calculate cosine similarity between the two vectors\n", + " similarity = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n", + " \n", + " return similarity" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_results(queries, round1_results, round2_results, reference_answers=None):\n", + " \"\"\"\n", + " Compare results from two rounds of RAG.\n", + " \n", + " Args:\n", + " queries (List[str]): Test queries\n", + " round1_results (List[Dict]): Results from round 1\n", + " round2_results (List[Dict]): Results from round 2\n", + " reference_answers (List[str], optional): Reference answers\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " print(\"\\n=== COMPARING RESULTS ===\")\n", + " \n", + " # System prompt to guide the AI's evaluation behavior\n", + " system_prompt = \"\"\"You are an expert evaluator of RAG systems. Compare responses from two versions:\n", + " 1. Standard RAG: No feedback used\n", + " 2. 
Feedback-enhanced RAG: Uses a feedback loop to improve retrieval\n", + "\n", + " Analyze which version provides better responses in terms of:\n", + " - Relevance to the query\n", + " - Accuracy of information\n", + " - Completeness\n", + " - Clarity and conciseness\n", + " \"\"\"\n", + "\n", + " comparisons = []\n", + " \n", + " # Iterate over each query and its corresponding results from both rounds\n", + " for i, (query, r1, r2) in enumerate(zip(queries, round1_results, round2_results)):\n", + " # Create a prompt for comparing the responses\n", + " comparison_prompt = f\"\"\"\n", + " Query: {query}\n", + "\n", + " Standard RAG Response:\n", + " {r1[\"response\"]}\n", + "\n", + " Feedback-enhanced RAG Response:\n", + " {r2[\"response\"]}\n", + " \"\"\"\n", + "\n", + " # Include reference answer if available\n", + " if reference_answers and i < len(reference_answers):\n", + " comparison_prompt += f\"\"\"\n", + " Reference Answer:\n", + " {reference_answers[i]}\n", + " \"\"\"\n", + "\n", + " comparison_prompt += \"\"\"\n", + " Compare these responses and explain which one is better and why.\n", + " Focus specifically on how the feedback loop has (or hasn't) improved the response quality.\n", + " \"\"\"\n", + "\n", + " # Call the OpenAI API to generate a comparison analysis\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": comparison_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Append the comparison analysis to the results\n", + " comparisons.append({\n", + " \"query\": query,\n", + " \"analysis\": response.choices[0].message.content\n", + " })\n", + " \n", + " # Print a snippet of the analysis for each query\n", + " print(f\"\\nQuery {i+1}: {query}\")\n", + " print(f\"Analysis: {response.choices[0].message.content[:200]}...\")\n", + " \n", + " return comparisons" + ] + 
}, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of the feedback loop (Custom Validation Queries)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Evaluating Feedback Loop Impact ===\n", + "\n", + "=== ROUND 1: NO FEEDBACK ===\n", + "\n", + "Query 1: What is a neural network and how does it function?\n", + "Extracting text from PDF...\n", + "Chunking text...\n", + "Created 42 text chunks\n", + "Creating embeddings for chunks...\n", + "Added 42 chunks to the vector store\n", + "\n", + "=== Processing query with feedback-enhanced RAG ===\n", + "Query: What is a neural network and how does it function?\n", + "Generating response...\n", + "\n", + "=== Response ===\n", + "Based on the provided context, a neural network is a type of deep neural network that is particularly effective for processing data. The context does not provide a detailed explanation of how a neural network functions, but it does mention that neural networks are inspired by the structure and function of the human brain.\n", + "\n", + "However, it can be inferred that a neural network is a complex system that uses multiple layers to analyze data. The context mentions that deep learning is a subfield of machine learning that uses artificial neural networks with multiple layers (deep neural networks) to analyze data.\n", + "\n", + "In the context of the provided text, neural networks are used in various applications such as image recognition, natural language processing, and speech recognition. 
They are also used in tasks like object detection, facial recognition, and medical image analysis.\n", + "\n", + "While the context does not provide a comprehensive explanation of how a neural network functions, it can be inferred that neural networks are designed to learn patterns and structures in the data through a process of trial and error, similar to reinforcement learning. However, the exact mechanism of how neural networks function is not explicitly stated in the provided context.\n", + "\n", + "=== ROUND 2: WITH FEEDBACK ===\n", + "Extracting text from PDF...\n", + "Chunking text...\n", + "Created 42 text chunks\n", + "Creating embeddings for chunks...\n", + "Added 42 chunks to the vector store\n", + "Fine-tuning index with high-quality feedback...\n", + "Added enhanced content from feedback: What is a neural network and how does it function?...\n", + "Fine-tuned index now has 43 items (original: 42)\n", + "\n", + "Query 1: What is a neural network and how does it function?\n", + "\n", + "=== Processing query with feedback-enhanced RAG ===\n", + "Query: What is a neural network and how does it function?\n", + "Adjusting relevance scores based on feedback history...\n", + " Document 1: Adjusted score from 0.8386 to 1.0902 based on 1 feedback(s)\n", + " Document 4: Adjusted score from 0.6162 to 0.8010 based on 1 feedback(s)\n", + " Document 5: Adjusted score from 0.6023 to 0.7830 based on 1 feedback(s)\n", + "Generating response...\n", + "\n", + "=== Response ===\n", + "Based on the provided context, a neural network is a complex system that uses multiple layers to analyze data. It is inspired by the structure and function of the human brain and is particularly effective for processing data. 
Neural networks are used in various applications such as image recognition, natural language processing, and speech recognition.\n", + "\n", + "The context does not provide a detailed explanation of how a neural network functions, but it can be inferred that neural networks are designed to learn patterns and structures in the data through a process of trial and error. This process is similar to reinforcement learning, where the neural network receives feedback in the form of rewards or penalties, allowing it to adjust its parameters and improve its performance over time.\n", + "\n", + "Neural networks are composed of multiple layers, including convolutional layers, recurrent layers, and others. These layers work together to analyze the input data and make predictions or take actions. The exact mechanism of how neural networks function is not explicitly stated in the provided context, but it is clear that they are a powerful tool for analyzing and processing complex data.\n", + "\n", + "In the context of the provided text, neural networks are used in various applications such as:\n", + "\n", + "* Image recognition\n", + "* Natural language processing\n", + "* Speech recognition\n", + "* Object detection\n", + "* Facial recognition\n", + "* Medical image analysis\n", + "\n", + "Overall, neural networks are a complex and powerful tool for analyzing and processing data, and their applications continue to expand across various industries and domains.\n", + "\n", + "=== COMPARING RESULTS ===\n", + "\n", + "Query 1: What is a neural network and how does it function?\n", + "Analysis: Comparing the two responses, the feedback-enhanced RAG response is significantly better than the standard RAG response. Here's a breakdown of the improvements:\n", + "\n", + "1. 
**Relevance to the query**: Both res...\n" + ] + } + ], + "source": [ + "# AI Document Path\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define test queries\n", + "test_queries = [\n", + " \"What is a neural network and how does it function?\",\n", + "\n", + " #################################################################################\n", + " ### Commented out queries to reduce the number of queries for testing purposes ###\n", + " \n", + " # \"Describe the process and applications of reinforcement learning.\",\n", + " # \"What are the main applications of natural language processing in today's technology?\",\n", + " # \"Explain the impact of overfitting in machine learning models and how it can be mitigated.\"\n", + "]\n", + "\n", + "# Define reference answers for evaluation\n", + "reference_answers = [\n", + " \"A neural network is a series of algorithms that attempt to recognize underlying relationships in a set of data through a process that mimics the way the human brain operates. It consists of layers of nodes, with each node representing a neuron. Neural networks function by adjusting the weights of connections between nodes based on the error of the output compared to the expected result.\",\n", + "\n", + " ############################################################################################\n", + " #### Commented out reference answers to reduce the number of queries for testing purposes ###\n", + "\n", + "# \"Reinforcement learning is a type of machine learning where an agent learns to make decisions by performing actions in an environment to maximize cumulative reward. It involves exploration, exploitation, and learning from the consequences of actions. 
Applications include robotics, game playing, and autonomous vehicles.\",\n", + "# \"The main applications of natural language processing in today's technology include machine translation, sentiment analysis, chatbots, information retrieval, text summarization, and speech recognition. NLP enables machines to understand and generate human language, facilitating human-computer interaction.\",\n", + "# \"Overfitting in machine learning models occurs when a model learns the training data too well, capturing noise and outliers. This results in poor generalization to new data, as the model performs well on training data but poorly on unseen data. Mitigation techniques include cross-validation, regularization, pruning, and using more training data.\"\n", + "]\n", + "\n", + "# Run the evaluation\n", + "evaluation_results = evaluate_feedback_loop(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "########################################\n", + "# # Run a full RAG workflow\n", + "########################################\n", + "\n", + "# # Run an interactive example\n", + "# print(\"\\n\\n=== INTERACTIVE EXAMPLE ===\")\n", + "# print(\"Enter your query about AI:\")\n", + "# user_query = input()\n", + "\n", + "# # Load accumulated feedback\n", + "# all_feedback = load_feedback_data()\n", + "\n", + "# # Run full workflow\n", + "# result = full_rag_workflow(\n", + "# pdf_path=pdf_path,\n", + "# query=user_query,\n", + "# feedback_data=all_feedback,\n", + "# fine_tune=True\n", + "# )\n", + "\n", + "########################################\n", + "# # Run a full RAG workflow\n", + "########################################" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualizing Feedback Impact" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "=== FEEDBACK IMPACT ANALYSIS ===\n", + "\n", + "Query 1: What is a neural network and how does it function?\n", + "\n", + "Analysis of feedback impact:\n", + "Comparing the two responses, the feedback-enhanced RAG response is significantly better than the standard RAG response. Here's a breakdown of the improvements:\n", + "\n", + "1. **Relevance to the query**: Both responses address the query, but the feedback-enhanced RAG response provides a more comprehensive and detailed explanation of what a neural network is and how it functions. It also provides more specific examples of applications, which shows a better understanding of the topic.\n", + "\n", + "2. **Accuracy of information**: The feedback-enhanced RAG response is more accurate, as it provides a clear and concise explanation of how neural networks function, including the concept of trial and error, reinforcement learning, and the role of feedback. The standard RAG response is less accurate, as it only mentions that neural networks are inspired by the human brain and that they are used in various applications, but does not provide a clear explanation of how they function.\n", + "\n", + "3. **Completeness**: The feedback-enhanced RAG response is more complete, as it provides a detailed explanation of the components of a neural network, including convolutional layers, recurrent layers, and others. The standard RAG response only mentions that neural networks are used in various applications, but does not provide a clear explanation of how they function.\n", + "\n", + "4. **Clarity and conciseness**: The feedback-enhanced RAG response is more concise and clear, as it provides a clear and concise explanation of how neural networks function and their applications. 
The standard RAG response is less concise and less clear, as it only mentions that neural networks are inspired by the human brain and that they are used in various applications, but does not provide a clear explanation of how they function.\n", + "\n", + "The feedback loop has significantly improved the response quality by:\n", + "\n", + "* Providing a clear and concise explanation of how neural networks function\n", + "* Providing more specific examples of applications\n", + "* Addressing the limitations of the standard RAG response (e.g., lack of clarity and concision)\n", + "* Providing a more accurate explanation of the components of a neural network\n", + "\n", + "The feedback loop has helped to identify the areas where the standard RAG response was lacking and has provided a more comprehensive and detailed explanation of the topic. This is a clear example of how a feedback loop can improve the quality of a response.\n", + "\n", + "--------------------------------------------------\n", + "\n", + "\n", + "Response length comparison (proxy for completeness):\n", + "Round 1: 1256.0 chars\n" + ] + } + ], + "source": [ + "# Extract the comparison data which contains the analysis of feedback impact\n", + "comparisons = evaluation_results['comparison']\n", + "\n", + "# Print out the analysis results to visualize feedback impact\n", + "print(\"\\n=== FEEDBACK IMPACT ANALYSIS ===\\n\")\n", + "for i, comparison in enumerate(comparisons):\n", + "    print(f\"Query {i+1}: {comparison['query']}\")\n", + "    print(f\"\\nAnalysis of feedback impact:\")\n", + "    print(comparison['analysis'])\n", + "    print(\"\\n\" + \"-\"*50 + \"\\n\")\n", + "\n", + "# Additionally, we can compare some metrics between rounds\n", + "# NOTE: range(1, len(evaluation_results)) covers round 1..2 since the dict holds\n", + "# round1_results, round2_results and comparison; the previous `- 1` dropped round 2.\n", + "round_responses = [evaluation_results[f'round{round_num}_results'] for round_num in range(1, len(evaluation_results))]\n", + "response_lengths = [[len(r[\"response\"]) for r in round] for round in round_responses]\n", + "\n", + "print(\"\\nResponse length comparison 
(proxy for completeness):\")\n", + "avg_lengths = [sum(lengths) / len(lengths) for lengths in response_lengths]\n", + "for round_num, avg_len in enumerate(avg_lengths, start=1):\n", + " print(f\"Round {round_num}: {avg_len:.1f} chars\")\n", + "\n", + "if len(avg_lengths) > 1:\n", + " changes = [(avg_lengths[i] - avg_lengths[i-1]) / avg_lengths[i-1] * 100 for i in range(1, len(avg_lengths))]\n", + " for round_num, change in enumerate(changes, start=2):\n", + " print(f\"Change from Round {round_num-1} to Round {round_num}: {change:.1f}%\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/12_adaptive_rag.ipynb b/12_adaptive_rag.ipynb new file mode 100644 index 0000000..460006a --- /dev/null +++ b/12_adaptive_rag.ipynb @@ -0,0 +1,1483 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Adaptive Retrieval for Enhanced RAG Systems\n", + "\n", + "In this notebook, I implement an Adaptive Retrieval system that dynamically selects the most appropriate retrieval strategy based on the type of query. This approach significantly enhances our RAG system's ability to provide accurate and relevant responses across a diverse range of questions.\n", + "\n", + "Different questions demand different retrieval strategies. Our system:\n", + "\n", + "1. Classifies the query type (Factual, Analytical, Opinion, or Contextual)\n", + "2. Selects the appropriate retrieval strategy\n", + "3. Executes specialized retrieval techniques\n", + "4. 
Generates a tailored response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extracting Text from a PDF File\n", + "To implement RAG, we first need a source of textual data. In this case, we extract text from a PDF file using the PyMuPDF library." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + "    \"\"\"\n", + "    Extracts text from a PDF file and returns it as a single string.\n", + "\n", + "    Args:\n", + "    pdf_path (str): Path to the PDF file.\n", + "\n", + "    Returns:\n", + "    str: Extracted text from the PDF.\n", + "    \"\"\"\n", + "    # Open the PDF file\n", + "    mypdf = fitz.open(pdf_path)\n", + "    all_text = \"\"  # Initialize an empty string to store the extracted text\n", + "\n", + "    # Iterate through each page in the PDF\n", + "    for page_num in range(mypdf.page_count):\n", + "        page = mypdf[page_num]  # Get the page\n", + "        text = page.get_text(\"text\")  # Extract text from the page\n", + "        all_text += text  # Append the extracted text to the all_text string\n", + "\n", + "    return all_text  # Return the extracted text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chunking the Extracted Text\n", + "Once we have the extracted text, we divide it into smaller, overlapping chunks to improve retrieval accuracy."
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, n, overlap):\n", + " \"\"\"\n", + " Chunks the given text into segments of n characters with overlap.\n", + "\n", + " Args:\n", + " text (str): The text to be chunked.\n", + " n (int): The number of characters in each chunk.\n", + " overlap (int): The number of overlapping characters between chunks.\n", + "\n", + " Returns:\n", + " List[str]: A list of text chunks.\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Loop through the text with a step size of (n - overlap)\n", + " for i in range(0, len(text), n - overlap):\n", + " # Append a chunk of text from index i to i + n to the chunks list\n", + " chunks.append(text[i:i + n])\n", + "\n", + " return chunks # Return the list of text chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation\n", + "We'll create a basic vector store to manage document chunks and their embeddings." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " \"\"\"\n", + " Initialize the vector store.\n", + " \"\"\"\n", + " self.vectors = [] # List to store embedding vectors\n", + " self.texts = [] # List to store original texts\n", + " self.metadata = [] # List to store metadata for each text\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + "\n", + " Args:\n", + " text (str): The original text.\n", + " embedding (List[float]): The embedding vector.\n", + " metadata (dict, optional): Additional metadata.\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Convert embedding to numpy array and add to vectors list\n", + " self.texts.append(text) # Add the original text to texts list\n", + " self.metadata.append(metadata or {}) # Add metadata to metadata list, default to empty dict if None\n", + " \n", + " def similarity_search(self, query_embedding, k=5, filter_func=None):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + "\n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector.\n", + " k (int): Number of results to return.\n", + " filter_func (callable, optional): Function to filter results.\n", + "\n", + " Returns:\n", + " List[Dict]: Top k most similar items with their texts and metadata.\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return empty list if no vectors are stored\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " # Apply filter if provided\n", + " if filter_func and not 
filter_func(self.metadata[i]):\n", + " continue\n", + " \n", + " # Calculate cosine similarity\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity)) # Append index and similarity score\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx], # Add the text\n", + " \"metadata\": self.metadata[idx], # Add the metadata\n", + " \"similarity\": score # Add the similarity score\n", + " })\n", + " \n", + " return results # Return the list of top k results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(text, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Creates embeddings for the given text.\n", + "\n", + " Args:\n", + " text (str or List[str]): The input text(s) for which embeddings are to be created.\n", + " model (str): The model to be used for creating embeddings.\n", + "\n", + " Returns:\n", + " List[float] or List[List[float]]: The embedding vector(s).\n", + " \"\"\"\n", + " # Handle both string and list inputs by converting string input to a list\n", + " input_text = text if isinstance(text, list) else [text]\n", + " \n", + " # Create embeddings for the input text using the specified model\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=input_text\n", + " )\n", + " \n", + " # If the input was a single string, return just the first embedding\n", + " if isinstance(text, str):\n", + " return response.data[0].embedding\n", + " \n", + " # Otherwise, return all embeddings for the list of texts\n", 
+ " return [item.embedding for item in response.data]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for use with adaptive retrieval.\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file.\n", + " chunk_size (int): Size of each chunk in characters.\n", + " chunk_overlap (int): Overlap between chunks in characters.\n", + "\n", + " Returns:\n", + " Tuple[List[str], SimpleVectorStore]: Document chunks and vector store.\n", + " \"\"\"\n", + " # Extract text from the PDF file\n", + " print(\"Extracting text from PDF...\")\n", + " extracted_text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Chunk the extracted text\n", + " print(\"Chunking text...\")\n", + " chunks = chunk_text(extracted_text, chunk_size, chunk_overlap)\n", + " print(f\"Created {len(chunks)} text chunks\")\n", + " \n", + " # Create embeddings for the text chunks\n", + " print(\"Creating embeddings for chunks...\")\n", + " chunk_embeddings = create_embeddings(chunks)\n", + " \n", + " # Initialize the vector store\n", + " store = SimpleVectorStore()\n", + " \n", + " # Add each chunk and its embedding to the vector store with metadata\n", + " for i, (chunk, embedding) in enumerate(zip(chunks, chunk_embeddings)):\n", + " store.add_item(\n", + " text=chunk,\n", + " embedding=embedding,\n", + " metadata={\"index\": i, \"source\": pdf_path}\n", + " )\n", + " \n", + " print(f\"Added {len(chunks)} chunks to the vector store\")\n", + " \n", + " # Return the chunks and the vector store\n", + " return chunks, store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Query Classification" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + 
"source": [ + "def classify_query(query, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Classify a query into one of four categories: Factual, Analytical, Opinion, or Contextual.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " model (str): LLM model to use\n", + " \n", + " Returns:\n", + " str: Query category\n", + " \"\"\"\n", + " # Define the system prompt to guide the AI's classification\n", + " system_prompt = \"\"\"You are an expert at classifying questions. \n", + " Classify the given query into exactly one of these categories:\n", + " - Factual: Queries seeking specific, verifiable information.\n", + " - Analytical: Queries requiring comprehensive analysis or explanation.\n", + " - Opinion: Queries about subjective matters or seeking diverse viewpoints.\n", + " - Contextual: Queries that depend on user-specific context.\n", + "\n", + " Return ONLY the category name, without any explanation or additional text.\n", + " \"\"\"\n", + "\n", + " # Create the user prompt with the query to be classified\n", + " user_prompt = f\"Classify this query: {query}\"\n", + " \n", + " # Generate the classification response from the AI model\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract and strip the category from the response\n", + " category = response.choices[0].message.content.strip()\n", + " \n", + " # Define the list of valid categories\n", + " valid_categories = [\"Factual\", \"Analytical\", \"Opinion\", \"Contextual\"]\n", + " \n", + " # Ensure the returned category is valid\n", + " for valid in valid_categories:\n", + " if valid in category:\n", + " return valid\n", + " \n", + " # Default to \"Factual\" if classification fails\n", + " return \"Factual\"" + ] + }, + { + "cell_type": "markdown", + "metadata": 
{}, + "source": [ + "## Implementing Specialized Retrieval Strategies\n", + "### 1. Factual Strategy - Focus on Precision" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def factual_retrieval_strategy(query, vector_store, k=4):\n", + " \"\"\"\n", + " Retrieval strategy for factual queries focusing on precision.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to return\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved documents\n", + " \"\"\"\n", + " print(f\"Executing Factual retrieval strategy for: '{query}'\")\n", + " \n", + " # Use LLM to enhance the query for better precision\n", + " system_prompt = \"\"\"You are an expert at enhancing search queries.\n", + " Your task is to reformulate the given factual query to make it more precise and \n", + " specific for information retrieval. Focus on key entities and their relationships.\n", + "\n", + " Provide ONLY the enhanced query without any explanation.\n", + " \"\"\"\n", + "\n", + " user_prompt = f\"Enhance this factual query: {query}\"\n", + " \n", + " # Generate the enhanced query using the LLM\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract and print the enhanced query\n", + " enhanced_query = response.choices[0].message.content.strip()\n", + " print(f\"Enhanced query: {enhanced_query}\")\n", + " \n", + " # Create embeddings for the enhanced query\n", + " query_embedding = create_embeddings(enhanced_query)\n", + " \n", + " # Perform initial similarity search to retrieve documents\n", + " initial_results = vector_store.similarity_search(query_embedding, k=k*2)\n", + " \n", + " # Initialize a 
list to store ranked results\n", + " ranked_results = []\n", + " \n", + " # Score and rank documents by relevance using LLM\n", + " for doc in initial_results:\n", + " relevance_score = score_document_relevance(enhanced_query, doc[\"text\"])\n", + " ranked_results.append({\n", + " \"text\": doc[\"text\"],\n", + " \"metadata\": doc[\"metadata\"],\n", + " \"similarity\": doc[\"similarity\"],\n", + " \"relevance_score\": relevance_score\n", + " })\n", + " \n", + " # Sort the results by relevance score in descending order\n", + " ranked_results.sort(key=lambda x: x[\"relevance_score\"], reverse=True)\n", + " \n", + " # Return the top k results\n", + " return ranked_results[:k]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Analytical Strategy - Comprehensive Coverage" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def analytical_retrieval_strategy(query, vector_store, k=4):\n", + " \"\"\"\n", + " Retrieval strategy for analytical queries focusing on comprehensive coverage.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to return\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved documents\n", + " \"\"\"\n", + " print(f\"Executing Analytical retrieval strategy for: '{query}'\")\n", + " \n", + " # Define the system prompt to guide the AI in generating sub-questions\n", + " system_prompt = \"\"\"You are an expert at breaking down complex questions.\n", + " Generate sub-questions that explore different aspects of the main analytical query.\n", + " These sub-questions should cover the breadth of the topic and help retrieve \n", + " comprehensive information.\n", + "\n", + " Return a list of exactly 3 sub-questions, one per line.\n", + " \"\"\"\n", + "\n", + " # Create the user prompt with the main query\n", + " user_prompt = f\"Generate sub-questions for this analytical 
query: {query}\"\n", + " \n", + " # Generate the sub-questions using the LLM\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.3\n", + " )\n", + " \n", + " # Extract and clean the sub-questions\n", + " sub_queries = response.choices[0].message.content.strip().split('\\n')\n", + " sub_queries = [q.strip() for q in sub_queries if q.strip()]\n", + " print(f\"Generated sub-queries: {sub_queries}\")\n", + " \n", + " # Retrieve documents for each sub-query\n", + " all_results = []\n", + " for sub_query in sub_queries:\n", + " # Create embeddings for the sub-query\n", + " sub_query_embedding = create_embeddings(sub_query)\n", + " # Perform similarity search for the sub-query\n", + " results = vector_store.similarity_search(sub_query_embedding, k=2)\n", + " all_results.extend(results)\n", + " \n", + " # Ensure diversity by selecting from different sub-query results\n", + " # Remove duplicates (same text content)\n", + " unique_texts = set()\n", + " diverse_results = []\n", + " \n", + " for result in all_results:\n", + " if result[\"text\"] not in unique_texts:\n", + " unique_texts.add(result[\"text\"])\n", + " diverse_results.append(result)\n", + " \n", + " # If we need more results to reach k, add more from initial results\n", + " if len(diverse_results) < k:\n", + " # Direct retrieval for the main query\n", + " main_query_embedding = create_embeddings(query)\n", + " main_results = vector_store.similarity_search(main_query_embedding, k=k)\n", + " \n", + " for result in main_results:\n", + " if result[\"text\"] not in unique_texts and len(diverse_results) < k:\n", + " unique_texts.add(result[\"text\"])\n", + " diverse_results.append(result)\n", + " \n", + " # Return the top k diverse results\n", + " return diverse_results[:k]" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "### 3. Opinion Strategy - Diverse Perspectives" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def opinion_retrieval_strategy(query, vector_store, k=4):\n", + " \"\"\"\n", + " Retrieval strategy for opinion queries focusing on diverse perspectives.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to return\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved documents\n", + " \"\"\"\n", + " print(f\"Executing Opinion retrieval strategy for: '{query}'\")\n", + " \n", + " # Define the system prompt to guide the AI in identifying different perspectives\n", + " system_prompt = \"\"\"You are an expert at identifying different perspectives on a topic.\n", + " For the given query about opinions or viewpoints, identify different perspectives \n", + " that people might have on this topic.\n", + "\n", + " Return a list of exactly 3 different viewpoint angles, one per line.\n", + " \"\"\"\n", + "\n", + " # Create the user prompt with the main query\n", + " user_prompt = f\"Identify different perspectives on: {query}\"\n", + " \n", + " # Generate the different perspectives using the LLM\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.3\n", + " )\n", + " \n", + " # Extract and clean the viewpoints\n", + " viewpoints = response.choices[0].message.content.strip().split('\\n')\n", + " viewpoints = [v.strip() for v in viewpoints if v.strip()]\n", + " print(f\"Identified viewpoints: {viewpoints}\")\n", + " \n", + " # Retrieve documents representing each viewpoint\n", + " all_results = []\n", + " for viewpoint in viewpoints:\n", + " # Combine the main 
query with the viewpoint\n", + " combined_query = f\"{query} {viewpoint}\"\n", + " # Create embeddings for the combined query\n", + " viewpoint_embedding = create_embeddings(combined_query)\n", + " # Perform similarity search for the combined query\n", + " results = vector_store.similarity_search(viewpoint_embedding, k=2)\n", + " \n", + " # Mark results with the viewpoint they represent\n", + " for result in results:\n", + " result[\"viewpoint\"] = viewpoint\n", + " \n", + " # Add the results to the list of all results\n", + " all_results.extend(results)\n", + " \n", + " # Select a diverse range of opinions\n", + " # Ensure we get at least one document from each viewpoint if possible\n", + " selected_results = []\n", + " for viewpoint in viewpoints:\n", + " # Filter documents by viewpoint\n", + " viewpoint_docs = [r for r in all_results if r.get(\"viewpoint\") == viewpoint]\n", + " if viewpoint_docs:\n", + " selected_results.append(viewpoint_docs[0])\n", + " \n", + " # Fill remaining slots with highest similarity docs\n", + " remaining_slots = k - len(selected_results)\n", + " if remaining_slots > 0:\n", + " # Sort remaining docs by similarity\n", + " remaining_docs = [r for r in all_results if r not in selected_results]\n", + " remaining_docs.sort(key=lambda x: x[\"similarity\"], reverse=True)\n", + " selected_results.extend(remaining_docs[:remaining_slots])\n", + " \n", + " # Return the top k results\n", + " return selected_results[:k]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. 
Contextual Strategy - User Context Integration" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def contextual_retrieval_strategy(query, vector_store, k=4, user_context=None):\n", + " \"\"\"\n", + " Retrieval strategy for contextual queries integrating user context.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to return\n", + " user_context (str): Additional user context\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved documents\n", + " \"\"\"\n", + " print(f\"Executing Contextual retrieval strategy for: '{query}'\")\n", + " \n", + " # If no user context provided, try to infer it from the query\n", + " if not user_context:\n", + " system_prompt = \"\"\"You are an expert at understanding implied context in questions.\n", + "For the given query, infer what contextual information might be relevant or implied \n", + "but not explicitly stated. 
Focus on what background would help answering this query.\n", + "\n", + "Return a brief description of the implied context.\"\"\"\n", + "\n", + " user_prompt = f\"Infer the implied context in this query: {query}\"\n", + " \n", + " # Generate the inferred context using the LLM\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.1\n", + " )\n", + " \n", + " # Extract and print the inferred context\n", + " user_context = response.choices[0].message.content.strip()\n", + " print(f\"Inferred context: {user_context}\")\n", + " \n", + " # Reformulate the query to incorporate context\n", + " system_prompt = \"\"\"You are an expert at reformulating questions with context.\n", + " Given a query and some contextual information, create a more specific query that \n", + " incorporates the context to get more relevant information.\n", + "\n", + " Return ONLY the reformulated query without explanation.\"\"\"\n", + "\n", + " user_prompt = f\"\"\"\n", + " Query: {query}\n", + " Context: {user_context}\n", + "\n", + " Reformulate the query to incorporate this context:\"\"\"\n", + " \n", + " # Generate the contextualized query using the LLM\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract and print the contextualized query\n", + " contextualized_query = response.choices[0].message.content.strip()\n", + " print(f\"Contextualized query: {contextualized_query}\")\n", + " \n", + " # Retrieve documents based on the contextualized query\n", + " query_embedding = create_embeddings(contextualized_query)\n", + " 
initial_results = vector_store.similarity_search(query_embedding, k=k*2)\n", + " \n", + " # Rank documents considering both relevance and user context\n", + " ranked_results = []\n", + " \n", + " for doc in initial_results:\n", + " # Score document relevance considering the context\n", + " context_relevance = score_document_context_relevance(query, user_context, doc[\"text\"])\n", + " ranked_results.append({\n", + " \"text\": doc[\"text\"],\n", + " \"metadata\": doc[\"metadata\"],\n", + " \"similarity\": doc[\"similarity\"],\n", + " \"context_relevance\": context_relevance\n", + " })\n", + " \n", + " # Sort by context relevance and return top k results\n", + " ranked_results.sort(key=lambda x: x[\"context_relevance\"], reverse=True)\n", + " return ranked_results[:k]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Helper Functions for Document Scoring" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def score_document_relevance(query, document, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Score document relevance to a query using LLM.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " document (str): Document text\n", + " model (str): LLM model\n", + " \n", + " Returns:\n", + " float: Relevance score from 0-10\n", + " \"\"\"\n", + " # System prompt to instruct the model on how to rate relevance\n", + " system_prompt = \"\"\"You are an expert at evaluating document relevance.\n", + " Rate the relevance of a document to a query on a scale from 0 to 10, where:\n", + " 0 = Completely irrelevant\n", + " 10 = Perfectly addresses the query\n", + "\n", + " Return ONLY a numerical score between 0 and 10, nothing else.\n", + " \"\"\"\n", + "\n", + " # Truncate document if it's too long\n", + " doc_preview = document[:1500] + \"...\" if len(document) > 1500 else document\n", + " \n", + " # User prompt containing the query and document preview\n", + 
" user_prompt = f\"\"\"\n", + " Query: {query}\n", + "\n", + " Document: {doc_preview}\n", + "\n", + " Relevance score (0-10):\n", + " \"\"\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the score from the model's response\n", + " score_text = response.choices[0].message.content.strip()\n", + " \n", + " # Extract numeric score using regex\n", + " match = re.search(r'(\\d+(\\.\\d+)?)', score_text)\n", + " if match:\n", + " score = float(match.group(1))\n", + " return min(10, max(0, score)) # Ensure score is within 0-10\n", + " else:\n", + " # Default score if extraction fails\n", + " return 5.0" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def score_document_context_relevance(query, context, document, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Score document relevance considering both query and context.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " context (str): User context\n", + " document (str): Document text\n", + " model (str): LLM model\n", + " \n", + " Returns:\n", + " float: Relevance score from 0-10\n", + " \"\"\"\n", + " # System prompt to instruct the model on how to rate relevance considering context\n", + " system_prompt = \"\"\"You are an expert at evaluating document relevance considering context.\n", + " Rate the document on a scale from 0 to 10 based on how well it addresses the query\n", + " when considering the provided context, where:\n", + " 0 = Completely irrelevant\n", + " 10 = Perfectly addresses the query in the given context\n", + "\n", + " Return ONLY a numerical score between 0 and 10, nothing else.\n", + " \"\"\"\n", + "\n", + " # Truncate document if 
it's too long\n", + " doc_preview = document[:1500] + \"...\" if len(document) > 1500 else document\n", + " \n", + " # User prompt containing the query, context, and document preview\n", + " user_prompt = f\"\"\"\n", + " Query: {query}\n", + " Context: {context}\n", + "\n", + " Document: {doc_preview}\n", + "\n", + " Relevance score considering context (0-10):\n", + " \"\"\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the score from the model's response\n", + " score_text = response.choices[0].message.content.strip()\n", + " \n", + " # Extract numeric score using regex\n", + " match = re.search(r'(\\d+(\\.\\d+)?)', score_text)\n", + " if match:\n", + " score = float(match.group(1))\n", + " return min(10, max(0, score)) # Ensure score is within 0-10\n", + " else:\n", + " # Default score if extraction fails\n", + " return 5.0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The Core Adaptive Retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def adaptive_retrieval(query, vector_store, k=4, user_context=None):\n", + " \"\"\"\n", + " Perform adaptive retrieval by selecting and executing the appropriate strategy.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to retrieve\n", + " user_context (str): Optional user context for contextual queries\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved documents\n", + " \"\"\"\n", + " # Classify the query to determine its type\n", + " query_type = classify_query(query)\n", + " print(f\"Query classified as: {query_type}\")\n", + " \n", + " # Select 
and execute the appropriate retrieval strategy based on the query type\n", + " if query_type == \"Factual\":\n", + " # Use the factual retrieval strategy for precise information\n", + " results = factual_retrieval_strategy(query, vector_store, k)\n", + " elif query_type == \"Analytical\":\n", + " # Use the analytical retrieval strategy for comprehensive coverage\n", + " results = analytical_retrieval_strategy(query, vector_store, k)\n", + " elif query_type == \"Opinion\":\n", + " # Use the opinion retrieval strategy for diverse perspectives\n", + " results = opinion_retrieval_strategy(query, vector_store, k)\n", + " elif query_type == \"Contextual\":\n", + " # Use the contextual retrieval strategy, incorporating user context\n", + " results = contextual_retrieval_strategy(query, vector_store, k, user_context)\n", + " else:\n", + " # Default to factual retrieval strategy if classification fails\n", + " results = factual_retrieval_strategy(query, vector_store, k)\n", + " \n", + " return results # Return the retrieved documents" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, results, query_type, model=\"meta-llama/Llama-3.2-3B-Instruct\"):\n", + " \"\"\"\n", + " Generate a response based on query, retrieved documents, and query type.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " results (List[Dict]): Retrieved documents\n", + " query_type (str): Type of query\n", + " model (str): LLM model\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Prepare context from retrieved documents by joining their texts with separators\n", + " context = \"\\n\\n---\\n\\n\".join([r[\"text\"] for r in results])\n", + " \n", + " # Create custom system prompt based on query type\n", + " if query_type == \"Factual\":\n", + " system_prompt = \"\"\"You 
are a helpful assistant providing factual information.\n", + " Answer the question based on the provided context. Focus on accuracy and precision.\n", + " If the context doesn't contain the information needed, acknowledge the limitations.\"\"\"\n", + " \n", + " elif query_type == \"Analytical\":\n", + " system_prompt = \"\"\"You are a helpful assistant providing analytical insights.\n", + " Based on the provided context, offer a comprehensive analysis of the topic.\n", + " Cover different aspects and perspectives in your explanation.\n", + " If the context has gaps, acknowledge them while providing the best analysis possible.\"\"\"\n", + " \n", + " elif query_type == \"Opinion\":\n", + " system_prompt = \"\"\"You are a helpful assistant discussing topics with multiple viewpoints.\n", + " Based on the provided context, present different perspectives on the topic.\n", + " Ensure fair representation of diverse opinions without showing bias.\n", + " Acknowledge where the context presents limited viewpoints.\"\"\"\n", + " \n", + " elif query_type == \"Contextual\":\n", + " system_prompt = \"\"\"You are a helpful assistant providing contextually relevant information.\n", + " Answer the question considering both the query and its context.\n", + " Make connections between the query context and the information in the provided documents.\n", + " If the context doesn't fully address the specific situation, acknowledge the limitations.\"\"\"\n", + " \n", + " else:\n", + " system_prompt = \"\"\"You are a helpful assistant. Answer the question based on the provided context. 
If you cannot answer from the context, acknowledge the limitations.\"\"\"\n", + " \n", + " # Create user prompt by combining the context and the query\n", + " user_prompt = f\"\"\"\n", + " Context:\n", + " {context}\n", + "\n", + " Question: {query}\n", + "\n", + " Please provide a helpful response based on the context.\n", + " \"\"\"\n", + " \n", + " # Generate response using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.2\n", + " )\n", + " \n", + " # Return the generated response content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete RAG Pipeline with Adaptive Retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "def rag_with_adaptive_retrieval(pdf_path, query, k=4, user_context=None):\n", + " \"\"\"\n", + " Complete RAG pipeline with adaptive retrieval.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to PDF document\n", + " query (str): User query\n", + " k (int): Number of documents to retrieve\n", + " user_context (str): Optional user context\n", + " \n", + " Returns:\n", + " Dict: Results including query, retrieved documents, query type, and response\n", + " \"\"\"\n", + " print(\"\\n=== RAG WITH ADAPTIVE RETRIEVAL ===\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Process the document to extract text, chunk it, and create embeddings\n", + " chunks, vector_store = process_document(pdf_path)\n", + " \n", + " # Classify the query to determine its type\n", + " query_type = classify_query(query)\n", + " print(f\"Query classified as: {query_type}\")\n", + " \n", + " # Retrieve documents using the adaptive retrieval strategy based on the query type\n", + " retrieved_docs = 
adaptive_retrieval(query, vector_store, k, user_context)\n", + " \n", + " # Generate a response based on the query, retrieved documents, and query type\n", + " response = generate_response(query, retrieved_docs, query_type)\n", + " \n", + " # Compile the results into a dictionary\n", + " result = {\n", + " \"query\": query,\n", + " \"query_type\": query_type,\n", + " \"retrieved_documents\": retrieved_docs,\n", + " \"response\": response\n", + " }\n", + " \n", + " print(\"\\n=== RESPONSE ===\")\n", + " print(response)\n", + " \n", + " return result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Framework" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_adaptive_vs_standard(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Compare adaptive retrieval with standard retrieval on a set of test queries.\n", + " \n", + " This function processes a document, runs both standard and adaptive retrieval methods\n", + " on each test query, and compares their performance. If reference answers are provided,\n", + " it also evaluates the quality of responses against these references.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to PDF document to be processed as the knowledge source\n", + " test_queries (List[str]): List of test queries to evaluate both retrieval methods\n", + " reference_answers (List[str], optional): Reference answers for evaluation metrics\n", + " \n", + " Returns:\n", + " Dict: Evaluation results containing individual query results and overall comparison\n", + " \"\"\"\n", + " print(\"=== EVALUATING ADAPTIVE VS. 
STANDARD RETRIEVAL ===\")\n", + " \n", + " # Process document to extract text, create chunks and build the vector store\n", + " chunks, vector_store = process_document(pdf_path)\n", + " \n", + " # Initialize collection for storing comparison results\n", + " results = []\n", + " \n", + " # Process each test query with both retrieval methods\n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\nQuery {i+1}: {query}\")\n", + " \n", + " # --- Standard retrieval approach ---\n", + " print(\"\\n--- Standard Retrieval ---\")\n", + " # Create embedding for the query\n", + " query_embedding = create_embeddings(query)\n", + " # Retrieve documents using simple vector similarity\n", + " standard_docs = vector_store.similarity_search(query_embedding, k=4)\n", + " # Generate response using a generic approach\n", + " standard_response = generate_response(query, standard_docs, \"General\")\n", + " \n", + " # --- Adaptive retrieval approach ---\n", + " print(\"\\n--- Adaptive Retrieval ---\")\n", + " # Classify the query to determine its type (Factual, Analytical, Opinion, Contextual)\n", + " query_type = classify_query(query)\n", + " # Retrieve documents using the strategy appropriate for this query type\n", + " adaptive_docs = adaptive_retrieval(query, vector_store, k=4)\n", + " # Generate a response tailored to the query type\n", + " adaptive_response = generate_response(query, adaptive_docs, query_type)\n", + " \n", + " # Store complete results for this query\n", + " result = {\n", + " \"query\": query,\n", + " \"query_type\": query_type,\n", + " \"standard_retrieval\": {\n", + " \"documents\": standard_docs,\n", + " \"response\": standard_response\n", + " },\n", + " \"adaptive_retrieval\": {\n", + " \"documents\": adaptive_docs,\n", + " \"response\": adaptive_response\n", + " }\n", + " }\n", + " \n", + " # Add reference answer if available for this query\n", + " if reference_answers and i < len(reference_answers):\n", + " result[\"reference_answer\"] = 
reference_answers[i]\n", + " \n", + " results.append(result)\n", + " \n", + " # Display preview of both responses for quick comparison\n", + " print(\"\\n--- Responses ---\")\n", + " print(f\"Standard: {standard_response[:200]}...\")\n", + " print(f\"Adaptive: {adaptive_response[:200]}...\")\n", + " \n", + " # Calculate comparative metrics if reference answers are available\n", + " if reference_answers:\n", + " comparison = compare_responses(results)\n", + " print(\"\\n=== EVALUATION RESULTS ===\")\n", + " print(comparison)\n", + " \n", + " # Return the complete evaluation results\n", + " return {\n", + " \"results\": results,\n", + " \"comparison\": comparison if reference_answers else \"No reference answers provided for evaluation\"\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_responses(results):\n", + " \"\"\"\n", + " Compare standard and adaptive responses against reference answers.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results containing both types of responses\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " # Define the system prompt to guide the AI in comparing responses\n", + " comparison_prompt = \"\"\"You are an expert evaluator of information retrieval systems.\n", + " Compare the standard retrieval and adaptive retrieval responses for each query.\n", + " Consider factors like accuracy, relevance, comprehensiveness, and alignment with the reference answer.\n", + " Provide a detailed analysis of the strengths and weaknesses of each approach.\"\"\"\n", + " \n", + " # Initialize the comparison text with a header\n", + " comparison_text = \"# Evaluation of Standard vs. 
Adaptive Retrieval\\n\\n\"\n", + " \n", + " # Iterate through each result to compare responses\n", + " for i, result in enumerate(results):\n", + " # Skip if there is no reference answer for the query\n", + " if \"reference_answer\" not in result:\n", + " continue\n", + " \n", + " # Add query details to the comparison text\n", + " comparison_text += f\"## Query {i+1}: {result['query']}\\n\"\n", + " comparison_text += f\"*Query Type: {result['query_type']}*\\n\\n\"\n", + " comparison_text += f\"**Reference Answer:**\\n{result['reference_answer']}\\n\\n\"\n", + " \n", + " # Add standard retrieval response to the comparison text\n", + " comparison_text += f\"**Standard Retrieval Response:**\\n{result['standard_retrieval']['response']}\\n\\n\"\n", + " \n", + " # Add adaptive retrieval response to the comparison text\n", + " comparison_text += f\"**Adaptive Retrieval Response:**\\n{result['adaptive_retrieval']['response']}\\n\\n\"\n", + " \n", + " # Create the user prompt for the AI to compare the responses\n", + " user_prompt = f\"\"\"\n", + " Reference Answer: {result['reference_answer']}\n", + " \n", + " Standard Retrieval Response: {result['standard_retrieval']['response']}\n", + " \n", + " Adaptive Retrieval Response: {result['adaptive_retrieval']['response']}\n", + " \n", + " Provide a detailed comparison of the two responses.\n", + " \"\"\"\n", + " \n", + " # Generate the comparison analysis using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": comparison_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.2\n", + " )\n", + " \n", + " # Add the AI's comparison analysis to the comparison text\n", + " comparison_text += f\"**Comparison Analysis:**\\n{response.choices[0].message.content}\\n\\n\"\n", + " \n", + " return comparison_text # Return the complete comparison analysis" + ] + }, + 
{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating the Adaptive Retrieval System (Customized Queries)\n", + "\n", + "The final step to use the adaptive RAG evaluation system is to call the evaluate_adaptive_vs_standard() function with your PDF document and test queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "# Path to your knowledge source document\n", + "# This PDF file contains the information that the RAG system will use\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define test queries covering different query types to demonstrate \n", + "# how adaptive retrieval handles various query intentions\n", + "test_queries = [\n", + " \"What is Explainable AI (XAI)?\", # Factual query - seeking definition/specific information\n", + " # \"How do AI ethics and governance frameworks address potential societal impacts?\", # Analytical query - requiring comprehensive analysis\n", + " # \"Is AI development moving too fast for proper regulation?\", # Opinion query - seeking diverse perspectives\n", + " # \"How might explainable AI help in healthcare decisions?\", # Contextual query - benefits from context-awareness\n", + "]\n", + "\n", + "# Reference answers for more thorough evaluation\n", + "# These can be used to objectively assess response quality against a known standard\n", + "reference_answers = [\n", + " \"Explainable AI (XAI) aims to make AI systems transparent and understandable by providing clear explanations of how decisions are made. This helps users trust and effectively manage AI technologies.\",\n", + " # \"AI ethics and governance frameworks address potential societal impacts by establishing guidelines and principles to ensure AI systems are developed and used responsibly. 
These frameworks focus on fairness, accountability, transparency, and the protection of human rights to mitigate risks and promote beneficial outcomes.\",\n", + " # \"Opinions on whether AI development is moving too fast for proper regulation vary. Some argue that rapid advancements outpace regulatory efforts, leading to potential risks and ethical concerns. Others believe that innovation should continue at its current pace, with regulations evolving alongside to address emerging challenges.\",\n", + " # \"Explainable AI can significantly aid healthcare decisions by providing transparent and understandable insights into AI-driven recommendations. This transparency helps healthcare professionals trust AI systems, make informed decisions, and improve patient outcomes by understanding the rationale behind AI suggestions.\"\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== EVALUATING ADAPTIVE VS. STANDARD RETRIEVAL ===\n", + "Extracting text from PDF...\n", + "Chunking text...\n", + "Created 42 text chunks\n", + "Creating embeddings for chunks...\n", + "Added 42 chunks to the vector store\n", + "\n", + "\n", + "Query 1: What is Explainable AI (XAI)?\n", + "\n", + "--- Standard Retrieval ---\n", + "\n", + "--- Adaptive Retrieval ---\n", + "Query classified as: Factual\n", + "Executing Factual retrieval strategy for: 'What is Explainable AI (XAI)?'\n", + "Enhanced query: What are the key applications and techniques of Explainable Artificial Intelligence (XAI) in machine learning and deep learning?\n", + "\n", + "--- Responses ---\n", + "Standard: Based on the provided context, Explainable AI (XAI) is a set of techniques aimed at making AI decisions more understandable, enabling users to assess their fairness and accuracy. 
The goal of XAI is to...\n", + "Adaptive: Explainable AI (XAI) is a subfield of artificial intelligence (AI) that aims to make AI systems more transparent and understandable. The primary goal of XAI is to provide insights into how AI models m...\n", + "\n", + "=== EVALUATION RESULTS ===\n", + "# Evaluation of Standard vs. Adaptive Retrieval\n", + "\n", + "## Query 1: What is Explainable AI (XAI)?\n", + "*Query Type: Factual*\n", + "\n", + "**Reference Answer:**\n", + "Explainable AI (XAI) aims to make AI systems transparent and understandable by providing clear explanations of how decisions are made. This helps users trust and effectively manage AI technologies.\n", + "\n", + "**Standard Retrieval Response:**\n", + "Based on the provided context, Explainable AI (XAI) is a set of techniques aimed at making AI decisions more understandable, enabling users to assess their fairness and accuracy. The goal of XAI is to provide insights into how AI models make decisions, enhancing trust and accountability in AI systems. This involves developing methods for explaining AI decisions, which can help users understand the reliability and fairness of AI-driven outcomes.\n", + "\n", + "**Adaptive Retrieval Response:**\n", + "Explainable AI (XAI) is a subfield of artificial intelligence (AI) that aims to make AI systems more transparent and understandable. The primary goal of XAI is to provide insights into how AI models make decisions, thereby enhancing trust and accountability in AI systems.\n", + "\n", + "XAI techniques are being developed to explain the reasoning behind AI decisions, making it possible for users to assess the reliability and fairness of AI outputs. 
This is particularly important in high-stakes applications such as medical diagnosis, finance, transportation, and manufacturing, where the consequences of AI errors can be severe.\n", + "\n", + "By making AI systems more explainable, XAI techniques can help address concerns about the potential for unintended consequences, accountability, and responsibility in AI development and deployment.\n", + "\n", + "**Comparison Analysis:**\n", + "**Comparison of Standard Retrieval Response and Adaptive Retrieval Response**\n", + "\n", + "The two responses provide similar information about Explainable AI (XAI), but they differ in their tone, structure, and level of detail. Here's a detailed comparison of the two responses:\n", + "\n", + "**Accuracy and Relevance**\n", + "\n", + "* Both responses accurately convey the main idea of XAI, which is to make AI systems more transparent and understandable.\n", + "* However, the Standard Retrieval Response provides more context and background information about XAI, including its goals and applications.\n", + "* The Adaptive Retrieval Response is more concise and to the point, but it lacks the depth and detail of the Standard Retrieval Response.\n", + "\n", + "**Comprehensiveness**\n", + "\n", + "* The Standard Retrieval Response provides a more comprehensive overview of XAI, including its techniques, goals, and applications.\n", + "* The Adaptive Retrieval Response focuses primarily on the definition and purpose of XAI, without providing much additional context or information.\n", + "* The Standard Retrieval Response also highlights the importance of XAI in high-stakes applications, such as medical diagnosis, finance, transportation, and manufacturing.\n", + "\n", + "**Alignment with Reference Answer**\n", + "\n", + "* Both responses align with the reference answer, but the Standard Retrieval Response is more closely aligned due to its more detailed and comprehensive explanation of XAI.\n", + "* The Adaptive Retrieval Response is more 
concise and to the point, but it may not fully capture the nuances and complexities of XAI.\n", + "\n", + "**Strengths and Weaknesses**\n", + "\n", + "**Standard Retrieval Response**\n", + "\n", + "Strengths:\n", + "\n", + "* Provides a more comprehensive overview of XAI\n", + "* Offers more context and background information\n", + "* Aligns closely with the reference answer\n", + "\n", + "Weaknesses:\n", + "\n", + "* May be too lengthy or wordy for some readers\n", + "* Lacks the concise and to-the-point style of the Adaptive Retrieval Response\n", + "\n", + "**Adaptive Retrieval Response**\n", + "\n", + "Strengths:\n", + "\n", + "* Is concise and to the point\n", + "* Provides a clear and direct definition of XAI\n", + "* May be more suitable for readers who prefer a brief overview\n", + "\n", + "Weaknesses:\n", + "\n", + "* Lacks depth and detail\n", + "* Fails to provide much additional context or information about XAI\n", + "* May not fully capture the nuances and complexities of XAI\n", + "\n", + "**Conclusion**\n", + "\n", + "The Standard Retrieval Response provides a more comprehensive and detailed overview of XAI, while the Adaptive Retrieval Response is more concise and to the point. Both responses align with the reference answer, but the Standard Retrieval Response is more closely aligned due to its more detailed and comprehensive explanation of XAI.\n", + "\n", + "\n", + "# Evaluation of Standard vs. Adaptive Retrieval\n", + "\n", + "## Query 1: What is Explainable AI (XAI)?\n", + "*Query Type: Factual*\n", + "\n", + "**Reference Answer:**\n", + "Explainable AI (XAI) aims to make AI systems transparent and understandable by providing clear explanations of how decisions are made. 
This helps users trust and effectively manage AI technologies.\n", + "\n", + "**Standard Retrieval Response:**\n", + "Based on the provided context, Explainable AI (XAI) is a set of techniques aimed at making AI decisions more understandable, enabling users to assess their fairness and accuracy. The goal of XAI is to provide insights into how AI models make decisions, enhancing trust and accountability in AI systems. This involves developing methods for explaining AI decisions, which can help users understand the reliability and fairness of AI-driven outcomes.\n", + "\n", + "**Adaptive Retrieval Response:**\n", + "Explainable AI (XAI) is a subfield of artificial intelligence (AI) that aims to make AI systems more transparent and understandable. The primary goal of XAI is to provide insights into how AI models make decisions, thereby enhancing trust and accountability in AI systems.\n", + "\n", + "XAI techniques are being developed to explain the reasoning behind AI decisions, making it possible for users to assess the reliability and fairness of AI outputs. This is particularly important in high-stakes applications such as medical diagnosis, finance, transportation, and manufacturing, where the consequences of AI errors can be severe.\n", + "\n", + "By making AI systems more explainable, XAI techniques can help address concerns about the potential for unintended consequences, accountability, and responsibility in AI development and deployment.\n", + "\n", + "**Comparison Analysis:**\n", + "**Comparison of Standard Retrieval Response and Adaptive Retrieval Response**\n", + "\n", + "The two responses provide similar information about Explainable AI (XAI), but they differ in their tone, structure, and level of detail. 
Here's a detailed comparison of the two responses:\n", + "\n", + "**Accuracy and Relevance**\n", + "\n", + "* Both responses accurately convey the main idea of XAI, which is to make AI systems more transparent and understandable.\n", + "* However, the Standard Retrieval Response provides more context and background information about XAI, including its goals and applications.\n", + "* The Adaptive Retrieval Response is more concise and to the point, but it lacks the depth and detail of the Standard Retrieval Response.\n", + "\n", + "**Comprehensiveness**\n", + "\n", + "* The Standard Retrieval Response provides a more comprehensive overview of XAI, including its techniques, goals, and applications.\n", + "* The Adaptive Retrieval Response focuses primarily on the definition and purpose of XAI, without providing much additional context or information.\n", + "* The Standard Retrieval Response also highlights the importance of XAI in high-stakes applications, such as medical diagnosis, finance, transportation, and manufacturing.\n", + "\n", + "**Alignment with Reference Answer**\n", + "\n", + "* Both responses align with the reference answer, but the Standard Retrieval Response is more closely aligned due to its more detailed and comprehensive explanation of XAI.\n", + "* The Adaptive Retrieval Response is more concise and to the point, but it may not fully capture the nuances and complexities of XAI.\n", + "\n", + "**Strengths and Weaknesses**\n", + "\n", + "**Standard Retrieval Response**\n", + "\n", + "Strengths:\n", + "\n", + "* Provides a more comprehensive overview of XAI\n", + "* Offers more context and background information\n", + "* Aligns closely with the reference answer\n", + "\n", + "Weaknesses:\n", + "\n", + "* May be too lengthy or wordy for some readers\n", + "* Lacks the concise and to-the-point style of the Adaptive Retrieval Response\n", + "\n", + "**Adaptive Retrieval Response**\n", + "\n", + "Strengths:\n", + "\n", + "* Is concise and to the 
point\n", + "* Provides a clear and direct definition of XAI\n", + "* May be more suitable for readers who prefer a brief overview\n", + "\n", + "Weaknesses:\n", + "\n", + "* Lacks depth and detail\n", + "* Fails to provide much additional context or information about XAI\n", + "* May not fully capture the nuances and complexities of XAI\n", + "\n", + "**Conclusion**\n", + "\n", + "The Standard Retrieval Response provides a more comprehensive and detailed overview of XAI, while the Adaptive Retrieval Response is more concise and to the point. Both responses align with the reference answer, but the Standard Retrieval Response is more closely aligned due to its more detailed and comprehensive explanation of XAI.\n", + "\n", + "\n" + ] + } + ], + "source": [ + "# Run the evaluation comparing adaptive vs standard retrieval\n", + "# This will process each query using both methods and compare the results\n", + "evaluation_results = evaluate_adaptive_vs_standard(\n", + " pdf_path=pdf_path, # Source document for knowledge extraction\n", + " test_queries=test_queries, # List of test queries to evaluate\n", + " reference_answers=reference_answers # Optional ground truth for comparison\n", + ")\n", + "\n", + "# The results will show a detailed comparison between standard retrieval and \n", + "# adaptive retrieval performance across different query types, highlighting\n", + "# where adaptive strategies provide improved outcomes\n", + "print(evaluation_results[\"comparison\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/13_self_rag.ipynb b/13_self_rag.ipynb new file mode 
100644 index 0000000..cae2d56 --- /dev/null +++ b/13_self_rag.ipynb @@ -0,0 +1,1177 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Self-RAG: A Dynamic Approach to RAG\n", + "\n", + "In this notebook, I implement Self-RAG, an advanced RAG system that dynamically decides when and how to use retrieved information. Unlike traditional RAG approaches, Self-RAG introduces reflection points throughout the retrieval and generation process, resulting in higher quality and more reliable responses.\n", + "\n", + "## Key Components of Self-RAG\n", + "\n", + "1. **Retrieval Decision**: Determines if retrieval is even necessary for a given query\n", + "2. **Document Retrieval**: Fetches potentially relevant documents when needed \n", + "3. **Relevance Evaluation**: Assesses how relevant each retrieved document is\n", + "4. **Response Generation**: Creates responses based on relevant contexts\n", + "5. **Support Assessment**: Evaluates if responses are properly grounded in the context\n", + "6. **Utility Evaluation**: Rates the overall usefulness of generated responses" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extracting Text from a PDF File\n", + "To implement RAG, we first need a source of textual data. In this case, we extract text from a PDF file using the PyMuPDF library." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extracts all text from a PDF file.\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file.\n", + "\n", + " Returns:\n", + " str: Extracted text from the PDF.\n", + " \"\"\"\n", + " # Open the PDF file\n", + " mypdf = fitz.open(pdf_path)\n", + " all_text = \"\" # Initialize an empty string to store the extracted text\n", + "\n", + " # Iterate through each page in the PDF\n", + " for page_num in range(mypdf.page_count):\n", + " page = mypdf[page_num] # Get the page\n", + " text = page.get_text(\"text\") # Extract text from the page\n", + " all_text += text # Append the extracted text to the all_text string\n", + "\n", + " return all_text # Return the extracted text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chunking the Extracted Text\n", + "Once we have the extracted text, we divide it into smaller, overlapping chunks to improve retrieval accuracy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, n, overlap):\n", + " \"\"\"\n", + " Chunks the given text into segments of n characters with overlap.\n", + "\n", + " Args:\n", + " text (str): The text to be chunked.\n", + " n (int): The number of characters in each chunk.\n", + " overlap (int): The number of overlapping characters between chunks.\n", + "\n", + " Returns:\n", + " List[str]: A list of text chunks.\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Loop through the text with a step size of (n - overlap)\n", + " for i in range(0, len(text), n - overlap):\n", + " # Append a chunk of text from index i to i + n to the chunks list\n", + " chunks.append(text[i:i + n])\n", + "\n", + " return chunks # Return the list of text chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation\n", + "We'll create a basic vector store to manage document chunks and their embeddings." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " \"\"\"\n", + " Initialize the vector store.\n", + " \"\"\"\n", + " self.vectors = [] # List to store embedding vectors\n", + " self.texts = [] # List to store original texts\n", + " self.metadata = [] # List to store metadata for each text\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + "\n", + " Args:\n", + " text (str): The original text.\n", + " embedding (List[float]): The embedding vector.\n", + " metadata (dict, optional): Additional metadata.\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Convert embedding to numpy array and add to vectors list\n", + " self.texts.append(text) # Add the original text to texts list\n", + " self.metadata.append(metadata or {}) # Add metadata to metadata list, default to empty dict if None\n", + " \n", + " def similarity_search(self, query_embedding, k=5, filter_func=None):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + "\n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector.\n", + " k (int): Number of results to return.\n", + " filter_func (callable, optional): Function to filter results.\n", + "\n", + " Returns:\n", + " List[Dict]: Top k most similar items with their texts and metadata.\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return empty list if no vectors are stored\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " # Apply filter if provided\n", + " if filter_func and not 
filter_func(self.metadata[i]):\n", + " continue\n", + " \n", + " # Calculate cosine similarity\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity)) # Append index and similarity score\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx], # Add the text\n", + " \"metadata\": self.metadata[idx], # Add the metadata\n", + " \"similarity\": score # Add the similarity score\n", + " })\n", + " \n", + " return results # Return the list of top k results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(text, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Creates embeddings for the given text.\n", + "\n", + " Args:\n", + " text (str or List[str]): The input text(s) for which embeddings are to be created.\n", + " model (str): The model to be used for creating embeddings.\n", + "\n", + " Returns:\n", + " List[float] or List[List[float]]: The embedding vector(s).\n", + " \"\"\"\n", + " # Handle both string and list inputs by converting string input to a list\n", + " input_text = text if isinstance(text, list) else [text]\n", + " \n", + " # Create embeddings for the input text using the specified model\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=input_text\n", + " )\n", + " \n", + " # If the input was a single string, return just the first embedding\n", + " if isinstance(text, str):\n", + " return response.data[0].embedding\n", + " \n", + " # Otherwise, return all embeddings for the list of texts\n", 
+ " return [item.embedding for item in response.data]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for Self-RAG.\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file.\n", + " chunk_size (int): Size of each chunk in characters.\n", + " chunk_overlap (int): Overlap between chunks in characters.\n", + "\n", + " Returns:\n", + " SimpleVectorStore: A vector store containing document chunks and their embeddings.\n", + " \"\"\"\n", + " # Extract text from the PDF file\n", + " print(\"Extracting text from PDF...\")\n", + " extracted_text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Chunk the extracted text\n", + " print(\"Chunking text...\")\n", + " chunks = chunk_text(extracted_text, chunk_size, chunk_overlap)\n", + " print(f\"Created {len(chunks)} text chunks\")\n", + " \n", + " # Create embeddings for each chunk\n", + " print(\"Creating embeddings for chunks...\")\n", + " chunk_embeddings = create_embeddings(chunks)\n", + " \n", + " # Initialize the vector store\n", + " store = SimpleVectorStore()\n", + " \n", + " # Add each chunk and its embedding to the vector store\n", + " for i, (chunk, embedding) in enumerate(zip(chunks, chunk_embeddings)):\n", + " store.add_item(\n", + " text=chunk,\n", + " embedding=embedding,\n", + " metadata={\"index\": i, \"source\": pdf_path}\n", + " )\n", + " \n", + " print(f\"Added {len(chunks)} chunks to the vector store\")\n", + " return store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Self-RAG Components\n", + "### 1. 
Retrieval Decision" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def determine_if_retrieval_needed(query):\n", + " \"\"\"\n", + " Determines if retrieval is necessary for the given query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " \n", + " Returns:\n", + " bool: True if retrieval is needed, False otherwise\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to determine if retrieval is necessary\n", + " system_prompt = \"\"\"You are an AI assistant that determines if retrieval is necessary to answer a query.\n", + " For factual questions, specific information requests, or questions about events, people, or concepts, answer \"Yes\".\n", + " For opinions, hypothetical scenarios, or simple queries with common knowledge, answer \"No\".\n", + " Answer with ONLY \"Yes\" or \"No\".\"\"\"\n", + "\n", + " # User prompt containing the query\n", + " user_prompt = f\"Query: {query}\\n\\nIs retrieval necessary to answer this query accurately?\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the answer from the model's response and convert to lowercase\n", + " answer = response.choices[0].message.content.strip().lower()\n", + " \n", + " # Return True if the answer contains \"yes\", otherwise return False\n", + " return \"yes\" in answer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 
Relevance Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_relevance(query, context):\n", + " \"\"\"\n", + " Evaluates the relevance of a context to the query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " context (str): Context text\n", + " \n", + " Returns:\n", + " str: 'relevant' or 'irrelevant'\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to determine document relevance\n", + " system_prompt = \"\"\"You are an AI assistant that determines if a document is relevant to a query.\n", + " Consider whether the document contains information that would be helpful in answering the query.\n", + " Answer with ONLY \"Relevant\" or \"Irrelevant\".\"\"\"\n", + "\n", + " # Truncate context if it is too long to avoid exceeding token limits\n", + " max_context_length = 2000\n", + " if len(context) > max_context_length:\n", + " context = context[:max_context_length] + \"... [truncated]\"\n", + "\n", + " # User prompt containing the query and the document content\n", + " user_prompt = f\"\"\"Query: {query}\n", + " Document content:\n", + " {context}\n", + "\n", + " Is this document relevant to the query? Answer with ONLY \"Relevant\" or \"Irrelevant\".\n", + " \"\"\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the answer from the model's response and convert to lowercase\n", + " answer = response.choices[0].message.content.strip().lower()\n", + " \n", + " return answer # Return the relevance evaluation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. 
Support Assessment" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def assess_support(response, context):\n", + " \"\"\"\n", + " Assesses how well a response is supported by the context.\n", + " \n", + " Args:\n", + " response (str): Generated response\n", + " context (str): Context text\n", + " \n", + " Returns:\n", + " str: 'fully supported', 'partially supported', or 'no support'\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to evaluate support\n", + " system_prompt = \"\"\"You are an AI assistant that determines if a response is supported by the given context.\n", + " Evaluate if the facts, claims, and information in the response are backed by the context.\n", + " Answer with ONLY one of these three options:\n", + " - \"Fully supported\": All information in the response is directly supported by the context.\n", + " - \"Partially supported\": Some information in the response is supported by the context, but some is not.\n", + " - \"No support\": The response contains significant information not found in or contradicting the context.\n", + " \"\"\"\n", + "\n", + " # Truncate context if it is too long to avoid exceeding token limits\n", + " max_context_length = 2000\n", + " if len(context) > max_context_length:\n", + " context = context[:max_context_length] + \"... [truncated]\"\n", + "\n", + " # User prompt containing the context and the response to be evaluated\n", + " user_prompt = f\"\"\"Context:\n", + " {context}\n", + "\n", + " Response:\n", + " {response}\n", + "\n", + " How well is this response supported by the context? 
Answer with ONLY \"Fully supported\", \"Partially supported\", or \"No support\".\n", + " \"\"\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the answer from the model's response and convert to lowercase\n", + " answer = response.choices[0].message.content.strip().lower()\n", + " \n", + " return answer # Return the support assessment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Utility Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def rate_utility(query, response):\n", + " \"\"\"\n", + " Rates the utility of a response for the query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " response (str): Generated response\n", + " \n", + " Returns:\n", + " int: Utility rating from 1 to 5\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to rate the utility of the response\n", + " system_prompt = \"\"\"You are an AI assistant that rates the utility of a response to a query.\n", + " Consider how well the response answers the query, its completeness, correctness, and helpfulness.\n", + " Rate the utility on a scale from 1 to 5, where:\n", + " - 1: Not useful at all\n", + " - 2: Slightly useful\n", + " - 3: Moderately useful\n", + " - 4: Very useful\n", + " - 5: Exceptionally useful\n", + " Answer with ONLY a single number from 1 to 5.\"\"\"\n", + "\n", + " # User prompt containing the query and the response to be rated\n", + " user_prompt = f\"\"\"Query: {query}\n", + " Response:\n", + " {response}\n", + "\n", + " Rate the utility of this response on a scale from 1 to 5:\"\"\"\n", + " \n", + " # Generate the utility rating 
using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract the rating from the model's response\n", + " rating = response.choices[0].message.content.strip()\n", + " \n", + " # Extract just the number from the rating\n", + " rating_match = re.search(r'[1-5]', rating)\n", + " if rating_match:\n", + " return int(rating_match.group()) # Return the extracted rating as an integer\n", + " \n", + " return 3 # Default to middle rating if parsing fails" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, context=None):\n", + " \"\"\"\n", + " Generates a response based on the query and optional context.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " context (str, optional): Context text\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to generate a helpful response\n", + " system_prompt = \"\"\"You are a helpful AI assistant. 
Provide a clear, accurate, and informative response to the query.\"\"\"\n", + " \n", + " # Create the user prompt based on whether context is provided\n", + " if context:\n", + " user_prompt = f\"\"\"Context:\n", + " {context}\n", + "\n", + " Query: {query}\n", + "\n", + " Please answer the query based on the provided context.\n", + " \"\"\"\n", + " else:\n", + " user_prompt = f\"\"\"Query: {query}\n", + " \n", + " Please answer the query to the best of your ability.\"\"\"\n", + " \n", + " # Generate the response using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.2\n", + " )\n", + " \n", + " # Return the generated response text\n", + " return response.choices[0].message.content.strip()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Self-RAG Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def self_rag(query, vector_store, top_k=3):\n", + " \"\"\"\n", + " Implements the complete Self-RAG pipeline.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store containing document chunks\n", + " top_k (int): Number of documents to retrieve initially\n", + " \n", + " Returns:\n", + " dict: Results including query, response, and metrics from the Self-RAG process\n", + " \"\"\"\n", + " print(f\"\\n=== Starting Self-RAG for query: {query} ===\\n\")\n", + " \n", + " # Step 1: Determine if retrieval is necessary\n", + " print(\"Step 1: Determining if retrieval is necessary...\")\n", + " retrieval_needed = determine_if_retrieval_needed(query)\n", + " print(f\"Retrieval needed: {retrieval_needed}\")\n", + " \n", + " # Initialize metrics to track the Self-RAG process\n", + 
" metrics = {\n", + " \"retrieval_needed\": retrieval_needed,\n", + " \"documents_retrieved\": 0,\n", + " \"relevant_documents\": 0,\n", + " \"response_support_ratings\": [],\n", + " \"utility_ratings\": []\n", + " }\n", + " \n", + " best_response = None\n", + " best_score = -1\n", + " \n", + " if retrieval_needed:\n", + " # Step 2: Retrieve documents\n", + " print(\"\\nStep 2: Retrieving relevant documents...\")\n", + " query_embedding = create_embeddings(query)\n", + " results = vector_store.similarity_search(query_embedding, k=top_k)\n", + " metrics[\"documents_retrieved\"] = len(results)\n", + " print(f\"Retrieved {len(results)} documents\")\n", + " \n", + " # Step 3: Evaluate relevance of each document\n", + " print(\"\\nStep 3: Evaluating document relevance...\")\n", + " relevant_contexts = []\n", + " \n", + " for i, result in enumerate(results):\n", + " context = result[\"text\"]\n", + " relevance = evaluate_relevance(query, context)\n", + " print(f\"Document {i+1} relevance: {relevance}\")\n", + " \n", + " if relevance == \"relevant\":\n", + " relevant_contexts.append(context)\n", + " \n", + " metrics[\"relevant_documents\"] = len(relevant_contexts)\n", + " print(f\"Found {len(relevant_contexts)} relevant documents\")\n", + " \n", + " if relevant_contexts:\n", + " # Step 4: Process each relevant context\n", + " print(\"\\nStep 4: Processing relevant contexts...\")\n", + " for i, context in enumerate(relevant_contexts):\n", + " print(f\"\\nProcessing context {i+1}/{len(relevant_contexts)}...\")\n", + " \n", + " # Generate response based on the context\n", + " print(\"Generating response...\")\n", + " response = generate_response(query, context)\n", + " \n", + " # Assess how well the response is supported by the context\n", + " print(\"Assessing support...\")\n", + " support_rating = assess_support(response, context)\n", + " print(f\"Support rating: {support_rating}\")\n", + " metrics[\"response_support_ratings\"].append(support_rating)\n", + " \n", + " # 
Rate the utility of the response\n", + " print(\"Rating utility...\")\n", + " utility_rating = rate_utility(query, response)\n", + " print(f\"Utility rating: {utility_rating}/5\")\n", + " metrics[\"utility_ratings\"].append(utility_rating)\n", + " \n", + " # Calculate overall score (higher for better support and utility)\n", + " support_score = {\n", + " \"fully supported\": 3, \n", + " \"partially supported\": 1, \n", + " \"no support\": 0\n", + " }.get(support_rating, 0)\n", + " \n", + " overall_score = support_score * 5 + utility_rating\n", + " print(f\"Overall score: {overall_score}\")\n", + " \n", + " # Keep track of the best response\n", + " if overall_score > best_score:\n", + " best_response = response\n", + " best_score = overall_score\n", + " print(\"New best response found!\")\n", + " \n", + " # If no relevant contexts were found or all responses scored poorly\n", + " if not relevant_contexts or best_score <= 0:\n", + " print(\"\\nNo suitable context found or poor responses, generating without retrieval...\")\n", + " best_response = generate_response(query)\n", + " else:\n", + " # No retrieval needed, generate directly\n", + " print(\"\\nNo retrieval needed, generating response directly...\")\n", + " best_response = generate_response(query)\n", + " \n", + " # Final metrics\n", + " metrics[\"best_score\"] = best_score\n", + " metrics[\"used_retrieval\"] = retrieval_needed and best_score > 0\n", + " \n", + " print(\"\\n=== Self-RAG Completed ===\")\n", + " \n", + " return {\n", + " \"query\": query,\n", + " \"response\": best_response,\n", + " \"metrics\": metrics\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running the Complete Self-RAG System" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def run_self_rag_example():\n", + " \"\"\"\n", + " Demonstrates the complete Self-RAG system with examples.\n", + " \"\"\"\n", + " # Process document\n", + " 
pdf_path = \"data/AI_Information.pdf\" # Path to the PDF document\n", + " print(f\"Processing document: {pdf_path}\")\n", + " vector_store = process_document(pdf_path) # Process the document and create a vector store\n", + " \n", + " # Example 1: Query likely needing retrieval\n", + " query1 = \"What are the main ethical concerns in AI development?\"\n", + " print(\"\\n\" + \"=\"*80)\n", + " print(f\"EXAMPLE 1: {query1}\")\n", + " result1 = self_rag(query1, vector_store) # Run Self-RAG for the first query\n", + " print(\"\\nFinal response:\")\n", + " print(result1[\"response\"]) # Print the final response for the first query\n", + " print(\"\\nMetrics:\")\n", + " print(json.dumps(result1[\"metrics\"], indent=2)) # Print the metrics for the first query\n", + " \n", + " # Example 2: Query likely not needing retrieval\n", + " query2 = \"Can you write a short poem about artificial intelligence?\"\n", + " print(\"\\n\" + \"=\"*80)\n", + " print(f\"EXAMPLE 2: {query2}\")\n", + " result2 = self_rag(query2, vector_store) # Run Self-RAG for the second query\n", + " print(\"\\nFinal response:\")\n", + " print(result2[\"response\"]) # Print the final response for the second query\n", + " print(\"\\nMetrics:\")\n", + " print(json.dumps(result2[\"metrics\"], indent=2)) # Print the metrics for the second query\n", + " \n", + " # Example 3: Query with some relevance to document but requiring additional knowledge\n", + " query3 = \"How might AI impact economic growth in developing countries?\"\n", + " print(\"\\n\" + \"=\"*80)\n", + " print(f\"EXAMPLE 3: {query3}\")\n", + " result3 = self_rag(query3, vector_store) # Run Self-RAG for the third query\n", + " print(\"\\nFinal response:\")\n", + " print(result3[\"response\"]) # Print the final response for the third query\n", + " print(\"\\nMetrics:\")\n", + " print(json.dumps(result3[\"metrics\"], indent=2)) # Print the metrics for the third query\n", + " \n", + " return {\n", + " \"example1\": result1,\n", + " \"example2\": 
result2,\n", + " \"example3\": result3\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating Self-RAG Against Traditional RAG" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def traditional_rag(query, vector_store, top_k=3):\n", + " \"\"\"\n", + " Implements a traditional RAG approach for comparison.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store containing document chunks\n", + " top_k (int): Number of documents to retrieve\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " print(f\"\\n=== Running traditional RAG for query: {query} ===\\n\")\n", + " \n", + " # Retrieve documents\n", + " print(\"Retrieving documents...\")\n", + " query_embedding = create_embeddings(query) # Create embeddings for the query\n", + " results = vector_store.similarity_search(query_embedding, k=top_k) # Search for similar documents\n", + " print(f\"Retrieved {len(results)} documents\")\n", + " \n", + " # Combine contexts from retrieved documents\n", + " contexts = [result[\"text\"] for result in results] # Extract text from results\n", + " combined_context = \"\\n\\n\".join(contexts) # Combine texts into a single context\n", + " \n", + " # Generate response using the combined context\n", + " print(\"Generating response...\")\n", + " response = generate_response(query, combined_context) # Generate response based on the combined context\n", + " \n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_rag_approaches(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Compare Self-RAG with traditional RAG.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the document\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): 
Reference answers for evaluation\n", + " \n", + " Returns:\n", + " dict: Evaluation results\n", + " \"\"\"\n", + " print(\"=== Evaluating RAG Approaches ===\")\n", + " \n", + " # Process document to create a vector store\n", + " vector_store = process_document(pdf_path)\n", + " \n", + " results = []\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\nProcessing query {i+1}: {query}\")\n", + " \n", + " # Run Self-RAG\n", + " self_rag_result = self_rag(query, vector_store) # Get response from Self-RAG\n", + " self_rag_response = self_rag_result[\"response\"]\n", + " \n", + " # Run traditional RAG\n", + " trad_rag_response = traditional_rag(query, vector_store) # Get response from traditional RAG\n", + " \n", + " # Compare results if reference answer is available\n", + " reference = reference_answers[i] if reference_answers and i < len(reference_answers) else None\n", + " comparison = compare_responses(query, self_rag_response, trad_rag_response, reference) # Compare responses\n", + " \n", + " results.append({\n", + " \"query\": query,\n", + " \"self_rag_response\": self_rag_response,\n", + " \"traditional_rag_response\": trad_rag_response,\n", + " \"reference_answer\": reference,\n", + " \"comparison\": comparison,\n", + " \"self_rag_metrics\": self_rag_result[\"metrics\"]\n", + " })\n", + " \n", + " # Generate overall analysis\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_responses(query, self_rag_response, trad_rag_response, reference=None):\n", + " \"\"\"\n", + " Compare responses from Self-RAG and traditional RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " self_rag_response (str): Response from Self-RAG\n", + " trad_rag_response (str): Response from traditional 
RAG\n", + " reference (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " system_prompt = \"\"\"You are an expert evaluator of RAG systems. Your task is to compare responses from two different RAG approaches:\n", + "1. Self-RAG: A dynamic approach that decides if retrieval is needed and evaluates information relevance and response quality\n", + "2. Traditional RAG: Always retrieves documents and uses them to generate a response\n", + "\n", + "Compare the responses based on:\n", + "- Relevance to the query\n", + "- Factual correctness\n", + "- Completeness and informativeness\n", + "- Conciseness and focus\"\"\"\n", + "\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + "Response from Self-RAG:\n", + "{self_rag_response}\n", + "\n", + "Response from Traditional RAG:\n", + "{trad_rag_response}\n", + "\"\"\"\n", + "\n", + " if reference:\n", + " user_prompt += f\"\"\"\n", + "Reference Answer (for factual checking):\n", + "{reference}\n", + "\"\"\"\n", + "\n", + " user_prompt += \"\"\"\n", + "Compare these responses and explain which one is better and why.\n", + "Focus on accuracy, relevance, completeness, and quality.\n", + "\"\"\"\n", + "\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Using a stronger model for evaluation\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of Self-RAG vs traditional RAG.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from evaluate_rag_approaches\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " 
\"\"\"\n", + " system_prompt = \"\"\"You are an expert evaluator of RAG systems. Your task is to provide an overall analysis comparing\n", + " Self-RAG and Traditional RAG based on multiple test queries.\n", + "\n", + " Focus your analysis on:\n", + " 1. When Self-RAG performs better and why\n", + " 2. When Traditional RAG performs better and why\n", + " 3. The impact of dynamic retrieval decisions in Self-RAG\n", + " 4. The value of relevance and support evaluation in Self-RAG\n", + " 5. Overall recommendations on which approach to use for different types of queries\"\"\"\n", + "\n", + " # Prepare a summary of the individual comparisons\n", + " comparisons_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " comparisons_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " comparisons_summary += f\"Self-RAG metrics: Retrieval needed: {result['self_rag_metrics']['retrieval_needed']}, \"\n", + " comparisons_summary += f\"Relevant docs: {result['self_rag_metrics']['relevant_documents']}/{result['self_rag_metrics']['documents_retrieved']}\\n\"\n", + " comparisons_summary += f\"Comparison summary: {result['comparison'][:200]}...\\n\\n\"\n", + "\n", + " user_prompt = f\"\"\"Based on the following comparison results from {len(results)} test queries, please provide an overall analysis of\n", + " Self-RAG versus Traditional RAG:\n", + "\n", + " {comparisons_summary}\n", + "\n", + " Please provide your comprehensive analysis.\n", + " \"\"\"\n", + "\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating the Self-RAG System\n", + "\n", + "The final step is to evaluate the Self-RAG system 
against traditional RAG approaches. We'll compare the quality of responses generated by both systems and analyze the performance of Self-RAG in different scenarios." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Evaluating RAG Approaches ===\n", + "Extracting text from PDF...\n", + "Chunking text...\n", + "Created 42 text chunks\n", + "Creating embeddings for chunks...\n", + "Added 42 chunks to the vector store\n", + "\n", + "Processing query 1: What are the main ethical concerns in AI development?\n", + "\n", + "=== Starting Self-RAG for query: What are the main ethical concerns in AI development? ===\n", + "\n", + "Step 1: Determining if retrieval is necessary...\n", + "Retrieval needed: True\n", + "\n", + "Step 2: Retrieving relevant documents...\n", + "Retrieved 3 documents\n", + "\n", + "Step 3: Evaluating document relevance...\n", + "Document 1 relevance: relevant\n", + "Document 2 relevance: relevant\n", + "Document 3 relevance: relevant\n", + "Found 3 relevant documents\n", + "\n", + "Step 4: Processing relevant contexts...\n", + "\n", + "Processing context 1/3...\n", + "Generating response...\n", + "Assessing support...\n", + "Support rating: fully supported\n", + "Rating utility...\n", + "Utility rating: 4/5\n", + "Overall score: 19\n", + "New best response found!\n", + "\n", + "Processing context 2/3...\n", + "Generating response...\n", + "Assessing support...\n", + "Support rating: partially supported\n", + "Rating utility...\n", + "Utility rating: 4/5\n", + "Overall score: 9\n", + "\n", + "Processing context 3/3...\n", + "Generating response...\n", + "Assessing support...\n", + "Support rating: fully supported\n", + "Rating utility...\n", + "Utility rating: 5/5\n", + "Overall score: 20\n", + "New best response found!\n", + "\n", + "=== Self-RAG Completed ===\n", + "\n", + "=== Running traditional RAG for query: What are the main ethical 
concerns in AI development? ===\n", + "\n", + "Retrieving documents...\n", + "Retrieved 3 documents\n", + "Generating response...\n", + "\n", + "=== OVERALL ANALYSIS ===\n", + "\n", + "**Overall Analysis: Self-RAG vs Traditional RAG**\n", + "\n", + "Based on the comparison results from the test query \"What are the main ethical concerns in AI development?\", I will provide a comprehensive analysis of the strengths and weaknesses of both Self-RAG and Traditional RAG systems.\n", + "\n", + "**When Self-RAG performs better:**\n", + "\n", + "1. **Dynamic retrieval decisions**: Self-RAG's ability to dynamically adjust its retrieval decisions based on the query context and user feedback can lead to better results in complex queries with multiple relevant documents. In the case of Query 1, Self-RAG's retrieval needed was True, indicating that it was able to identify the most relevant documents for the query. This suggests that Self-RAG's dynamic retrieval decisions were effective in this scenario.\n", + "2. **Relevance and support evaluation**: Self-RAG's evaluation of relevance and support can lead to more accurate and informative responses. In this case, Self-RAG's relevant docs were 3/3, indicating that it was able to identify the most relevant documents for the query. This suggests that Self-RAG's evaluation of relevance and support was effective in this scenario.\n", + "\n", + "**When Traditional RAG performs better:**\n", + "\n", + "1. **Simple queries**: Traditional RAG may perform better in simple queries with a single relevant document. In this case, the query \"What are the main ethical concerns in AI development?\" may have been too complex for Traditional RAG to handle effectively.\n", + "2. **Pre-defined ranking**: Traditional RAG's pre-defined ranking may be more effective in scenarios where the ranking of documents is not critical. 
In this case, the query \"What are the main ethical concerns in AI development?\" may not have required a highly ranked response.\n", + "\n", + "**The impact of dynamic retrieval decisions in Self-RAG:**\n", + "\n", + "Self-RAG's dynamic retrieval decisions can lead to better results in complex queries with multiple relevant documents. However, this may also lead to over-retrieval or under-retrieval of documents, depending on the query context and user feedback. To mitigate this, Self-RAG's dynamic retrieval decisions should be carefully tuned to ensure that the most relevant documents are retrieved.\n", + "\n", + "**The value of relevance and support evaluation in Self-RAG:**\n", + "\n", + "Self-RAG's evaluation of relevance and support is critical in ensuring that the retrieved documents are accurate and informative. By evaluating the relevance and support of each document, Self-RAG can provide more accurate and informative responses. However, this evaluation should be carefully tuned to ensure that the most relevant documents are retrieved.\n", + "\n", + "**Overall recommendations:**\n", + "\n", + "1. **Use Self-RAG for complex queries**: Self-RAG's dynamic retrieval decisions and evaluation of relevance and support make it a better choice for complex queries with multiple relevant documents.\n", + "2. **Use Traditional RAG for simple queries**: Traditional RAG's pre-defined ranking and simplicity make it a better choice for simple queries with a single relevant document.\n", + "3. **Tune Self-RAG's dynamic retrieval decisions**: Self-RAG's dynamic retrieval decisions should be carefully tuned to ensure that the most relevant documents are retrieved.\n", + "4. 
**Evaluate relevance and support in Self-RAG**: Self-RAG's evaluation of relevance and support is critical in ensuring that the retrieved documents are accurate and informative.\n", + "\n", + "In conclusion, Self-RAG and Traditional RAG have different strengths and weaknesses, and the choice of which system to use depends on the type of query and the desired outcome. By understanding the strengths and weaknesses of each system, we can make informed decisions about which system to use in different scenarios.\n" + ] + } + ], + "source": [ + "# Path to the AI information document\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define test queries covering different query types to test Self-RAG's adaptive retrieval\n", + "test_queries = [\n", + " \"What are the main ethical concerns in AI development?\", # Document-focused query\n", + " # \"How does explainable AI improve trust in AI systems?\", # Document-focused query\n", + " # \"Write a poem about artificial intelligence\", # Creative query, doesn't need retrieval\n", + " # \"Will superintelligent AI lead to human obsolescence?\" # Speculative query, partial retrieval needed\n", + "]\n", + "\n", + "# Reference answers for more objective evaluation\n", + "reference_answers = [\n", + " \"The main ethical concerns in AI development include bias and fairness, privacy, transparency, accountability, safety, and the potential for misuse or harmful applications.\",\n", + " # \"Explainable AI improves trust by making AI decision-making processes transparent and understandable to users, helping them verify fairness, identify potential biases, and better understand AI limitations.\",\n", + " # \"A quality poem about artificial intelligence should creatively explore themes of AI's capabilities, limitations, relationship with humanity, potential futures, or philosophical questions about consciousness and intelligence.\",\n", + " # \"Views on superintelligent AI's impact on human relevance vary widely. 
Some experts warn of potential risks if AI surpasses human capabilities across domains, possibly leading to economic displacement or loss of human agency. Others argue humans will remain relevant through complementary skills, emotional intelligence, and by defining AI's purpose. Most experts agree that thoughtful governance and human-centered design are essential regardless of the outcome.\"\n", + "]\n", + "\n", + "# Run the evaluation comparing Self-RAG with traditional RAG approaches\n", + "evaluation_results = evaluate_rag_approaches(\n", + " pdf_path=pdf_path, # Source document containing AI information\n", + " test_queries=test_queries, # List of AI-related test queries\n", + " reference_answers=reference_answers # Ground truth answers for evaluation\n", + ")\n", + "\n", + "# Print the overall comparative analysis\n", + "print(\"\\n=== OVERALL ANALYSIS ===\\n\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/14_proposition_chunking.ipynb b/14_proposition_chunking.ipynb new file mode 100644 index 0000000..6eac37a --- /dev/null +++ b/14_proposition_chunking.ipynb @@ -0,0 +1,1010 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Proposition Chunking for Enhanced RAG\n", + "\n", + "In this notebook, I implement proposition chunking - an advanced technique to break down documents into atomic, factual statements for more accurate retrieval. 
Unlike traditional chunking that simply divides text by character count, proposition chunking preserves the semantic integrity of individual facts.\n", + "\n", + "Proposition chunking delivers more precise retrieval by:\n", + "\n", + "1. Breaking content into atomic, self-contained facts\n", + "2. Creating smaller, more granular units for retrieval \n", + "3. Enabling more precise matching between queries and relevant content\n", + "4. Filtering out low-quality or incomplete propositions\n", + "\n", + "Let's build a complete implementation without relying on LangChain or FAISS." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Extracting Text from a PDF File\n", + "To implement RAG, we first need a source of textual data. In this case, we extract text from a PDF file using the PyMuPDF library." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extracts text from all pages of a PDF file.\n", + "\n", + " Args:\n", + " pdf_path (str): Path to the PDF file.\n", + "\n", + " Returns:\n", + " str: Extracted text from the PDF.\n", + " \"\"\"\n", + " # Open the PDF file\n", + " mypdf = fitz.open(pdf_path)\n", + " all_text = \"\" # Initialize an empty string to store the extracted text\n", + "\n", + " # Iterate through each page in the PDF\n", + " for page_num in range(mypdf.page_count):\n", + " page = mypdf[page_num] # Get the page\n", + " text = page.get_text(\"text\") # Extract text from the page\n", + " all_text += text # Append the extracted text to the all_text string\n", + "\n", + " return all_text # Return the extracted text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chunking the Extracted Text\n", + "Once we have the extracted text, we divide it into smaller, overlapping chunks to improve retrieval accuracy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, chunk_size=800, overlap=100):\n", + " \"\"\"\n", + " Split text into overlapping chunks.\n", + " \n", + " Args:\n", + " text (str): Input text to chunk\n", + " chunk_size (int): Size of each chunk in characters\n", + " overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: List of chunk dictionaries with text and metadata\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Iterate over the text with the specified chunk size and overlap\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " chunk = text[i:i + chunk_size] # Extract a chunk of the specified size\n", + " if chunk: # Ensure we don't add empty chunks\n", + " chunks.append({\n", + " \"text\": chunk, # The chunk text\n", + " \"chunk_id\": len(chunks) + 1, # Unique ID for the chunk\n", + " \"start_char\": i, # Starting character index of the chunk\n", + " \"end_char\": i + len(chunk) # Ending character index of the chunk\n", + " })\n", + " \n", + " print(f\"Created {len(chunks)} text chunks\") # Print the number of created chunks\n", + " return chunks # Return the list of chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation\n", + "We'll create a basic vector store to manage document chunks and their embeddings." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " # Initialize lists to store vectors, texts, and metadata\n", + " self.vectors = []\n", + " self.texts = []\n", + " self.metadata = []\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " text (str): The text content\n", + " embedding (List[float]): The embedding vector\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " # Append the embedding, text, and metadata to their respective lists\n", + " self.vectors.append(np.array(embedding))\n", + " self.texts.append(text)\n", + " self.metadata.append(metadata or {})\n", + " \n", + " def add_items(self, texts, embeddings, metadata_list=None):\n", + " \"\"\"\n", + " Add multiple items to the vector store.\n", + " \n", + " Args:\n", + " texts (List[str]): List of text contents\n", + " embeddings (List[List[float]]): List of embedding vectors\n", + " metadata_list (List[Dict], optional): List of metadata dictionaries\n", + " \"\"\"\n", + " # If no metadata list is provided, create an empty dictionary for each text\n", + " if metadata_list is None:\n", + " metadata_list = [{} for _ in 
range(len(texts))]\n", + " \n", + " # Add each text, embedding, and metadata to the store\n", + " for text, embedding, metadata in zip(texts, embeddings, metadata_list):\n", + " self.add_item(text, embedding, metadata)\n", + " \n", + " def similarity_search(self, query_embedding, k=5):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of results to return\n", + " \n", + " Returns:\n", + " List[Dict]: Top k most similar items\n", + " \"\"\"\n", + " # Return an empty list if there are no vectors in the store\n", + " if not self.vectors:\n", + " return []\n", + " \n", + " # Convert query embedding to a numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity))\n", + " \n", + " # Sort by similarity in descending order\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Collect the top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx],\n", + " \"metadata\": self.metadata[idx],\n", + " \"similarity\": float(score) # Convert to float for JSON serialization\n", + " })\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (str or List[str]): Input text(s)\n", + 
" model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vector(s)\n", + " \"\"\"\n", + " # Handle both string and list inputs\n", + " input_texts = texts if isinstance(texts, list) else [texts]\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(input_texts), batch_size):\n", + " batch = input_texts[i:i + batch_size] # Get the current batch of texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = [item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " # If input was a single string, return just the first embedding\n", + " if isinstance(texts, str):\n", + " return all_embeddings[0]\n", + " \n", + " # Otherwise, return all embeddings\n", + " return all_embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Proposition Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_propositions(chunk):\n", + " \"\"\"\n", + " Generate atomic, self-contained propositions from a text chunk.\n", + " \n", + " Args:\n", + " chunk (Dict): Text chunk with content and metadata\n", + " \n", + " Returns:\n", + " List[str]: List of generated propositions\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to generate propositions\n", + " system_prompt = \"\"\"Please break down the following text into simple, self-contained propositions. \n", + " Ensure that each proposition meets the following criteria:\n", + "\n", + " 1. 
Express a Single Fact: Each proposition should state one specific fact or claim.\n", + " 2. Be Understandable Without Context: The proposition should be self-contained, meaning it can be understood without needing additional context.\n", + " 3. Use Full Names, Not Pronouns: Avoid pronouns or ambiguous references; use full entity names.\n", + " 4. Include Relevant Dates/Qualifiers: If applicable, include necessary dates, times, and qualifiers to make the fact precise.\n", + " 5. Contain One Subject-Predicate Relationship: Focus on a single subject and its corresponding action or attribute, without conjunctions or multiple clauses.\n", + "\n", + " Output ONLY the list of propositions without any additional text or explanations.\"\"\"\n", + "\n", + " # User prompt containing the text chunk to be converted into propositions\n", + " user_prompt = f\"Text to convert into propositions:\\n\\n{chunk['text']}\"\n", + " \n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Using a stronger model for accurate proposition generation\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Extract propositions from the response\n", + " raw_propositions = response.choices[0].message.content.strip().split('\\n')\n", + " \n", + " # Clean up propositions (remove numbering, bullets, etc.)\n", + " clean_propositions = []\n", + " for prop in raw_propositions:\n", + " # Remove numbering (1., 2., etc.) 
and bullet points\n", + " cleaned = re.sub(r'^\\s*(\\d+\\.|\\-|\\*)\\s*', '', prop).strip()\n", + " if cleaned and len(cleaned) > 10: # Simple filter for empty or very short propositions\n", + " clean_propositions.append(cleaned)\n", + " \n", + " return clean_propositions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Quality Checking for Propositions" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_proposition(proposition, original_text):\n", + " \"\"\"\n", + " Evaluate a proposition's quality based on accuracy, clarity, completeness, and conciseness.\n", + " \n", + " Args:\n", + " proposition (str): The proposition to evaluate\n", + " original_text (str): The original text for comparison\n", + " \n", + " Returns:\n", + " Dict: Scores for each evaluation dimension\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to evaluate the proposition\n", + " system_prompt = \"\"\"You are an expert at evaluating the quality of propositions extracted from text.\n", + " Rate the given proposition on the following criteria (scale 1-10):\n", + "\n", + " - Accuracy: How well the proposition reflects information in the original text\n", + " - Clarity: How easy it is to understand the proposition without additional context\n", + " - Completeness: Whether the proposition includes necessary details (dates, qualifiers, etc.)\n", + " - Conciseness: Whether the proposition is concise without losing important information\n", + "\n", + " The response must be in valid JSON format with numerical scores for each criterion:\n", + " {\"accuracy\": X, \"clarity\": X, \"completeness\": X, \"conciseness\": X}\n", + " \"\"\"\n", + "\n", + " # User prompt containing the proposition and the original text\n", + " user_prompt = f\"\"\"Proposition: {proposition}\n", + "\n", + " Original Text: {original_text}\n", + "\n", + " Please provide your evaluation scores in JSON 
format.\"\"\"\n", + "\n", + " # Generate response from the model\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " response_format={\"type\": \"json_object\"},\n", + " temperature=0\n", + " )\n", + " \n", + " # Parse the JSON response\n", + " try:\n", + " scores = json.loads(response.choices[0].message.content.strip())\n", + " return scores\n", + " except json.JSONDecodeError:\n", + " # Fallback if JSON parsing fails\n", + " return {\n", + " \"accuracy\": 5,\n", + " \"clarity\": 5,\n", + " \"completeness\": 5,\n", + " \"conciseness\": 5\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Proposition Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document_into_propositions(pdf_path, chunk_size=800, chunk_overlap=100, \n", + " quality_thresholds=None):\n", + " \"\"\"\n", + " Process a document into quality-checked propositions.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " quality_thresholds (Dict): Threshold scores for proposition quality\n", + " \n", + " Returns:\n", + " Tuple[List[Dict], List[Dict]]: Original chunks and proposition chunks\n", + " \"\"\"\n", + " # Set default quality thresholds if not provided\n", + " if quality_thresholds is None:\n", + " quality_thresholds = {\n", + " \"accuracy\": 7,\n", + " \"clarity\": 7,\n", + " \"completeness\": 7,\n", + " \"conciseness\": 7\n", + " }\n", + " \n", + " # Extract text from the PDF file\n", + " text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Create chunks from the extracted text\n", + " chunks = 
chunk_text(text, chunk_size, chunk_overlap)\n", + " \n", + " # Initialize a list to store all propositions\n", + " all_propositions = []\n", + " \n", + " print(\"Generating propositions from chunks...\")\n", + " for i, chunk in enumerate(chunks):\n", + " print(f\"Processing chunk {i+1}/{len(chunks)}...\")\n", + " \n", + " # Generate propositions for the current chunk\n", + " chunk_propositions = generate_propositions(chunk)\n", + " print(f\"Generated {len(chunk_propositions)} propositions\")\n", + " \n", + " # Process each generated proposition\n", + " for prop in chunk_propositions:\n", + " proposition_data = {\n", + " \"text\": prop,\n", + " \"source_chunk_id\": chunk[\"chunk_id\"],\n", + " \"source_text\": chunk[\"text\"]\n", + " }\n", + " all_propositions.append(proposition_data)\n", + " \n", + " # Evaluate the quality of the generated propositions\n", + " print(\"\\nEvaluating proposition quality...\")\n", + " quality_propositions = []\n", + " \n", + " for i, prop in enumerate(all_propositions):\n", + " if i % 10 == 0: # Status update every 10 propositions\n", + " print(f\"Evaluating proposition {i+1}/{len(all_propositions)}...\")\n", + " \n", + " # Evaluate the quality of the current proposition\n", + " scores = evaluate_proposition(prop[\"text\"], prop[\"source_text\"])\n", + " prop[\"quality_scores\"] = scores\n", + " \n", + " # Check if the proposition passes the quality thresholds\n", + " passes_quality = True\n", + " for metric, threshold in quality_thresholds.items():\n", + " if scores.get(metric, 0) < threshold:\n", + " passes_quality = False\n", + " break\n", + " \n", + " if passes_quality:\n", + " quality_propositions.append(prop)\n", + " else:\n", + " print(f\"Proposition failed quality check: {prop['text'][:50]}...\")\n", + " \n", + " print(f\"\\nRetained {len(quality_propositions)}/{len(all_propositions)} propositions after quality filtering\")\n", + " \n", + " return chunks, quality_propositions" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## Building Vector Stores for Both Approaches" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def build_vector_stores(chunks, propositions):\n", + " \"\"\"\n", + " Build vector stores for both chunk-based and proposition-based approaches.\n", + " \n", + " Args:\n", + " chunks (List[Dict]): Original document chunks\n", + " propositions (List[Dict]): Quality-filtered propositions\n", + " \n", + " Returns:\n", + " Tuple[SimpleVectorStore, SimpleVectorStore]: Chunk and proposition vector stores\n", + " \"\"\"\n", + " # Create vector store for chunks\n", + " chunk_store = SimpleVectorStore()\n", + " \n", + " # Extract chunk texts and create embeddings\n", + " chunk_texts = [chunk[\"text\"] for chunk in chunks]\n", + " print(f\"Creating embeddings for {len(chunk_texts)} chunks...\")\n", + " chunk_embeddings = create_embeddings(chunk_texts)\n", + " \n", + " # Add chunks to vector store with metadata\n", + " chunk_metadata = [{\"chunk_id\": chunk[\"chunk_id\"], \"type\": \"chunk\"} for chunk in chunks]\n", + " chunk_store.add_items(chunk_texts, chunk_embeddings, chunk_metadata)\n", + " \n", + " # Create vector store for propositions\n", + " prop_store = SimpleVectorStore()\n", + " \n", + " # Extract proposition texts and create embeddings\n", + " prop_texts = [prop[\"text\"] for prop in propositions]\n", + " print(f\"Creating embeddings for {len(prop_texts)} propositions...\")\n", + " prop_embeddings = create_embeddings(prop_texts)\n", + " \n", + " # Add propositions to vector store with metadata\n", + " prop_metadata = [\n", + " {\n", + " \"type\": \"proposition\", \n", + " \"source_chunk_id\": prop[\"source_chunk_id\"],\n", + " \"quality_scores\": prop[\"quality_scores\"]\n", + " } \n", + " for prop in propositions\n", + " ]\n", + " prop_store.add_items(prop_texts, prop_embeddings, prop_metadata)\n", + " \n", + " return chunk_store, prop_store" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "## Query and Retrieval Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def retrieve_from_store(query, vector_store, k=5):\n", + " \"\"\"\n", + " Retrieve relevant items from a vector store based on query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store to search\n", + " k (int): Number of results to retrieve\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved items with scores and metadata\n", + " \"\"\"\n", + " # Create query embedding\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Search vector store for the top k most similar items\n", + " results = vector_store.similarity_search(query_embedding, k=k)\n", + " \n", + " return results" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_retrieval_approaches(query, chunk_store, prop_store, k=5):\n", + " \"\"\"\n", + " Compare chunk-based and proposition-based retrieval for a query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " chunk_store (SimpleVectorStore): Chunk-based vector store\n", + " prop_store (SimpleVectorStore): Proposition-based vector store\n", + " k (int): Number of results to retrieve from each store\n", + " \n", + " Returns:\n", + " Dict: Comparison results\n", + " \"\"\"\n", + " print(f\"\\n=== Query: {query} ===\")\n", + " \n", + " # Retrieve results from the proposition-based vector store\n", + " print(\"\\nRetrieving with proposition-based approach...\")\n", + " prop_results = retrieve_from_store(query, prop_store, k)\n", + " \n", + " # Retrieve results from the chunk-based vector store\n", + " print(\"Retrieving with chunk-based approach...\")\n", + " chunk_results = retrieve_from_store(query, chunk_store, k)\n", + " \n", + " # Display proposition-based results\n", + " print(\"\\n=== 
Proposition-Based Results ===\")\n", + " for i, result in enumerate(prop_results):\n", + " print(f\"{i+1}) {result['text']} (Score: {result['similarity']:.4f})\")\n", + " \n", + " # Display chunk-based results\n", + " print(\"\\n=== Chunk-Based Results ===\")\n", + " for i, result in enumerate(chunk_results):\n", + " # Truncate text to keep the output manageable\n", + " truncated_text = result['text'][:150] + \"...\" if len(result['text']) > 150 else result['text']\n", + " print(f\"{i+1}) {truncated_text} (Score: {result['similarity']:.4f})\")\n", + " \n", + " # Return the comparison results\n", + " return {\n", + " \"query\": query,\n", + " \"proposition_results\": prop_results,\n", + " \"chunk_results\": chunk_results\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation and Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, results, result_type=\"proposition\"):\n", + " \"\"\"\n", + " Generate a response based on retrieved results.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " results (List[Dict]): Retrieved items\n", + " result_type (str): Type of results ('proposition' or 'chunk')\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Combine retrieved texts into a single context string\n", + " context = \"\\n\\n\".join([result[\"text\"] for result in results])\n", + " \n", + " # System prompt to instruct the AI on how to generate the response\n", + " system_prompt = f\"\"\"You are an AI assistant answering questions based on retrieved information.\n", + "Your answer should be based on the following {result_type}s that were retrieved from a knowledge base.\n", + "If the retrieved information doesn't answer the question, acknowledge this limitation.\"\"\"\n", + "\n", + " # User prompt containing the query and the retrieved context\n", + " user_prompt = 
f\"\"\"Query: {query}\n", + "\n", + "Retrieved {result_type}s:\n", + "{context}\n", + "\n", + "Please answer the query based on the retrieved information.\"\"\"\n", + "\n", + " # Generate the response using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.2\n", + " )\n", + " \n", + " # Return the generated response text\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_responses(query, prop_response, chunk_response, reference_answer=None):\n", + " \"\"\"\n", + " Evaluate and compare responses from both approaches.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " prop_response (str): Response from proposition-based approach\n", + " chunk_response (str): Response from chunk-based approach\n", + " reference_answer (str, optional): Reference answer for comparison\n", + " \n", + " Returns:\n", + " str: Evaluation analysis\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to evaluate the responses\n", + " system_prompt = \"\"\"You are an expert evaluator of information retrieval systems. \n", + " Compare the two responses to the same query, one generated from proposition-based retrieval \n", + " and the other from chunk-based retrieval.\n", + "\n", + " Evaluate them based on:\n", + " 1. Accuracy: Which response provides more factually correct information?\n", + " 2. Relevance: Which response better addresses the specific query?\n", + " 3. Conciseness: Which response is more concise while maintaining completeness?\n", + " 4. 
Clarity: Which response is easier to understand?\n", + "\n", + " Be specific about the strengths and weaknesses of each approach.\"\"\"\n", + "\n", + " # User prompt containing the query and the responses to be compared\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + " Response from Proposition-Based Retrieval:\n", + " {prop_response}\n", + "\n", + " Response from Chunk-Based Retrieval:\n", + " {chunk_response}\"\"\"\n", + "\n", + " # If a reference answer is provided, include it in the user prompt for factual checking\n", + " if reference_answer:\n", + " user_prompt += f\"\"\"\n", + "\n", + " Reference Answer (for factual checking):\n", + " {reference_answer}\"\"\"\n", + "\n", + " # Add the final instruction to the user prompt\n", + " user_prompt += \"\"\"\n", + " Please provide a detailed comparison of these two responses, highlighting which approach performed better and why.\"\"\"\n", + "\n", + " # Generate the evaluation analysis using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Return the generated evaluation analysis\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete End-to-End Evaluation Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def run_proposition_chunking_evaluation(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Run a complete evaluation of proposition chunking vs standard chunking.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers for queries\n", + " 
\n", + " Returns:\n", + " Dict: Evaluation results\n", + " \"\"\"\n", + " print(\"=== Starting Proposition Chunking Evaluation ===\\n\")\n", + " \n", + " # Process document into propositions and chunks\n", + " chunks, propositions = process_document_into_propositions(pdf_path)\n", + " \n", + " # Build vector stores for chunks and propositions\n", + " chunk_store, prop_store = build_vector_stores(chunks, propositions)\n", + " \n", + " # Initialize a list to store results for each query\n", + " results = []\n", + " \n", + " # Run tests for each query\n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\n=== Testing Query {i+1}/{len(test_queries)} ===\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Get retrieval results from both chunk-based and proposition-based approaches\n", + " retrieval_results = compare_retrieval_approaches(query, chunk_store, prop_store)\n", + " \n", + " # Generate responses based on the retrieved proposition-based results\n", + " print(\"\\nGenerating response from proposition-based results...\")\n", + " prop_response = generate_response(\n", + " query, \n", + " retrieval_results[\"proposition_results\"], \n", + " \"proposition\"\n", + " )\n", + " \n", + " # Generate responses based on the retrieved chunk-based results\n", + " print(\"Generating response from chunk-based results...\")\n", + " chunk_response = generate_response(\n", + " query, \n", + " retrieval_results[\"chunk_results\"], \n", + " \"chunk\"\n", + " )\n", + " \n", + " # Get reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " \n", + " # Evaluate the generated responses\n", + " print(\"\\nEvaluating responses...\")\n", + " evaluation = evaluate_responses(query, prop_response, chunk_response, reference)\n", + " \n", + " # Compile results for the current query\n", + " query_result = {\n", + " \"query\": query,\n", + " \"proposition_results\": 
retrieval_results[\"proposition_results\"],\n", + " \"chunk_results\": retrieval_results[\"chunk_results\"],\n", + " \"proposition_response\": prop_response,\n", + " \"chunk_response\": chunk_response,\n", + " \"reference_answer\": reference,\n", + " \"evaluation\": evaluation\n", + " }\n", + " \n", + " # Append the results to the overall results list\n", + " results.append(query_result)\n", + " \n", + " # Print the responses and evaluation for the current query\n", + " print(\"\\n=== Proposition-Based Response ===\")\n", + " print(prop_response)\n", + " \n", + " print(\"\\n=== Chunk-Based Response ===\")\n", + " print(chunk_response)\n", + " \n", + " print(\"\\n=== Evaluation ===\")\n", + " print(evaluation)\n", + " \n", + " # Generate overall analysis of the evaluation\n", + " print(\"\\n\\n=== Generating Overall Analysis ===\")\n", + " overall_analysis = generate_overall_analysis(results)\n", + " print(\"\\n\" + overall_analysis)\n", + " \n", + " # Return the evaluation results, overall analysis, and counts of propositions and chunks\n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis,\n", + " \"proposition_count\": len(propositions),\n", + " \"chunk_count\": len(chunks)\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of proposition vs chunk approaches.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from each test query\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " # System prompt to instruct the AI on how to generate the overall analysis\n", + " system_prompt = \"\"\"You are an expert at evaluating information retrieval systems.\n", + " Based on multiple test queries, provide an overall analysis comparing proposition-based retrieval \n", + " to chunk-based retrieval for RAG (Retrieval-Augmented Generation) 
systems.\n", + "\n", + " Focus on:\n", + " 1. When proposition-based retrieval performs better\n", + " 2. When chunk-based retrieval performs better\n", + " 3. The overall strengths and weaknesses of each approach\n", + " 4. Recommendations for when to use each approach\"\"\"\n", + "\n", + " # Create a summary of evaluations for each query\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " evaluations_summary += f\"Evaluation Summary: {result['evaluation'][:200]}...\\n\\n\"\n", + "\n", + " # User prompt containing the summary of evaluations\n", + " user_prompt = f\"\"\"Based on the following evaluations of proposition-based vs chunk-based retrieval across {len(results)} queries, \n", + " provide an overall analysis comparing these two approaches:\n", + "\n", + " {evaluations_summary}\n", + "\n", + " Please provide a comprehensive analysis on the relative strengths and weaknesses of proposition-based \n", + " and chunk-based retrieval for RAG systems.\"\"\"\n", + "\n", + " # Generate the overall analysis using the OpenAI client\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Return the generated analysis text\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of Proposition Chunking" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Path to the AI information document that will be processed\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define test queries covering different aspects of AI to evaluate proposition chunking\n", + "test_queries = 
[\n", + " \"What are the main ethical concerns in AI development?\",\n", + " # \"How does explainable AI improve trust in AI systems?\",\n", + " # \"What are the key challenges in developing fair AI systems?\",\n", + " # \"What role does human oversight play in AI safety?\"\n", + "]\n", + "\n", + "# Reference answers for more thorough evaluation and comparison of results\n", + "# These provide a ground truth to measure the quality of generated responses\n", + "reference_answers = [\n", + " \"The main ethical concerns in AI development include bias and fairness, privacy, transparency, accountability, safety, and the potential for misuse or harmful applications.\",\n", + " # \"Explainable AI improves trust by making AI decision-making processes transparent and understandable to users, helping them verify fairness, identify potential biases, and better understand AI limitations.\",\n", + " # \"Key challenges in developing fair AI systems include addressing data bias, ensuring diverse representation in training data, creating transparent algorithms, defining fairness across different contexts, and balancing competing fairness criteria.\",\n", + " # \"Human oversight plays a critical role in AI safety by monitoring system behavior, verifying outputs, intervening when necessary, setting ethical boundaries, and ensuring AI systems remain aligned with human values and intentions throughout their operation.\"\n", + "]\n", + "\n", + "# Run the evaluation\n", + "evaluation_results = run_proposition_chunking_evaluation(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers\n", + ")\n", + "\n", + "# Print the overall analysis\n", + "print(\"\\n\\n=== Overall Analysis ===\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": 
"ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/15_multimodel_rag.ipynb b/15_multimodel_rag.ipynb new file mode 100644 index 0000000..71a1e98 --- /dev/null +++ b/15_multimodel_rag.ipynb @@ -0,0 +1,1075 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Multi-Modal RAG with Image Captioning\n", + "\n", + "In this notebook, I implement a Multi-Modal RAG system that extracts both text and images from documents, generates captions for images, and uses both content types to respond to queries. This approach enhances traditional RAG by incorporating visual information into the knowledge base.\n", + "\n", + "Traditional RAG systems only work with text, but many documents contain crucial information in images, charts, and tables. By captioning these visual elements and incorporating them into our retrieval system, we can:\n", + "\n", + "- Access information locked in figures and diagrams\n", + "- Understand tables and charts that complement the text\n", + "- Create a more comprehensive knowledge base\n", + "- Answer questions that rely on visual data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import io\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from PIL import Image\n", + "from openai import OpenAI\n", + "import base64\n", + "import re\n", + "import tempfile\n", + "import shutil" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_content_from_pdf(pdf_path, output_dir=None):\n", + " \"\"\"\n", + " Extract both text and images from a PDF file.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " output_dir (str, optional): Directory to save extracted images\n", + " \n", + " Returns:\n", + " Tuple[List[Dict], List[Dict]]: Text data and image data\n", + " \"\"\"\n", + " # Create a temporary directory for images if not provided\n", + " temp_dir = None\n", + " if output_dir is None:\n", + " temp_dir = tempfile.mkdtemp()\n", + " output_dir = temp_dir\n", + " else:\n", + " os.makedirs(output_dir, exist_ok=True)\n", + " \n", + " text_data = [] # List to store extracted text data\n", + " image_paths = [] # List to store paths of extracted images\n", + " \n", + " print(f\"Extracting content from {pdf_path}...\")\n", + " \n", + " try:\n", + " with fitz.open(pdf_path) as 
pdf_file:\n", + " # Loop through every page in the PDF\n", + " for page_number in range(len(pdf_file)):\n", + " page = pdf_file[page_number]\n", + " \n", + " # Extract text from the page\n", + " text = page.get_text().strip()\n", + " if text:\n", + " text_data.append({\n", + " \"content\": text,\n", + " \"metadata\": {\n", + " \"source\": pdf_path,\n", + " \"page\": page_number + 1,\n", + " \"type\": \"text\"\n", + " }\n", + " })\n", + " \n", + " # Extract images from the page\n", + " image_list = page.get_images(full=True)\n", + " for img_index, img in enumerate(image_list):\n", + " xref = img[0] # XREF of the image\n", + " base_image = pdf_file.extract_image(xref)\n", + " \n", + " if base_image:\n", + " image_bytes = base_image[\"image\"]\n", + " image_ext = base_image[\"ext\"]\n", + " \n", + " # Save the image to the output directory\n", + " img_filename = f\"page_{page_number+1}_img_{img_index+1}.{image_ext}\"\n", + " img_path = os.path.join(output_dir, img_filename)\n", + " \n", + " with open(img_path, \"wb\") as img_file:\n", + " img_file.write(image_bytes)\n", + " \n", + " image_paths.append({\n", + " \"path\": img_path,\n", + " \"metadata\": {\n", + " \"source\": pdf_path,\n", + " \"page\": page_number + 1,\n", + " \"image_index\": img_index + 1,\n", + " \"type\": \"image\"\n", + " }\n", + " })\n", + " \n", + " print(f\"Extracted {len(text_data)} text segments and {len(image_paths)} images\")\n", + " return text_data, image_paths\n", + " \n", + " except Exception as e:\n", + " print(f\"Error extracting content: {e}\")\n", + " if temp_dir and os.path.exists(temp_dir):\n", + " shutil.rmtree(temp_dir)\n", + " raise" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chunking Text Content" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text_data, chunk_size=1000, overlap=200):\n", + " \"\"\"\n", + " Split text data into overlapping chunks.\n", + " \n", + " 
Args:\n", + " text_data (List[Dict]): Text data extracted from PDF\n", + " chunk_size (int): Size of each chunk in characters\n", + " overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: Chunked text data\n", + " \"\"\"\n", + " chunked_data = [] # Initialize an empty list to store chunked data\n", + " \n", + " for item in text_data:\n", + " text = item[\"content\"] # Extract the text content\n", + " metadata = item[\"metadata\"] # Extract the metadata\n", + " \n", + " # Skip if text is too short\n", + " if len(text) < chunk_size / 2:\n", + " chunked_data.append({\n", + " \"content\": text,\n", + " \"metadata\": metadata\n", + " })\n", + " continue\n", + " \n", + " # Create chunks with overlap\n", + " chunks = []\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " chunk = text[i:i + chunk_size] # Extract a chunk of the specified size\n", + " if chunk: # Ensure we don't add empty chunks\n", + " chunks.append(chunk)\n", + " \n", + " # Add each chunk with updated metadata\n", + " for i, chunk in enumerate(chunks):\n", + " chunk_metadata = metadata.copy() # Copy the original metadata\n", + " chunk_metadata[\"chunk_index\"] = i # Add chunk index to metadata\n", + " chunk_metadata[\"chunk_count\"] = len(chunks) # Add total chunk count to metadata\n", + " \n", + " chunked_data.append({\n", + " \"content\": chunk, # The chunk text\n", + " \"metadata\": chunk_metadata # The updated metadata\n", + " })\n", + " \n", + " print(f\"Created {len(chunked_data)} text chunks\") # Print the number of created chunks\n", + " return chunked_data # Return the list of chunked data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Image Captioning with OpenAI Vision" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def encode_image(image_path):\n", + " \"\"\"\n", + " Encode an image file as base64.\n", + " \n", + " Args:\n", + " image_path 
(str): Path to the image file\n", + " \n", + " Returns:\n", + " str: Base64 encoded image\n", + " \"\"\"\n", + " # Open the image file in binary read mode\n", + " with open(image_path, \"rb\") as image_file:\n", + " # Read the image file and encode it to base64\n", + " encoded_image = base64.b64encode(image_file.read())\n", + " # Decode the base64 bytes to a string and return\n", + " return encoded_image.decode('utf-8')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_image_caption(image_path):\n", + " \"\"\"\n", + " Generate a caption for an image using OpenAI's vision capabilities.\n", + " \n", + " Args:\n", + " image_path (str): Path to the image file\n", + " \n", + " Returns:\n", + " str: Generated caption\n", + " \"\"\"\n", + " # Check if the file exists and is an image\n", + " if not os.path.exists(image_path):\n", + " return \"Error: Image file not found\"\n", + " \n", + " try:\n", + " # Open and validate the image\n", + " Image.open(image_path)\n", + " \n", + " # Encode the image to base64\n", + " base64_image = encode_image(image_path)\n", + " \n", + " # Create the API request to generate the caption\n", + " response = client.chat.completions.create(\n", + " model=\"llava-hf/llava-1.5-7b-hf\", # Use the llava-1.5-7b model\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are an assistant specialized in describing images from academic papers. \"\n", + " \"Provide detailed captions for the image that capture key information. \"\n", + " \"If the image contains charts, tables, or diagrams, describe their content and purpose clearly. 
\"\n", + " \"Your caption should be optimized for future retrieval when people ask questions about this content.\"\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\"type\": \"text\", \"text\": \"Describe this image in detail, focusing on its academic content:\"},\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\n", + " \"url\": f\"data:image/jpeg;base64,{base64_image}\"\n", + " }\n", + " }\n", + " ]\n", + " }\n", + " ],\n", + " max_tokens=300\n", + " )\n", + " \n", + " # Extract the caption from the response\n", + " caption = response.choices[0].message.content\n", + " return caption\n", + " \n", + " except Exception as e:\n", + " # Return an error message if an exception occurs\n", + " return f\"Error generating caption: {str(e)}\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def process_images(image_paths):\n", + " \"\"\"\n", + " Process all images and generate captions.\n", + " \n", + " Args:\n", + " image_paths (List[Dict]): Paths to extracted images\n", + " \n", + " Returns:\n", + " List[Dict]: Image data with captions\n", + " \"\"\"\n", + " image_data = [] # Initialize an empty list to store image data with captions\n", + " \n", + " print(f\"Generating captions for {len(image_paths)} images...\") # Print the number of images to process\n", + " for i, img_item in enumerate(image_paths):\n", + " print(f\"Processing image {i+1}/{len(image_paths)}...\") # Print the current image being processed\n", + " img_path = img_item[\"path\"] # Get the image path\n", + " metadata = img_item[\"metadata\"] # Get the image metadata\n", + " \n", + " # Generate caption for the image\n", + " caption = generate_image_caption(img_path)\n", + " \n", + " # Add the image data with caption to the list\n", + " image_data.append({\n", + " \"content\": caption, # The generated caption\n", + " \"metadata\": metadata, # The image metadata\n", + " \"image_path\": img_path # 
The path to the image\n", + " })\n", + " \n", + " return image_data # Return the list of image data with captions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "class MultiModalVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation for multi-modal content.\n", + " \"\"\"\n", + " def __init__(self):\n", + " # Initialize lists to store vectors, contents, and metadata\n", + " self.vectors = []\n", + " self.contents = []\n", + " self.metadata = []\n", + " \n", + " def add_item(self, content, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " content (str): The content (text or image caption)\n", + " embedding (List[float]): The embedding vector\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " # Append the embedding vector, content, and metadata to their respective lists\n", + " self.vectors.append(np.array(embedding))\n", + " self.contents.append(content)\n", + " self.metadata.append(metadata or {})\n", + " \n", + " def add_items(self, items, embeddings):\n", + " \"\"\"\n", + " Add multiple items to the vector store.\n", + " \n", + " Args:\n", + " items (List[Dict]): List of content items\n", + " embeddings (List[List[float]]): List of embedding vectors\n", + " \"\"\"\n", + " # Loop through items and embeddings and add each to the vector store\n", + " for item, embedding in zip(items, embeddings):\n", + " self.add_item(\n", + " content=item[\"content\"],\n", + " embedding=embedding,\n", + " metadata=item.get(\"metadata\", {})\n", + " )\n", + " \n", + " def similarity_search(self, query_embedding, k=5):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of 
results to return\n", + " \n", + " Returns:\n", + " List[Dict]: Top k most similar items\n", + " \"\"\"\n", + " # Return an empty list if there are no vectors in the store\n", + " if not self.vectors:\n", + " return []\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity))\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"content\": self.contents[idx],\n", + " \"metadata\": self.metadata[idx],\n", + " \"similarity\": float(score) # Convert to float for JSON serialization\n", + " })\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (List[str]): Input texts\n", + " model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vectors\n", + " \"\"\"\n", + " # Handle empty input\n", + " if not texts:\n", + " return []\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(texts), batch_size):\n", + " batch = texts[i:i + batch_size] # Get the current batch of 
texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = [item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " return all_embeddings # Return all embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for multi-modal RAG.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " Tuple[MultiModalVectorStore, Dict]: Vector store and document info\n", + " \"\"\"\n", + " # Create a directory for extracted images\n", + " image_dir = \"extracted_images\"\n", + " os.makedirs(image_dir, exist_ok=True)\n", + " \n", + " # Extract text and images from the PDF\n", + " text_data, image_paths = extract_content_from_pdf(pdf_path, image_dir)\n", + " \n", + " # Chunk the extracted text\n", + " chunked_text = chunk_text(text_data, chunk_size, chunk_overlap)\n", + " \n", + " # Process the extracted images to generate captions\n", + " image_data = process_images(image_paths)\n", + " \n", + " # Combine all content items (text chunks and image captions)\n", + " all_items = chunked_text + image_data\n", + " \n", + " # Extract content for embedding\n", + " contents = [item[\"content\"] for item in all_items]\n", + " \n", + " # Create embeddings for all content\n", + " print(\"Creating embeddings for all content...\")\n", + " embeddings = 
create_embeddings(contents)\n", + " \n", + " # Build the vector store and add items with their embeddings\n", + " vector_store = MultiModalVectorStore()\n", + " vector_store.add_items(all_items, embeddings)\n", + " \n", + " # Prepare document info with counts of text chunks and image captions\n", + " doc_info = {\n", + " \"text_count\": len(chunked_text),\n", + " \"image_count\": len(image_data),\n", + " \"total_items\": len(all_items),\n", + " }\n", + " \n", + " # Print summary of added items\n", + " print(f\"Added {len(all_items)} items to vector store ({len(chunked_text)} text chunks, {len(image_data)} image captions)\")\n", + " \n", + " # Return the vector store and document info\n", + " return vector_store, doc_info" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Query Processing and Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def query_multimodal_rag(query, vector_store, k=5):\n", + " \"\"\"\n", + " Query the multi-modal RAG system.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (MultiModalVectorStore): Vector store with document content\n", + " k (int): Number of results to retrieve\n", + " \n", + " Returns:\n", + " Dict: Query results and generated response\n", + " \"\"\"\n", + " print(f\"\\n=== Processing query: {query} ===\\n\")\n", + " \n", + " # Generate embedding for the query\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Retrieve relevant content from the vector store\n", + " results = vector_store.similarity_search(query_embedding, k=k)\n", + " \n", + " # Separate text and image results\n", + " text_results = [r for r in results if r[\"metadata\"].get(\"type\") == \"text\"]\n", + " image_results = [r for r in results if r[\"metadata\"].get(\"type\") == \"image\"]\n", + " \n", + " print(f\"Retrieved {len(results)} relevant items ({len(text_results)} text, {len(image_results)} 
image captions)\")\n", + " \n", + " # Generate a response using the retrieved content\n", + " response = generate_response(query, results)\n", + " \n", + " return {\n", + " \"query\": query,\n", + " \"results\": results,\n", + " \"response\": response,\n", + " \"text_results_count\": len(text_results),\n", + " \"image_results_count\": len(image_results)\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "def generate_response(query, results):\n", + " \"\"\"\n", + " Generate a response based on the query and retrieved results.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " results (List[Dict]): Retrieved content\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Format the context from the retrieved results\n", + " context = \"\"\n", + " \n", + " for i, result in enumerate(results):\n", + " # Determine the type of content (text or image caption)\n", + " content_type = \"Text\" if result[\"metadata\"].get(\"type\") == \"text\" else \"Image caption\"\n", + " # Get the page number from the metadata\n", + " page_num = result[\"metadata\"].get(\"page\", \"unknown\")\n", + " \n", + " # Append the content type and page number to the context\n", + " context += f\"[{content_type} from page {page_num}]\\n\"\n", + " # Append the actual content to the context\n", + " context += result[\"content\"]\n", + " context += \"\\n\\n\"\n", + " \n", + " # System message to guide the AI assistant\n", + " system_message = \"\"\"You are an AI assistant specializing in answering questions about documents \n", + " that contain both text and images. You have been given relevant text passages and image captions \n", + " from the document. 
Use this information to provide a comprehensive, accurate response to the query.\n", + " If information comes from an image or chart, mention this in your answer.\n", + " If the retrieved information doesn't fully answer the query, acknowledge the limitations.\"\"\"\n", + "\n", + " # User message containing the query and the formatted context\n", + " user_message = f\"\"\"Query: {query}\n", + "\n", + " Retrieved content:\n", + " {context}\n", + "\n", + " Please answer the query based on the retrieved content.\n", + " \"\"\"\n", + " \n", + " # Generate the response using the OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_message}\n", + " ],\n", + " temperature=0.1\n", + " )\n", + " \n", + " # Return the generated response\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Against Text-Only RAG" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def build_text_only_store(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Build a text-only vector store for comparison.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " MultiModalVectorStore: Text-only vector store\n", + " \"\"\"\n", + " # Extract text from PDF (reuse function but ignore images)\n", + " text_data, _ = extract_content_from_pdf(pdf_path, None)\n", + " \n", + " # Chunk text\n", + " chunked_text = chunk_text(text_data, chunk_size, chunk_overlap)\n", + " \n", + " # Extract content for embedding\n", + " contents = [item[\"content\"] for item in chunked_text]\n", + " \n", + " # 
Create embeddings\n", + " print(\"Creating embeddings for text-only content...\")\n", + " embeddings = create_embeddings(contents)\n", + " \n", + " # Build vector store\n", + " vector_store = MultiModalVectorStore()\n", + " vector_store.add_items(chunked_text, embeddings)\n", + " \n", + " print(f\"Added {len(chunked_text)} text items to text-only vector store\")\n", + " return vector_store" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_multimodal_vs_textonly(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Compare multi-modal RAG with text-only RAG.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " test_queries (List[str]): Test queries\n", + " reference_answers (List[str], optional): Reference answers\n", + " \n", + " Returns:\n", + " Dict: Evaluation results\n", + " \"\"\"\n", + " print(\"=== EVALUATING MULTI-MODAL RAG VS TEXT-ONLY RAG ===\\n\")\n", + " \n", + " # Process document for multi-modal RAG\n", + " print(\"\\nProcessing document for multi-modal RAG...\")\n", + " mm_vector_store, mm_doc_info = process_document(pdf_path)\n", + " \n", + " # Build text-only store\n", + " print(\"\\nProcessing document for text-only RAG...\")\n", + " text_vector_store = build_text_only_store(pdf_path)\n", + " \n", + " # Run evaluation for each query\n", + " results = []\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\n=== Evaluating Query {i+1}: {query} ===\")\n", + " \n", + " # Get reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " \n", + " # Run multi-modal RAG\n", + " print(\"\\nRunning multi-modal RAG...\")\n", + " mm_result = query_multimodal_rag(query, mm_vector_store)\n", + " \n", + " # Run text-only RAG\n", + " print(\"\\nRunning text-only RAG...\")\n", + " text_result = 
query_multimodal_rag(query, text_vector_store)\n", + " \n", + " # Compare responses\n", + " comparison = compare_responses(query, mm_result[\"response\"], text_result[\"response\"], reference)\n", + " \n", + " # Add to results\n", + " results.append({\n", + " \"query\": query,\n", + " \"multimodal_response\": mm_result[\"response\"],\n", + " \"textonly_response\": text_result[\"response\"],\n", + " \"multimodal_results\": {\n", + " \"text_count\": mm_result[\"text_results_count\"],\n", + " \"image_count\": mm_result[\"image_results_count\"]\n", + " },\n", + " \"reference_answer\": reference,\n", + " \"comparison\": comparison\n", + " })\n", + " \n", + " # Generate overall analysis\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis,\n", + " \"multimodal_doc_info\": mm_doc_info\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_responses(query, mm_response, text_response, reference=None):\n", + " \"\"\"\n", + " Compare multi-modal and text-only responses.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " mm_response (str): Multi-modal response\n", + " text_response (str): Text-only response\n", + " reference (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " # System prompt for the evaluator\n", + " system_prompt = \"\"\"You are an expert evaluator comparing two RAG systems:\n", + " 1. Multi-modal RAG: Retrieves from both text and image captions\n", + " 2. 
Text-only RAG: Retrieves only from text\n", + "\n", + " Evaluate which response better answers the query based on:\n", + " - Accuracy and correctness\n", + " - Completeness of information\n", + " - Relevance to the query\n", + " - Unique information from visual elements (for multi-modal)\"\"\"\n", + "\n", + " # User prompt with query and responses\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + " Multi-modal RAG Response:\n", + " {mm_response}\n", + "\n", + " Text-only RAG Response:\n", + " {text_response}\n", + " \"\"\"\n", + "\n", + " if reference:\n", + " user_prompt += f\"\"\"\n", + " Reference Answer:\n", + " {reference}\n", + " \"\"\"\n", + "\n", + " user_prompt += \"\"\"\n", + " Compare these responses and explain which one better answers the query and why.\n", + " Note any specific information that came from images in the multi-modal response.\n", + " \"\"\"\n", + "\n", + " # Generate comparison using meta-llama/Llama-3.2-3B-Instruct\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of multi-modal vs text-only RAG.\n", + " \n", + " Args:\n", + " results (List[Dict]): Evaluation results for each query\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " # System prompt for the evaluator\n", + " system_prompt = \"\"\"You are an expert evaluator of RAG systems. Provide an overall analysis comparing \n", + " multi-modal RAG (text + images) versus text-only RAG based on multiple test queries.\n", + "\n", + " Focus on:\n", + " 1. 
Types of queries where multi-modal RAG outperforms text-only\n", + " 2. Specific advantages of incorporating image information\n", + " 3. Any disadvantages or limitations of the multi-modal approach\n", + " 4. Overall recommendation on when to use each approach\"\"\"\n", + "\n", + " # Create summary of evaluations\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " evaluations_summary += f\"Multi-modal retrieved {result['multimodal_results']['text_count']} text chunks and {result['multimodal_results']['image_count']} image captions\\n\"\n", + " evaluations_summary += f\"Comparison summary: {result['comparison'][:200]}...\\n\\n\"\n", + "\n", + " # User prompt with evaluations summary\n", + " user_prompt = f\"\"\"Based on the following evaluations of multi-modal vs text-only RAG across {len(results)} queries, \n", + " provide an overall analysis comparing these two approaches:\n", + "\n", + " {evaluations_summary}\n", + "\n", + " Please provide a comprehensive analysis of the relative strengths and weaknesses of multi-modal RAG \n", + " compared to text-only RAG, with specific attention to how image information contributed (or didn't contribute) to response quality.\"\"\"\n", + "\n", + " # Generate overall analysis using meta-llama/Llama-3.2-3B-Instruct\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation on Multi-Modal RAG vs Text-Only RAG" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": 
[ + "=== EVALUATING MULTI-MODAL RAG VS TEXT-ONLY RAG ===\n", + "\n", + "\n", + "Processing document for multi-modal RAG...\n", + "Extracting content from data/attention_is_all_you_need.pdf...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Extracted 15 text segments and 3 images\n", + "Created 59 text chunks\n", + "Generating captions for 3 images...\n", + "Processing image 1/3...\n", + "Processing image 2/3...\n", + "Processing image 3/3...\n", + "Creating embeddings for all content...\n", + "Added 62 items to vector store (59 text chunks, 3 image captions)\n", + "\n", + "Processing document for text-only RAG...\n", + "Extracting content from data/attention_is_all_you_need.pdf...\n", + "Extracted 15 text segments and 3 images\n", + "Created 59 text chunks\n", + "Creating embeddings for text-only content...\n", + "Added 59 text items to text-only vector store\n", + "\n", + "\n", + "=== Evaluating Query 1: What is the BLEU score of the Transformer (base model)? ===\n", + "\n", + "Running multi-modal RAG...\n", + "\n", + "=== Processing query: What is the BLEU score of the Transformer (base model)? ===\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\faree\\AppData\\Local\\Temp\\ipykernel_14692\\2117883450.py:75: DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.)\n", + " \"similarity\": float(score) # Convert to float for JSON serialization\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Retrieved 5 relevant items (5 text, 0 image captions)\n", + "\n", + "Running text-only RAG...\n", + "\n", + "=== Processing query: What is the BLEU score of the Transformer (base model)? 
===\n", + "\n", + "Retrieved 5 relevant items (5 text, 0 image captions)\n", + "\n", + "=== OVERALL ANALYSIS ===\n", + "\n", + "**Overall Analysis: Multi-Modal RAG vs Text-Only RAG**\n", + "\n", + "Our analysis compares the performance of multi-modal RAG (text + images) and text-only RAG across multiple test queries. We evaluate the strengths and weaknesses of each approach, focusing on the types of queries where multi-modal RAG outperforms text-only, the advantages of incorporating image information, and the limitations of the multi-modal approach.\n", + "\n", + "**Advantages of Multi-Modal RAG**\n", + "\n", + "1. **Improved Contextual Understanding**: Multi-modal RAG can leverage both text and image information to better understand the context of a query. This can lead to more accurate and informative responses, especially when the query requires a deeper understanding of the topic.\n", + "2. **Enhanced Visual Cues**: Images can provide visual cues that can help disambiguate ambiguous queries or provide additional context that is not explicitly stated in the text. For example, in Query 1, the image of the Transformer model could provide visual confirmation of the query, which was not present in the text-only response.\n", + "3. **Increased Retrieval Precision**: Multi-modal RAG can retrieve more relevant text chunks and image captions, leading to more accurate and precise responses. In Query 1, the multi-modal RAG retrieved 5 text chunks and 0 image captions, which may not have been sufficient to provide a clear answer.\n", + "\n", + "**Disadvantages of Multi-Modal RAG**\n", + "\n", + "1. **Increased Complexity**: Multi-modal RAG requires more complex processing and retrieval mechanisms, which can increase the computational cost and make the system more difficult to train and deploy.\n", + "2. **Image Quality and Relevance**: The quality and relevance of the image captions can significantly impact the performance of the multi-modal RAG. 
Poor-quality or irrelevant images can lead to suboptimal responses.\n", + "3. **Overreliance on Image Information**: If the image information is not relevant or accurate, the multi-modal RAG may rely too heavily on it, leading to suboptimal responses.\n", + "\n", + "**Types of Queries where Multi-Modal RAG Outperforms Text-Only**\n", + "\n", + "1. **Visual-Spatial Queries**: Multi-modal RAG can perform better on visual-spatial queries that require a deeper understanding of the visual context, such as Query 1 (What is the BLEU score of the Transformer (base model)?).\n", + "2. **Ambiguous Queries**: Multi-modal RAG can help disambiguate ambiguous queries by leveraging both text and image information, leading to more accurate and informative responses.\n", + "3. **Multi-Modal Queries**: Multi-modal RAG can perform better on multi-modal queries that require the integration of both text and image information, such as Query 1.\n", + "\n", + "**Specific Advantages of Incorporating Image Information**\n", + "\n", + "1. **Visual Confirmation**: Images can provide visual confirmation of the query, which can help disambiguate ambiguous queries or provide additional context that is not explicitly stated in the text.\n", + "2. **Visual Cues**: Images can provide visual cues that can help the model understand the context of the query, leading to more accurate and informative responses.\n", + "3. **Multimodal Fusion**: Images can be fused with text information to provide a more comprehensive understanding of the query, leading to more accurate and informative responses.\n", + "\n", + "**Overall Recommendation**\n", + "\n", + "1. **Use Text-Only RAG for Simple Queries**: Text-only RAG is sufficient for simple queries that do not require a deep understanding of the context or visual information.\n", + "2. 
**Use Multi-Modal RAG for Complex Queries**: Multi-modal RAG is recommended for complex queries that require a deeper understanding of the context, visual information, or both.\n", + "3. **Use Multi-Modal RAG for Ambiguous Queries**: Multi-modal RAG can help disambiguate ambiguous queries by leveraging both text and image information, leading to more accurate and informative responses.\n", + "\n", + "In conclusion, multi-modal RAG can outperform text-only RAG in certain types of queries, particularly those that require a deeper understanding of the context, visual information, or both. However, the multi-modal approach also has its limitations, such as increased complexity and the potential for overreliance on image information. The choice of approach depends on the specific use case and the type of queries being addressed.\n" + ] + } + ], + "source": [ + "# Path to your PDF document\n", + "pdf_path = \"data/attention_is_all_you_need.pdf\"\n", + "\n", + "# Define test queries targeting both text and visual content\n", + "test_queries = [\n", + " \"What is the BLEU score of the Transformer (base model)?\",\n", + "]\n", + "\n", + "# Optional reference answers for evaluation\n", + "reference_answers = [\n", + " \"The Transformer (base model) achieves a BLEU score of 27.3 on the WMT 2014 English-to-German translation task and 38.1 on the WMT 2014 English-to-French translation task.\",\n", + "]\n", + "\n", + "# Run evaluation\n", + "evaluation_results = evaluate_multimodal_vs_textonly(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers\n", + ")\n", + "\n", + "# Print overall analysis\n", + "print(\"\\n=== OVERALL ANALYSIS ===\\n\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + 
"file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/16_fusion_rag.ipynb b/16_fusion_rag.ipynb new file mode 100644 index 0000000..8da33c3 --- /dev/null +++ b/16_fusion_rag.ipynb @@ -0,0 +1,1207 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Fusion Retrieval: Combining Vector and Keyword Search\n", + "\n", + "In this notebook, I implement a fusion retrieval system that combines the strengths of semantic vector search with keyword-based BM25 retrieval. This approach improves retrieval quality by capturing both conceptual similarity and exact keyword matches.\n", + "\n", + "## Why Fusion Retrieval Matters\n", + "\n", + "Traditional RAG systems typically rely on vector search alone, but this has limitations:\n", + "\n", + "- Vector search excels at semantic similarity but may miss exact keyword matches\n", + "- Keyword search is great for specific terms but lacks semantic understanding\n", + "- Different queries perform better with different retrieval methods\n", + "\n", + "Fusion retrieval gives us the best of both worlds by:\n", + "\n", + "- Performing both vector-based and keyword-based retrieval\n", + "- Normalizing the scores from each approach\n", + "- Combining them with a weighted formula\n", + "- Ranking documents based on the combined score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "from rank_bm25 import BM25Okapi\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re\n", + "import json\n", + "import time\n", + "from sklearn.metrics.pairwise import cosine_similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extract text content from a PDF file.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " \n", + " Returns:\n", + " str: Extracted text content\n", + " \"\"\"\n", + " print(f\"Extracting text from {pdf_path}...\") # Print the path of the PDF being processed\n", + " pdf_document = fitz.open(pdf_path) # Open the PDF file using PyMuPDF\n", + " text = \"\" # Initialize an empty string to store the extracted text\n", + " \n", + " # Iterate through each page in the PDF\n", + " for page_num in range(pdf_document.page_count):\n", + " page = pdf_document[page_num] # Get the page object\n", + " text += page.get_text() # Extract text from the page and append to the text string\n", + " \n", + " return text # Return the extracted text content" + ] + }, + { + "cell_type": "code", + 
"execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Split text into overlapping chunks.\n", + " \n", + " Args:\n", + " text (str): Input text to chunk\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: List of chunks with text and metadata\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store chunks\n", + " \n", + " # Iterate over the text with the specified chunk size and overlap\n", + " for i in range(0, len(text), chunk_size - chunk_overlap):\n", + " chunk = text[i:i + chunk_size] # Extract a chunk of the specified size\n", + " if chunk: # Ensure we don't add empty chunks\n", + " chunk_data = {\n", + " \"text\": chunk, # The chunk text\n", + " \"metadata\": {\n", + " \"start_char\": i, # Start character index of the chunk\n", + " \"end_char\": i + len(chunk) # End character index of the chunk\n", + " }\n", + " }\n", + " chunks.append(chunk_data) # Add the chunk data to the list\n", + " \n", + " print(f\"Created {len(chunks)} text chunks\") # Print the number of created chunks\n", + " return chunks # Return the list of chunks" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def clean_text(text):\n", + " \"\"\"\n", + " Clean text by removing extra whitespace and special characters.\n", + " \n", + " Args:\n", + " text (str): Input text\n", + " \n", + " Returns:\n", + " str: Cleaned text\n", + " \"\"\"\n", + " # Replace multiple whitespace characters (including newlines and tabs) with a single space\n", + " text = re.sub(r'\\s+', ' ', text)\n", + " \n", + " # Fix common OCR issues by replacing tab and newline characters with a space\n", + " text = text.replace('\\\\t', ' ')\n", + " text = text.replace('\\\\n', ' ')\n", + " \n", + " # Remove any leading or trailing 
whitespace and ensure single spaces between words\n", + " text = ' '.join(text.split())\n", + " \n", + " return text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Our Vector Store" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (str or List[str]): Input text(s)\n", + " model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vectors\n", + " \"\"\"\n", + " # Handle both string and list inputs\n", + " input_texts = texts if isinstance(texts, list) else [texts]\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(input_texts), batch_size):\n", + " batch = input_texts[i:i + batch_size] # Get the current batch of texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = [item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " # If input was a string, return just the first embedding\n", + " if isinstance(texts, str):\n", + " return all_embeddings[0]\n", + " \n", + " # Otherwise return all embeddings\n", + " return all_embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " self.vectors = [] # List to store embedding vectors\n", + " 
self.texts = [] # List to store text content\n", + " self.metadata = [] # List to store metadata\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " text (str): The text content\n", + " embedding (List[float]): The embedding vector\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Append the embedding vector\n", + " self.texts.append(text) # Append the text content\n", + " self.metadata.append(metadata or {}) # Append the metadata (or empty dict if None)\n", + " \n", + " def add_items(self, items, embeddings):\n", + " \"\"\"\n", + " Add multiple items to the vector store.\n", + " \n", + " Args:\n", + " items (List[Dict]): List of text items\n", + " embeddings (List[List[float]]): List of embedding vectors\n", + " \"\"\"\n", + " for i, (item, embedding) in enumerate(zip(items, embeddings)):\n", + " self.add_item(\n", + " text=item[\"text\"], # Extract text from item\n", + " embedding=embedding, # Use corresponding embedding\n", + " metadata={**item.get(\"metadata\", {}), \"index\": i} # Merge item metadata with index\n", + " )\n", + " \n", + " def similarity_search_with_scores(self, query_embedding, k=5):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding with similarity scores.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of results to return\n", + " \n", + " Returns:\n", + " List[Tuple[Dict, float]]: Top k most similar items with scores\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return empty list if no vectors are stored\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " similarity = 
cosine_similarity([query_vector], [vector])[0][0] # Compute cosine similarity\n", + " similarities.append((i, similarity)) # Append index and similarity score\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results with scores\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx], # Retrieve text by index\n", + " \"metadata\": self.metadata[idx], # Retrieve metadata by index\n", + " \"similarity\": float(score) # Add similarity score\n", + " })\n", + " \n", + " return results\n", + " \n", + " def get_all_documents(self):\n", + " \"\"\"\n", + " Get all documents in the store.\n", + " \n", + " Returns:\n", + " List[Dict]: All documents\n", + " \"\"\"\n", + " return [{\"text\": text, \"metadata\": meta} for text, meta in zip(self.texts, self.metadata)] # Combine texts and metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## BM25 Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def create_bm25_index(chunks):\n", + " \"\"\"\n", + " Create a BM25 index from the given chunks.\n", + " \n", + " Args:\n", + " chunks (List[Dict]): List of text chunks\n", + " \n", + " Returns:\n", + " BM25Okapi: A BM25 index\n", + " \"\"\"\n", + " # Extract text from each chunk\n", + " texts = [chunk[\"text\"] for chunk in chunks]\n", + " \n", + " # Tokenize each document by splitting on whitespace\n", + " tokenized_docs = [text.split() for text in texts]\n", + " \n", + " # Create the BM25 index using the tokenized documents\n", + " bm25 = BM25Okapi(tokenized_docs)\n", + " \n", + " # Print the number of documents in the BM25 index\n", + " print(f\"Created BM25 index with {len(texts)} documents\")\n", + " \n", + " return bm25" + ] + }, + { + "cell_type": "code", + 
"execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def bm25_search(bm25, chunks, query, k=5):\n", + " \"\"\"\n", + " Search the BM25 index with a query.\n", + " \n", + " Args:\n", + " bm25 (BM25Okapi): BM25 index\n", + " chunks (List[Dict]): List of text chunks\n", + " query (str): Query string\n", + " k (int): Number of results to return\n", + " \n", + " Returns:\n", + " List[Dict]: Top k results with scores\n", + " \"\"\"\n", + " # Tokenize the query by splitting it into individual words\n", + " query_tokens = query.split()\n", + " \n", + " # Get BM25 scores for the query tokens against the indexed documents\n", + " scores = bm25.get_scores(query_tokens)\n", + " \n", + " # Initialize an empty list to store results with their scores\n", + " results = []\n", + " \n", + " # Iterate over the scores and corresponding chunks\n", + " for i, score in enumerate(scores):\n", + " # Create a copy of the metadata to avoid modifying the original\n", + " metadata = chunks[i].get(\"metadata\", {}).copy()\n", + " # Add index to metadata\n", + " metadata[\"index\"] = i\n", + " \n", + " results.append({\n", + " \"text\": chunks[i][\"text\"],\n", + " \"metadata\": metadata, # Add metadata with index\n", + " \"bm25_score\": float(score)\n", + " })\n", + " \n", + " # Sort the results by BM25 score in descending order\n", + " results.sort(key=lambda x: x[\"bm25_score\"], reverse=True)\n", + " \n", + " # Return the top k results\n", + " return results[:k]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fusion Retrieval Function" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def fusion_retrieval(query, chunks, vector_store, bm25_index, k=5, alpha=0.5):\n", + " \"\"\"\n", + " Perform fusion retrieval combining vector-based and BM25 search.\n", + " \n", + " Args:\n", + " query (str): Query string\n", + " chunks (List[Dict]): Original text chunks\n", + " vector_store 
(SimpleVectorStore): Vector store\n", + " bm25_index (BM25Okapi): BM25 index\n", + " k (int): Number of results to return\n", + " alpha (float): Weight for vector scores (0-1), where 1-alpha is BM25 weight\n", + " \n", + " Returns:\n", + " List[Dict]: Top k results based on combined scores\n", + " \"\"\"\n", + " print(f\"Performing fusion retrieval for query: {query}\")\n", + " \n", + " # Define small epsilon to avoid division by zero\n", + " epsilon = 1e-8\n", + " \n", + " # Get vector search results\n", + " query_embedding = create_embeddings(query) # Create embedding for the query\n", + " vector_results = vector_store.similarity_search_with_scores(query_embedding, k=len(chunks)) # Perform vector search\n", + " \n", + " # Get BM25 search results\n", + " bm25_results = bm25_search(bm25_index, chunks, query, k=len(chunks)) # Perform BM25 search\n", + " \n", + " # Create dictionaries to map document index to score\n", + " vector_scores_dict = {result[\"metadata\"][\"index\"]: result[\"similarity\"] for result in vector_results}\n", + " bm25_scores_dict = {result[\"metadata\"][\"index\"]: result[\"bm25_score\"] for result in bm25_results}\n", + " \n", + " # Ensure all documents have scores for both methods\n", + " all_docs = vector_store.get_all_documents()\n", + " combined_results = []\n", + " \n", + " for i, doc in enumerate(all_docs):\n", + " vector_score = vector_scores_dict.get(i, 0.0) # Get vector score or 0 if not found\n", + " bm25_score = bm25_scores_dict.get(i, 0.0) # Get BM25 score or 0 if not found\n", + " combined_results.append({\n", + " \"text\": doc[\"text\"],\n", + " \"metadata\": doc[\"metadata\"],\n", + " \"vector_score\": vector_score,\n", + " \"bm25_score\": bm25_score,\n", + " \"index\": i\n", + " })\n", + " \n", + " # Extract scores as arrays\n", + " vector_scores = np.array([doc[\"vector_score\"] for doc in combined_results])\n", + " bm25_scores = np.array([doc[\"bm25_score\"] for doc in combined_results])\n", + " \n", + " # Normalize 
scores\n", + " norm_vector_scores = (vector_scores - np.min(vector_scores)) / (np.max(vector_scores) - np.min(vector_scores) + epsilon)\n", + " norm_bm25_scores = (bm25_scores - np.min(bm25_scores)) / (np.max(bm25_scores) - np.min(bm25_scores) + epsilon)\n", + " \n", + " # Compute combined scores\n", + " combined_scores = alpha * norm_vector_scores + (1 - alpha) * norm_bm25_scores\n", + " \n", + " # Add combined scores to results\n", + " for i, score in enumerate(combined_scores):\n", + " combined_results[i][\"combined_score\"] = float(score)\n", + " \n", + " # Sort by combined score (descending)\n", + " combined_results.sort(key=lambda x: x[\"combined_score\"], reverse=True)\n", + " \n", + " # Return top k results\n", + " top_results = combined_results[:k]\n", + " \n", + " print(f\"Retrieved {len(top_results)} documents with fusion retrieval\")\n", + " return top_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for fusion retrieval.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " Tuple[List[Dict], SimpleVectorStore, BM25Okapi]: Chunks, vector store, and BM25 index\n", + " \"\"\"\n", + " # Extract text from the PDF file\n", + " text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Clean the extracted text to remove extra whitespace and special characters\n", + " cleaned_text = clean_text(text)\n", + " \n", + " # Split the cleaned text into overlapping chunks\n", + " chunks = chunk_text(cleaned_text, chunk_size, chunk_overlap)\n", + " \n", + " # Extract the text content from each chunk for embedding 
creation\n", + " chunk_texts = [chunk[\"text\"] for chunk in chunks]\n", + " print(\"Creating embeddings for chunks...\")\n", + " \n", + " # Create embeddings for the chunk texts\n", + " embeddings = create_embeddings(chunk_texts)\n", + " \n", + " # Initialize the vector store\n", + " vector_store = SimpleVectorStore()\n", + " \n", + " # Add the chunks and their embeddings to the vector store\n", + " vector_store.add_items(chunks, embeddings)\n", + " print(f\"Added {len(chunks)} items to vector store\")\n", + " \n", + " # Create a BM25 index from the chunks\n", + " bm25_index = create_bm25_index(chunks)\n", + " \n", + " # Return the chunks, vector store, and BM25 index\n", + " return chunks, vector_store, bm25_index" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, context):\n", + " \"\"\"\n", + " Generate a response based on the query and context.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " context (str): Context from retrieved documents\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Define the system prompt to guide the AI assistant\n", + " system_prompt = \"\"\"You are a helpful AI assistant. Answer the user's question based on the provided context. 
\n", + " If the context doesn't contain relevant information to answer the question fully, acknowledge this limitation.\"\"\"\n", + "\n", + " # Format the user prompt with the context and query\n", + " user_prompt = f\"\"\"Context:\n", + " {context}\n", + "\n", + " Question: {query}\n", + "\n", + " Please answer the question based on the provided context.\"\"\"\n", + "\n", + " # Generate the response using the OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with context and query\n", + " ],\n", + " temperature=0.1 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated response\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Main Retrieval Function" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def answer_with_fusion_rag(query, chunks, vector_store, bm25_index, k=5, alpha=0.5):\n", + " \"\"\"\n", + " Answer a query using fusion RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " chunks (List[Dict]): Text chunks\n", + " vector_store (SimpleVectorStore): Vector store\n", + " bm25_index (BM25Okapi): BM25 index\n", + " k (int): Number of documents to retrieve\n", + " alpha (float): Weight for vector scores\n", + " \n", + " Returns:\n", + " Dict: Query results including retrieved documents and response\n", + " \"\"\"\n", + " # Retrieve documents using fusion retrieval method\n", + " retrieved_docs = fusion_retrieval(query, chunks, vector_store, bm25_index, k=k, alpha=alpha)\n", + " \n", + " # Format the context from the retrieved documents by joining their text with separators\n", + " context 
= \"\\n\\n---\\n\\n\".join([doc[\"text\"] for doc in retrieved_docs])\n", + " \n", + " # Generate a response based on the query and the formatted context\n", + " response = generate_response(query, context)\n", + " \n", + " # Return the query, retrieved documents, and the generated response\n", + " return {\n", + " \"query\": query,\n", + " \"retrieved_documents\": retrieved_docs,\n", + " \"response\": response\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Comparing Retrieval Methods" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def vector_only_rag(query, vector_store, k=5):\n", + " \"\"\"\n", + " Answer a query using only vector-based RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store\n", + " k (int): Number of documents to retrieve\n", + " \n", + " Returns:\n", + " Dict: Query results\n", + " \"\"\"\n", + " # Create query embedding\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Retrieve documents using vector-based similarity search\n", + " retrieved_docs = vector_store.similarity_search_with_scores(query_embedding, k=k)\n", + " \n", + " # Format the context from the retrieved documents by joining their text with separators\n", + " context = \"\\n\\n---\\n\\n\".join([doc[\"text\"] for doc in retrieved_docs])\n", + " \n", + " # Generate a response based on the query and the formatted context\n", + " response = generate_response(query, context)\n", + " \n", + " # Return the query, retrieved documents, and the generated response\n", + " return {\n", + " \"query\": query,\n", + " \"retrieved_documents\": retrieved_docs,\n", + " \"response\": response\n", + " }\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def bm25_only_rag(query, chunks, bm25_index, k=5):\n", + " \"\"\"\n", + " Answer a query using only 
BM25-based RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " chunks (List[Dict]): Text chunks\n", + " bm25_index (BM25Okapi): BM25 index\n", + " k (int): Number of documents to retrieve\n", + " \n", + " Returns:\n", + " Dict: Query results\n", + " \"\"\"\n", + " # Retrieve documents using BM25 search\n", + " retrieved_docs = bm25_search(bm25_index, chunks, query, k=k)\n", + " \n", + " # Format the context from the retrieved documents by joining their text with separators\n", + " context = \"\\n\\n---\\n\\n\".join([doc[\"text\"] for doc in retrieved_docs])\n", + " \n", + " # Generate a response based on the query and the formatted context\n", + " response = generate_response(query, context)\n", + " \n", + " # Return the query, retrieved documents, and the generated response\n", + " return {\n", + " \"query\": query,\n", + " \"retrieved_documents\": retrieved_docs,\n", + " \"response\": response\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_retrieval_methods(query, chunks, vector_store, bm25_index, k=5, alpha=0.5, reference_answer=None):\n", + " \"\"\"\n", + " Compare different retrieval methods for a query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " chunks (List[Dict]): Text chunks\n", + " vector_store (SimpleVectorStore): Vector store\n", + " bm25_index (BM25Okapi): BM25 index\n", + " k (int): Number of documents to retrieve\n", + " alpha (float): Weight for vector scores in fusion retrieval\n", + " reference_answer (str, optional): Reference answer for comparison\n", + " \n", + " Returns:\n", + " Dict: Comparison results\n", + " \"\"\"\n", + " print(f\"\\n=== Comparing retrieval methods for query: {query} ===\\n\")\n", + " \n", + " # Run vector-only RAG\n", + " print(\"\\nRunning vector-only RAG...\")\n", + " vector_result = 
vector_only_rag(query, vector_store, k)\n", + " \n", + " # Run BM25-only RAG\n", + " print(\"\\nRunning BM25-only RAG...\")\n", + " bm25_result = bm25_only_rag(query, chunks, bm25_index, k)\n", + " \n", + " # Run fusion RAG\n", + " print(\"\\nRunning fusion RAG...\")\n", + " fusion_result = answer_with_fusion_rag(query, chunks, vector_store, bm25_index, k, alpha)\n", + " \n", + " # Compare responses from different retrieval methods\n", + " print(\"\\nComparing responses...\")\n", + " comparison = evaluate_responses(\n", + " query, \n", + " vector_result[\"response\"], \n", + " bm25_result[\"response\"], \n", + " fusion_result[\"response\"],\n", + " reference_answer\n", + " )\n", + " \n", + " # Return the comparison results\n", + " return {\n", + " \"query\": query,\n", + " \"vector_result\": vector_result,\n", + " \"bm25_result\": bm25_result,\n", + " \"fusion_result\": fusion_result,\n", + " \"comparison\": comparison\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_responses(query, vector_response, bm25_response, fusion_response, reference_answer=None):\n", + " \"\"\"\n", + " Evaluate the responses from different retrieval methods.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_response (str): Response from vector-only RAG\n", + " bm25_response (str): Response from BM25-only RAG\n", + " fusion_response (str): Response from fusion RAG\n", + " reference_answer (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Evaluation of responses\n", + " \"\"\"\n", + " # System prompt for the evaluator to guide the evaluation process\n", + " system_prompt = \"\"\"You are an expert evaluator of RAG systems. Compare responses from three different retrieval approaches:\n", + " 1. Vector-based retrieval: Uses semantic similarity for document retrieval\n", + " 2. BM25 keyword retrieval: Uses keyword matching for document retrieval\n", + " 3. 
Fusion retrieval: Combines both vector and keyword approaches\n", + "\n", + " Evaluate the responses based on:\n", + " - Relevance to the query\n", + " - Factual correctness\n", + " - Comprehensiveness\n", + " - Clarity and coherence\"\"\"\n", + "\n", + " # User prompt containing the query and responses\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + " Vector-based response:\n", + " {vector_response}\n", + "\n", + " BM25 keyword response:\n", + " {bm25_response}\n", + "\n", + " Fusion response:\n", + " {fusion_response}\n", + " \"\"\"\n", + "\n", + " # Add reference answer to the prompt if provided\n", + " if reference_answer:\n", + " user_prompt += f\"\"\"\n", + " Reference answer:\n", + " {reference_answer}\n", + " \"\"\"\n", + "\n", + " # Add instructions for detailed comparison to the user prompt\n", + " user_prompt += \"\"\"\n", + " Please provide a detailed comparison of these three responses. Which approach performed best for this query and why?\n", + " Be specific about the strengths and weaknesses of each approach for this particular query.\n", + " \"\"\"\n", + "\n", + " # Generate the evaluation using meta-llama/Llama-3.2-3B-Instruct\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the evaluator\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with query and responses\n", + " ],\n", + " temperature=0 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated evaluation content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Evaluation Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_fusion_retrieval(pdf_path, test_queries, 
reference_answers=None, k=5, alpha=0.5):\n", + " \"\"\"\n", + " Evaluate fusion retrieval compared to other methods.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers\n", + " k (int): Number of documents to retrieve\n", + " alpha (float): Weight for vector scores in fusion retrieval\n", + " \n", + " Returns:\n", + " Dict: Evaluation results\n", + " \"\"\"\n", + " print(\"=== EVALUATING FUSION RETRIEVAL ===\\n\")\n", + " \n", + " # Process the document to extract text, create chunks, and build vector and BM25 indices\n", + " chunks, vector_store, bm25_index = process_document(pdf_path)\n", + " \n", + " # Initialize a list to store results for each query\n", + " results = []\n", + " \n", + " # Iterate over each test query\n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\n=== Evaluating Query {i+1}/{len(test_queries)} ===\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Get the reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " \n", + " # Compare retrieval methods for the current query\n", + " comparison = compare_retrieval_methods(\n", + " query, \n", + " chunks, \n", + " vector_store, \n", + " bm25_index, \n", + " k=k, \n", + " alpha=alpha,\n", + " reference_answer=reference\n", + " )\n", + " \n", + " # Append the comparison results to the results list\n", + " results.append(comparison)\n", + " \n", + " # Print the responses from different retrieval methods\n", + " print(\"\\n=== Vector-based Response ===\")\n", + " print(comparison[\"vector_result\"][\"response\"])\n", + " \n", + " print(\"\\n=== BM25 Response ===\")\n", + " print(comparison[\"bm25_result\"][\"response\"])\n", + " \n", + " print(\"\\n=== Fusion Response ===\")\n", + " 
print(comparison[\"fusion_result\"][\"response\"])\n", + " \n", + " print(\"\\n=== Comparison ===\")\n", + " print(comparison[\"comparison\"])\n", + " \n", + " # Generate an overall analysis of the fusion retrieval performance\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " # Return the results and overall analysis\n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of fusion retrieval.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from evaluating queries\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " # System prompt to guide the evaluation process\n", + " system_prompt = \"\"\"You are an expert at evaluating information retrieval systems. \n", + " Based on multiple test queries, provide an overall analysis comparing three retrieval approaches:\n", + " 1. Vector-based retrieval (semantic similarity)\n", + " 2. BM25 keyword retrieval (keyword matching)\n", + " 3. Fusion retrieval (combination of both)\n", + "\n", + " Focus on:\n", + " 1. Types of queries where each approach performs best\n", + " 2. Overall strengths and weaknesses of each approach\n", + " 3. How fusion retrieval balances the trade-offs\n", + " 4. 
Recommendations for when to use each approach\"\"\"\n", + "\n", + " # Create a summary of evaluations for each query\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " evaluations_summary += f\"Comparison Summary: {result['comparison'][:200]}...\\n\\n\"\n", + "\n", + " # User prompt containing the evaluations summary\n", + " user_prompt = f\"\"\"Based on the following evaluations of different retrieval methods across {len(results)} queries, \n", + " provide an overall analysis comparing these three approaches:\n", + "\n", + " {evaluations_summary}\n", + "\n", + " Please provide a comprehensive analysis of vector-based, BM25, and fusion retrieval approaches,\n", + " highlighting when and why fusion retrieval provides advantages over the individual methods.\"\"\"\n", + "\n", + " # Generate the overall analysis using meta-llama/Llama-3.2-3B-Instruct\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Return the generated analysis content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating Fusion Retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== EVALUATING FUSION RETRIEVAL ===\n", + "\n", + "Extracting text from data/AI_Information.pdf...\n", + "Created 42 text chunks\n", + "Creating embeddings for chunks...\n", + "Added 42 items to vector store\n", + "Created BM25 index with 42 documents\n", + "\n", + "\n", + "=== Evaluating Query 1/1 ===\n", + "Query: What are the main applications of transformer models in 
natural language processing?\n", + "\n", + "=== Comparing retrieval methods for query: What are the main applications of transformer models in natural language processing? ===\n", + "\n", + "\n", + "Running vector-only RAG...\n", + "\n", + "Running BM25-only RAG...\n", + "\n", + "Running fusion RAG...\n", + "Performing fusion retrieval for query: What are the main applications of transformer models in natural language processing?\n", + "Retrieved 5 documents with fusion retrieval\n", + "\n", + "Comparing responses...\n", + "\n", + "=== Vector-based Response ===\n", + "The provided context does not mention transformer models specifically. However, it does mention Natural Language Processing (NLP) as a branch of AI that focuses on enabling computers to understand, interpret, and generate human language. NLP techniques are used in chatbots, machine translation, text summarization, and sentiment analysis.\n", + "\n", + "Transformer models are a type of neural network architecture that is particularly effective for NLP tasks, such as machine translation, text generation, and text classification. They are not explicitly mentioned in the provided context.\n", + "\n", + "If you're looking for information on transformer models, I can provide general information on this topic. However, please note that the context provided does not specifically address transformer models.\n", + "\n", + "=== BM25 Response ===\n", + "The provided context does not mention transformer models or their applications in natural language processing. The context covers various topics such as deep learning, convolutional neural networks, recurrent neural networks, natural language processing, and machine learning, but it does not specifically discuss transformer models.\n", + "\n", + "If you're looking for information on transformer models, I can provide general information on this topic. 
Transformer models are a type of neural network architecture that have gained popularity in natural language processing tasks such as machine translation, text generation, and language understanding. They are particularly effective in handling long-range dependencies in sequential data and have been widely adopted in many NLP applications. However, this information is not present in the provided context.\n", + "\n", + "=== Fusion Response ===\n", + "The provided context does not explicitly mention the main applications of transformer models in natural language processing. However, it does mention that Generative Adversarial Networks (GANs) and transformers are examples of generative AI models that can create original content, including images, text, and music.\n", + "\n", + "Based on general knowledge, transformer models are widely used in natural language processing (NLP) for tasks such as:\n", + "\n", + "1. Machine translation\n", + "2. Text generation\n", + "3. Sentiment analysis\n", + "4. Text classification\n", + "5. Language modeling\n", + "\n", + "These models have achieved state-of-the-art results in many NLP tasks and have become a popular choice for many applications.\n", + "\n", + "If you're looking for more specific information on the applications of transformer models in NLP, I can try to provide more general information or point you in the direction of more resources.\n", + "\n", + "=== Comparison ===\n", + "**Comparison of Vector-based, BM25 Keyword, and Fusion Retrieval Approaches**\n", + "\n", + "For the given query, \"What are the main applications of transformer models in natural language processing?\", we can evaluate the responses based on relevance, factual correctness, comprehensiveness, and clarity/coherence.\n", + "\n", + "**Relevance:**\n", + "\n", + "* Vector-based response: 6/10 (The response is relevant to the query, but it does not directly answer the question. 
It provides general information about NLP and mentions transformer models, but does not explicitly state their main applications.)\n", + "* BM25 keyword response: 5/10 (The response is not directly relevant to the query, as it does not mention transformer models or their applications in NLP.)\n", + "* Fusion response: 9/10 (The response directly answers the question and provides a comprehensive list of transformer models' main applications in NLP.)\n", + "\n", + "**Factual Correctness:**\n", + "\n", + "* Vector-based response: 8/10 (The response is generally correct, but it does not explicitly mention the main applications of transformer models in NLP.)\n", + "* BM25 keyword response: 8/10 (The response is generally correct, but it does not mention transformer models or their applications in NLP.)\n", + "* Fusion response: 9/10 (The response is factually correct and provides a comprehensive list of transformer models' main applications in NLP.)\n", + "\n", + "**Comprehensiveness:**\n", + "\n", + "* Vector-based response: 6/10 (The response provides general information about NLP, but does not explicitly state the main applications of transformer models.)\n", + "* BM25 keyword response: 4/10 (The response does not provide any information about transformer models or their applications in NLP.)\n", + "* Fusion response: 9/10 (The response provides a comprehensive list of transformer models' main applications in NLP.)\n", + "\n", + "**Clarity and Coherence:**\n", + "\n", + "* Vector-based response: 7/10 (The response is clear, but it does not explicitly state the main applications of transformer models.)\n", + "* BM25 keyword response: 6/10 (The response is clear, but it does not mention transformer models or their applications in NLP.)\n", + "* Fusion response: 9/10 (The response is clear, concise, and well-organized, making it easy to understand the main applications of transformer models in NLP.)\n", + "\n", + "**Overall Performance:**\n", + "\n", + "* Vector-based 
response: 6.5/10\n", + "* BM25 keyword response: 5.5/10\n", + "* Fusion response: 8.5/10\n", + "\n", + "Based on the evaluation, the Fusion retrieval approach performed best for this query. The Fusion response provided a comprehensive list of transformer models' main applications in NLP, was factually correct, and was clear and concise. The Vector-based response was relevant but did not explicitly state the main applications of transformer models, while the BM25 keyword response was not directly relevant to the query.\n", + "\n", + "\n", + "=== OVERALL ANALYSIS ===\n", + "\n", + "**Overall Analysis: Vector-based, BM25, and Fusion Retrieval Approaches**\n", + "\n", + "In this analysis, we will evaluate the performance of three retrieval approaches: Vector-based, BM25 Keyword, and Fusion Retrieval. We will examine the strengths and weaknesses of each approach, their performance on specific query types, and how fusion retrieval balances the trade-offs.\n", + "\n", + "**Query 1: What are the main applications of transformer models in natural language processing?**\n", + "\n", + "For this query, we can evaluate the performance of the three approaches as follows:\n", + "\n", + "1. **Vector-based Retrieval (Semantic Similarity)**: This approach is suitable for queries that require understanding the semantic meaning of the query and the documents. In this case, the query is asking about the main applications of transformer models, which implies a need for semantic understanding. The vector-based approach is likely to perform well, as it can capture the nuances of the query and the documents.\n", + "\n", + "Performance: 8/10\n", + "\n", + "2. **BM25 Keyword Retrieval (Keyword Matching)**: This approach is suitable for queries that require exact keyword matching. In this case, the query is asking about the main applications of transformer models, which implies a need for exact keyword matching. 
However, the query is also asking about the main applications, which may require a more nuanced understanding of the documents.\n", + "\n", + "Performance: 6/10\n", + "\n", + "3. **Fusion Retrieval (Combination of Both)**: This approach combines the strengths of both vector-based and BM25 keyword retrieval. By using a combination of both approaches, fusion retrieval can capture both the semantic meaning of the query and the exact keyword matching.\n", + "\n", + "Performance: 9/10\n", + "\n", + "**Overall Strengths and Weaknesses of Each Approach**\n", + "\n", + "1. **Vector-based Retrieval (Semantic Similarity)**:\n", + "\t* Strengths: Can capture nuances of the query and documents, suitable for queries that require semantic understanding.\n", + "\t* Weaknesses: May not perform well for queries that require exact keyword matching.\n", + "2. **BM25 Keyword Retrieval (Keyword Matching)**:\n", + "\t* Strengths: Can perform well for queries that require exact keyword matching.\n", + "\t* Weaknesses: May not capture nuances of the query and documents, suitable for queries that require semantic understanding.\n", + "3. **Fusion Retrieval (Combination of Both)**:\n", + "\t* Strengths: Can capture both the semantic meaning of the query and the exact keyword matching, suitable for a wide range of queries.\n", + "\t* Weaknesses: May require more computational resources and complex implementation.\n", + "\n", + "**How Fusion Retrieval Balances the Trade-Offs**\n", + "\n", + "Fusion retrieval balances the trade-offs between vector-based and BM25 keyword retrieval by combining the strengths of both approaches. By using a combination of both, fusion retrieval can capture both the semantic meaning of the query and the exact keyword matching, resulting in a more comprehensive search result.\n", + "\n", + "**Recommendations for When to Use Each Approach**\n", + "\n", + "1. 
**Vector-based Retrieval (Semantic Similarity)**: Use for queries that require semantic understanding, such as questions that ask about the meaning or context of a term.\n", + "2. **BM25 Keyword Retrieval (Keyword Matching)**: Use for queries that require exact keyword matching, such as searches for specific terms or phrases.\n", + "3. **Fusion Retrieval (Combination of Both)**: Use for queries that require a balance between semantic understanding and exact keyword matching, such as searches for terms or phrases with nuanced meanings.\n", + "\n", + "In conclusion, fusion retrieval provides advantages over individual methods by combining the strengths of both vector-based and BM25 keyword retrieval. By using a combination of both approaches, fusion retrieval can capture both the semantic meaning of the query and the exact keyword matching, resulting in a more comprehensive search result.\n" + ] + } + ], + "source": [ + "# Path to PDF document\n", + "# Path to PDF document containing AI information for knowledge retrieval testing\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define a single AI-related test query\n", + "test_queries = [\n", + " \"What are the main applications of transformer models in natural language processing?\" # AI-specific query\n", + "]\n", + "\n", + "# Optional reference answer\n", + "reference_answers = [\n", + " \"Transformer models have revolutionized natural language processing with applications including machine translation, text summarization, question answering, sentiment analysis, and text generation. 
They excel at capturing long-range dependencies in text and have become the foundation for models like BERT, GPT, and T5.\",\n", + "]\n", + "\n", + "# Set parameters\n", + "k = 5 # Number of documents to retrieve\n", + "alpha = 0.5 # Weight for vector scores (0.5 means equal weight between vector and BM25)\n", + "\n", + "# Run evaluation\n", + "evaluation_results = evaluate_fusion_retrieval(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers,\n", + " k=k,\n", + " alpha=alpha\n", + ")\n", + "\n", + "# Print overall analysis\n", + "print(\"\\n\\n=== OVERALL ANALYSIS ===\\n\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/17_graph_rag.ipynb b/17_graph_rag.ipynb new file mode 100644 index 0000000..bb0a653 --- /dev/null +++ b/17_graph_rag.ipynb @@ -0,0 +1,959 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Graph RAG: Graph-Enhanced Retrieval-Augmented Generation\n", + "\n", + "In this notebook, I implement Graph RAG - a technique that enhances traditional RAG systems by organizing knowledge as a connected graph rather than a flat collection of documents. 
This allows the system to navigate related concepts and retrieve more contextually relevant information than standard vector similarity approaches.\n", + "\n", + "Key Benefits of Graph RAG\n", + "\n", + "- Preserves relationships between pieces of information\n", + "- Enables traversal through connected concepts to find relevant context\n", + "- Improves handling of complex, multi-part queries\n", + "- Provides better explainability through visualized knowledge paths" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz # PyMuPDF\n", + "from openai import OpenAI\n", + "from typing import List, Dict, Tuple, Any\n", + "import networkx as nx\n", + "import matplotlib.pyplot as plt\n", + "import heapq\n", + "from collections import defaultdict\n", + "import re\n", + "from PIL import Image\n", + "import io" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extract text content from a PDF file.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " \n", + " Returns:\n", + " str: Extracted text content\n", + " \"\"\"\n", + " print(f\"Extracting text from {pdf_path}...\") # Print the path of the PDF being processed\n", + " pdf_document = fitz.open(pdf_path) # Open the PDF file using PyMuPDF\n", + " text = \"\" # Initialize an empty string to store the extracted text\n", + " \n", + " # Iterate through each page in the PDF\n", + " for page_num in range(pdf_document.page_count):\n", + " page = pdf_document[page_num] # Get the page object\n", + " text += page.get_text() # Extract text from the page and append to the text string\n", + " \n", + " return text # Return the extracted text content" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, chunk_size=1000, overlap=200):\n", + " \"\"\"\n", + " Split text into overlapping chunks.\n", + " \n", + " Args:\n", + " text (str): Input text to chunk\n", + " chunk_size (int): Size of each chunk in characters\n", + " overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: List of chunks with metadata\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Iterate 
over the text with a step size of (chunk_size - overlap)\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " # Extract a chunk of text from the current position\n", + " chunk_text = text[i:i + chunk_size]\n", + " \n", + " # Ensure we don't add empty chunks\n", + " if chunk_text:\n", + " # Append the chunk with its metadata to the list\n", + " chunks.append({\n", + " \"text\": chunk_text, # The chunk of text\n", + " \"index\": len(chunks), # The index of the chunk\n", + " \"start_pos\": i, # The starting position of the chunk in the original text\n", + " \"end_pos\": i + len(chunk_text) # The ending position of the chunk in the original text\n", + " })\n", + " \n", + " # Print the number of chunks created\n", + " print(f\"Created {len(chunks)} text chunks\")\n", + " \n", + " return chunks # Return the list of chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (List[str]): Input texts\n", + " model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vectors\n", + " \"\"\"\n", + " # Handle empty input\n", + " if not texts:\n", + " return []\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(texts), batch_size):\n", + " batch = texts[i:i + batch_size] # Get the current batch of texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = [item.embedding for item 
in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " return all_embeddings # Return all embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Knowledge Graph Construction" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_concepts(text):\n", + " \"\"\"\n", + " Extract key concepts from text using OpenAI's API.\n", + " \n", + " Args:\n", + " text (str): Text to extract concepts from\n", + " \n", + " Returns:\n", + " List[str]: List of concepts\n", + " \"\"\"\n", + " # System message to instruct the model on what to do\n", + " system_message = \"\"\"Extract key concepts and entities from the provided text.\n", + "Return ONLY a list of 5-10 key terms, entities, or concepts that are most important in this text.\n", + "Format your response as a JSON array of strings.\"\"\"\n", + "\n", + " # Make a request to the OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": f\"Extract key concepts from:\\n\\n{text[:3000]}\"} # Limit for API\n", + " ],\n", + " temperature=0.0,\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " \n", + " try:\n", + " # Parse concepts from the response\n", + " concepts_json = json.loads(response.choices[0].message.content)\n", + " concepts = concepts_json.get(\"concepts\", [])\n", + " if not concepts and \"concepts\" not in concepts_json:\n", + " # Try to get any array in the response\n", + " for key, value in concepts_json.items():\n", + " if isinstance(value, list):\n", + " concepts = value\n", + " break\n", + " return concepts\n", + " except (json.JSONDecodeError, AttributeError):\n", + " # Fallback if JSON parsing fails\n", + " content = 
response.choices[0].message.content\n", + " # Try to extract anything that looks like a list\n", + " matches = re.findall(r'\\[(.*?)\\]', content, re.DOTALL)\n", + " if matches:\n", + " items = re.findall(r'\"([^\"]*)\"', matches[0])\n", + " return items\n", + " return []" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def build_knowledge_graph(chunks):\n", + " \"\"\"\n", + " Build a knowledge graph from text chunks.\n", + " \n", + " Args:\n", + " chunks (List[Dict]): List of text chunks with metadata\n", + " \n", + " Returns:\n", + " Tuple[nx.Graph, List[np.ndarray]]: The knowledge graph and chunk embeddings\n", + " \"\"\"\n", + " print(\"Building knowledge graph...\")\n", + " \n", + " # Create a graph\n", + " graph = nx.Graph()\n", + " \n", + " # Extract chunk texts\n", + " texts = [chunk[\"text\"] for chunk in chunks]\n", + " \n", + " # Create embeddings for all chunks\n", + " print(\"Creating embeddings for chunks...\")\n", + " embeddings = create_embeddings(texts)\n", + " \n", + " # Add nodes to the graph\n", + " print(\"Adding nodes to the graph...\")\n", + " for i, chunk in enumerate(chunks):\n", + " # Extract concepts from the chunk\n", + " print(f\"Extracting concepts for chunk {i+1}/{len(chunks)}...\")\n", + " concepts = extract_concepts(chunk[\"text\"])\n", + " \n", + " # Add node with attributes\n", + " graph.add_node(i, \n", + " text=chunk[\"text\"], \n", + " concepts=concepts,\n", + " embedding=embeddings[i])\n", + " \n", + " # Connect nodes based on shared concepts\n", + " print(\"Creating edges between nodes...\")\n", + " for i in range(len(chunks)):\n", + " node_concepts = set(graph.nodes[i][\"concepts\"])\n", + " \n", + " for j in range(i + 1, len(chunks)):\n", + " # Calculate concept overlap\n", + " other_concepts = set(graph.nodes[j][\"concepts\"])\n", + " shared_concepts = node_concepts.intersection(other_concepts)\n", + " \n", + " # If they share concepts, add an edge\n", + " if 
shared_concepts:\n", + " # Calculate semantic similarity using embeddings\n", + " similarity = np.dot(embeddings[i], embeddings[j]) / (np.linalg.norm(embeddings[i]) * np.linalg.norm(embeddings[j]))\n", + " \n", + " # Calculate edge weight based on concept overlap and semantic similarity\n", + " concept_score = len(shared_concepts) / min(len(node_concepts), len(other_concepts))\n", + " edge_weight = 0.7 * similarity + 0.3 * concept_score\n", + " \n", + " # Only add edges with significant relationship\n", + " if edge_weight > 0.6:\n", + " graph.add_edge(i, j, \n", + " weight=edge_weight,\n", + " similarity=similarity,\n", + " shared_concepts=list(shared_concepts))\n", + " \n", + " print(f\"Knowledge graph built with {graph.number_of_nodes()} nodes and {graph.number_of_edges()} edges\")\n", + " return graph, embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Graph Traversal and Query Processing" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def traverse_graph(query, graph, embeddings, top_k=5, max_depth=3):\n", + " \"\"\"\n", + " Traverse the knowledge graph to find relevant information for the query.\n", + " \n", + " Args:\n", + " query (str): The user's question\n", + " graph (nx.Graph): The knowledge graph\n", + " embeddings (List): List of node embeddings\n", + " top_k (int): Number of initial nodes to consider\n", + " max_depth (int): Maximum traversal depth\n", + " \n", + " Returns:\n", + " List[Dict]: Relevant information from graph traversal\n", + " \"\"\"\n", + " print(f\"Traversing graph for query: {query}\")\n", + " \n", + " # Get query embedding\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Calculate similarity between query and all nodes\n", + " similarities = []\n", + " for i, node_embedding in enumerate(embeddings):\n", + " similarity = np.dot(query_embedding, node_embedding) / (np.linalg.norm(query_embedding) * 
np.linalg.norm(node_embedding))\n", + " similarities.append((i, similarity))\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Get top-k most similar nodes as starting points\n", + " starting_nodes = [node for node, _ in similarities[:top_k]]\n", + " print(f\"Starting traversal from {len(starting_nodes)} nodes\")\n", + " \n", + " # Initialize traversal\n", + " visited = set() # Set to keep track of visited nodes\n", + " traversal_path = [] # List to store the traversal path\n", + " results = [] # List to store the results\n", + " \n", + " # Use a priority queue for traversal\n", + " queue = []\n", + " for node in starting_nodes:\n", + " heapq.heappush(queue, (-similarities[node][1], node)) # Negative for max-heap\n", + " \n", + " # Traverse the graph using a modified breadth-first search with priority\n", + " while queue and len(results) < (top_k * 3): # Limit results to top_k * 3\n", + " _, node = heapq.heappop(queue)\n", + " \n", + " if node in visited:\n", + " continue\n", + " \n", + " # Mark as visited\n", + " visited.add(node)\n", + " traversal_path.append(node)\n", + " \n", + " # Add current node's text to results\n", + " results.append({\n", + " \"text\": graph.nodes[node][\"text\"],\n", + " \"concepts\": graph.nodes[node][\"concepts\"],\n", + " \"node_id\": node\n", + " })\n", + " \n", + " # Explore neighbors if we haven't reached max depth\n", + " if len(traversal_path) < max_depth:\n", + " neighbors = [(neighbor, graph[node][neighbor][\"weight\"]) \n", + " for neighbor in graph.neighbors(node)\n", + " if neighbor not in visited]\n", + " \n", + " # Add neighbors to queue based on edge weight\n", + " for neighbor, weight in sorted(neighbors, key=lambda x: x[1], reverse=True):\n", + " heapq.heappush(queue, (-weight, neighbor))\n", + " \n", + " print(f\"Graph traversal found {len(results)} relevant chunks\")\n", + " return results, traversal_path" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, context_chunks):\n", + " \"\"\"\n", + " Generate a response using the retrieved context.\n", + " \n", + " Args:\n", + " query (str): The user's question\n", + " context_chunks (List[Dict]): Relevant chunks from graph traversal\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Extract text from each chunk in the context\n", + " context_texts = [chunk[\"text\"] for chunk in context_chunks]\n", + " \n", + " # Combine the extracted texts into a single context string, separated by \"---\"\n", + " combined_context = \"\\n\\n---\\n\\n\".join(context_texts)\n", + " \n", + " # Define the maximum allowed length for the context (OpenAI limit)\n", + " max_context = 14000\n", + " \n", + " # Truncate the combined context if it exceeds the maximum length\n", + " if len(combined_context) > max_context:\n", + " combined_context = combined_context[:max_context] + \"... [truncated]\"\n", + " \n", + " # Define the system message to guide the AI assistant\n", + " system_message = \"\"\"You are a helpful AI assistant. Answer the user's question based on the provided context.\n", + "If the information is not in the context, say so. 
Refer to specific parts of the context in your answer when possible.\"\"\"\n", + "\n", + " # Generate the response using the OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": f\"Context:\\n{combined_context}\\n\\nQuestion: {query}\"} # User message with context and query\n", + " ],\n", + " temperature=0.2 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated response content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def visualize_graph_traversal(graph, traversal_path):\n", + " \"\"\"\n", + " Visualize the knowledge graph and the traversal path.\n", + " \n", + " Args:\n", + " graph (nx.Graph): The knowledge graph\n", + " traversal_path (List): List of nodes in traversal order\n", + " \"\"\"\n", + " plt.figure(figsize=(12, 10)) # Set the figure size\n", + " \n", + " # Define node colors, default to light blue\n", + " node_color = ['lightblue'] * graph.number_of_nodes()\n", + " \n", + " # Highlight traversal path nodes in light green\n", + " for node in traversal_path:\n", + " node_color[node] = 'lightgreen'\n", + " \n", + " # Highlight start node in green and end node in red\n", + " if traversal_path:\n", + " node_color[traversal_path[0]] = 'green'\n", + " node_color[traversal_path[-1]] = 'red'\n", + " \n", + " # Create positions for all nodes using spring layout\n", + " pos = nx.spring_layout(graph, k=0.5, iterations=50, seed=42)\n", + " \n", + " # Draw the graph nodes\n", + " nx.draw_networkx_nodes(graph, pos, node_color=node_color, node_size=500, alpha=0.8)\n", + 
" \n", + " # Draw edges with width proportional to weight\n", + " for u, v, data in graph.edges(data=True):\n", + " weight = data.get('weight', 1.0)\n", + " nx.draw_networkx_edges(graph, pos, edgelist=[(u, v)], width=weight*2, alpha=0.6)\n", + " \n", + " # Draw traversal path with red dashed lines\n", + " traversal_edges = [(traversal_path[i], traversal_path[i+1]) \n", + " for i in range(len(traversal_path)-1)]\n", + " \n", + " nx.draw_networkx_edges(graph, pos, edgelist=traversal_edges, \n", + " width=3, alpha=0.8, edge_color='red', \n", + " style='dashed', arrows=True)\n", + " \n", + " # Add labels with the first concept for each node\n", + " labels = {}\n", + " for node in graph.nodes():\n", + " concepts = graph.nodes[node]['concepts']\n", + " label = concepts[0] if concepts else f\"Node {node}\"\n", + " labels[node] = f\"{node}: {label}\"\n", + " \n", + " nx.draw_networkx_labels(graph, pos, labels=labels, font_size=8)\n", + " \n", + " plt.title(\"Knowledge Graph with Traversal Path\") # Set the plot title\n", + " plt.axis('off') # Turn off the axis\n", + " plt.tight_layout() # Adjust layout\n", + " plt.show() # Display the plot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Graph RAG Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def graph_rag_pipeline(pdf_path, query, chunk_size=1000, chunk_overlap=200, top_k=3):\n", + " \"\"\"\n", + " Complete Graph RAG pipeline from document to answer.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document\n", + " query (str): The user's question\n", + " chunk_size (int): Size of text chunks\n", + " chunk_overlap (int): Overlap between chunks\n", + " top_k (int): Number of top nodes to consider for traversal\n", + " \n", + " Returns:\n", + " Dict: Results including answer and graph visualization data\n", + " \"\"\"\n", + " # Extract text from the PDF document\n", + " text = 
extract_text_from_pdf(pdf_path)\n", + " \n", + " # Split the extracted text into overlapping chunks\n", + " chunks = chunk_text(text, chunk_size, chunk_overlap)\n", + " \n", + " # Build a knowledge graph from the text chunks\n", + " graph, embeddings = build_knowledge_graph(chunks)\n", + " \n", + " # Traverse the knowledge graph to find relevant information for the query\n", + " relevant_chunks, traversal_path = traverse_graph(query, graph, embeddings, top_k)\n", + " \n", + " # Generate a response based on the query and the relevant chunks\n", + " response = generate_response(query, relevant_chunks)\n", + " \n", + " # Visualize the graph traversal path\n", + " visualize_graph_traversal(graph, traversal_path)\n", + " \n", + " # Return the query, response, relevant chunks, traversal path, and the graph\n", + " return {\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"relevant_chunks\": relevant_chunks,\n", + " \"traversal_path\": traversal_path,\n", + " \"graph\": graph\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Function" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_graph_rag(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Evaluate Graph RAG on multiple test queries.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers for comparison\n", + " \n", + " Returns:\n", + " Dict: Evaluation results\n", + " \"\"\"\n", + " # Extract text from PDF\n", + " text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Split text into chunks\n", + " chunks = chunk_text(text)\n", + " \n", + " # Build knowledge graph (do this once for all queries)\n", + " graph, embeddings = build_knowledge_graph(chunks)\n", + " \n", + " results = []\n", + " \n", + " for i, query in 
enumerate(test_queries):\n", + " print(f\"\\n\\n=== Evaluating Query {i+1}/{len(test_queries)} ===\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Traverse graph to find relevant information\n", + " relevant_chunks, traversal_path = traverse_graph(query, graph, embeddings)\n", + " \n", + " # Generate response\n", + " response = generate_response(query, relevant_chunks)\n", + " \n", + " # Compare with reference answer if available\n", + " reference = None\n", + " comparison = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " comparison = compare_with_reference(response, reference, query)\n", + " \n", + " # Append results for the current query\n", + " results.append({\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"reference_answer\": reference,\n", + " \"comparison\": comparison,\n", + " \"traversal_path_length\": len(traversal_path),\n", + " \"relevant_chunks_count\": len(relevant_chunks)\n", + " })\n", + " \n", + " # Display results\n", + " print(f\"\\nResponse: {response}\\n\")\n", + " if comparison:\n", + " print(f\"Comparison: {comparison}\\n\")\n", + " \n", + " # Return evaluation results and graph statistics\n", + " return {\n", + " \"results\": results,\n", + " \"graph_stats\": {\n", + " \"nodes\": graph.number_of_nodes(),\n", + " \"edges\": graph.number_of_edges(),\n", + " \"avg_degree\": sum(dict(graph.degree()).values()) / graph.number_of_nodes()\n", + " }\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_with_reference(response, reference, query):\n", + " \"\"\"\n", + " Compare generated response with reference answer.\n", + " \n", + " Args:\n", + " response (str): Generated response\n", + " reference (str): Reference answer\n", + " query (str): Original query\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " # System message to instruct the 
model on how to compare the responses\n", + " system_message = \"\"\"Compare the AI-generated response with the reference answer.\n", + "Evaluate based on: correctness, completeness, and relevance to the query.\n", + "Provide a brief analysis (2-3 sentences) of how well the generated response matches the reference.\"\"\"\n", + "\n", + " # Construct the prompt with the query, AI-generated response, and reference answer\n", + " prompt = f\"\"\"\n", + "Query: {query}\n", + "\n", + "AI-generated response:\n", + "{response}\n", + "\n", + "Reference answer:\n", + "{reference}\n", + "\n", + "How well does the AI response match the reference?\n", + "\"\"\"\n", + "\n", + " # Make a request to the OpenAI API to generate the comparison analysis\n", + " comparison = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": prompt} # User message with the prompt\n", + " ],\n", + " temperature=0.0 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated comparison analysis\n", + " return comparison.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of Graph RAG on a Sample PDF Document" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Extracting text from data/AI_Information.pdf...\n", + "Created 42 text chunks\n", + "Building knowledge graph...\n", + "Creating embeddings for chunks...\n", + "Adding nodes to the graph...\n", + "Extracting concepts for chunk 1/42...\n", + "Extracting concepts for chunk 2/42...\n", + "Extracting concepts for chunk 3/42...\n", + "Extracting concepts for chunk 4/42...\n", + "Extracting concepts for chunk 5/42...\n", + "Extracting concepts for chunk 
6/42...\n", + "Extracting concepts for chunk 7/42...\n", + "Extracting concepts for chunk 8/42...\n", + "Extracting concepts for chunk 9/42...\n", + "Extracting concepts for chunk 10/42...\n", + "Extracting concepts for chunk 11/42...\n", + "Extracting concepts for chunk 12/42...\n", + "Extracting concepts for chunk 13/42...\n", + "Extracting concepts for chunk 14/42...\n", + "Extracting concepts for chunk 15/42...\n", + "Extracting concepts for chunk 16/42...\n", + "Extracting concepts for chunk 17/42...\n", + "Extracting concepts for chunk 18/42...\n", + "Extracting concepts for chunk 19/42...\n", + "Extracting concepts for chunk 20/42...\n", + "Extracting concepts for chunk 21/42...\n", + "Extracting concepts for chunk 22/42...\n", + "Extracting concepts for chunk 23/42...\n", + "Extracting concepts for chunk 24/42...\n", + "Extracting concepts for chunk 25/42...\n", + "Extracting concepts for chunk 26/42...\n", + "Extracting concepts for chunk 27/42...\n", + "Extracting concepts for chunk 28/42...\n", + "Extracting concepts for chunk 29/42...\n", + "Extracting concepts for chunk 30/42...\n", + "Extracting concepts for chunk 31/42...\n", + "Extracting concepts for chunk 32/42...\n", + "Extracting concepts for chunk 33/42...\n", + "Extracting concepts for chunk 34/42...\n", + "Extracting concepts for chunk 35/42...\n", + "Extracting concepts for chunk 36/42...\n", + "Extracting concepts for chunk 37/42...\n", + "Extracting concepts for chunk 38/42...\n", + "Extracting concepts for chunk 39/42...\n", + "Extracting concepts for chunk 40/42...\n", + "Extracting concepts for chunk 41/42...\n", + "Extracting concepts for chunk 42/42...\n", + "Creating edges between nodes...\n", + "Knowledge graph built with 42 nodes and 110 edges\n", + "Traversing graph for query: What are the key applications of transformers in natural language processing?\n", + "Starting traversal from 3 nodes\n", + "Graph traversal found 9 relevant chunks\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABKUAAAPdCAYAAABba9tpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3iURdfH8e+9u+mFQIDQQYp0CL0lVAEB6SB2UXiwPIIFsPIqKooKCj7YBbuiUqQoiqAgvRt6h9B7COlld+/3j5iVpQZIsim/z3Xl0p2de+ZsSdiczJwxTNM0ERERERERERERyUUWTwcgIiIiIiIiIiKFj5JSIiIiIiIiIiKS65SUEhERERERERGRXKeklIiIiIiIiIiI5DolpUREREREREREJNcpKSUiIiIiIiIiIrlOSSkREREREREREcl1SkqJiIiIiIiIiEiuU1JKRERERERERERynZJSIiIiecjixYsxDIPFixdn25ijR4/GMIxsG68gGThwIIGBgbk6Z3R0NIZh8MUXX2S57/jx43M+sEJm4MCBVKpUydNhuFSqVInbbrvN02GIiIjkKiWlRESkwPriiy8wDIN169a5tZ87d46mTZvi6+vLb7/95qHoCg+n08lXX31Fx44dKV68OF5eXpQsWZJOnTrxySefkJqa6ukQPW7evHmMHj06W8esVKkShmFc9SsrybHCrG3btm7PV7FixWjSpAmfffYZTqfzmsbatm0bo0ePJjo6OmeCFRERyWdsng5AREQkN8XFxdGpUyc2bdrETz/9xK233urpkAq05ORkevfuzfz582nZsiUjRowgLCyMmJgY/vrrLx599FFWr17NlClTPB1qrqlYsSLJycl4eXm52ubNm8f777+frYmpiRMnkpCQ4DbH1KlTmTBhAsWLF3e1t2zZMtvmLKjKlSvH2LFjATh16hRfffUVgwYNYteuXbzxxhtZHmfbtm28/PLLtG3bNk+t0hIREfEUJaVERKTQiI+Pp3PnzkRFRTFz5ky6dOni6ZAKvCeffJL58+czceJEHn/8cbf7hg8fzu7du1mwYMEVx7Db7TidTry9vXMy1FxjGAa+vr45Pk+vXr3cbh8/fpypU6fSq1evKyZEEhMTCQgIyNngrpGnYypSpAj33HOP6/ZDDz1E9erVee+993j11VfdEowiIiKSddq+JyIihUJCQgK33norGzZsYMaMGXTr1s3t/szaQkeOHKFXr14EBgZSokQJRowYgcPhcOubmJjI8OHDKV++PD4+PlSvXp3x48djmqarT58+fWjYsKHbdd27d8cwDObMmeNqW716NYZh8Ouvv14x/tWrV3PrrbdSpEgR/P39adOmDcuXL7+o37Jly2jSpAm+vr5UqVKFjz/++JLjJScnM2zYMIoXL05QUBA9evTgyJEjGIZx0WqdI0eO8OCDDxIWFoaPjw+1a9fms88+u2K8AIcOHWLy5MnceuutFyWkMlWrVo1HH33Udfv8GkoTJ06kSpUq+Pj4sG3bNtLS0njxxRdp1KgRRYoUISAggMjISBYtWuQ25vljTJgwgYoVK+Ln50ebNm3YsmXLJePIyut+oaeeeorQ0FC3133o0KEYhsH//vc/V9uJEycwDIMPP/zQLb7MbXMDBw7k/fffB3DbJnahTz75xPV8NGnShLVr114xvqzIfN/v3buXrl27EhQUxN133w3A0qVL6d+/PxUqVMDHx4fy5cvz5JNPkpyc7Lp+/PjxGIbBgQMHLhr7ueeew9vbm7Nnz7rasvI+zqyBtm3bNu666y6KFi1KREQEkJFYe+CBByhXrhw+Pj6ULl2anj17um2Hmz17Nt26daNMmTL4+PhQpUoVXn311au+ntfC39+f5s2bk5iYyKlTpzhw4ACPPvoo1atXx8/Pj9DQUPr37+8W1xdffEH//v0BaNeunet1vrB+3LJly1zbiytXrsxXX32VbXGLiIjkNVopJSIiBV5iYiJdunRh7dq1TJ8+/bL
FhB0OB507d6ZZs2aMHz+ehQsX8vbbb1OlShUeeeQRAEzTpEePHixatIhBgwYRHh7O/PnzGTlyJEeOHGHChAkAREZGMnv2bOLi4ggODsY0TZYvX47FYmHp0qX06NEDyPjF32Kx0KpVq8vG/+eff9KlSxcaNWrESy+9hMVi4fPPP6d9+/YsXbqUpk2bArB582Y6depEiRIlGD16NHa7nZdeeomwsLCLxhw4cCA//vgj9957L82bN+evv/66KFEHGQmV5s2bYxgGjz32GCVKlODXX39l0KBBxMXF8cQTT1w27l9//RWHw+G2wiSrPv/8c1JSUhgyZAg+Pj4UK1aMuLg4Jk+ezJ133sl//vMf4uPjmTJlCp07d2bNmjWEh4e7jfHVV18RHx/Pf//7X1JSUnj33Xdp3749mzdvdntOsvK6X0pkZCQTJkxg69at1KlTB/j39Vy6dCnDhg1ztQG0bt36kuM89NBDHD16lAULFvD1119fss93331HfHw8Dz30EIZh8NZbb9GnTx/27dt3w6t07HY7nTt3JiIigvHjx+Pv7w/AtGnTSEpK4pFHHiE0NJQ1a9YwadIkDh8+zLRp0wC4/fbbefrpp/nxxx8ZOXKk27g//vgjnTp1omjRokDW38eZ+vfvT7Vq1Xj99dddib++ffuydetWhg4dSqVKlTh58iQLFizg4MGDrtVfX3zxBYGBgTz11FMEBgby559/8uKLLxIXF8e4ceNu6Lk63759+7BarYSEhDBv3jxWrFjBHXfcQbly5YiOjubDDz+kbdu2bNu2DX9/f1q3bs2wYcP43//+x/PPP0/NmjUBXP8F2LNnD/369WPQoEHcf//9fPbZZwwcOJBGjRpRu3btbItdREQkzzBFREQKqM8//9wEzIoVK5peXl7mrFmzLtv3/vvvNwHzlVdecWtv0KCB2ahRI9ftWbNmmYA5ZswYt379+vUzDcMw9+zZY5qmaa5du9YEzHnz5pmmaZqbNm0yAbN///5ms2bNXNf16NHDbNCggev2okWLTMBctGiRaZqm6XQ6zWrVqpmdO3c2nU6nq19SUpJ50003mR07dnS19erVy/T19TUPHDjgatu2bZtptVrN8//JX79+vQmYTzzxhNtjGDhwoAmYL730kqtt0KBBZunSpc3Tp0+79b3jjjvMIkWKmElJSZd4NjM8+eSTJmBGRUW5taemppqnTp1yfZ0/9v79+03ADA4ONk+ePOl2nd1uN1NTU93azp49a4aFhZkPPvjgRWP4+fmZhw8fdrWvXr3aBMwnn3zS1ZbV1/1STp48aQLmBx98YJqmacbGxpoWi8Xs37+/GRYW5uo3bNgws1ixYq7XLzO+zz//3NXnv//9r3mpj2WZfUNDQ82YmBhX++zZs03AnDt37hVjPN+4ceNMwNy/f/9Fj//ZZ5+9qP+lXtuxY8eahmG4vcdatGhx0XO1Zs0aEzC/+uor0zSv7X380ksvmYB55513uo159uxZEzDHjRt3xcd5qbgfeugh09/f30xJSXF77BUrVrziWKZpmm3atDFr1Kjher9u377dHDZsmAmY3bt3v+ycK1eudHsOTNM0p02b5vb9fb6KFSuagLlkyRJX28mTJ00fHx9z+PDhV41TREQkP9L2PRERKfBOnDiBr68v5cuXv2rfhx9+2O12ZGQk+/btc92eN28eVqvVtQom0/DhwzFN07UNr0GDBgQGBrJkyRIgY7VMuXLluO+++9iwYQNJSUmYpsmyZcuIjIy8bDxRUVHs3r2bu+66izNnznD69GlOnz5NYmIiHTp0YMmSJTidThwOB/Pnz6dXr15UqFDBdX3NmjXp3Lmz25iZJw6ev20OMraenc80TWbMmEH37t0xTdM19+nTp+ncuTPnzp1jw4YNl409Li4OgMDAQLf2efPmUaJECddXxYoVL7q2b9++lChRwq3NarW66ko5nU5iYmKw2+00btz4knH06tWLsmXLum43bdqUZs2aMW/evIv6Xu1
1v5QSJUpQo0YN12u8fPlyrFYrI0eO5MSJE+zevRvIeO0jIiIuuSUvqwYMGOBacZQZH3DVGLPqUivC/Pz8XP+fmJjI6dOnadmyJaZp8vfff7vFtn79evbu3etq++GHH/Dx8aFnz55A1t/H57vwNfHz88Pb25vFixe7bQm8Utzx8fGcPn2ayMhIkpKS2LFjRxafEXc7duxwvV9r1qzJpEmT6Natm2sb6/lzpqenc+bMGapWrUpISMgVv0cuVKtWLbefByVKlKB69erZ9jqLiIjkNUpKiYhIgffxxx/j7e3Nrbfeys6dOy/bz9fX96JESNGiRd1+AT5w4ABlypQhKCjIrV/mFpzM2jpWq5UWLVq4tm4tXbqUyMhIIiIicDgcrFq1im3bthETE3PFpFRmYuP+++93S+SUKFGCyZMnk5qayrlz5zh16hTJyclUq1btojGqV6/udvvAgQNYLBZuuukmt/aqVau63T516hSxsbF88sknF839wAMPAHDy5MnLxp75HJ1/AhxAq1atWLBgAQsWLKBTp06XvPbC2DJ9+eWX1KtXD19fX0JDQylRogS//PIL586du6jvpZ6Lm2++2a3OD2Ttdb+cyMhIt9e4cePGNG7cmGLFirF06VLi4uLYuHHjFV/jrDg/0ZgZH5ClGK/GZrNRrly5i9oPHjzIwIEDKVasmKvWVps2bQDcnu/+/ftjsVj44YcfgIxk5rRp0+jSpQvBwcFA1t/H57vwPeDj48Obb77Jr7/+SlhYGK1bt+att97i+PHjbv22bt1K7969KVKkCMHBwZQoUcK1hfRS75OsqFSpEgsWLGDhwoUsW7aM48eP8/PPP7tOMUxOTubFF1901ZkrXrw4JUqUIDY29prmvPB1hqy/F0VERPIj1ZQSEZECr1atWsybN48OHTrQsWNHli9ffslVU1arNVvnjYiI4LXXXiMlJYWlS5fywgsvEBISQp06dVi6dKmrrtGVEhaZq0fGjRt3Uc2kTIGBgaSmpmZr7OfPfc8993D//fdfsk+9evUue32NGjUA2LJlC/Xr13e1lyhRgltuuQWAb7755pLXnr/yJNM333zDwIED6dWrFyNHjqRkyZJYrVbGjh3rtkrnWt3I6x4REcGnn37Kvn37XIlHwzCIiIhg6dKllClTBqfTecNJqcvFaJ5XZP16+fj4YLG4/53S4XDQsWNHYmJieOaZZ6hRowYBAQEcOXKEgQMHuq1qKlOmDJGRkfz44488//zzrFq1ioMHD/Lmm2+6+mT1fXy+S70HnnjiCbp3786sWbOYP38+//d//8fYsWP5888/adCgAbGxsbRp04bg4GBeeeUVqlSpgq+vLxs2bOCZZ565aDVWVgUEBLjes5cydOhQPv/8c5544glatGhBkSJFMAyDO+6445rmzMnXWUREJC9SUkpERAqFpk2bMmvWLLp160bHjh1ZunTpRatjsqJixYosXLiQ+Ph4t9VSmduCzt+KFhkZSVpaGlOnTuXIkSOuxETr1q1dSambb775koXIM1WpUgWA4ODgK/5SXKJECfz8/FwrUs534eqwihUr4nQ62b9/v9tqoj179lw0ZlBQEA6H44pzX06XLl2wWq18++23rhPdbsT06dOpXLkyM2fOdNsK99JLL12y/6Wei127drkKYmeHzNd0wYIFrF27lmeffRbIeI0//PBDypQpQ0BAAI0aNbriODeytS8nbN68mV27dvHll19y3333udoXLFhwyf4DBgzg0UcfZefOnfzwww/4+/vTvXt31/1ZfR9nRZUqVRg+fDjDhw9n9+7dhIeH8/bbb/PNN9+wePFizpw5w8yZM90Ky+/fv/+G5rya6dOnc//99/P222+72lJSUoiNjXXrl9deZxEREU/T9j0RESk0OnTowNSpU9mzZw+33nqrq+bRtejatSsOh4P33nvPrX3ChAkYhkGXLl1cbc2aNcPLy4s333yTYsWKuU7PioyMZNWqVfz1119XXUHTqFEjqlS
pwvjx4y/aBgcZW+wgY4VF586dmTVrFgcPHnTdv337dubPn+92TWaNqQ8++MCtfdKkSW63rVYrffv2ZcaMGWzZsuWyc19OhQoVePDBB/n1118ver4yXcsKkMxVJOdfs3r1alauXHnJ/rNmzeLIkSOu22vWrGH16tVur9GNuummmyhbtiwTJkwgPT3ddYpiZGQke/fuZfr06TRv3hyb7cp/BwwICAC4KInhKZd6rk3T5N13371k/759+2K1Wpk6dSrTpk3jtttucz0myPr7+EqSkpJISUlxa6tSpQpBQUGulYKXijstLe2i93p2s1qtF72XJ02ahMPhcGvLa6+ziIiIp2mllIiIFCq9e/fm008/5cEHH6RHjx789ttv+Pr6Zvn67t27065dO1544QWio6OpX78+v//+O7Nnz+aJJ55wrQgB8Pf3p1GjRqxatYru3bu7Vkm0bt2axMREEhMTr5qUslgsTJ48mS5dulC7dm0eeOABypYty5EjR1i0aBHBwcHMnTsXgJdffpnffvuNyMhIHn30Uex2O5MmTaJ27dps2rTJNWajRo3o27cvEydO5MyZMzRv3py//vqLXbt2Ae6rOd544w0WLVpEs2bN+M9//kOtWrWIiYlhw4YNLFy4kJiYmCvGP3HiRPbv38/QoUP5/vvv6d69OyVLluT06dMsX76cuXPnXlTz6nJuu+02Zs6cSe/evenWrRv79+/no48+olatWpdMdFStWpWIiAgeeeQRUlNTmThxIqGhoTz99NNZmi+rIiMj+f7776lbt66r1lPDhg0JCAhg165d3HXXXVcdI3Ml1bBhw+jcuTNWq5U77rgjW+O8FjVq1KBKlSqMGDGCI0eOEBwczIwZMy5b26hkyZK0a9eOd955h/j4eAYMGOB2/7W8jy9n165ddOjQgdtvv51atWphs9n46aefOHHihOu5atmyJUWLFuX+++9n2LBhGIbB119/nePb32677Ta+/vprihQpQq1atVi5ciULFy4kNDTUrV94eDhWq5U333yTc+fO4ePjQ/v27SlZsmSOxiciIpJXKSklIiKFzgMPPEBMTAwjRoygf//+/PTTT1m+1mKxMGfOHF588UV++OEHPv/8cypVqsS4ceMYPnz4Rf0zV0VFRES42kqVKkXVqlXZs2dPlmoNtW3blpUrV/Lqq6/y3nvvkZCQQKlSpWjWrBkPPfSQq1+9evWYP38+Tz31FC+++CLlypXj5Zdf5tixY25JKYCvvvqKUqVKMXXqVH766SduueUWfvjhB6pXr+6WpAsLC2PNmjW88sorzJw5kw8++IDQ0FBq167tVjPocvz9/fntt9/4+uuv+frrr3nrrbeIi4sjJCSE+vXr88EHH1y2XtWFBg4cyPHjx/n444+ZP38+tWrV4ptvvmHatGksXrz4ov733XcfFouFiRMncvLkSZo2bcp7771H6dKlszRfVmUmpc5/jW02Gy1atGDhwoVZeo379OnjStx98803mKbp0aSUl5cXc+fOZdiwYYwdOxZfX1969+7NY4895lYf7HwDBgxg4cKFBAUF0bVr14vuz+r7+HLKly/PnXfeyR9//MHXX3+NzWajRo0a/Pjjj/Tt2xeA0NBQfv75Z4YPH86oUaMoWrQo99xzDx06dLjoFMrs9O6777q2qqakpNCqVSsWLlx40ZylSpXio48+YuzYsQwaNAiHw8GiRYuUlBIRkULLMFU5UURERICoqCgaNGjAN998ky01oDwlOjqam266iXHjxjFixAhPhyMiIiIil6GaUiIiIoVQcnLyRW0TJ07EYrG4FYgWEREREckp2r4nIiJSCL311lusX7+edu3aYbPZ+PXXX/n1118ZMmQI5cuX93R4IiIiIlIIKCklIiJSCLVs2ZIFCxbw6quvkpCQQIUKFRg9ejQvvPCCp0MTERERkUJCNaVERERERERERCTXqaaUiIiIiIiIiIjkOiWlREREREREREQk1ykpJSIiIiIiIiIiuU5
JKRERERERERERyXVKSomIiIiIiIiISK5TUkpERERERERERHKdklIiIiIiIiIiIpLrlJQSEREREREREZFcp6SUiIiIiIiIiIjkOiWlREREREREREQk1ykpJSIiIiIiIiIiuU5JKRERERERERERyXVKSomIiIiIiIiISK5TUkpERERERERERHKdklIiIiIiIiIiIpLrlJQSEREREREREZFcp6SUiIiIiIiIiIjkOiWlREREREREREQk1ykpJSIiIiIiIiIiuU5JKRERERERERERyXVKSomIiIiIiIiISK5TUkpERERERERERHKdklIiIiIiIiIiIpLrlJQSEREREREREZFcp6SUiIiIiIiIiIjkOiWlREREREREREQk1ykpJSIiIiIiIiIiuU5JKRERERERERERyXVKSomIiIiIiIiISK5TUkpERERERERERHKdklIiIiIiIiIiIpLrlJQSEREREREREZFcp6SUiIiIiIiIiIjkOiWlREREREREREQk1ykpJSIiIiIiIiIiuU5JKRERERERERERyXVKSomIiIiIiIiISK5TUkpERERERERERHKdzdMBiIiIiIh4QrrDSXyanfg0O4npDhxOExOwGgYBXlaCfGwEedvwturvuCIiIjlBSSkRERERKTRM0yQmJZ3DccmcSEzFYZo4TTAA84K+hpGRoCrh70P5YF+K+3ljGIYnwhYRESmQDNM0L/z3V0RERESkwIlNSWfb6XjOpabjNDMSTjaLgQEXJZtMM2PVlMNpYjdNLAYEeduoWTyIUD9vj8QvIiJS0CgpJSIiIiIFmsNpsi82kX2xSdidJt4WCxbj4kTU1cZIczqxWgwqBvtRrVgANou29YmIiNwIJaVEREREpMBKdziJOnGOk0lpWA0DL4tx3VvwTNMk3WniME2K+XnRMKwIPjZrNkcsIiJSeCgpJSIiIiIFkt3pZP3xc5xOSsPbYsFqyZ56UA7TJM3hJMTXiyalQ1QIXURE5DrpX1ARERERKXBM02TLqfhsT0hBRi0qH6uF2JR0ok6cQ3/jFRERuT5KSomIiIhIgXMsMZVjCSl4WYxsTUhlshgG3hYLp5PSOBiXnO3ji4iIFAZKSomIiIhIgZJid7DjdDymSY4WI7f+c3LfrphEEtPsOTaPiIhIQaWklIiIiIgUKIfikkm2O/HJhVpP3lYLaQ4n0eeScnwuERGRgkZJKREREREpMBxOk0PxKVjguk/ZuxaGYWA1DI4mpJDmcOb4fCIiIgWJklIiIiIiUmCcSEolJd2BVy6eiOdlMUhzmBxLSMm1OUVERAoCJaVEREREpMA4m5yGSUYh8tySuSLrTHJars0pIiJSECgpJSIiIiJudu/eTcuWLbn55ptp0qQJW7duzfK1pmly00030aFDh6v2nTVrFqtWrXLdXrduHQMGDHDd/vjjj6lRowbh4eEcOXKEyMjIq445eEBfjuzbc9V+d0U0ZM+2zRe1Hz98kB71qlz1eoAZn31EzKkTQEYS7FyqHdM0L9l34MCBTJw4EYDRo0fzxBNPADBnzhyefPLJLM0nIiJS0CgpJSIiIiJuHnroIYYMGcKuXbt45plnGDhwYJav/eOPPwgJCWHTpk3s37//sv3sdvtFSanGjRvzww8/uG5PnDiRzz//nKioKMqWLcvSpUuvOLfd6WT05G+pWKValuO9ETM++4SYUycBsBqQ6nCSYr+2ulI9evRgwoQJORGeiIhInqeklIiIiIi4nDx5knXr1nHPPfcA0LdvXw4dOsSePVdffQQwZcoU/vOf/3DXXXfx2WefudoXL15M7dq1GTRoEOHh4Xz77bfMmTOHcePGER4ezuTJk1m8eDHh4eEA9OvXj7179zJw4ED69etHdHQ0ISEhrvFWrlxJREQE9evXp169esyePZs0h8l9rRuzb/sWAKZN/pBHe3ZkSNe2PNqzI1s3rL3m56PDTSX49v0JPNqzE3dHNuK3ad8B8NX/xnPm5HFefew/DOn
aln3bt5CelsZzzz1L06ZNCQ8P5/bbb+fs2bNXHP+LL76gV69ertsvvfQSVatWpUmTJowaNYpKlSq57ps/fz4RERE0atSIpk2bsmjRItdzW6dOHR599FHq169P7dq1Wbduneu6X375hSZNmlC/fn3Cw8NZvXo1AGvXrqV9+/Y0btyYBg0aMG3atGt+fkRERG6EzdMBiIiIiEjecejQIUqXLo3NlvEx0TAMKlSowMGDB6latSqDBw+mR48e9OjR46JrY2Ji+O233/jwww85ePAg3bp14+WXX8Ziyfg76Pbt2/nggw+YMmUKAIsWLSI8PNy1lW3x4sWusaZPn06lSpX44YcfCA8PJzo62m2eXr16MX36dCIjI3E6ncTGxuL8Z+tcZjWpjr3703/wIwBs+3sdb40Yyhd/rLzsY09NTSHmEkkkL28fPpj9Owf37ubRnh3p2Pt27hs2gt9+/I7/e+9Tqtaqi9M0+ea9CZQJCGDNmjUAvPrqq4waNYr333//6k88GcmjGTNm8PfffxMYGMiDDz7oum/fvn2MHj2a+fPnExwczJ49e4iMjHQ9Lzt27GDKlCl88MEHfPTRR7zwwgvMnz+fXbt28cADD7BkyRJq1KhBeno6SUlJxMbGMmTIEObNm0fp0qU5ffo0DRs2pGXLlpQtWzZL8YqIiNwoJaVEREREJMsmT5582fu+/fZbunTpQkhICCEhIYSFhTF//ny6dOkCQOXKlWnTps0Nx7By5UqqV6/uqjFlsVgoVqwYSel2ADKrOu3Zuplv359AXOxZrFYrh/btITUlGR9fv0uOm5SUTHJi0nkjZLilV18AKlSphtVqI+bUSUqULuPWxwRWLvwVZ3Ii8+bMAiAtLc1tpdPV/PHHH/Tv35+goCAABg0a5FoN9dtvv7Fnzx5at27t6m+xWDh48CAAVatWpVmzZgC0aNGC8ePHA7BgwQJuvfVWatSoAYCXlxdFihRh3rx57Nu3z/XaZNq5c6eSUiIikmuUlBIRERERl/Lly3Ps2DHsdjs2mw3TNDl48CAVKlS46rVTpkzh+PHjrkRMfHw8U6ZMcSU+AgMDczJ0vK0ZK7JME9LT0njpkYG8/d0satRvQGJ8PD3qVSY9Le2ySanM+C6sVe7t4+v6f4vVisNhv+ha0zQxTXh74rv06Nrlovuvh3HeCYKmadKxY0e+++67i/odOXIEX99/Y7RardjtF8d4Yby1a9dmxYoV2RKriIjI9VBNKRERERFxKVmyJA0bNuSbb74BYMaMGZQrV46qVate8br169dz6tQpjh49SnR0NNHR0ezdu5f58+dz6tSpS14THBzMuXPnrjnGli1bsnv3blfhc6fTSUxMDDaLBYsBDtMkLTUVe3o6JctkrPqZ9eWnVx3Xy8sLbx9vTNMkLT3tqv39A4NIjIsDwGFCZKcufDjpfyQlJQGQlJR0TScXtm/fnhkzZpCQkIBpmm41uTp37szChQvZtGmTqy1zm+CVdO7cmfnz57Njxw4A0tPTOXfuHC1btmT//v0sXLjQ1TcqKoq0tKs/bhERkeyipJSIiIiIuPn444/5+OOPufnmm3njjTf4/PPPXfcNHjyYOXPmXHTNlClTuOOOO1z1owBCQkLo2LEjX3/99SXnuffee/nxxx9p0KDBFbcFXqho0aL89NNPPPvss9SrV4+GDRuyfPlyACz/rC4KCArigaee47+9OvNw9w7YvLyzNLZ/QAAACQkJV+3be+Bg3n7uKYZ0bcuerZt56PGnaNKkCc2aNaNevXo0b96cqKioLD+u2267jZ49exIeHk6TJk1c2yAhY3ved999x0MPPUT9+vWpWbMmEydOvOqYVatW5fPPP+eee+6hfv36NGvWjJ07d1K0aFF++eUXXn/9derXr0+tWrV49tlncTqv7fRAERGRG2GY5oULlEVERERE8qcj8SlsPHEOb6vFlaC6Fqbp5PDhw4BBuXLl3LbQXf4akxSHk9rFg6gU4n8dUf8rPj6
eoKAgTNNk+PDhJCcn8+GHH97QmCIiInmVakqJiIiISIERFuCDj81CmsOJj9V6zdcbhoWAgADi4xNISk4iwD/gqtekO028LAalA32v2vdq7rvvPqKjo0lJSaF27dp89NFHNzymiIhIXqWVUiIiIiJSoOw6k8Dus4n4Wi1ZWul0obS0VI4dO46vny9hJcOu2DdzlVTFYD/qlAy+3pBFREQKJdWUEhEREZECpXwRP3xtFlKvsz6St7c33t5epCSnYLenX7FvmtOJl8Wg4g1u2xMRESmMlJQSERERkQLFz2alRmggAPbrSkwZBAYGAVcueO5wmjhNqFYskCBvVcUQERG5VkpKiYiIiEiBUybQl1IBPqQ7TRzXUa0iICAAwzBISEgELr7eaZqkOZ2E+nlTsYhfNkQsIiJS+CgpJSIiIiIFjmEY1C0RTDE/L9IczmtOTFksFvwD/HE4HCQnJ7vd5zRNUh1Ogn1shIcFX9cpfyIiIqKklIiIiIgUUF5WC41KhVDcz5s0h5M0h5NrOeMnMDBjC2DmFj7TNEl3Okl1OAnxtdG4dAi+tms/4U9EREQyKCklIiIiIgWWt9VCo9IhVAnxBwNSHE4czqwlpnx9fPDyspGcnEy6PZ1UhxPThIpF/GhSuih+SkiJiIjcEMO8lj8XiYiIiIjkUzHJaWw7HU98mh2nCTbDwGoxMMjY7nchp2kSn5BASlo6Pr4+hAYFUDM0iJIBPrkfvIiISAGkpJSIiIiIFBqmaXIqKY3D8cmcSkrDYZpc7tOwARiY7N0cReLRA4x45D9YLdpoICIikl10dq2IiIiIFBqGYVAywIeSAT6k2B3Ep9mJT7OTmObA7jQBE4thEOhtI8jbRpCPjegF+9m3KYrdu3ZRo0YNTz8EERGRAkNJKREREREplHxtVnxtVkr4X3k7XkREBFFRUSxbtkxJKRERkWyk9cciIiIiIldQu3ZtQkJC+Pvvv0lMTPR0OCIiIgWGklIiIiIiIldgsVho2bIldrud1atXezocERGRAkNJKRERERGRq2jVqhUAS5cuRecEiYiIZA8lpURERERErqJ48eLUrFmTo0ePcuDAAU+HIyIiUiAoKSUiIiIikgUREREALFu2zMORiIiIFAxKSomIiIiIZEF4eDgBAQGsWbOG1NRUT4cjIiKS7ykpJSIiIiKSBTabjebNm5Oamsq6des8HY6IiEi+p6SUiIiIiEgWaQufiIhI9rF5OgARERERgHQznQRnAg4cGBh4GV4EGUEYhuHp0ERcypQpQ+XKldm3bx9Hjx6lTJkyng5JREQk31JSSkRERDzCaTo5aj/KIfshTtpPEuuMxYED0zQxDAMDA2/Dm+LW4pS0lqSyV2WKWIt4OmwRIiIi2LdvH8uXL6d///6eDkdERCTfMkzTND0dhIiIiBQedtPOzrSd7EjbQawjFidODAxshg0rVlc/ExO7aceBAwCbYaOcrRy1vGtR1qusp8IXITU1lZEjR2Kz2Xjrrbew2fR3XhERkeuhf0FFREQk15yyn2J58nJOO04D4GP4YDMu/3HEy/ACwDRN0kgjOj2aQ/ZD3Ox1M419G+Nr8c2VuEXO5+PjQ5MmTVi2bBlRUVE0btzY0yGJiIjkSyp0LiIiIjnONE02pmzkl8RfOOU4ha/hS4Al4IoJqfMZhoGP4UOAEYAFC9vTtjMnYQ4n7CdyOHKRS1PBcxERkRunpJSIiIjkKNM0WZuylrUpa3GaTgKMAKyG9eoXXkJmcsrf8Oec8xwLEhdw1H40myMWubpKlSpRpkwZtm/fzunTpz0djoiISL6kpJSIiIjkqL9T/2ZT6iZshg0/i1+2nKZnMSwEGAEkm8n8mfgnp+ynsiFSkawzDIPIyEgAVqxY4eFoRERE8iclpURERCTHHLUfZWPqRqxY8TF8snVswzBciaklyUtIN9OzdXyRq2nWrBk2m43ly5fjdDo9HY6IiEi+o6SUiIiI5Ig0M40VySuwm/ZsT0hlMgw
DP8OPGEcMf6f8nSNziFxOQEAADRo0IDY2lm3btnk6HBERkXxHSSkRERHJEVtSt3DWcRZ/wz9btuxdjtWwYjNsbE3b6jrVTyS3qOC5iIjI9VNSSkRERLJdupnOjrQdWLBgMXL+44YPPthNO7tSd+X4XCLnq169OsWLF2fjxo3ExcV5OhwREZF8RUkpERERyXbR6dEkOhPxNXxzZT7DMLBhY2/6XlKcKbkypwhkvPciIiJwOp2sXLnS0+GIiIjkK0pKiYiISLbbl74PIFdWSWXyNrxJMVM4ZD+Ua3OKALRo0QLDMFi2bBmmaXo6HBERkXxDSSkREZEC7vPPP8cwDGbNmpWl/gMHDqRs2bI0aNCAatWqERERwddff53l+Zymk1P2U9iwXWfEMKzuMKI3RV/TNZkJsDOOM9c9L8DgwYNZtGjRDY0hhUtISAh169bl5MmT7N6929PhiIiI5BvX/2lRRERE8rzo6Gg+/fRTmjdvfk3XjRw5kieeeAKAqKgoBgwYwKlTp3jqqaeuem2cM4500rEa1usJ+YYYGJxynLpiH7vdjs12+Y9AkydPzu6wpBCIiIhg06ZNLFu2jJtvvtnT4YiIiOQLWiklIiJSQDmdTgYPHsykSZPw8fG57nHCw8N59913efPNN11bk77++muaNWtGw4YNad26NRs3bnT1Hzd+HK+0f4UXW7/IG33e4NTBjCTR9LHTmXDPBMbcNobhjYczbsA44mPirymWvev3Mua2MbzQ5gWei3iOVT+tAsBhdzC291jGtBvDw00e5q677iIxMRGAxYsXU7t2bQYNGkR4eDg//fQTlSpV4sUXX6RFixbcdNNNjBkzxjVH27ZtXavKBg4cyEMPPUSHDh24+eab6dOnD2lpaQDEx8czYMAAatSoQWRkJA899BADBw68rudY8r+6detSpEgRNmzYQFJSkqfDERERyReUlBIRESmg3nnnHVq1akWjRo0uuu/FF1/ko48+yvJYzZo14+TJk5w6dYrly5czdepUlixZwoYNG3jttde46667APjuu+/Ys2sPz/z+DK8veZ1Wt7fi8+Gfu8bZuXInj015jLfXvU1o2VC+H/292zwpKSnEx8dfsi5PYmwikx+fzH8n/5fX/nqN52Y/x7ejviXmaAwWq4XHpjzG6MWjeW3lawQHBzNp0iTXtdu3b+e+++4jKiqK/v37AxAbG8vKlStZu3Yt48aN48iRI5d87FFRUcydO5ft27dz4sQJZsyYAcArr7yCn58f27dvZ968eaxYsSLLz6cUPBaLhZYtW5Kens6aNWs8HY6IiEi+oO17IiIiBdCWLVuYMWMGS5YsueT9r7zyyjWNd36SaPbs2WzcuJFmzZq52mJiYkhOTmbWrFmsXLOSFW1XYMGC6XRPLoV3DickLASA9gPbM+GeCa77zp07R2xsLMnJyTjsDhwOh9u1u9bs4uSBk7zV7y239mO7jxFSKoRf3/+VDfM34HQ4MeINWrZs6epTuXJl2rRp43ZdZiKtePHiVK5cmf3791O2bNmLHnvv3r3x9/cHoGnTpuzduxeAP/74gwkTJmAYBkFBQQwYMIA9e/Zc+YmUAq1ly5b8+uuvLF26lDZt2mAYhqdDEhERydOUlBIRESmAli5dSnR0NNWqVQPg+PHjDBkyhGPHjvHII49c83hr166lZMmSlCxZEtM0uf/++3n99dcv6meaJo8+8yjF7yyOv+F/1dP3DMPA6XRy+vRpkpOTgYyi0RgQFxd3weBQrkY5Xl7w8kXjLPthGVuXbOWZn58hsEggZ78461asPDAw8KJrfH19Xf9vtVqx2+2XjDGr/ZSAkJIlS1K9enV27tzJwYMHqVixoqdDEhERydO0fU9ERKQAeuSRRzh27BjR0dFER0fTvHlzPvnkk+tKSG3atIknnniCZ555BoAePXrwzTffcPDgQSCjdtW6desA6NWrF99/+j0psSk4cGBPtxO9Mdo1VtTvUZw7eQ6ARV8tomZkTY4
dO0ZycjIWi4WSJUtSqlQpDAxSUlJITU11XVutaTVOHTjF5kWbXW3Rm6Kxp9lJjE0kKDQI72BvfJN8+fLLL6/5cV6r9u3b8+WXX2KaJgkJCfz44485PqfkfREREQAsW7bMw5GIiIjkfVopJSIiUgi9+OKLlClThocffviS948bN44vvviCpKQkSpYsyXPPPcd9990HQGRkJG+99Ra9e/fGbreTlpZGt27daNy4MXfffTenT59mfPfxOE0npsOk7T1tqVS/EgA1WtTgvUHvEXMshuIVi9PrpV7Y7Xa8vb0pUaKE61Q8i9XC5w9/zldeX2G1Zpzi9/LClxn540i+HfUt3476Fke6g9ByoTz13VNE3hHJul/WMarJKMJKhNE2si0HDhzI8edw0KBB1KxZk+LFi1O/fv2MVV5SqDVo0AB/f3/WrFlDv379buiQARERkYLOMC9VSVRERETkBvye+DsH0g8QaPl329z0sdNJOpfEvWPv5cyZM67T8YKCgihatOhF299OnTxFUnISJUqUcNV0uhKn6STJTKKtf1uqeVfL3gd0Cenp6TgcDnx9fUlMTKRz584MHTqUAQMG5Pjckrd9//33LFq0iIEDB9KiRQtPhyMiIpJnafueiIiIZLvKXpUBcJjuxcqdTifHjh0jMTERwzAoXrw4xYoVu2Q9ppCiIQCcPXv2kqfxXSjVTMXX8KW8rfyNP4AsOHv2LK1atSI8PJxGjRrRqlUrbr/99lyZW/I2beETERHJGm3fExERkWxX0asiAZYAkp3J+BsZq5y6DOvCmTNnSE9Px8vLixIlSuDl5XXZMby8vAgKCiI+Pp74+HiCg4Mv29c0TRw4qOlVE1+L72X7ZaeSJUuyfv36XJlL8pdy5cpRqVIl9uzZw/HjxylVqpSnQxIREcmTtFJKREREsp2X4UUN7xo4ceJwOoiJieH06dOYpom/vz+lSpW6YkIqU5EiRbBYLJw7dw6n0wlnzsDBgxAT49YvlVRsho2bfW7OqYckck0yV0stX77cw5GIiIjkXUpKiYiISI6o41OHIDOI0wmniY+PB6Bo0aKUKFECiyVrH0GsVivBwcE4nU6Sjh2DkychMRFOnMj4ImOLoN20U9u7NqHW0Bx7PCLXokmTJnh7e7Ny5UrsdrunwxEREcmTlJQSERGRHLF3x142frGR9JR0vAK8KFWq1BW34F1OcHAwVquVtIQE99pSMTGYsWdJNpMJtYbSwLdBNkYvcmN8fX1p3Lgx8fHxbNq0ydPhiIiI5ElKSomIiEi2Mk2Tn3/+mXfffZeYHTHY9tgILhoM3le5MCEBdu2CnTshNtbVbBgGRYsWJdnLC4fj38LpJpCYFodfqkGkXyRextW3A4rkJhU8FxERuTIlpURERCTbJCYmMmnSJObOnYtpmnTt2pVhHYdR37c+dtNOsjP58ifpnTgBDgc4nXD8OJzXLyAgAIufHzE+vphOJ06LQWKoH35xKbR/dQElTjsuPaaIB1WuXJnSpUuzbds2Yi6ogyYiIiJKSomIiEg2iY6OZsyYMWzduhV/f3+GDh1Kz549sVqtNPFtQlO/plgMC4lmIg7zgiSSaUJamvvt9HS3LkWLFiXZx5tzIQEkFfOjyNF4Or21gjKr9sJTT0Fyci48SpGsMwyDiIgITNNkxYoVng5HREQkz1FSSkRERG6IaZosXryYt956i5iYGCpWrMioUaOoU6eOq49hGNTzqUe3gG6UtJYkxUwhwZmA3fynALTjEiudrFa3OQwfA79QPxK8bVRac4aeoxZRcs8/q0927YKXXspYZSWShzRr1gyr1cry5cszTpAUERERFyWlRERE5Lqlpqby2WefMXXqVBwOB61bt+bpp58mNPTSp+CVsJWgW2A3Wvq1JNQaSpqZRoIzgURnIqmB3ti9rdi9LNi9LKQbDpKdyRn3m4kA3OR1E0d/PMqqvw28Qku5D/7nnzB5ck4/ZJFrEhQURHh4ODExMWzfvt3T4YiIiOQ
pNk8HICIiIvnTsWPH+Pjjjzl27Bje3t7cc889NGvW7KrX2QwbtXxqUdO7JkftRzlkP8TJxD3E2k+S7msDAzAMDMOJr+FLCWsJSlhLUNmrMsHWYOIrxLMkegmrBgyg5ccfQ3z8v4N/8glUqwbt2uXcAxe5RhEREaxfv55ly5ZRu3ZtT4cjIiKSZygpJSIiItds3bp1fPXVV6SmphIWFsbDDz9MmTJlrmkMwzAo61WWsl5l4YAT+4hJJIT64fCyYhQtitennxNoBGIYhtt13bt3Z/Xq1Uxfs4aGL72E79NPu2/b+/BDaNsWLrhOxFNq1qxJaGgoGzduJD4+nqCgIE+HJCIikido+56IiIhkmd1u5/vvv+fTTz8lNTWVhg0b8vzzz19zQuoiCQnY0hyEHEsg9OA5isVbCLIEXZSQAggODubWW28lMTGReTEx8OST7h3S0pSQkjzFMAxatWqFw+Fg1apVng5HREQkz1BSSkRERLLk7NmzjB8/nkWLFmGxWLj99tsZMmQIvr6+Nz74+VvwAAIDr9j9lltuISQkhD/++IMzHTvCQw9lFEb39obHHrvxeESyWcuWLTEMg2XLlmGapqfDERERyROUlBIREZGr2rZtG6+++ir79+8nJCSEESNG0KFDh0uuZLouFyalrrK9ydvbm549e2K325k9Zw785z8Zhc4XL4ZbbsmemESyUdGiRalduzbHjx9n3759ng5HREQkT1BSSkRERC7LNE1+/vln/ve//5GYmEiNGjUYNWoUVapUyd6JrjEpBdC8eXPKlSvH6tWrOXDgAAQEZKyUAjhzBp55JiNZtWJF9sYqcp0iIiIAWLp0qYcjERERyRuUlBIREZFLSkhIYNKkScydOxfTNOnatSuPP/54zhRpvsbtewAWi4V+/foBMH36dPctUa+9Bn/8AX//DY8/DsuWZWe0ItelXr16BAUFsW7dOpKTkz0djoiIiMcpKSUiIiIXiY6OZsyYMWzdupWAgACGDh1Kz549sVhy6KNDQoL77SwmvmrWrEmdOnXYtWsXmzZt+veO48f//X/ThOefB22ZEg+zWq20bNmS9PR01q5d6+lwREREPE5JKREREXExTZPFixfz1ltvcfbsWSpWrMgLL7xAnTp1cnbi60xKAfTt2xfDMJgxYwYOhyOjsU8f905JSfDUU3Du3A0GKnJjWrVqBcAyrd4TERFRUkpEREQypKamMmXKFKZOnYrD4aBNmzY8/fTThIaG5vzk17F9L1OZMmVo1aoVJ06c+LdWT9++0Lmze8fDh+HZZ8Fuv8FgRa5fWFgY1apV48CBAxw6dMjT4YiIiHiUklIiIiLCsWPHGDt2LGvXrsXb25sHH3yQu+66C5vNljsBXEeh8/P16NEDb29v5s6dm1GrxzDgxRehVi33jmvXwttv32CwIjcms+C5VkuJiEhhp6SUiIhIIbd27VrGjh3LsWPHCAsL47nnnqNZs2a5G8QNrJQCKFKkCJ07dyYhIYH58+dnNPr4wPjxULy4e+dp02DGjBsIVuTGNGzYED8/P1avXk16erqnwxEREfEYJaVEREQKKbvdzvfff8/kyZNJTU2lUaNGPP/885QpUyb3g/H2dr99YSIpCzp27EiRIkVYuHAhMTExGY0lS2asjLpw/LfegvXrrzNYkRvj7e1Ns2bNSE5OZsOGDZ4OR0RExGOUlBIRESmEzp49y/jx41m0aBEWi4Xbb7+d//znP/j6+nomoK5d//3/OnWgatVrHsLHx4eePXuSnp7O7Nmz/72jdu2MrXznczhg5Eg4cuQ6Axa5MdrCJyIiArlUKEJERETyim3btjF58mQSExMJCQnhoYceonLlyp4N6uGHoX79jNPxOnTIqAl1HVq0aMHChQtZvXo1HTp0oEKFChl33Hor7N0Ln3/+b+e4OHjyyYy2gIBseBAiWVe+fHkqVKjArl27OHHiBGFhYZ4OSUREJNdppZSIiEghYZomP//8M//73/9ITEykZs2
ajBo1yvMJKchIQrVsCV26XLzV7hpYLBb69euHaZpMnz4d0zT/vfORR6B1a/cL9u2DUaPA6bzuOUWuV+ZqqeXLl3s4EhEREc9QUkpERKQQSEhIYNKkScydOxfTNOnWrRvDhg0j6BpPucsPateuTa1atdi5cydbtmz59w6LBcaMgSpV3C9YuhTefz93gxQBmjRpgpeXFytXrsThcHg6HBERkVynpJSIiEgBt3//fsaMGcPWrVsJCAhg6NCh9OjRA4slD30M+OADaNMG7r0XDh++4eH69u2LYRjMmDED5/mroPz9YcIEKFLE/YIvv4TffrvheUWuhb+/P40aNSIuLo7Nmzd7OhwREZFcl4c+jYqIiEh2Mk2TRYsWMW7cOM6ePUvFihV54YUXqFOnjqdDc7dzJ3z2GSQmwvbt8OmnNzxkuXLlaNGiBceOHbu4kHSZMjBuHFit7u0ffKBtfJLrVPBcREQKMyWlRERECqDU1FSmTJnC999/j8PhoE2bNjz99NOEhoZ6OrSLbdvmfvvAgWwZtmfPnnh5eTFnzhxSUlLc72zYEJ55xr0tMTFb5hW5FlWrViUsLIwtW7Zw9uxZT4cjIiKSq5SUEhERKWCOHTvG2LFjWbt2Ld7e3jz44IPcdddd2Gx59NDd+Hj324GB2TJsSEgInTp1Ij4+nvnz51/coU8fePTRjBVTFgv8978Z/xXJRYZhEBERgWmarFy50tPhiIiI5Cp98hIRESlA1q5dy9ixYzl27BhhYWE899xzNGvWzNNhXVlCgvvtbCy+3qlTJ4KDg1mwYMGlV6E8+CD8/jssXpyRpBLxgObNm2OxWFi2bJn7iZEiIiIFnJJSIiIiBYDdbuf7779n8uTJpKam0rhxY55//nnKlCnj6dCuLgeTUr6+vvTo0YP09HTmzJlz6U5FimQUQIeMVVuvvJKxgmr58myLQ+RKgoODqV+/PmfOnGHHjh2eDkdERCTXKCklIiKSz8XExDB+/HgWLVqExWJhwIABDB48GF9fX0+HljU5tH0vU6tWrShdujQrV67k8NVO9nvnHZgzB9asgccfh0WLsjUWkctRwXMRESmMlJQSERHJx7Zu3cqYMWPYv38/RYsWZeTIkbRv3x7DMDwdWtZdmJTKxpVSABaLhX79+mGaJtOnT7/y9qgLk1Yvvgi7dmVrPCKXUqtWLYoWLUpUVBQJF64eFBERKaCUlBIREcmHnE4nc+fOZdKkSSQmJlKzZk1eeOEFKleu7OnQrl0Or5QCqF27NjVq1GD79u1su/C0v/P17Ol+OzkZnnoKYmKyPSaR81ksFlq1aoXdbmf16tWeDkdERCRXKCklIiKSzyQkJDBp0iR+/vlnTNPktttuY9iwYQRl8wqjXJODNaUyGYZBv379MAyD6dOn43Q6L92xWzfo0cO97fhxePppSE/P9rhEzteyZUsMw1DBcxERKTSUlBIREclH9u/fz5gxY9i2bRsBAQEMHTqU7t27Y7Hk43/Sc3j7Xqby5cvTvHlzjh49yooVKy7dyTDg2WehXj339qgoGDsWlCiQHBQaGkqtWrU4evQo+/fv93Q4IiIiOS4ff4IVEREpPEzTZNGiRYwbN46zZ89SqVIlXnjhBerUqePp0G5cLqyUytSzZ0+8vLyYPXs2qampl+7k7Q3jx0NYmHv7nDkwdWqOxSYCKnguIiKFi5JSIiIieVxqaipTpkzh+++/x+Fw0LZtW0aOHEloaKinQ7txDgckJbm35UBNqUxFixbllltuIS4ujt9///3yHYsVgwkT4MITDCdOhJUrcyw+kXr16hEYGMi6detISUnxdDgiIiI5SkkpERGRPOzYsWOMHTuWtWvX4u3tzaBBg7jzzjux2WyeDi17XOqUsRxMSgHceuutBAUF8fvvvxMbG3v5jjffDK+84t7mdMJzz8GBAzkaoxReNpuNFi1akJqaytq1az0djoiISI5SUkpERCSPWrt2LWPHjuXYsWOEhYXx3HPP0bRpU0+
Hlb0ulZTK4YLtvr6+dO/enbS0NObMmXPlzu3bw0MPubclJMCTT0JcXM4FKYVaq1atAG3hExGRgk9JKRERkTzGbrczdepUJk+eTGpqKo0bN+b555+nTJkyng4t+11Y5NxqvXjLXA6IiIigVKlSrFixgiNHjly58+DBcMst7m0HD2asmHI4ci5IKbRKly5NlSpViI6O5vDhw54OR0REJMcoKSUiIpKHxMTEMG7cOBYvXozVauWOO+5g8ODB+OZCosYjLnXynmHk+LRWq5W+fftimiYzZsy4cmfDgNGjoUYN9/bVqzNqTInkgMyC58uXL/dwJCIiIjlHSSkREZE8YuvWrYwZM4bo6GiKFi3KiBEjaNeuHUYuJGk8xmp1v12sWK5NXbduXW6++Wa2bt3Ktm3brtzZ1xfefvvi+KZOzTiVTySbNWrUCF9fX1atWkV6erqnwxEREckRSkqJiIh4mNPpZO7cuUyaNInExERq1qzJCy+8QOXKlT0dWs6rUwfO35bYo0euTW0YBv369QNgxowZOJ3OK18QFpaRmPLycm9//32w23MoSimsfHx8aNq0KUlJSURFRXk6HBERkRyhpJSIiIgHJSQkMGnSJH7++WcAbrvtNoYNG0ZQDhf7zjO8veHLL+H55+Hdd+Gee3J1+ooVK9KsWTMOHz7MqlWrrn5B3bowapR7W3x8xql8ItkscwufCp6LiEhBVUDOkxYREcl/9u/fz8cff8zZs2cJCAhg0KBB1K5d29Nh5b6iRaFPH49N37NnT9avX8/s2bNp1KgRPj4+V76gWzc4cwY++CBjhdTgwRnJNZFsVqFCBcqVK8eOHTs4deoUJUqU8HRIIiIi2UorpURERHKZaZosWrSIcePGcfbsWSpVqsQLL7xQOBNSeUBoaCgdOnQgNjaWhQsXZu2i++6DefNgwQJ48MGcDVAKLcMwVPBcREQKNCWlREREclFqaipTpkzh+++/x+Fw0LZtW0aOHEloaKinQ/OMmBh4/HHo0iVj+56HtsF16dKFgIAA5s+fT1xcXNYuKlYsY5UXQGoqvPMODBsGS5fmXKBS6DRr1gybzcaKFSuuXvdMREQkn1FSSkREJJccO3aM119/nbVr1+Lt7c2gQYO48847sdkK8W76b76B5cvh1Cn4+mtYv94jYfj5+dG9e3dSU1OZcz2n6X3wAXz3HaxYAU8+CfPnZ3+QUij5+/vTqFEjzp07x5YtWzwdjoiISLZSUkpERCQXrF27lrFjx3L8+HFKlSrF888/T9OmTT0dludd+Ev2/v2eiQNo3bo1JUuWZNmyZRw9evTaLo6Odr/98suwbVu2xSaFmwqei4hIQaWklIiIyD/SHU7i0+zEpaYTn2Yn1e7ANM0bGtNutzN16lQmT55MamoqjRs35rnnnqN06dLZFHU+l5Dgfjsw0DNxAFarlb59+2KaJjNnzry2i7t3d7+dlgbDh2esABO5QdWqVaNkyZJs3ryZ2NhYT4cjIiKSbQrxfgERESnsHE6TE4mpnElOIzY1naR0B6YJJiZgYDHAx2ohxNeLor5elA70xdua9b/nxMTE8PHHHxMdHY3VaqV///60bdsWwzBy7kHlN/Hx7rc9mJQCqF+/PtWqVWPz5s3s2LGDGjVqZO3CW26Bfv1g+vR/206dykhMffopXO1EP5ErMAyDVq1a8dNPP7Fy5Uq6dOni6ZBERESyhVZKiYhIoZNqd7ArJoG/Dp4m6sQ5DsYlE59qB8BqGNgMC7Z/EkfJdgdH4lPYciqevw6eZuupOBLS7FedY+vWrYwZM4bo6GiKFi3KiBEjaNeunRJSF8pjSSnDMOjXrx8A06dPv7aVciNGQOPG7m3btsGrr8INrrgTadGiBRaLheXLl9/wCk4REZG8QkkpEREpNEzT5FhCCssPx7A7JpFUhxNvqwU/mxVfmxUviwWrxXB9eVks+FitGfdbLTicEH0umZVHYtgfm4TzEr8YOp1O5s6dy6RJk0hMTKRmzZq
88MILVK5c2QOPOI9zOiEx0b0tKMgzsZynUqVKNGnShEOHDrF69eqsX2izwZtvQrly7u2//QZffpm9QUqhU6RIEerWrcupU6fYtWuXp8MRERHJFkpKiYhIoWB3mmw6GUfUiXOk2J34WjMSTpYsrlwyDANvqwVfqwW702Tb6XjWHo0lxe5w9UlISGDSpEn8/PPPANx2220MGzaMoDyQaMmTkpIuXkGUR56r3r17Y7PZmDVrFmlpaVm/sEgReOcd8Pd3b3//fViyJHuDlEJHBc9FRKSgUVJKREQKPLvTyd/HYzkcn4LVMPC1Wa97G51hGPhYrXhbLJxOTmPtsViS0x3s27ePMWPGsG3bNgICAhg6dCjdu3fHYtE/tZd14dY9yDNJqdDQUNq3b8/Zs2f5448/ru3iypXh9dfh/PeYacKoUbBnT/YGKoVKnTp1CAkJYcOGDSReuMpQREQkH9InZRERKdCcpsnGE3GcTErD22LBlk1JIqvFwMdqIS41nQXb9jFx0nucPXuWSpUqMWrUKGrXrp0t8xRoFyalDAP8/DwTyyV06dKFgIAAfv31V+Li4q7t4ogIGDrUvS0pCZ56Cs6ezb4gpVCxWCy0bNkSu93OmjVrPB2OiIjIDVNSSkRECrTo2CSOJ6bi/U+9qOxkYJJ4LpZEB1RqEkHbtm0ZOXIkxYoVy9Z5CqyEBPfbgYGQh1aW+fv7061bN1JTU11bMq/JvfdC167ubUePwjPPQHp69gQphU6rVq0AWLp0qQqei4hIvpd3PvmJiIhks/hUO3vOJmIxjGxPSKWnp3Ps2DESExJxpKVSpUET2vXIqEMkWXRhUiqPbN07X5s2bShRogRLly7l+PHj13axYWRs2atb1719wwZ46y2dyCfXpXjx4tSoUYMjR45w4MABT4cjIiJyQ5SUEhGRAsk0TbafiSfdaeKdzQmpxKREjh8/Rnq6HS8vGyVCQ/Hy8mb76QTsTiUasuzC7XuBgZ6J4wpsNht9+vTB6XQyc+bMax/A2xvGj4eSJd3bf/oJpk3LniCl0FHBcxERKSiUlBIpoEzTJDHNzvGEFHbFJLD5n1PHNp2MY/vpeA7HJXMuJR2HfoGWAiou1U5McjpeFuO6i5pfyMQkJiaG06dO43SaBAT4U6p0aby8vPC2WkhOd3AiMSVb5ioULkxK5cGVUgANGjSgSpUqbNy4kV27dl37AKGhGSfy+fi4t3/4IaSmZk+QUqiEh4cTEBDAmjVrSNV7SERE8jElpUQKmDSHkwPnklh+OIalh2JYf/wce2ISORiXzJH4FA7FJbMvNomNJ+NYcSSGxQdPs/NMAolpdk+HLpKtDscn4zBNrNmUkLI77Jw4fpz4+HgMA4oVK0rx4sWxGBn/lFoMAxM4FJesOi9ZlQ9WSkHGiYv9+vUDYPr06df3+taoAaNHu7clJCgpJdfFy8uL5s2bk5qayvr16z0djoiIyHVTUkqkgHCaJvtiE1ly8AxbTsUTl2rHYoCv1YKvzYrfBV++Vgs2w0Kaw8mes4ksOxzDxhPnSLU7PP1QRG6Y0zQ5lpCKxcieVVLJKckcO3aM1NQ0bDYrYaVKERQUDLiP7WUxiE21k5iu76MsyQc1pTJVrlyZxo0bc+DAges/9axjRxg+HDLrjt15JwQHZ1+QUqhoC5+IiBQESkqJFADxaXbWHD3LjtMJpDud+PyTiLJZLJf9hdz4p/CzjzUjQYUJh+NTWHHkLMcSUrTSIx+oVKkS1atXJzw8nPDwcH744YcsXde2bVtCQ0M5d+6cq61fv3588cUXV7121qxZrFq16npDvqKBAwcyceLES95XqVIloqKisjxWQpqddKeJ7YYTUibnzsVy8uRJnA4nfn6+lC5dGh9vn0v2thoGDqdJ3A2sPDQMg7p161K/fn3q1q3LtDxadyg8PJz4C1c6Xat8sn0vU+/evbFarcyaNYv06z097847Ye7cjK+nnsreAKVQKVOmDJUrV2bv3r0cPXrU0+G
IiIhcFyWlRPK500lprD5yljPJ6XhZLPhYrViu8RdxwzDwslrw/acmTtSJOPaeTVJiKh/44YcfiIqKIioqigEDBmT5uuDgYN54441rnu9GklJ2e+5tEY1Ps+M0TW6kvrnT6eDEyZPExmYk70JCilCyZEksFutlr8lMAsen3thjXbp0KRs3buTzzz/n/vvv5/Tp02735+ZzeTlRUVEE3WgS6cKfVUWL3th4Oax48eK0a9eOmJgY/vzzz+sfqEQJKF064/8dDvjkExg5EpYsyZ5ApdDIXC21fPlyD0ciIiJyfZSUEsnHziSl8feJWFIdTnytlhs+8t4wDHxtVgxg19kE9pxNVGKqgHrmmWeYMmXKJf+6/scff9CiRQsaNGhA7dq1mTJlCgDz5s1jzpw5jBs3jvDwcCZPnszixYsJDw93XbtlyxYqVaoEQHR0NCEhITzzzDM0bNiQ995777JjX6933nmHJk2aEB4eTpMmTVi5ciUAiWkOHmzbmC8mvMljfbpwd2Qjvpn0tuu6g3t381ifLjzYKYKXHh7I0/f257fpUwF4c8Rj/PDJ+xw7doyU5BR+/OhdFvz4DUWKhLBh+VIe69OFh7q148FOEcz74RvXmGdOHufpe/vzyK2R3N+nB3fccQej/6khlJ6ezrPPPkvTpk0JDw/n9ttv5+zZs1d9fI0bNyYwMJDo6GgGDhzIgw8+SOvWralTpw4A48aNo3bt2tStW5e7777btfotLS2NkSNHUqdOHerXr8+tt97qGnP8+PE0bdqUhg0bcuutt7qOlJ87dy716tUjPDycOnXqMHv2bADGjBlDzZo1XSvyMvsbhkFsbCyQsZLtxRdfpEWLFtx0002MGTPGNd+OHTto0aIFtWvXpk+fPnTq1OnfVXlt2/6bmPLygltuuepz4mldu3bF39+fefPm3fhKMYAvv8xISi1alLFyas6cGx9TCo3GjRvj4+PDypUr80SyWkRE5FrZPB2AiFyfxDQ7f584R7rDxNd6+W1618Pb+m+tKX8vK2WD/LJtbMle9913H6Zp0rRpU9544w1KlCgBwODBg+nRowc9evS45HWlSpXioYce4qWXXuLTTz91u69hw4YsW7YMq9VKTEwMDRo0oHPnznTt2pUePXoQHh7OE088AcDixYuvGN+5c+eoXbs2b775JgBnz5695NjlypW7rsd/77338tQ/W6BWrVrFwIED2bFjB45/kqmJced4b+avnIs5w71tm9C5/12UKFWaN556lB73PMCt/e/iwJ5dPNStPe179nGNm5SUSGpqKv7+/gQEBOBl8wKgWp16vDvtZ6xWK3GxZ3moW3uatG5PidJleG/089Rq2JhXhn4P8Wd5sEtbatSoAWQkjzJPygJ49dVXGTVqFO+///4VH9/ChQtJTU2lWrVqAKxfv55ly5YRFBTEr7/+ymeffcbKlSsJCQlhyJAhPPvss3z44YeMHTuWXbt2sX79enx8fDh16hQA3333HTt37mTlypVYrVa+/vprHn30UX755RdGjRrFxx9/TIsWLXA6ncTFxXH27FnGjx/PsWPH8PPzIykpCYvl0n/Pio2NZeXKlZw+fZoqVarwwAMPULZsWe69914effRRHnjgAbZv306DBg246667Mi6KjISPPoItW6B1a6hQ4XreBrkqICCAbt26MW3aNH755RfuuOOOGxtwzx7326+/DhUrQv36NzauFAo+Pj40adKEZcuWERUVRePGjT0dkoiIyDVRUkokHzJNk62n410rpLIzIZXJ22ohxeFgx5kEivl64+d1+S1L4hlLliyhQoUKpKenM2rUKO6//37mzZsHwOTJk696/ciRI6levTo7duxwaz9z5gyDBg1i165d2Gw2zpw5w5YtW64rceTl5cU999yTI2MD/P3337z22mucOXMGm83Gzp07SU5Odt3fvmdfAIoUC6V0+YocP3QA/4BA9mzbQsc+GdsdK1a9mbpNmrmucTgcJCcnk5qaSlhYmOt
0PYC4szGMf+YJDu/f+09iKob9u7ZTonQZ/l6xlIeffxmA0JJh3Hbbba7rZs2axblz55gxYwaQsZIpc0XZpURGRmK1WilatCizZ8+mSJEiAPTv39+1ZW7hwoUMGDCAkJAQAB555BH69+8PwM8//8ybb76Jj09G7avMZOWsWbNYu3YtjRo1cj3WTB06dODxxx+nX79+dOrUifDwcBwOB9WqVeOee+6hU6dOdOvW7bKvVWaiqXjx4lSuXJn9+/cTFBREVFQU9913HwA1a9Z0bTdyadQo4ysfadu2LYsWLeKvv/6iXbt2hIWFXf9g3brB77//e9tuz9jK9+WX/27xE7mCiIgIli1bxrJly5SUEhGRfEfb90TyoYNxyZxOSsP7CoXMs4OPxUKK3cm20/HaxpcHVfhnVYmXlxdPPPEES5cuvabrg4ODeeaZZ3juuefc2h9++GEiIiLYvHkzUVFR3HzzzaSkpFxyDJvN5pbYuLCfv7+/28qaaxn7atLS0ujTpw/jx49ny5YtLPmnHk9qairWf74vvH3+LUhusVrdYnX37/eR1WrDYrHg4+PD6TNnSEz8d4vWxFEjqdO4GZN/W8In8xZT7qYqpKWmuo1kAjaL+6l/pmkyadIkV/2vbdu2uRKIl7J06VKioqJYtGgRbdu2dbUHBgZe9pqs/CwwTZPnnnvOFcfmzZvZvHkzkLEV8vPPP8ff35/777+ft956C6vVyqpVq3jiiSc4efIkzZs3v+z7zNfX1/X/Vqv1sluJcvJnVm6x2Wz07t0bp9PJzJkzb2ywVq3g7rvd22JiMk7pS0q6sbGlUKhUqRJlypRh+/btF9WfExERyeuUlBLJZxxOk32xGb+o3GgNqasxDAMvi8HJpDRib7Bws2SvxMREVz0fgKlTp9KgQYNrHueRRx4hKiqK9evXu9rOnj1LxYoVMQyDJUuWsHHjRtd9wcHBbqf2Va5cmQMHDri2h3399ddXnO9KY1+rlJQU0tLSXMm5SZMmue4L8M5Y2XepZGpAUBBVatZm4ayMU+0O7d3DlnWrXfeXrliR6J3bKVasGMkJ8axZ/CdpaRmJp/hzsYSVLYdhGGxavYJ927e6rgtvEcH8Gd9nxBYbw88//+y6r1evXkyYMIGkf5IMSUlJbN3677XX45ZbbuHHH38kLi4OgI8//phOnToB0KNHD959911S/0mYZb4+vXr14qOPPiImJgbIqHX1999/Axm1n2rXrs1jjz3GI488wqpVq4iPj+fEiRNERkbyf//3f0RERLj6Z0VwcDD169fnm28yam/t3LnT/fj6DRvgrrsyvq7hdMW8oFGjRtx0001ERUWxe/fuGxvs8cehZUv3tl27YPRocDpvbGwp8AzDcK1AXLFihYejERERuTZKSonkMyeTUklOd+BtzZ1vX6th4DRNDsclX72z5JoTJ07Qrl076tWrR926dfnrr7/46quvXPcPHjyYOVkomOzj48Mrr7xCdHS0q+2NN97g2WefJTw8nM8++4xmzf7d2nbvvffy448/0qBBAyZPnkyZMmV4+umnadq0Kc2bN6dYsWJXnO9KY19NZu2pzK+4uDjGjBlD06ZNadSoEd7e3q6+Qd42wOByC/yeeft95n7zBYM6R/LJm69QvV44gcEZW+Ru7XcX8bFnebxPF74cN4aqteuSlJRMXHwcg5/+Pya/9SpDurbl12nfUSP8321n/33pdTatWckjt0by7KODadasmWtr3TPPPEOTJk1o1qwZ9erVo3nz5kTdYBKmS5cuPPDAA7Ro0YK6desSFxfH2LFjXfPdfPPNNGzYkPDwcO6//34A7r77bgYOHEi7du2oX78+4eHhrlPknn/+eWrXrk2DBg34+uuvGT16NOfOnaNPnz7UrVuXevXqkZ6e7horq7766is+/PBD6tSp43oeQkJCMpItL72UkXzZtQuef57LvmB5kGEYru2S06dPv7HVpBYLvPZaRi2p8/35J1xQ803kUpo3b47NZmP
58uU4lcgUEZF8xDC1J0ckX1lz9CynktLws+Vejac0hxOLAW0qFscnl5JhIjfCaZr8GX0au9O8ZAI3OTEBX/8ADMPg2KEDDO3ThQ9mL6BkmbIkJydx8uQpQkNDCQwMxOGwc+LECdLT7YSEhLjqO10oNSUZLFYMq42a/ia3tI7gm2++uabEW0GUkJBAQEDGc71//35atGjB2rVrKe/jA127undetAj+qZmVX3z88cds2LCBwYMH06RJkxsb7OBBGDgQ/ln95jJ2LHTseGNjS4E3efJk1q5dy9ChQ10ndIqIiOR1+u1SJB+xO52cTUnHlss1WWwWA7vTJDYlPVfnFbleFsOgdKAPDtO85AqWrevXMqRrO/7TpS2jBt/Do/83hpJlygIZNaEAV5kpq9VGWKlSeHt7ERsbS2zs2fN7uRzev4//9uzE0O7tubV9Wx599NFCn5CCjO1E4eHh1K9fn+7duzNhwgTKly8PCQkXd/b3z/0Ab1Dv3r2xWCz89NNPpKff4M/IChXgjTcyVk6db/RouOBAApELZW7hc9siKyIikscpKSUFVkpKCr169eLmm2+mfv36dOzYkT3nHb39wAMPuO5r1aoVa9euzdK4o0ePpkSJEoSHh1OzZk0GDBjA2bNnc+phuIlPc+A0TSzXUEsqatVyhnRtC8DpE8d5vP9tV77gEiyGgQnEp7nXldqyZctlTxA7dOgQPXr0oG7dutStW9dtm1BeMHr06OsusC35Q9kgP6yGgeMSSanGrdvx6a+L+fTXxUyZv5T2Pfr8e+c//c//LrNarISFheHt4825c3H/fM+7j3tTjVpMmvsHi1evZfv27Tz55JM58Kjyn06dOrFx40Y2btzIli1buPPOOzPuiI937+jvD9b8d8pnyZIladu2LWfOnGHRokU3PmDTphlFzs+XmgpPPQUqYi1XUL16dYoXL87GjRtdteZERETyOiWlpEAbMmQIO3fuZOPGjfTs2ZPBgwe77uvduzfbtm1j48aNPPfcc67aIFlx9913ExUVxZYtW0hPT2fMmDHXFNflTwC7svg0O07z+r9xi4eV4t1pP1+942XEpWZ9FcAjjzxCu3btXKd7LVy4kKpVq1733Nkl80Swl19+WUmpAq6Ij42ivl6kOy+9WupaWf5JTPn4+hAXF/9PsfB/x01zOPH3shIW4Hv5QeRfFyal8tm2vfN169YNPz8/5s2bR2Ji4o0PePvt0KePe9vJkzBiBKSl3fj4UiAZhkGrVq1wOp2sXLnS0+GIiIhkiZJSUmD5+vrStWtX1/HjzZs3dyvm3KNHD2w2m+u+I0eOXPYI88uxWq3ccsst7Ny5E8g4eaxZs2Y0bNiQ1q1bu04W++KLL2jXrh19+/albt26rFmzhjFjxlCzZk3Cw8MJDw/nwIEDAMyfP5+GDRtSr1492rRpw7Zt2wBYvHgxtzRrxAcvPcOQru14sFMEOzdFAeCw23nmvv480uMWHuwUwWuPP0Ry0sW/GB0/fJAe9aoAsHrRAoZ0bev6urV6OdfJYWv/+pPH+3fj4e4deLRnJ/5euQwDSLY7GT16NNWqVaNRo0Z8//33l31uDh8+TNmyZV23ixcv7jolbfTo0TzxxBOu+9577z0GDhzoeq7at29Pjx49qFWrFq1bt3a9ble6z+FwMHLkSOrUqUOdOnUYOnQoaf/88jZw4EAefPBBWrduTZ06dXj44YcBiIyMJDw8nJMnT171tZb8xzAMahYPxMtikObMelLq3+17F69ItBgWwkqWxNfXh/j4BE6fOQOY2J1ODANqhgZiy+FTMQuMC5NSgYGeiSMbBAYG0rVrV5KTk/nll19ufEDDgJEjoWFD9/YtW2DMmHxVEF5yV8uWLTEMg2XLlmVLMl5ERCSnKSklhca7775Lz549L3tf165dXUmqjz76iBdffPGqYyYnJzNr1iwaNWrE8uXLmTp1KkuWLGHDhg289tpr3HXXXa6+q1ev5vXXX2fz5s3UqFGD8eP
Hs2HDBqKiolixYgVhYWGcPHmSu+66iy+//JJNmzYxZMgQ+vXr5/pguW/3Ljr0HsCnvy6m9/2D+Wz8awBYrFaef/djPpyzkCnzlxIQFMysLydfMfZm7TryybzFfDJvMf3/818qVq1GZOfbOHowmi/fHcfrn33PR3P/4IV3P+L1xx/CnpbK8oXzmTZtGuvXr2fdunVuSb4LPfPMMwwaNIhWrVoxfPhwlixZctXnM9Py5ct588032bZtG7fddhtDhgy56n2ffPIJa9euZf369URFRbF3714mTJjgum79+vX88ssv7Nixg48++giApUuXEhUVRcmSJbMcm+QvwT5eVCkagGmaOLKamLrE9r3zGYaFkiXD8PPzJTEhkVNnzpDudFIm0JewAJ/sCbwwuLCmVD5eKQXQrl07QkNDWbRoUfYkur284K23oEwZ9/Z58+Drr298fCmQQkJCqFu3LidPnnQrWSAiIpJXKSklhcLrr7/Onj17XMeln++bb77hxx9/5JNPPnG1Pfzww7zyyiuXHe/bb7+lQYMGtGzZklq1avHss88ye/ZsNm7cSLNmzQgPD2fo0KHExMSQnJwMZPz1snr16gAEBwdTrVo17rnnHj7++GNiYmLw9fVl9erVrhpMkLFN8OjRoxw5cgSAijdVpkZ4xl/OazVozNGD0QCYpsmMKR/xULd2/KdLG1YvWsCebVuy9Nz8vWIpX737Fq99NhX/wEDW/vUnRw/s58kB3RnStS0vP/oghsXCiaNH2LB8KbfffjvBwcEYhsFDDz102XHvvPNODh48yPB/aqP07NmTcePGZSmmli1bUrNmTSBjC+bixYtdWx4vd9/ChQsZOHAgPj4+2Gw2/vOf/7BgwQLXmP379ycon//SK9fnphB/Sgb4kOZ0Zj0xdRWGYVCiZEn8AwJwGlbiTh6nShFf18pMyYICtH0PwMvLi969e+N0Ovnpp5+yZ9CQEHjnnYsLwH/yyaULxYugguciIpK/2DwdgEhOGz9+PDNnzmThwoX4X/DB/ocffuDll1/mjz/+ICwsLMtj3n333UycONGtzTRN7r//fl5//fVLXhN43tYUq9XKqlWrWLFiBYsXL6Z58+ZMnTr1qvP6+vpi/jOXxWrFYc9I1PwxewZ/r1zGO9/PISAoiJmff8LfK6/+YXT/zu2Me/pxXv/sO4qHlXI9jkYRbXjh3Y/d+ibbHRdtS7raL+BFixalT58+9OnThyZNmvD6668zcuRIbDabW12tnKjtdGFsgfl4a5DcGIthEB4WzIbj5ziVlIYXBrYLTzc7z5W2753P6YSAkBASz5xixYxvObGmLI8++ije3t7ZF3xBVsBWSgE0btyYhQsXsmHDBvbu3UuVKlVufNCqVTO27A0f/u+2vZQUiIvL11seJefUqVOH4OBg1q9fz4ABAy767CMiIpKXaKWUFGjvvPMOU6dOZcGCBYSEhLjd9+OPPzJq1CgWLlzoqnV0I3r06ME333zDwYMHAXA6naxbt+6SfePj4zlx4gSRkZH83//9HxEREfz99980b96czZs3s2VLxiqn77//nrJly7pqM1ksBhYDnBeMl3AuliJFixEQFERSQoKrNtSVnDp+jBcfuo+R4/5HpZtruNqbtG7HhuVL2Lt9q6ttR9QGANp26MC0adOIj4/HNE231WUX+vnnn0lKSgIyEl1///236xe0qlWrsm7dOhwOB0lJScyYMcPt2pUrV7Ljn+PPJ0+eTLt27bD+cyrX5e675ZZb+Oqrr0hLS8NutzN58mQ6dep02fiCgoI4d+7cVZ8nKRhsFgsNS4VQNsgXh2mS4nBcvt7KP82XS0mZpkmqw0m600lxP2+6N6hBo/p12b59O//73/9UQD+rClBNqUyGYdCvXz8Apk+fnn01fVq3hqefztjSB9C588Xb+kT+YbVaadmyJenp6axZs8bT4YiIiFyRVkpJgXX48GGGDx9O5cqVadeuHQA+Pj6sXr0ayFjtVKpUKbc6U3/
88QehoaF89NFHHD169Ipb+C4UGRnJW2+9Re/evbHb7aSlpdGtWzcaN258Ud9z587Rr18/EhMTMQyDatWqcf/991OkSBG+/fZb7rvvPux2O0WLFmXatGmuVT8Ww8BiGDgv2ILUsc8AViz8jfvbNyckNJS6TZpz4sjhK8b76w/fcC7mDB++OsrVNvDJZ2nZ8Vaen/gRE14YQWpyMvb0NKrUqsuIdz6ge7duHN62iYYNGxIcHEyXLl0uO/5ff/3lWhVlmibVq1fnvffeA6BPnz5MmzaNmjVrUq5cORo0aOBKYEHGFr1nnnmGPXv2EBoayldffXXV+4YMGcLevXtp+E9h4LZt27oVU7/Q8OHD6dixI/7+/vz++++qK1UI2CwG9UsGU9Lfhx1n4km2O7EY4GWxYHFbFXXpRIJpmqQ7TRymiZfFoGpoIJWK+GMxDO699168vb1ZtGgREydOZNiwYVqdcDUFMCkFUK1aNcLDw4mKimLDhg00atQoewbu3z8jOZWUBJUqZc+YUmC1atWK3377jWXLltG2bVtPhyMiInJZhqmjOUTylTVHz3IqKQ0/mzXX5kxzZPzy3qZCcXxsObvA8osvvmDWrFnMmjXrmu4TuRYpdgcHzyVzKD6ZVLsTk4ylwxaLQUpSErGxsRQrHoqPtw8OE5z//FPpbTUoHehLxSL+BHm7/13HNE1mzpzJ77//Tvny5Xn88cdVx+xKHnsMVq369/bjj8O993ounmx04sQJRo8eTbFixXj55Zddh2hkK9OEqVMzTuTr2BH++eOLSKa3336bXbt28cILL2TLinAREZGcoO17IvlMuWA/DP79JTmnmWbGypDSgb45npASyS2+Nis3hwbSpkJx6ocFUz7Yl4B/kkymYcHm44OJgQn42iyUCfKldokgWlcoTp0SwRclpCBj61afPn247bbbOHToEG+//ba2iF5JASt0fr6wsDDatGnD6dOnWbx4cc5MMnNmRhH033+HkSNh+vScmUfyLRU8FxGR/EC/YYrkM2H+Pvh5WUlzXFhZKmc4TBOLYVAu2C9X5hs4cOBlV0Jd6T6R62GzGJQN8qNeySK0rhBKu4rFCThzkA2zplKRJNpUCKVthVAahBWhUhF/fKxX/mfTMAy6d+9Onz59OHbsGOPHjycmJiaXHk0+U4CTUgDdunXD19eXX375hcTExOyfYOdO99tvvQVr12b/PJJvNWzYEH9/f1avXk1aWpqnwxEREbkkJaVE8hmrxaBySEatmuw63v5yMmvolPT3JsRHJeik4PO2WrCkp5Jw+gR+lowVVVc7ZfJSOnfuzB133MHJkycZP348p06dyoFo87kCePre+YKCgujSpQtJSUnMmzcv+yfo3Nn9ttMJzzwDh69cT1AKDy8vL5o1a0ZKSgobNmzwdDgiIiKXpKSUSD5UIdiP4v7epDmd2Xe60yWkOp342izUKh50Xb+Yi+RHTmfGKsQbfc+3a9eO++67j5iYGMaPH8/x48ezI7yCo4AWOj9fhw4dKFq0KIsWLeL06dPZO3ijRvDgg+5tcXHw5JOQEyuzJF/SFj4REcnrlJQSyYcMw6B28SB8rBZSHTmTmEpzODGA6qGB+HnlXlF1EU/LTEpZLDf+T2SrVq148MEHiYuLY/z48RzWKpYMTidcmPQLCfFIKDnJy8uL3r1743A4+Omnn7J/gocfhgtPVtu/H154IeM5lkKvXLlyVKxYkd27d3PixAlPhyMiInIRJaVE8qkAbxvhYUWwWQxSsjkxleZw4sSkatEAygb6Ztu4IvlB5vdSdq0ObNq0KUOGDCEpKYm3336b6OjobBk3X7NY4J8VHADUrg1lyngunhzUtGlTKlSowLp169i3b1/2Dm6xwCuvQNWq7u3LlsF772XvXJJvabWUiIjkZUpKieRjxf29aViqCD5WCykO5w3XmDJNkxS7AxOoVjSAqkUDtG1PCp3sXCmVqUGDBjz66KOkpaUxYcIE9uzZk21j51tjxsD
w4TB0KHzwgaejyTGGYdCvXz8Apk+fnv0rW/39M07hu3Cl2VdfwS+/ZO9cki81bdoUb29vVq5cid1u93Q4IiIibpSUEsnnivv70KxMUUL9vEhzOkl1OK75l56MguZOUhwZNaTCw4KVkJJCK7tqSl2oTp06DB06FKfTybvvvsuOHTuydfx8x9sb7rwT7r8fAgI8HU2Oql69OvXq1WPv3r1ERUVl/wRlysC4cWC74ECKMWNg8+bsn0/yFV9fXxo3bkx8fDybNm3ydDgiIiJulJQSKQCCfGw0LVOUmqGB2CwZq6ZS7A7sVyiEbpomDqdJqsORsf0PKBfkS8tyxSgd6KuElBRamd8z2blSKlONGjV44oknsFgsTJo0iS1btmT7HJI39enTB4vFwsyZM3NmtUqDBvDss+5t6ekZq9FUS6jQ0xY+ERHJq5SUEikgLIZB5aIBtKkQSp0SQQT72HCakOJwkmx3XPSV4nBiN514Wy1ULRpARLli1A8rgq9NRc2lcMuplVKZqlSpwpNPPom3tzcffPABf//9d47Mk6eZJnzzDQwYAC++CElJno4ox5UuXZrIyEhOnjzJkiVLcmaSXr3gjjvc22JiMhJTKSk5M6fkC5UrV6Z06dJs27aNmJgYT4cjIiLioqSUSAHjbbVQsYg/rcoVI7J8MRqVKkK1YgGUD/ajTJAv5YJ8qRziT/2SwbQsW4y2FYpTPTSQQG/b1QcXKQRycqVUpkqVKjF8+HD8/f355JNPWLNmTY7NlSdt3gwTJ8LevTBvHnzxhacjyhW33XYbPj4+/PzzzyTlVCLuySehWTP3th07YPTojGSgFEqGYRAREYFpmqxYscLT4YiIiLgoKSVSQBmGQYC3jVKBvtxcLJB6JYNpEFaE+mFFqFk8iHLBfhTx9cJq0TY9kfPl9EqpTOXKlWPEiBEEBwfz2WefsXz58hydL0+5cHXY7t2eiSOXBQcHc+utt5KYmMivv/6aM5NYrTB2LFSo4N6+cCFMnpwzc0q+0KxZM6xWK8uXL3f9nBMREfE0JaVERETOkxsrpTKVKlWKESNGUKxYMb766isWLVqU43PmCfHx7rf9/T0ThwfccssthISE8Oeff3LmzJmcmSQ4GCZMgMBA9/YpUzK280mhFBQURHh4ODExMWzfvt3T4YiIiABKSomIiLjJXEGQG0kpgBIlSjBixAhKlizJ999/z/z583NlXo9KSHC/fWHypADz9vamV69e2O12fvrpp5ybqGLFjBVT57+P7XY4dSrn5pQ8TwXPRUQkr1FSSkRE5DyZK6Vy8wTKYsWKMWLECEqXLs3MmTOZO3fuZU/OLBAuXCkVFOSZODykWbNmlC9fnrVr1xIdHZ1zE7VoAc89B15eGbebN4dq1XJuPsnzatasSWhoKBs3biT+wu9DERERD1BSSkRE5Dy5vVIqU5EiRRg+fDjly5fn559/ZubMmQU3MVXIk1IWi4V+/foBMH369Jx9nXv3hpkzM047fPdd95VTUugYhkHLli1xOBysWrXK0+GIiIgoKSUiInK+3KwpdaGgoCCeeuopbrrpJn7//Xe+//77gpmYunD7XiFLSgHUqFGDOnXqsHv3bjZt2pSzk5UuDTVqZBRBB5g7F15+Gf78M2fnlTypVatWGIbBsmXLCubPFxERyVeUlBIRETlPbp2+dzn+/v488cQTVKtWjcWLF/P1118XvJOyLlwpVYhqSp2vb9++GIbBjBkzcDgcuTPpggUZCam5c+Hpp+Hbb3NnXskzihYtSu3atTl+/Dj79u3zdDgiIlLIKSklIiJyHk9t3zufr68vw4YNo2bNmixfvpzPPvss95IWuUErpQAoU6YMERERnDhxgqVLl+bOpBeeuvbuu7BiRe7MLXmGCp6LiEheoaSUiIjIeTy5fe983t7e/Pe//6VevXqsXbuWTz/9FLvd7tGYsk0hryl1vh49euDj48PcuXNJTk7O+Qk7dHC/7XRmFEPPyYLrkufUrVuXoKA
g1q1blzvvOxERkctQUkpEROQ8nt6+dz4vLy8efvhhGjVqxN9//80HH3xAenq6p8O6MenpkJLi3lZIt+8BBAcH06lTJxISEvjtt99yfsLateGRR9zbEhPhySchLi7n55c8wWaz0aJFC9LS0li7dq2nwxERkUJMSSkREZHz5IXte+ezWq0MHjyY5s2bs3XrViZNmkRqaqqnw7p+F27dg0K9UgqgY8eOFClShIULFxITE5PzEz74IHTq5N526BA8+ywUpG2ickXawiciInlB3vjELSIikkdkbt/LCyulMlksFgYOHEjr1q3ZuXMn7777bv7dcnPh1j0o9EkpHx8fevbsid1uZ9asWTk/oWHAiy9CzZru7WvWwDvv5Pz8kieEhYVRrVo1Dhw4wKFDhzwdjoiIFFJKSomIiJwnr62UymQYBnfddRcdOnRg7969TJgwgcTERE+Hde0uXCnl7Z3xVci1aNGCsmXLsnr1ag4cOJDzE/r6wttvQ2ioe/sPP8DMmTk/v+QJmaulli9f7uFIRESksMpbn7hFREQ8LC/VlLqQYRj079+frl27cuDAAd5++23i8lsdoAuTUoW4ntT5LBYL/fr1A2D69OmuFXs5qmTJjMTUhUnBN9+EDRtyfn7xuIYNG+Lr68vq1avzf706ERHJl5SUEhEROU9eOX3vcgzDoGfPnvTs2ZMjR44wfvx4zp496+mwsk4n711WrVq1qF27Nrt27WLz5s25M2mdOjBqlHubwwEjR8LRo7kTg3iMt7c3zZo1IykpiQ1KRIqIiAfkzU/cIiIiHpKXV0qdr2vXrvTv358TJ04wfvx4zpw54+mQsubCQtpFingmjjyqb9++GIbBjBkzXO/FHNe1K9x/v3vbuXMZJ/IlJeVODOIxKnguIiKepKSUiIjIeUzTxDCMPJ+UArjlllu4++67OX36NOPGjePEiROeDunqwsMz6hll6tDBY6HkRWXLlqVly5YcP348d5ME//0vREa6t+3dm7GKKreSY+IRFSpUoHz58uzatYuTJ096OhwRESlklJQSERE5j9PpzBcJqUytW7dm4MCBxMbGMn78eI7m9S1XJUvCZ5/BwIHw0ktw552ejijP6dGjB97e3syZM4eUlJTcmdRigTFjoHJl9/YlS+DDD3MnBvEYFTwXERFPUVJKRETkPKZp5tl6UpfTokULBg8eTEJCAuPHj+fgwYOeDunKbr4ZHnsMunfPSIaIm5CQEDp16kR8fDzz58/PvYkDAmDCBAgOdm//4gs4fjz34pBc17RpU7y8vFixYgWOC7fYioiI5CB9EhQRETlPflsplalx48Y8/PDDpKam8s4777Bv3z5PhyQ3oFOnTgQHB7NgwYLcLWRftiy89RZYrf+2mSYcPpx7MUiu8/f3p1GjRsTFxbFlyxZPhyMiIoWIklIiIiLnyY8rpTLVr1+f//73v9jtdiZOnMiuXbs8HdLFzp2D0aNh0CCYN8/T0eRZPj4+9OjRg/T0dGbPnp27kzdunFFLyts743bt2lCvXu7GILlOBc9FRMQT8uenbhERkRySX1dKZapVqxbDhg0D4H//+x9bt271cEQX+PRT+Pln2LgRXnwR8vpWQw9q1aoVZcqUYdWqVRw6dCh3J+/eHaZPz6j/9emn/yaopMCqWrUqYWFhbN68OXdX54mISKGmpJSIiMh5nE5nvl0plenmm2/miSeewMvLiw8++ICNGzd6OqR/rVvnfnvnTs/EkQ9YLBb69u2LaZpMnz4d0zRzN4AyZTJWSGUmpBYvhjfegD/+yNjSJwWKYRhERERgmiYrV670dDgiIlJI5O9P3SIiItksP2/fO1/lypV56qmn8PHx4aOPPmLdhckgT4mPd7/t7++ZOPKJ2rVrU7NmTXbs2OHZVW9r1sCIERmrp555Bj7/3HOxSI5p3rw5FouFZcuWYZompmkS74znYPpB9qTtYXfabval7eOE/QTpZrqnwxURkQLA5ukARERE8pL8vn3vfOX
Ll2fEiBFMmDCByZMnk56eTosWLTwb1IVJqaAgz8SRTxiGQd++fXnttdeYPn06tWrV8kzS9MLi1x98AFWqQJs2uR+L5Jjg4GDqhddjV+wufjryE6nBqaSYKThMB06cGGT8bLRgwWbYKGIpQiWvSlT1rkqgJdDD0YuISH6U//8ULCIiko0Kwva985UpU4aRI0cSEhLCF198wV9//eW5YJxOSEpybwvUL7JXU758eVq0aMGxY8dYvny5Z4Jo3RouTNaOGgW7d3smHsl2pmmyI20HAT0DKHt7WQ5ZDpHsTMaKFT/Dj0AjkEBLIAFGAN6GNyYmpx2nWZuylunx01mUuIizDtWiEhGRa1NwPnWLiIhkg4Kyfe98JUuWZOTIkRQvXpzvvvuOhQsXeiaQhISL25SUypIePXrg5eXFnDlzSE1Nzf0AqlaFxx93b0tOhqeeAhXFzvfiHHHMT5rPsqRlpPmmYaaZpMSk4IMPXoYXFsPiWkFqGAY2w4aP4UOAJYAAIwCA3em7mZswly2pW3CaTk8+HBERyUcK1qduERGRG1SQtu+dLzQ0lJEjRxIWFsa0adOYN29e7gdxqaSUtu9lSdGiRenYsSNxcXH8/vvvngni7rszTuU737Fj8PTTkK76QvnVwfSDzEmYw8H0g65EU6BfIKZpknCp79kLGIaBj+FDoBFIupnOquRV/J70O6mmB5KnIiKS7ygpJSIicp6Ctn3vfCEhIYwYMYKyZcsye/ZsZs2albsnul1YT8pqBV/f3Js/n+vcuTNBQUHMnz+f2NjY3A/AMOC55zJO5Dvf33/Dm2/qRL58aH/6fv5M+pMUM4VAIxCbkVFuNvCfFYxZSUplMgwDf4s/PoYPh9IP8XuiElMiInJ1BfNTt4iIyHUyTbNArpTKFBwczPDhw6lYsSK//vor06ZNy73E1IVJqcDAi+sUyWX5+vrSo0cP0tPTmTNnjmeC8PaGceOgZEn39lmz4IcfPBKSXJ9j9mMsSVqC3bTjb/i7/dyz2Wz4+fqRnp5+zdtFbYYNP8OP4/bj/Jn4Jw7Tkd2hi4hIAaKklIiIyHkK8kqpTAEBATz55JNUqVKFP/74g2+//TZ3ElM6ee+GRUREUKpUKVasWMHhw4c9E0RoKEyYcPEqt3fegVWrPBOTXJNUM5XlSctJM9MuSkhlCgy69tVSmayGFV/Dl8P2w2xN23rD8YqISMFVsD91i4iIXKOCWOj8Uvz8/Hj88cepUaMGS5cu5YsvvsDpzOHixBf+cquk1DWzWCz07dsX0zSZMWOG5wKpXh1eftm9zemEZ5+FAwc8E5Nk2YbkDZx1nr1sQgoyfkZYLBYSExOv62eDzbBhxcrfKX8T44i50ZBFRKSAKvifukVERK5BQS10fik+Pj489thj1KlTh1WrVvHpp59it9tzbkKtlMoWdevWpXr16mzbto2tWz24CqVDBxgyxL0tIQGefBLi4jwTk1zVGccZdqTvcJ2qdzmGYRAYmFHwPCkp6brm8jV8STPTWJ+y/nrDFRGRAk5JKRERkfMUlpVSmby8vHjkkUdo0KABGzZs4OOPPyY9p05Su3Cl1D/FlOXaGIZBv379AJgxY0bOr3C7ksGDoX1797aDB+H558GhWkJ50e603dhNO954X7Xv9RQ8P59hGHgb3hy2HybWEXtdY4iISMFWeD51i4iIZEFhWimVyWazMWTIEJo2bcqmTZt4//33r7m4cZZcqtC5XJcKFSrQvHlzjhw5wsqVKz0XiMWSsY3v5pvd21etgokTPRKSXF6qmcqetD1YsWbp55yXlxc+Pj6kpqaSnnZ9yWovvLCbdvak77mu60VEpGBTUkpEROQ8hW2lVCaLxcIDDzxAq1at2L59O5MmTSIlJSV7J9H2vWzVs2dPvLy8mD17ds4kEbPKzy+jyHmxYu7tU6dCdLRHQpJLO2o/SrKZjI/hk+VrXKulEq9/tZQFC/vS9uXeSZ8iIpJvFL5P3SIiIldQGFdKZbJYLNx
77720a9eO3bt3M2HChOuuJXNJKnSerYoVK8Ytt9zCuXPnWLBggWeDKVUKxo8HLy/39n37PBNPDho2bBiVKlXCMAyioqKu6VrTNLnpppvo0KHDVfvOmjWLVeedZrhu3ToGDBjguv3xxx9To0YNwsPDOXLkCJGRkVcd897u93J89/Er1pICGFZ3GNGbooGM0zotFgsJCQmcjD7J4AqDrzoPwK8f/ErsiVggo+h5kplEspl8yb4DBw5k4j8r60aPHs0TTzwBwJw5c3jyySezNJ+IiORPSkqJiIicp7CulMpkGAYDBgygc+fOREdH88477xB/4Qqn63VhEfXg4OwZtxDr3LkzgYGB/P7775w7d86zwdSrBy+9BN7/1CqqVAmaNPFoSDmhX79+LFu2jIoVK17ztX/88QchISFs2rSJ/fv3X7af3W6/KCnVuHFjfvjhB9ftiRMn8vnnnxMVFUXZsmVZunTpVed/ccaLlKpW6ppiNgwDf39/nE4nySmXTipdyq8fnpeUwobdtF/zKXw9evRgwoQJ13SNiIjkL4X3U7eIiMglFOaVUpkMw6B37950796dQ4cO8fbbb2dPwqNp03//32KBLKzskCvz8/Oje/fupKamMnfuXE+HA7feCtOmwQcfwJdfFsjVcK1bt6ZcuXLXde2UKVP4z3/+w1133cVnn33mal+8eDG1a9dm0KBBhIeH8+233zJnzhzGjRtHeHg4kydPZvHixYSHhwMZibG9e/cycOBA+vXrR3R0NCEhIa7xVq5cSUREBPXr16devXrMnj0bgHtq3cORzUcA+OW9XxjVdhTPRTzHqLaj2LVm12XjztzCl5TovnLyriJ3MWv8LEa1G8XjdR9n8TeLAZj55kzOHjvLpAcm8VzEcxzYdID09HRefv5lmjZtSnh4OLfffjtnz5694vP1xRdf0KtXL9ftl156iapVq9KkSRNGjRpFpUqVXPfNnz+fiIgIGjVqRNOmTVm0aJHrua1Tpw6PPvoo9evXp3bt2qxbt8513S+//EKTJk2oX78+4eHhrF69GoC1a9fSvn17GjduTIMGDZg2bdoVYxURketj83QAIiIieYnT6SzUK6UyGYbBbbfdhpeXFzNnzmT8+PE8+eSTFLuwbtC1uPNO8PGB3bvhttugdOnsC7gQi4yM5M8//2TZsmW0b9+eMmXKeDagsmUzvjKtWwdLl2aspGrfHgpw0nfw4MH06NGDHj16XHRfTEwMv/32Gx9++CEHDx6kW7duvPzyy66fN9u3b+eDDz5gypQpACxatIjw8HDXVrbFixe7xpo+fTqVKlXihx9+IDw8nOjzanfFxMTQq1cvpk+fTmRkJE6nk9jYWABM/q3pFHlHJN0e6wbA7rW7+eiRj3h73dsXxe1wODgXe45z586RnpwOF5SF8vLxYsyiMRz5f/buOzyqMu3j+PecaclMKqn0FlpCJ6F3CwhIs2B3176uFdRVX9eyurq7Yq9rWVfFhgKyAoqAoCAIhG5Ch9BCQvpMkplMOef9Y5IhgQQSSAjl/lxXLp0zZ855ZjIzzPxyP/ez4xB/HfFXhlwzhMl/mcyyGcu496N7adO9DQAzX5xJc2tz1qxZA8Czzz7LE088wVtvvVWrx3b+/PnMmjWLDRs2EBISwi233BK4bs+ePTz99NMsXLiQsLAwdu3axZAhQwKPy7Zt2/jwww95++23effdd/m///s/Fi5cyI4dO/jjH//IL7/8QufOnfF4PJSWllJYWMgdd9zBggULaNq0Kbm5ufTu3ZuBAwfSvPJzWwghxGmTUEoIIYSo5EKfvnesUaNGYTab+fLLL3nxxReZOnUqMTExp3YwRYErrqjfAQoMBgNXXHEFb7/9NrNmzeLee+9t7CEdtXUr3H03aBp89hncdhvcdVdjj6rBfPDBBzVe99lnn3HZZZcRERFBREQEcXFxLFy4kMsuuwyAdu3aMWzYsNMew6pVq+jUqVOgx5SqqtWGyRmbMvj2pW8pzi/
GYDRweOdh3E435mD/9Euv10tWVhaFhYV4vV40TaPYUVwl2AIYdPUgAJp3bI5qUCnMLiSqedRx59u4YCMbHRtZMXcFAG63u0ql08ksWbKEq666itDy6rtbb701UA31ww8/sGvXLoYOHRrYX1VV9u/fD0BCQgL9+vUDYMCAAUyfPh2ARYsWMXr0aDp37gz4VxsMDw9nwYIF7NmzJ/C7qbB9+3YJpYQQop5JKCWEEEJUItP3jjdixAjMZjOffvopL774Ig8++CBNpcrprNK9e3c6dOjA77//ztatW+nSpUtjD8lv82Z/IFXhgw+gXTu49NLGG1Mj+fDDD8nKygoEMQ6Hgw8//DAQfFRMkWtICgo6Ol63l1dufIUnvnuC9n3aU2ov5baWt+Ep86CrOj6vjwMHDhBj9gfQwcHBBAUFUZxTfNwKeibL0eb2qkFF82ocS9d1dF3nqVee4o9j/1g/96XS+7Su61xyySV8/vnnx+136NAhgoKCApcNBgPeY/vbVTPepKQkVq5cWS9jFUIIUTP5U7AQQghRiUzfq96gQYO45ZZbcDgcvPTSSxw8eLDuB9m6FR54AB59FLKz632MFzJFUbjyyisB/9QuTTs+GGgUgwb5+4dV9vTT/ufCBWTdunXk5OSQmZlJRkYGGRkZ7N69m4ULF5KTk1PtbcLCwk6pl9vAgQPZuXNnoPG5pmnk5/sbjBswoKPjdrnxur1EN4sEl4sf/70QgMzMTPbs2YOmaWiahtVqpU2bNrRv357Q0NA6vTcGhwbjtPsbo/vw0WtML2a8MSOwomdpaSlpaWm1Pt7IkSOZNWsWxcX+YKxyT65Ro0axePFiNm/eHNhWMU3wREaNGsXChQvZtm0bAB6Ph6KiIgYOHMjevXtZvHhxYN+NGzfidrtrPV4hhBC1I5+6hRBCiEpk+l7N+vbtyx133EFpaSkvvfRSlT42J+XzwdSpsGIFLF4Mjz3WYOO8ULVp04a+ffty8ODBQLPmRteiBTz0UNVtbjdMmwa5uY0zptN055130qJFCw4ePMioUaNISEgIXHfbbbfxv//977jbfPjhh1xzzTVV3lsiIiK45JJL+PTTT6s9z4033sjMmTPp1avXCacFHisyMpI5c+bw6KOP0r17d3r37s2vv/4KgFr+0T/YrHD1faP46/D/4/Ghj6Pl5wFQVFQUqBZt1qwZ7dq1C1RwqaqKwWAAHTxuz0nHMfrO0bx/3/s8Nvgx9m7ey/gHxzMgZQD9+vWje/fu9O/fn40bN9b6fo0bN44JEybQs2dPUlJSAtMgwT897/PPP+fOO++kR48edOnShVdfffWkx0xISOCjjz7ihhtuoEePHvTr14/t27cTGRnJ/Pnzef755+nRoweJiYk8+uijZ0/YK4QQ5xFFP7YGVwghhLiA3XXXXXTt2pV77rmnsYdy1vr999959913MRgM3HvvvVW+lNdo1y645pqjl41GWLXqvG563Rjy8vJ48sknCQkJ4dlnn8VsNjf2kEDX4YUXYPbsqtuTkuD99+FsGOOFQNfJ2ryU+WHrMRUUo7o8eL3ewNS6bJMJJSyMmJgYrFbrcTcvKSkhKysLgJiYGMLCwmp96mKtmGbGZowNGXtad8HhcBAaGoqu60ybNg2n08k777xzWscUQgjRuORPwUIIIUS5ii9nUil1YhWhnaZpvPbaa4GpLyfkcFS9HBwsgVQDiIqK4qKLLqKwsLDK1KNGpSjwyCPQp0/V7Wlp8Nxz/tBKNByfD374Aa6/nqg/TiN0WyalwQY8Hg+apqHrOgaDgfiWLWndunW1gRRUqpQCnE5nrU+v6f7qog7mDqd9V2666SZ69epFYmIi+/fv59lnnz3tYwohhGhc8qlbCCGEOIaEUifXuXNnHnjgAVRV5Y033mDLli0nvsGxoVT5Clqi/o0ePRqbzcYPP/yA3W5v7OH4GY3wz39Cs2ZVty9YAJ980jhjOt+VlsIXX8DEibgfeYTclSs5sG8/UV+uQVd
AV/1Nvy1BQZhbtiSofCpcTRRFQVEUjEYjZWVlxzU8r0mZXoZVtdLW1Pa079KcOXPYsGEDW7du5ZtvviE6Ovq0jymEEKJxyaduIYQQolxFvxBZfa922rdvz9SpUzGbzbzzzjusX7++5p2Li6tePgMrjV2orFYr48aNo6ysjO+++66xh3NURAS8+iocW4nz5pvwyy+NMaLzU34+vP02+rhxOP/+d7I3beLgoUPk5uVRWlpK7KJt2LKL8cZHYm7eHLVTJ2jS5KSHrQjrTSYTuq7jcrlOehuf7sOHjy7mLpgU00n3F0IIceGRUEoIIYQoVxFKSaVU7bVu3Zpp06ZhtVp5//33a26wfWyllIRSDWro0KHExsayYsUKDh8+3NjDOapdO/+UvcrBr67DE0/A7t2NN67zgd0Ozz+PPm4cJW+8weHt2zl8+DAFBQWUlpaiKAoR4eG06tKDwXo3DDExuKPCoHxK3smoytFQCk4+hU/XdZy6k2hDNN0s3U7vvgkhhDhvyaduIYQQolzFdBSplKqbFi1a8NBDDxEWFsZHH33EihUrjt/p2Eopmb7XoIxGI5MnT0bTNGYf22C8sQ0dCscuJFBaCg8+CIWFjTKkc56u47v9duwff8yhvXvJPnIEu92O0+lEVVUiIyKIHjGC5p9/TviiRbQddj3tzQm4dTc+3VerUyiqgkHTMJb3oDpZpZRLd2FSTAwMHihVUkIIIWokoZQQQghRTiqlTl18fDwPPfQQTZo04dNPP+Wnn36quoP0lDrjevbsSfv27dm8eTPbt29v7OFUddNNMGZM1W2Zmf6G6B5P44zpHGW321kwYwaZy5aRl5tLSXExLqcTg8FARHg44ZdfTrMFC4j69luUkSNBVVEUhf5B/YkzxOHUnbUKptTsbGKLigjNziZM1/F4/Kv3VcepOdHR6RvUlzhjXH3fZSGEEOcR+dQthBBClJOeUqcnJiaGhx9+mNjYWL766isWLlx49EoJpc44RVG46qqrAPjmm29q3Zj6jFAU/5S9pKSq29evhxdfbJwxnQvcbpg9Gx55hOJHH+Xrd97hscce46tFi9gBuFwuDEYjIU2aEHzNNTRdsYK4GTNQevU6brXLIDWIi20XE2uIxak78egnCAM9XpSiIv//6zrW0lIoP19luq5TopWAAn2D+5JoSazPey+EEOI8ZGzsAQghhBBni4ov7VIpdeoiIyN56KGHeOWVV5g9ezZut5tx48ahSCjVKNq2bUtycjKpqamsWbOGfv36NfaQjjKb4aWX/FVTR44c3T57NkyaBF26NN7YzjZ2O3zzDXz5JWWHD1Nkt+MsLcUaF8fW2FjKysr4ZOBAxvt8dO/Xjxa3344aG3vSw1pVK6Nso1jpXMlez17cuptgJTjQPyrAV14RpSjogLE8wHc6nYSU94fz6J7ASnv9gvqRYE6oz0dACCHEeUpCKSGEEKKcVErVj/DwcKZNm8Zrr73GvHnzcLvdTHY4qPKoSqPzM2bSpEls3LiROXPm0Lt370Cj6rNCdDRMnw633eavAqqwc6eEUuCf0vj55+hz5+LMz8deVERZWRmaruN2u7EeOULTHj1o06YNY8eOpX///nUO1YPUIEZYR9Da05rVrtWUaCUouoJFsWBUyr8qHHNMBbCYTLhcLlyaCy9eVFTamNrQP7g/oaqEzkIIIWpHQikhhBCinFRK1Z/Q0FCmTp3K66+/zo8//kjv9HTawNFgSkKpMyY6OpoRI0awaNEilixZwujRoxt7SFUlJsKzz8Jf/+oPpuLiYMCAxh5V49q+HT75BO3HHylxOLDb7Xg9HrTyXk4AZrOZzEGD+NOf/sSAAQMw1HIVveooikJ7c3uaGZuxy7OLbWXbsGt2XLp/ep5BVVBtJrwWBRQFT7AZU6gJn1vH6/XSLrgdHc0daWFsIaG+EEKIOpFQSgghhCgnlVL1y2q18sADD/Dmm29S+OOP5AFRUVH+YEqm751RY8aM4ddff+X7779n0KB
BhJ5tj/9FF0HnzrBnD3TtCpGRjT2iM0/XYfVq+OQTfL/9RrHDgd3hQPP50HQ90FTcbDJxuFUrgu68kym33orRWH8f54PVYLpZupFkTiLTm0muL5dcXy45nmw8mo6ig+L10SSrEFPbdvy8ZD1t27blotEX1dsYhBBCXFgklBJCCCHKSaVU/QsKCuK+++4j6733KCkoQNd1oqOjUc62UOQ8Z7VaGTt2LF9//TXz5s3j2muvbewhHa95c/9PhfR0WLYMOnSAiy8+rlH3ecPrhR9/hBkz8KanY3c4KC4uRtc0NF1H0zR0XcdoNrOnfXtsd91F8nXXNeg0TFVRaWFqQQtTC/8GTUO/rR+Hj2Tjc5bRsmVLfJ/dyo/bVrGjcAecZcV3Qgghzh0SSgkhhBDlKiqlJJSqX2aTiRYREeS6XJSWlJCj6zQJDpYPIWfY8OHDWbp0Kb/88gsjR44kLi6usYdUs/37q/aZ2roV7ruvccdU39xuf/Pyzz7DfeAARXY7paWloOvouo6OPyjXzGa2de5MxJ/+xNDJkxunJ5iqogQFY9DAUz42Q1kZnTp1YvPmzRQXFwcangshhBB1IZ+6hRBCiHIVlVIyfa+euVyomkZMbCxWqxVnaSkfz5qFu3Jja9HgjEYjkydPRtM0Zs2a1djDObHNm6s2Pv/kE5g3r/HGU980Df2++3A+/zzZmzdz+PBhSktK0DUtsMKdMyiItcnJHHj3XUb9+CNDpkxp3Cb1Vqs/sC8PzSgtpWvXrui6ztatWxtvXEIIIc5pEkoJIYQQ5aRSqoGUN2ZWgOiYGGw2G5v37uWNN96grKysccd2genduzft2rVj06ZN7Nixo7GHU7O+feHYXkl//7s/rDrH+Xw+Nvzvfxz+7juOZGfjcvmbiasGAygK+TYbywYO5OC77zLxf/9j5MSJmM3mRh41YLUGAvuKUCoxMRGAtLS0xhyZEEKIc5h86hZCCCHKSU+pBhIWBgkJgD+Yiho5kr4XX8yOHTt49dVXcTqdjTu+C4iiKFx55ZUAfPPNN4Hn/FknNhYee6zqNo8HHnoIsrIaZ0ynQtdh40b44AM8s2axZNEinnjiCT787jscuo6iKBhNJlAU9kVEMG/4cA6/8QbXzpzJpePGYbFYGvseHFVRKUV5gF9aSnR0NLGxsaSnp5+9zyUhhBBnNWnnIIQQQpST1fca0BtvwEcfgaKg3Hor10VGYjabWbx4Ma+88gr3338/NputsUd5QWjfvj29e/dm/fr1pKamkpKS0thDqt6ECbB7N3z++dFt+fkwdSp8+CEEBzfe2E5G0/xN2j/5BN+mTf7m5Q4Hhzt1Ir9LF8Kjovj16qtpvXw5DlVle8eOJF5zDbdcdBFWq7WxR189qxW1olKqPJQCSEpKYunSpRw6dIgWLVo05giFEEKcg+RPwUIIIUQ5mb7XgGJi4JFH4OGHoUmTQMXOmDFj2LdvH9OnT8dutzf2KC8YkyZNwmAwMGfOHDzl0yvPSvffDwMGVN22Ywc8/bQ/+DnblJXBrFlwxRV4HnyQvJ9/5uChQ9iLitA0jSSHg+7du+Pz+VjtdDJ30CCUJ57gnvfe4/LLLz97AynwT9+rqJQqn74HBKbwpaenN9rQhBBCnLvkU7cQQghRTqbvnVmKojBhwgQmTpxIZmYm06dPp6CgoLGHdUGIjY1l+PDh5OXlsXTp0sYeTs0MBnj+eWjVqur2JUvggw8aZ0zVKSryj2fcOFxPP82R9evJzMykuLgYdB1LUBC2kBB2xMayefNm3G43o0eP5vnnn2fChAnnRpVgDZVSnTp1wmAwSF8pIYQQp0Q+dQshhBDlZPpeA9F1+N//4MEH4eOPj6twueyyy7j66qvJzs5m+vTp5ObmNtJALyxjx47FarWyYMECf3hytgoNhVdf9f+3svfeg8WLG2VIAZmZ8K9/oY8dS+nLL3N461ays7JwlpaComC12QgNCyPfYuGbDh1YnpDApZdeyvPPP8+kSZM
ICQlp3PHXRQ2VUhaLhQ4dOrBr1y5ZuEAIIUSdSSglhBBClJPpew0kNRX+9jdYvtzfW2ru3ON2ueiii7j++uvJzc1l+vTpZGdnN8JALyw2m43LLrsMp9PJ/PnzG3s4J9aqFbzwAhz72nzqKdi+/cyPZ+tWePxxtAkTcPznP2Tu2UNOTg7usjIUVSU0NJTw8HAO2mx82qkTb19yCU1uu43nX3iBK664gtBjA7ZzQQ2VUuCfwuf1es/uFR2FEEKcleRTtxBCCFGuYvqeVErVs1Wrql7esqXa3YYOHcof//hHCgsLmT59OpmZmWdgcBe2ESNGEBUVxbJlyzhy5EhjD+fE+vf3V9tVVlbm35aX1/Dn13VYuRL+9Cd8119P0ddfc+jAAfLz8vB6PKgGA+EREURERLA9MpJ3unblg2HDiLvpJp574QWuvvpqwsLCGn6cDaWGSinwNzsHZAqfEEKIOpNQSgghhCgnlVINxOGoejkoqMZd+/fvz+23305xcTHTp09n//79DTy4C5vJZGLSpElomsbs2bMbezgnd801MHFi1W1Hjvgb6LvdDXdeTYOnnsJ7993k//gjhw4dorCwEM3nw2gy0SQqiogmTdgQF8eLPXvyab9+tLrySp77+9+55ppriIiIaLixnSnNmlWtlAoPD1zVvHlzwsLCpNm5EEKIOpNP3UIIIUQ56SnVQI7tV3SSPjp9+vThT3/6E2VlZbz88svs2bOnAQcnkpOTadOmDRs2bGDXrl2NPZwTUxT4y1+gV6+q2zdvhr//3V/N1AAyf/6ZnE8+4VBmJg67HV3TsFgsxMTEEBoby6+tWvG3Pn2Y2a0bHceN49lnn+X6668nMjKyQcbTKEaORGvTBgB3cHCVcFBRFBITE8nOzibvTFStCSGEOG9IKCWEEEKUk9X3GsixlVK16KfTvXt37rnnHrxeL6+++qr0qmlAiqJw5ZVXAvDNN98EXgdnLZMJ/vUvaNq06vb582HDhtM/fl4e/PIL+p49pKWl8corr/DG++9TWlICuk6w1UpcfDzBLVrwY4cO/LVXL75t146uF1/M3/72N2688UaioqJOfxxnm5AQnO+/z0cjRvDL/fdDly5VrpYpfEIIIU6FfOoWQgghykmlVAM5hVAKoEuXLtx///0AvP766/JltwF16NCBXr16sXfvXtatW9fYwzm5yEh4+WUIDq66fevWUz/mnj3wt7+hjxlD8Z13kjl0KN8/+ijbtm3DHhnJgauvJr5HDwzduvFtUhKPde3KD02b0mPIEJ555hluvvlmYmJiTu9+neUsoaEcjoyk2Gg87rouXbqgKIq8ToUQQtTJ8f+iCCGEEBcoqZRqIKcYSoE/LHnwwQd5/fXXefvtt7njjjvo0aNHPQ9QAEyaNIlNmzYxZ84cevbsibGa4OGs0qGDf8reY4/5G56Hh8OQIXU7hq77q6s+/RTtl18odjiwOxz4vF4AkjMz6Xj77QwfPpy9e/fyr//9j4MHD6IoCsnJyYwdO5amx1ZsnccsFgvWsjK0/PzjrgsNDaVVq1Zs27YNn8+HwWBohBEKIYQ415zlnzaEEEKIM0canTeQOvaUOlbbtm2ZOnUqr776Ku+++y633norycnJ9ThAARAXF8ewYcNYunQpy5Yt4+KLL27sIZ3c0KEwc6a/QqprV4iPr93tNA2WLoVPPsG7eTMOhwOHw+Fv4A0YjEbCwsJo9oc/sK1tW95880327dsHQO/evbn88stp1qxZQ92rs5bx4495YMECzD//DO3bw/jxVa5PSkpi37597Nmzhw4dOjTSKIUQQpxLJJQSQgghylVUSsn0vXp2bChVh0qpCi1btmTatGm88sorfPDBB7jdbgYOHFhPAxQVxo0bx6pVq5g/fz4DBgzAZrM19pBOrnlz/0+FjAxYsgRat4aLLvI3R69QVgbffQczZuDeuxe73U5Jea8oALPZTFh4ONbwcLKHDuVtu529b74JQM+ePbn88stp0aLFGbx
zZxGnEz74AFVRUDweePNNGDcOKoX4iYmJLFiwgPT0dAmlhBBC1IqEUkIIIUQ5qZRqAG63/6eyOlZKVWjWrBkPP/wwL7/8Mh9//DFut5vhw4ef/hhFQEhICGPGjGH27NksWLCAq666qrGHVDd5eXDzzVBS4r88ZQo8/DAUFsLMmegzZ1KWnY29qAin0xm4WVBwMOFhYVhiY8keOpQPgbTDh6GwkO7duzNu3Dhat27dKHfprFFUBB4Piqqi6Trk50NpaZXXc7t27QgKCiItLY0JEyY04mCFEEKcKySUEkIIIcpJT6kGcGw/KTilSqkKsbGxgWDqiy++wOv1nhvTzM4hI0eOZNmyZSxdupThw4efW827t2w5GkgBzJgBW7ag796Ns6CAIrsdd1mZ/zpFwWa1EhYejrl1azJHjmSm08nWjAzAPxVt/PjxtGnT5ozfjbOS1QqAqiiBaY7HhlIGg4HOnTuzadMmHA4HoafxWhdCCHFhkE/dQgghRDlZfa8BVBdKnWKlVIWoqCgefvhh4uPj+frrr1mwYEEgUBSnz2QyMXHiRHw+H3PmzGns4dRNr15gNvv/PycHfdcufF9/Tc6OHeTk5OAuK0NRVULDwmjevDnRgweT++CDvDpsGM9s3crWjAy6dOnCI488wn333SeBVGXlKx0GKqXAH0odIykpCV3X2Xo6KyEKIYS4YEillBBCCFFOpu81gGNDKYsFTKbTPmxERAQPPfQQr776KnPnzsXtdjNhwgQJFOtJ3759WbJkCevWrWPPnj20a9eusYdUO+Hh8Ne/4ps6FT07G83nQwfCiorwhodji4wkNCQEddAgDl10Ed9kZJC+ahUAHTt2ZPz48dILqSYmE5hMx1dKHSMpKQmAtLQ0+vbteyZHKIQQ4hwkn7qFEEKIcjJ9rwHUQ5PzmoSGhjJ16lRat27N999/z8yZM6Viqp4oisKVV14JwNdff312P66aBtnZ4HaTm5vLl0VF/Gyx4CsPpBRFwaSqNPX5CJ88mcMvv8wbHTvyt++/J33rVhISEpg6dSrTpk2TQOpkrFYUVUXXdXSoNpSKiooiLi6O9PT0s/t5I4QQ4qwglVJCCCFEOZm+1wCOrZSq5x4zNpuNBx98kDfeeIOffvoJj8fD9ddfL7/DetCxY0d69OjBpk2b2LBhA717927sIVVVXAyzZ8OXX1J28CBZisJ7XbuSGxKCmpJCYm4uMXl5qEYjSkQE7tBQ0teu5d38fHRFoV27dowfP57OnTvL86W2rFbU8sdK1zSUakIp8K/Ct3TpUg4dOnThrlYohBCiViSUEkIIIcrJ9L0G0ICVUhWCg4O5//77efvtt1m+fDlut5s//OEP8nusB5MnT2bLli3Mnj2b7t27YzSeBR8djxyBL75AnzULV14eRXY7ZS4XAH2Cg8n54x8ZNWoU8a+9Btdfj3v3bgqLinDm5BCZk8Mkq5XmL7xAUlKShFF1VV4pBaDpOmoNoVRSUhJLly4lLS1NQikhhBAnJJ/WhBBCiHIVU03ki2o9qljprEIDrcZlsVi455576Nq1K6tXr+b999/H6/U2yLkuJPHx8QwdOpScnBx+/vnnxh3M7t3w9NPol19O8bvvcnjXLo4cOUKZy4WiKISEhnLxlVdy55130qZNGw4XFfHZwIHsycvDWVqKyWwmJjaWSw8fpuv+/fI6PxU2WyDs1TWt2ul74K+yMxqNpKWlncnRCSGEOAedBX/uEkIIIc4OUinVAHr0qHp50KAGO5XJZOJPf/oTH3zwAevXr8fj8XDnnXdiqofG6heycePG8dtvvzF//nwGDBiA1Wo9cyfXdVi3Dj75BO3XX3E4HDjsdnw+H+B/rYaGhREaGoph+HC4+26ys7OZN28ea9euRdd1SseP57oVK7BaLARiqOeeg1atoGvXM3dfzgfBwYHpe5qu1xhKWSwWEhIS2LlzJ2VlZVgsljM5SiGEEOcQ+dQthBBClJOeUg0gMRH+9S+49FK47z6
YPLlBT2c0Grnjjjvo27cvW7Zs4a233qLs2GotUSehoaGMHj2akpISvv/++zNzUp8PFi2Cm2/Ge/vtFCxYwMGDByksKMDn82E0GmnSpAnNW7YkYtIkDJ98wpFHH+Wjr77iqaeeYs2aNcTHx3PnnXdy29tvY3vmGaq8qt1ueOgh/1RAUXs2W2D63okqpcA/hc/n87F9+/YzNTohhBDnIKmUEkIIIcrJ6nsNZORI/88Zoqoqf/zjHzGZTPz666+8/vrr3HvvvQQFBZ2xMZxvLr74Yn7++Wd++uknhg0bRnR0NJqusS13G1tztrI1dys783dS4i5BVVRCzaF0ju5Ml5gudI3tSpuINrU7kdMJ330HM2bg3rcPu91OSUmJv2IKMFsshIWFYY2IQLn8crj+enKtVhYsWMCqVavQNI24uDguv/xy+vTpc/S1PHky7NoFM2cePVdurj+Yeu89kOdG7RxbKeV01rhrYmIis2bNIj09ne7du5+pEQohhDjHSCglhBBClJNKqfOHqqrceOONmM1mli5dyiuvvMJ9992HzWZr7KGdk0wmExMnTuSjjz7ii9lf0KR/E2ZsmcHWnK24vC5URUVHR1UqmmBrfL/re3Rdx2q20q95P67rdh2XtLsEi7GGqVz/+x/6a69Rlp2N3W7HWSnwCAoOJjwsDEtcHMrVV8NVV5EPLFiwgF9//RVN04iNjWXcuHGkpKRUHyxPmwYZGbBmzdFt6enwt7/B3/8O8ro/uWMrpUpKaty1efPmhIeHS18pIYQQJyShlBBCCFFOKqUaQGkpfPCBvyrlmmv80/nOEEVRmDJlCmazmYULF/Lyyy/zwAMPENpAzdbPdyl9U3jlx1d4Zt8zqAUqRqOR8KBwmgQ3CYRRx/JpPko9pSzNWMrPGT+TEJXAsyOeZWjroVX209avx/noo9jtdtwV0y0VBZvNRlhYGObWreGGG+Dyyylwufjhhx9Yvnw5Pp+PqKgoxo0bR//+/U/82jUY4B//gJtvhgMHjm7/8UcYO7ZB+52dN+pQKaUoComJiaxatYrc3Fyio6PP1CiFEEKcQySUEkIIIcpJpVQDeOUVmDPH//+LF/v7BJ3BaiVFUZg0aRJms5nvvvuOl156iQceeICIiIgzNobzQaYjkyd+eoKfg37G5XERVhZGs8hmJ72dQTUQagkl1BKK2+dmR94Obp5zMzd0v4FHBj2CRbGwcuVKDr35JoNzcgBQVJXQkBBCw8IwdusGN94II0dSVFzMD//7H7/88gter5cmTZowduxYBgwYgMFgqN0dCQvzPydvvrlqlU9amoRStVGHSinw95VatWoVaWlpDBs27EyMUAghxDlGQikhhBCinFRKNYBffz36/243bNsGffqc0SEoisK4ceMwm83MmjWL6dOn8+CDDxIVFXVGx3Gu2pG3g1vm3sKu/F1EWiMxu804nU5KS0vrtBKf2WCmGWHY87J4f/E/WT3vP3TxjcHlUgkKDSU5NJR4o5HQkBDUIUPgppugd2/sDgcLZ8/m559/xuPxEBERwdixYxk4cCBG4yl8lG3TBl54AR5+GMrKIDgYhg496c0EdaqUAn9fKUVRJJQSQghRIwmlhBBCiHIVlVISStUju73qZZOpccYBXHrppZjNZr744otAMBUbG9to4zkX7CnYw01zbiKjMIP4kHiMqhFzpD+UKiwsrF0opetQVAT5+eByYfP5MKCxwXqYHM+3jGp2KxMunUD7117D+Pvv0KoVtGhBcXExP86Zw9KlS3G73YSHh3PZZZcxePBgTKf7PBo40N/0fNMmSEqC1q1P73gXiri4QKWUpmkQEnLC3W02G61bt2b79u34fL7aV7QJIYS4YEgoJYQQQpSrqJSS6Xv1xOMBl6vqtrCwxhlLueHDh2Mymfj0008DwVTTpk0bdUxnq1JPKXfOu5OMwgyahjTFoPoDBZPJRGhoKA6HA4fDUXOPLk2DggLIz0f3ePD5fIHg14xCtNPAwWgneb3
y6Ne/n78v1cCBlJSUsOjbb/npp58oKysjNDSUiRMnMnTo0NMPoypr3tz/UyE7299fqlkz/2qR8j5wvMGDoW1bOHQIn6rCpEknvUliYiIZGRns2bOHDh06nIFBCiGEOJdIKCWEEEKUk0qpelZcfPy2s6DJ+KBBgzCbzfznP/8J9Jhq0aJFYw/rrPPKqlfYkr2FWFtsIJCqEB4eTklJCYWFhdhstqqvGa8H8vKhsBDN68Xn8wUCX/C/vgwGA+bgYIhswve7vmfO1jlc1uYyFi9ezJIlS3C5XISEhDBu3DiGDRuGxVLDin31pbTU37sqP99/efJkeOwxCaaOZTajffIJn990E60HDeK6gQNPepOkpCQWLFhAWlqahFJCCCGOI6GUEEIIUU56StUzh+P4bSeZ7nOmpKSkYDQaef/993nppZe4//77adOmTWMP66yx9tBa/rPxP9hMNswG83HXGwwGwsLCKCwsxG63+xvHl5VBXh4UFaH5fPg0rdowSgkKgqgoCAvDpijYHaU8Mu8Rfjj8A2qpis1mY9KkSYwYMaLhw6gKW7ceDaQAZs/2VwRde+2ZOf85xBwSwr64OMKCg2u1f9u2bQkKCiI9PZ2JEyc27OCEEEKcc+RTtxBCCFFOVt+rZ8dWSplMYD4+4GgsvXr14u6778bj8fDKK6+wc+fOxh7SWeO9de9R4i4hIiiixn3CwsIwGAy48vLQ9u2DPbvx5efjdrvxVqqOMhgMmM0mjGFhKK1aQbt2EB6ODhQVFeEp9HC4+DA7LDuYMGECzz//PKNHjz5zgRRAly5wbH+sV16BVavO3BjOEYqiYDUa8VYXOlfDYDDQpUsX9u3bh6OWtxFCCHHhkFBKCCGEKCfT9+rZsaFUaOhZNx2qa9eu3HPPPei6zuuvv87WrVsbe0iNbm/BXpZmLCXUHHrCgFYpLSXe6STS4cBbWIjb7e8bBaAARoMBs9mMITIS2rbzNxMPCUHXdex2O4cOHaKwsBAFBVuQjZK2JYy8dCRBQUFn6J5WYrXC009X3aZp/il8+/ad+fGczebN44HZs7ninXdg1qxa3SQpKQmA9PT0hhyZEEKIc5B86hZCCCHKyfS9enZsVcRZMnXvWJ07d+b+++9HVVXefPNNtmzZ0thDalRzts2h2F1MqOUE/b+cTvR9+8DlQtf1QN8oRVEwGo2YLBbU6GhIaO9vJh4UVCWMKigoQNd1wsPDad68OU0jm3K45DCLdi86c3f0WCNHwh13VN1WXAwPPnj8KpIXKk2D117DACgeD7z+un/a5kkkJiYCEkoJIYQ4nnzqFkIIIcrJ9L16do6EUgDt27dn6tSpWCwW3n77bdavX3/S21x66aV0796dnj17MmTIEDZs2FCrc7Vp04aNGzdW2TZ8+HC+/fbbUxh5/Vt5YCWqovpXw6uGy+XCnpWFx+NB0zQURSEnVadwp44pKAg1LhY6dID4eDCZ0XUdh8MRCKM0TQuEUREREaiqitlgRtM11h/2P+7JycksW7bsuHNnZGT4+1c1lNtug4svrrpt/35/xZTPR8+ePS/sKWhlZVBQgKoo6JoGJSWQm3vSm0VFRREXF0d6enqVPmNCCCGEhFJCCCFEOZm+V8+qm753FmvdujVTp07FZrPx3nvvsXr16hPuP3PmTDZv3szGjRuZOnUqf/jDH87MQBuQ2+cmPSedYGN5E2u3G7Kz0TP2UZZ1mMOHD5OdnU1xpapCs9lM3OBg1B42nM2bQ3QMGAxVwqj8/Hw0TSMsLIwWLVoEwqjKVEVlQ1btgr2GoGkaGvin8XXqVPXK1avh1VfZuHEjoWf587hBBQWBoqCoKlpFuFRSUqubJiUlYbfbOXjwYAMOUAghxLlGPnULIYQQ5Sr+gi+VUvXk2IqSc+DLfIsWLXjooYcIDw/no48+Yvny5TXuW7lip6ioqN6eN8OHD+ehhx5iyJAhtG/fnrvuuitw3QcffEBiYiI9e/akW7dugeDs2OqrypVGw4c
P59577yUlJYWEhASmTZsWeK5nZWVx9dVX07dvX7p168a9D99LqaeUIB9seWQDB/+7la1vHmLPN3lkvH8Y1/pCVFXF2qQJpTRlxywdpVUrDv1mIW+TRkFREQWbCtjy9BY2P7WZ3f/YjXOrk7CwMGJDYin4uoDt/9xO2jNpHPr2UGC8xbuLyX0pl++nfs9NN9+E1+ut8+P26aef0q9fP3r37s3QoUPZtGkTAFu2bGHw4MH07t2bxMREnnvuucBtnn76aa644gpGjRpF165dWbVqFRHx8TwVH0+fHTtI+P13FhQV+Xf+4gsURaGwsDDwmD/55JMMGDCAtm3bVjnutm3bGDBgAElJSUyePJlLL72U//73v3W+T2cdRYHg4ECllA7gdNbqptJXSgghRHWMjT0AIYQQ4mwhlVL17BwMpQDi4+N5+OGHefnll5kxYwZut5uLLrqo2n1vuukmli5dCsCCBQsC22+77TbGjx/P+PHjT2kMu3fvZunSpXg8HhITE1m1ahUDBgxg2rRpbNu2jaZNm+LxeCirRT8f8AcBK1euxOPxMHToUL744guuu+46br75Zh5//HGGDRuG1+1mcP+eONoeJry1iq5puEt12l2h+sOYHRoFW320GdMCRVHYO2cv0UPjISQEVc3FZIaSkhKOfH2EkMtCMLc0Y7PaCLGEYAm1sPO1ncRfFk9ox1B0n86uN3dRsK6A8B7h7Hl/D3HXxWHuaGZcm3F8+smndXq8fv31V7744gt++eUXLBYLy5cv57rrriMtLY02bdqwZMkSLBYLTqeTgQMHcvHFF9O/f38AVq1axYYNG4iLiyMjI4OioiK6DxzIMzffzA9TpnD/3r2MCQ8/erLNm2HoUAAKCwtZtWoVubm5tG/fnj/+8Y80b96cG2+8kbvvvps//vGPbN26lV69enHdddfV6T6dtWw2lPL3SF3XUWpZKdWhQweMRiO///47o0aNasgRCiGEOIdIKCWEEEKUk55S9ewcm75XWXR0dCCYmjlzJh6Ph9GjRx+33yeffALAxx9/zF/+8pdAMPXBBx/UeOyanl+Vt0+ZMgWj0YjRaKRnz57s3r2bAQMGcNFFF3HjjTdy+eWXc9lll9GxY8da3Z+bbroJk8mEyWTihhtuYPHixUyYMIElS5aQnZXlb+RdUECOuwS3zYenqT90iOysoqoqBoOB6CQTmatK8dq9qBaVoi1FNL+iOQ6HA6fTCVbQ3BqGVgacPzmxJlsJ7hqMJdqCr8yHfZsdj90TGJNWpuHKcmGJs6CoCiGdQ3D73AweMZh27drV6n5VmDt3Lps2baJfv36Bbfn5+TidTpxOJ3fffTcbN25EVVUOHDjAxo0bA6HUmDFjiIuLC9wuKCiIyZMng6Iw4Jln2D1lStWT/fWv8MUXAIGgKTo6mnbt2rF3715CQ0PZuHEjN910EwBdunRh8ODBdbo/Z7Xg4EBwr+t6rSulLBYLHTp0YMeOHZSVlWGxWBpylEIIIc4REkoJIYQQ5WT1vXp2DjU6r05kZCQPP/wwr7zyCnPmzMHtdnP55ZdXGyrdfPPN3HXXXeTl5REVFXXC48bExJCXl1dlW25uLrGxsYHLQUFBgf83GAyB6WyzZs1i3bp1LFu2jDFjxvDcc89xzTXXYDQa8fl8gdu4XK4TjkFRFHS7HXSd32JiUDSNIk1jRRj8ub8DvTxnMAUZMFnMEBkJUU2I7HOI3FW5aBYNcxszWYVZgdX3TKoJg8lAxMQIQr2hOLY7yPhvBk36NiFmWAwAnR/tjGqq+voqPVgKgIa/abrZYK5zMKzrOjfffDPPP//8cdfde++9REdHs2HDBoxGI5MnT67y+IQc87y0WCyB8xvGjMHHMYqKYNo00PUaf0/HOq+CbpstcH80TcNQWlrrmyYmJrJ161a2b99O9+7dG2qEQgghziHyqVsIIYQoJ5VS9ezYSqlzLJQCCAsLY9q0abRs2ZL58+cza9YsdF2nsLCQzMzMwH7ffvs
tUVFRNGnS5KTHHDVqFB9++GEgwPj555/Jz88/6Zd0r9fL7t27SU5O5qGHHuLKK69kzZo1ACQkJAT6S61Zs4bt27dXue2MGTPweDw4nU4+//hjLi4rwzZlCkODg3lizRqysrJwlpaCHRQ7YDGiKAqq1QZhoWCx4CxzY0w0kv1LNrm/5mLqakJRFEJCQggODiYkJMRf/VIIwc2CiR0RS8zQGEr2lGAIMhDaKZSsH7ICY3IXunEXuAmKD0LXdOzb7ASbglm3Yh27d++uza8nYPz48cyYMYP9+/cD/tdyamoqAAUFBbRo0QKj0cj27dtZtGhRnY4NwKBBVS/v3AmFhVD+nlFZWFgYPXr0YMaMGQBs376dFStW1P2cZ6vKlVKaBnUIpSr6SqWlpTXI0IQQQpx7pFJKCCGEKCeVUvXs2H5H59D0vcpCQkKYOnUqb7zxBosWLcLtdjNw4ECuvvpqnE4nqqoSExPDvHnzAoHmsT2lvLqXQq0Ql+bixodv5IXHXqBH7x6YVBNhYWHMmTMHm812wnH4fD5uueUW8vPzMRqNxMTE8NFHHwHw3HPPcfPNN/Pvf/870GC7si5dujAoOZn8AwcYbzYzrrSUww4HL1osPFdSwqWlpRhUldASI6FOM64mgA56aQne3BI0LQe7zYq3iRlU0At1mvVthtVmRVEUHAZH4PVT8GMBOZ/noBgVVLNKq+taAdD2lrYc/Pogac/4AwmDxUCr61thjjTT7vZ27J6xG+d3Tr666Ct69OhR4+Ngt9tp0aJF4HLLli1ZtWoV//rXv5g0aRJerxe3283YsWNJTk7miSee4MYbb+Tjjz+mffv2jBw5sg6//XLPPw9/+AOsW3d0m8sFa9ZA797H7f7JJ59wyy238OKLL5KQkEBKSkqVxvjntMqVUrpep1CqWbNmREREXNDNzn2aTrHHi9PjQ9MBBUyqQojZSJBBlT+KCCEuOIpe8QlCCCGEuMB98skn/Prrr7z00kvHTekRp+Bf/4KZM/3/r6qwYAFERzfumE6Dy+XizTffZOfOnQwcOJAbb7zxhAFmka+IXZ5dHPIeotBXiBcvmq6ho6OgoCoqJkxEGaJobmxOgjkBm3riYOpUDB8+nAeuuILLv/qK4iNHcNjtgal+qsFAWEgIoYBqs6F7vdzQ7xCLY4qJK6765dhts0F8PFartdr7bbfbKSgoIDYuluCg4DqP85DjEPf3u59HBz96SvezwR04ADff7O+/VeHaa/1T+Y5RXFyMrTy82bt3LwMGDGDt2rW0bNnyDA64gTz+OI7Zs8nPyyM2Npbgu++GP/+51jf/+OOPWblyJc899xwxMTENONCzh8vr43BxGdklLuxuLz5N9wdS5RRAVRUsBpUmQSaahQYRHVz3aaxCCHEukkopIYQQolzF32nki0A9ufde/xLyhw7BlCnndCAF/j5P9913H++88w4rV67E7XZzyy23YDAYquyX5c1iS9kWDnoP4tW9KCgYFSMmTKjK0TBHQ8OreznkPcQh7yE2lm2ktak1PSw9iDRE1tu4vV4ve+bM4dDOnf7pVoDRaCTSbCbY40Fxu/EEB2MvLaWkpITuu30sitbwKSpG/I3OVYMBc7NmYLWe8DwARkPdP146PU6MqpEBLQac2p08E1q2hH/+Ex580F8lZTTC8OHV7rpy5UoefvhhwF/h9sorr5wfgRScVqUU+KfwrVy5krS0NIbX8PidL0o9PnYVlJBV7MKj6f7wSVEwKiqVc10d/2Pp8mocdLjILHZhMxlpG2GlRWiQ/JskhDivSSglhBBClKvoKSXT9+pJcDCUfzE/X5jNZv785z/z3nvvkZqaisfj4Y477sBoNOLRPWxwbSDNnYZH92BWzNgUW41fKFVUjIr/o5iu65RRxg73DvZ79tMrqBeJ5kQMiqHa29aotBQWLoQDB8jq1o0Fe/fSrVs3TBkZ6JqGTVUJA0xOJ5qqUhwURHFZGe78/MAhxhyw8mGfMkqDIUqz+Ht
KNWkClqCazwt4feWhlLHuHy8LywrpHNWZwa3O8lXqUlL81X+//QZJSdCpU7W7XXrppVx66aVneHBnyGn0lAL/VFJFUUhPTz9vQyld1znocLE9r5gyn4ZBUU44NS8QVpVfrpjit+WIncPFLpKiQ7GZ5WubEOL8JO9uQgghRDnpKSVqw2g0cuedd/Kf//yH1NRU3n77ba674zp+8fxCni8Po2IkRAmpU3WDoigEEYQFCy7dxW/O39jv2c8I6wisas3VSQF5efDVV+jffIPryBHsdjtOl4vtl1yCEhxMt7Aw4uLiMOfkUKaqFNhsuNxucLsBMBiN2Gw2bFYrrUJCmBjs5RPbLrQmrVHV2gVjPq8Pg8FQ56oOj8+Drutc3/16DLU8V6Nq1gwmTz56uagIfvwRoqJgxAh/deD57DQrpWw2G61bt2bbtm14vd5TCjHPZl5NZ/ORIrJKykDnlPpEGVQFAwZ8mk5OqZuVhwroFhtKvO3EwbAQQpyLzq9/BYQQQojTIKvv1bN9++CzzyAiAm69FSyWxh5RvTEYDNx6662YTCbW7VzHp/s+xRZrw6pa617dVImiKAQrwXh1L5neTBaWLGSUbVTNwdS+fTBjBvr8+ZQWFGC323GXB02qojAqJoZkux3Dli3YS0pw6jqUT7NTVRWrzYbNZsMSHIySkgKjR8OIEdzlOcL8Ly4n15lHrC22VmP3er0YTXX7aKnrOkdKjtA5pjNXJV5Vp9ueFbxef5+pgwf9l8eNg6eeOr+DqUo9xU6lUgr8U/gyMjLYs2cPHTt2rO8RNhqvprE+q4icUjcmVcFoOL0/cBhUhSBFpcynsSnbjhYDzUIlmBJCnF8klBJCCCHKyfS9euR2+4OowkL/5aws+NvfGnVI9U1VVa648QpKMkpwmVyU5JZgi7H55+KcJqNiJJhgcn25LC5ZzOiQ0ZgV89EdNm+GTz5BW7aMYocDu92O7vFg9PkwGQwEh4djCAsjp1kz8r76CrU8iFJUFavVis1mIyg4GKVrV38Qdckl/kqfcu0I4eGBD/N/P/0fTo+TYNOJG5drmoamaXXuJ1VUVkSwKZi/j/w7oZZzcHXG3buPBlIA8+ZBmzb+lfrOV1braVVKgT+Umj9/Punp6edNKKXrOpuP2MkpdWNWVQxq/QSTiuJvgF6maWzJsWM2KERbz5+AXwghJJQSQgghysn0vXr0++9HAymA1NRGG0pD0XWd31y/YYoyQRE4XA6ys7OJi4url+eQQTEQTDDZvmw2uDbQz5ICv/wCn3yCb8MGHA4HxXY75rIywtxugrxelPLzpjVpwtedO5O7ezcx0dF0LSrCZrMRbLWitmvnD6JGjYIWLWo8/43db2TJniUs2rOIWDUWs8Fc476BJud1mIpV4i6hxFPC3cl3M7DlwFrf7qzStq2/ErDyc/2tt6BdOxg6tLFG1bDqoVKqbdu2BAcHk5aWxsSJE+t5gI3jgN1JVnEZpnoMpCooioJFVXH5NH7PcTCwhQnzaVZhCSHE2UJCKSGEEKKcTN+rR3Z71csmU+OMowHt9Oxkn2cfQUoQoZGhqIpKUVERWVlZxMXFHbcq36kwKAZMupH0grW0fPVdYhaux2634y0oIMjtJtrjQQVQFHRFodRopMRiId9ioUlyMqP79qX3Cy9gW7gQPB7o3x86dKjV9DKDauD1y17npm9vYu2htcRYY7AYq6/Q8Pl8QO1DqWJ3MYWuQq5IvIJHBz9a24fj7GM2+6frTZ0K5aE2ug5PPAH/+Q8kJDTu+BpCTEzVSqngE1fRVUdVVbp06cL69f7nc1hYWH2P8owq8XjZkV8CgLGeA6kKFRVTJR4fO/KK6Rp7bj9mQghRQSJ2IYQQopyEUvXI4ah6OfQcnJp1Am7dTarLX/1lUvyBW0REBBEREXg8HrKzswNBzSnz+SA3F/Ou/XhKHawYEkbx7t3YDh0ioriYII8HBdCAYrOZI6GhOCMiCI2Opv+UKUybNo0hQ4Zga9I
Err0WbroJOnasU7+jyOBIPp74MYNbDSa3NJd8Z36gorCyikqpkwVxmq6RXZyNw+3g2q7X8sqoVzAZzvHAcsgQuPfeqttKS/1BVeUKqvNF794o5VPuNF2HU6x0SkxMBGDr1q31NbJGsyu/hDKfhqWBq5dURcGoKhx0uChweRr0XEIIcaZIKCWEEEKU03VdAqn6cp6HUns9eynRSghWqlaJhIeHExkZicfjISsrKxDW1FlZGezZg3b4MFpJCZYcO/Y2kRR1j4fy8NSnKNiDgsiPjkZt2pT4Fi1o2rYtYbfdhvXhh0/3LgY0CW7CJ5M+4cEBD6IqKpmOTEo9pVXCqZNN39N1HXuZnUxHJhHBEfzz4n/y0qiXTjgl8Jxy440wZkzVbZmZ8Mgj/gq184mqon7yCbMHDeKHa6+F8eNP6TBJSUkApKWl1efozjin10dWSRlGRTkj/34YFQWfrnPI7mzwcwkhxJkg0/eEEEKIcpqmST+p+lJcXPXyeRRK6brOtrJtAKjK8c+XsLAwVFUlLy8vMJXPVIfpi7qu487KwlhcjFoeQBlcGmWRVnZP7EnMugOUWq0YIiMJCwnBHByMMmiQv0fU0KGnNJ3qZKwmK48MeoSL213M08ueZlP2JvKd+VhNVoKNwXjKg5fKoZSma7i8Lko8JZR5y7CarIztMJYnhz1J64jW9T7GRqUo/il7+/f7+6lVWL8eXnwRHnvs/FqRz2zmYPv2eCIjT/kQTZo0IT4+nvT09HP6DwKZDhdeTSfoDPV4UhQFg6KQWeyiQ1RIg1dnCSFEQ5NQSgghhCin67qEUvXl2EqpkJDGGUcDKNQKydfysSg1r4AVEhKCoijk5ub6m5/HxmEynziY0jQNR/lKejaHgwhNo6IWSQUMTg/7L04k5audRKtmlF69/A3LL7oIwsPr7w6eQO+mvZl7zVzWZq5lZtpMFuxcQLGnmAJfAZpBI6skK7CvrutYjBaaBDfhqsSruDLxSjpGnR8rrVXLbIbp0/3TJI8cObp99mx/b6mrr268sTUAi8WC6xSanFeWlJTEkiVLOHjwIC1btqynkZ1Zh4tdKJzZad8mVcHl08gtLaN5aP2H0EIIcSbJJ28hhBCinKZp5+xf6886x1ZKVQqlXC4XEydOpGPHjvTo0YNLLrmEXbt21enww4YNIyEhodr+RpUtW7aMH374IXA5MzOTIUOGBC7PnTuXLl260LNnT7Zs2ULPnj1xHBuoHePO2+/k919+x3iSv+29fPXLHEw9iM/nIys7C7fbHbjuuvDrKCn0N0b2er3k5+ezb98+srKysNvtFCgKmqLwy8I0Mg8UoJvNmDUFLTwE11MPosyfD++9B5MnBwKpp59+mgceeACA//73v4FVzVJTU5kyZcoJx1oXiqLQt3lfpl86nQ13bmDetfMY7hzOQMNA7uxzJ3cl38W0AdP49+X/5scbf2Tt7Wt5fMjj53cgVSE6Gl56CSzHBJbTp8OaNY0zpoawejW3f/klN3/wAcycWaebVn79//Wvf2X+/PksWrQocL2u6zz99NN07NiRbt26MWLEiDod/0y+N3h8GiUeH4byfzem/+UBNqxacdIxTr1mAit+XFDtdRe1jaHYXnTC2yuKwuJZX7IpLf2k54Iz994ghBCnQkIpIYQQopxM36tHJ+kpdccdd7B9+3Y2bdrEhAkTuO2222p96J07d7Jz504sFgs///xzjft5vd7jvng2a9aM5cuXBy6/++67PPnkk2zcuJFu3bqxceNGQk8y1fDhtx+m85DOtQowzRYzsbGx6LpOdnY2ZWVlgevcHjdZWVns2bOHrKwsSkpK0DSN4OBgQqNj8DZtyvJF2zic50KNj8fQqg1aVCT5o/pDbOxJz10hOTmZr776qtb714XFaKFTZCeaFTRjQtQEnhr+FE8Oe5JpA6cxvtN4OkZ1xKheYIX5XbrA009X3aZp8Je/+Kf3nQ9efpkgtxvV64VXXz1+tc2TqHj9b9myhXb
t2vH3v/89cN3rr7/O5s2b+f3339myZQtffPFFrY97pt8bHG4vmq6jlq+499A/X6XXgMG1Hu/pWDL7K35PO70m8Q353iCEELUln7yFEEKIcjJ9rx6dIJQKCgpizJgxgVCnf//+ZGRk1PrQ//nPf7jhhhu47bbb+PDDDwPbMzIyiIiI4C9/+Qu9e/fmzTff5N133+Wzzz6jZ8+e/O1vfwvsA3DfffexfPlyHn/8cQYOHAj4KxAKy1dM27p1K6NGjaJ79+50796dd999F4DbLrmNDfM3APDr17/y15F/5bHBj/HooEdZ9/2648YbHBxcJZjKy8sDYO+eveTm5vLK5a/w8/s/M+PuGbx91dts+HoDsXGxrPpxG3u2H2bGq4t4bNLLbPx5KwoKb7/8Nn379qV3796MHj2affv2nfDxWrZsGT179gxc/ve//03Hjh3p3bs3zz77bJVwbe3atYwcOZLk5GR69erF119/XeWxfeqpp+jTpw8JCQksWOCv9MjPzyc7O5s33niDHj160L17d+bOnQv4Q4KxY8eSkpJC9+7defPNN2v1Oz7nXXIJHBu0Ohz+FfmOrSI8Fx04gKqq/hVL3W44eLDWN638+jebzSQnJ3PkyBFcLhcAL774Iv/4xz8wm/1N8OPj42t97DP93jAopQ93jx3B/M/+C1StgFoydxZ/njiKO8eO4PbLhrNy8cJqRnziaq7rBvfmo5f/wT2TL+P6IX2Y8cZLAMz/8lN2/r6JV57+P3r27Bl4LU6fPv2sem8AWLVqFYMHD5b3BiFEjS6wP10JIYQQNZPpe/WoDj2lXnvtNSZMmBC4/OSTT9KsWTPuuuuu4/b1+Xx8/PHH/PTTT0RHR/PMM89QVFREePkUtqKiIpKSkvjnP/8JQGFhIYWFhbz66qsAVcKvioqMBx54IDCdpYLX62XChAk888wzXHvttQDk5uYCoKOj4H+edL+oOwOvHIiiKOTsy+HJi5+k++/dMVmq9o8ym82EhoaSlZVFfn4+QKAqymAwEGQI4h+//AN7np0Hez7IiBtHMOLmEayYuYLRfxpNyrgUAJZ8tYQjO46watUqDAYDn376KXfffTfz58+v8fGt7Pfff+fpp59mw4YNxMfH89RTTwWuKyws5I477mDBggU0bdqU3NxcevfuHfhSXlRURPfu3XnmmWf44YcfuP/++xkzZgx79uzhxx9/5Nlnn2XatGlomkZhYSE+n49rr72WGTNm0LlzZ0pLS+nfvz/9+vUjJSWlVuM9p91xB+zZAz/9dHRbRgY8/ri/uuhcDsCtVlRFAV1HB5SSklM+1ObNm2nTpg3bt2+nbdu2ZGdnM3fuXL755hsApk6dGphidra9NwweO5HNR+y47YXHjSd56AhGjp+MoihkHdzPPZNG02vgWnTA4/Vy5Eg2e/fupU2bNijVLJhQocRexJuzv6coP48bh6cw6qrrGHvNjSya8w2Tb7mdp+/8AwCff/4527dvP6veG/Lz85k4cSLffPMNQ4YMkfcGIUS1JJQSQgghykmlVD2q5ep7zz//PLt27WLJkiWBbX/7299qPOyCBQto06YNnTt3BuDiiy/m888/509/+hMAJpOJG2644TQHD9u3b8flcgUCKYDo6Ojj9svJyOGt294iPzMfg9FAcUExR/YdoXnH5qBDmauMnJwcnE4nuq6jaVqg142qqkRERKCqKoOn+Kf8hEWFEds6liP7jtCkWZPjzrdxwUYyN2TSp08fwP9FvC5++uknRo8eHag+uf322wOP98qVK9mzZw+XXXbZcY9Fu3btCAoKYvLkyQAMGDCA3bt3A7BixQrCw8MZNmxY4H41adKE9PR00tLSuOaaawLHcjgcpKenXxhfPFUVnnnGX0W0Y8fR7StXwrx5MH58443tdFmtKOXvlZqmYTjFhufPP/88BQUF9O3bl/T0dFq2bInX68XpdLJ69WoyMjIYOHAgnTt3pkePHmfde8MBuxOA8CZRx+2XdWAff7/
/TnIOZ6KoKvbCAjalptK0dWs8Hg9Opwu73U5uXh4x0TE1nm/khCsC52jasjVZB/YRE9+0/Nqjf0T59ttvWbt27Vn13rBq1So6deoU6NUl7w1CiOpIKCWEEEKUk0qpelSLUGr69OnMnj2bxYsXY7Vaa3XYDz/8kB07dtCmTRsAnE4nGRkZgS+eVqu1wYNFFRW9fNrNG7e8wTVPX0O/if0AuL317TgKHeTm5uIqc2F32CktLcVisaCqKrquExvj7wel6zqHDx/G6/Xi1bxHj29Q0bxa9SfX4Q8P/4Hn//x8vdyXys93XddJSkpi5cqVx+2XkZGBxWIJ7G8wGAJfeu3l/YSaNKkaoum6TpMmTdi4cWO9jPWcFBzsb3x+881QXiEHwPr153wopZY/F3Rdh1MIpSq//v/xj3+QlpbGtddeS0hISCA8atOmDYMGDWLt2rX06NHjhMdrjPcGo3r0MVAU0HQdp9NJbm4OT999K1fe/meSh18MwL2XjwTd/5pRALPJhC0k5KTvfeZKTfPVSq87AOMxr9/HHnuMO+64o17uW328N9RE3huEEJXJn4OFEEKIclIpVU807YSr7wG8/PLLfPHFFyxatCjQx+VksrOzWbJkCbt27SIjI4OMjAwOHz5MZmYmmzZtqvY2YWFhFBWdeCWr6nTq1Amr1VqlyXLF9D2jYgxM3yspLCGmdQxOp5PvP/ieksISCgoKKCkpQVVUQmwhNG/enPj4eLxeL4qiENkkEoCWLVsSZAlC13UyMzPZv39/lUboAMGhwTjLqzF0XafnmJ7M/WBuYAqgx+Nhw4YNtb5fI0aMYOHChRw5cgSgSt+dgQMHsnfvXhYvXhzYtnHjxiqrBlYnLi6OoqIitm/fDvjD3fz8fDp16kRYWBgfffRRYN9du3YFxn7BaNrUv/pecLD/sqJAeVXZOeuYSqm6hlKVX/+RkZEkJSWRk5PDkSNHuPbaawMNyPPz81mzZg3du3c/4fEa471hxowZFGQfxu1ysWfPbg4cOIi7rIxiRzElJaWUOOw0a9OG8PBw0lavoMRh9//uUVBUFUtQECajCUt576y6Cg4Jwec6Om1y4sSJvPvuu2fVe8PAgQPZuXNnoIG8vDcIIaojn7yFEEKIclIpVU+cTn8wVVmlSqmDBw8ybdo0CgsLGTFiBD179qRfv36B65988slAU/HKPv74Yy699NIqIZaqqlxzzTVVvkBVNmnSJDZu3BhoZlxbRqORuXPn8tFHH9GtWzd69OjBrFmz/NcpRnR0nC4nk/86mZeuf4knhj/B3k17iWgaQWhoKM2aNcNsMRNsDcZoNFJSUoLH4yEkJASj0V+obrPZaJ/QHoPBgMFgwG63s3fvXrxer/+LPjDyDyOZ+9JcHhv8GOt/XM/AKQO54eYbGDFiBD169KBnz578VLln0Ul069aNJ554gkGDBtG7d29cLleg505kZCTz58/n+eefp0ePHiQmJvLoo48GxlITl8vFZZddxnPPPUf37t3p3bs3v/76K0ajkXnz5jF79my6d+9OUlISt956K06ns9bjPW907w4zZ8JDD8G//w0jRjT2iE7PaVRKVff6/9e//gVAeno6L7zwAj/88ANdu3Zl6NCh/OUvf6Fv375A47837Nixg++//55Jkybx+OOPM6RvMvdPvIRVi74HdFSDii3ERnx8HPc+/TxvPvEIf7l+Enu3byUqriketwej0eBv4l6+al/F+0FdXTblBj5+45VAo/Prr7+eP/zhD2fVe0NkZCRz5szh0UcflfcGIUSNFL2isYEQQghxgXv22WdxOp08//zpTY3yT+W4gMOtrCwYN67qtqVLa+wrda7QNI1du3axestqsntmo/k0tDINk8mE1WrFZrNhMpmqvW1mZiYej4fmzZtX+yXU7XaTlZVFcXExuq5jsViIiooiPDw8UL3n1JxYFAvXhF2DQTGc8v1wOByBpe1fe+01fvjhB77//vtTPt6jjz6KyWTi2WefPeV
jXJBKS2HxYggL81dOnUvvGY88gn3uXAry84mNiyP43nvhzjtP+XAlJSVMmzaNbt268ec//7keB3rqSktL2bVrFzt37gxUYFUOYUJDQ0lISKB532HotnBCLCYq93iq4CpzkXMkB03TsFjMxMTGogAHDhwkKMhCXFztVxes4NN1PJpGStMIYqyWk9+glur7vUEIIWpDekoJIYQQ5epaKaXrOvYyL/kuDw63h0KXlzKfFgilgowqERYToRYjTYJMhFmqDyzOO+VLuwcoCthsjTOW06TrOnv37iU1NZXU1NTAdJ82MW0I6RhCmDEssHR9TaqrkjqW2WymVatW2O12cnJyKCsrIysrC7vdTpMmTQgJCcGHjwRzwmkFUuAPkX799Vc8Hg/NmjXj3//+9ykfy+fzUVhYSKdOnU5rTBccXfeHOFu3+i+PGgXPPnvurMhXDz2lKrPZbIEV+Lxe7ylXD52OwsLCQAi1c+dOMjMzqfy3+6ioKBISEujQoQMdOnQgLi4ORVHILHaxMasITQf1mH8+SkqKycvLQ9f9Pa2io6NQFBWXy18RVLlfVF14fBo2k4Go4FOb+leT+nxvEEKI2pJQSgghhCinaVqtekr5NJ2sEhcH7E4KXV58FaupUak5rK7jKPNiL/M3sDYoCk2CTbQMCybOZgl8oTsvtWzp7yFV0VcqJeXc+bKN/0v2/v37A0FURZ+TiIgILr74YlJSUlCaKSwqXYR6gqXcK1QEWeFh4SfdNywsDKvVSl5eHvYiO8XFxZSVlRESGUJIWAgJIQmnd+eAt95667SPUaGwsBBd14mKOn71MXECmZlHAymAhQuhdWuopybVDe40e0pVJykpib1797Jnzx46dux42sc7EV3XycnJqRJC5eTkVNmnadOmdOjQIRBEHdvIv0Kc1UKQyUCZx4fFWBEY6xQVFVFY6H/th4WHERkRQUUlVVl5L6ZT6Sel6/5lFlqGBdf7vyP1+d4ghBC1JaGUEEIIUa42jc4LXG7SchzYy7zogElVMKnqCSusdF3Hp+vklLrJLXUTEWSia0zo+Vs5ZTDAm2/6e+dYrXD//Y09opPSdZ1Dhw4FgqiKL6hhYWGMGDGC5ORk2rdvH/g9+3QfTQxNyPPlYcNW4++/tLQUj8eDzWbDaKrdxy6j0UhcXBwhISHk5eXhdDrx4GHf6n18svMTrrrqKlq0aFE/d/w05eXlAcevvCdOIj4eYmOhvKk0AO+9B+3awcUXN964aqueK6UAEhMTmTdvHmlpafUeSlW8viuHUJWbnCuKQuvWrQMhVEJCQmAa28kYVIXWYcFsyyvGp+uoiv91UVJcAor/tREaUvVY7jJ/KHWyKsvqlGkaQUaVZqFBdb6tEEKcjSSUEkIIIcppmlbjtBFN19mZX0xGkROvpmMxqLX+K7WiKBgVBaPq7wVS4PLw26EC2kfaaBdhPT/7T3XtCm+80dijOKmsrCxSU1NZu3YtWVlZgH8q0ZAhQ0hOTqZjx47VBpUGxcDA4IF8X/I9ZZQRRPVfEAsLCwGICI+o89hsNhvBwcHkleZRUlDC6vdW4yn0sGXLFoYNG8aECRNqvXJhQ6kIpaRSqo4MBnjqKbj33qqLAjz1FLRoAZ07N97YaqMBKqXatm2L1WolLS2NSZMmndaxfD4f+/btq9ITqrTSGI1GY2AaXkJCAu3btyco6NRDnjbhVrJKyihwunHk5+JylaGqCtHRMQRXrLpYidtddkpNzr3lz5VOTUIIMp7eNF4hhDhbSCglhBBClKtp+p5P09mSYyfT4UJVFIIMJ66MOhFD+e3dms72vGKcXh+J0aHn93S+s0xOTk6gIurgwYMABAcHM3DgQJKTk+ncuTMGw8m/8MUb40kyJ7GpbBNevBiVqh+rTqVK6lhexYvVaqWrpysl3UvYtm0b6enp5OXlsXbtWkaNGsWll16K5RR705wuCaVOQ79+MHUqTJ9+dFtZmX/bp5/C2fyYNkCllKqqdO7cmfXr12O
32wkLC6v1bd1uN3v27AkEULt378bj8QSuDwoKIikpKRBEtW7dusZFCU6FQVVobvCyr7AAXTViMHqJjY3FbDq+EkrTfHi9PoKCg6iuMXpNNF3Ho+nE2yw0lyopIcR5REIpIYQQolx10/c0Xef3HDuHHC5MqoKxHnojKYqCxaDg0TT2FTlRgMTo0POrYurnn2HNGujfH4YMaezRkJ+fHwii9u3bB4DFYqFfv34kJyeTmJh4Ss2VewX1Is+Xx0HvQYIIqhJMVVRJVSyrXlce3YNbd9PB3IFhrYYx5KkhLFiwgAULFpCZmcnmzZspKipi+fLlTJgwgQEDBtSqJ1p9qui3JdP3TtGUKbB7N8yZc3TbkSPw0EP+6a+nML3rjGiASinw95Vav3496enp9O/fv8b9arMyXteuXQMhVIsWLRr0tbFnzx7eeustQlu0pfOwS4mIjcNUw/tJRT+pukzd03SdMp9GmMVIUsx59m+FEOKCJ6GUEEIIUa661ff2FTnrNZCqzKSqgMY+u5Nwi4kWYcdP8zgnrVwJ06b5//+rr+Cdd/zNzs+woqIi1q1bR2pqKrt37wbAZDLRp08fkpOT6dat22lXS5gUEyOtI1lcuphMbyZmzJgVc5UqqbqeQ9d13Ljx6B4STAkMCR6CoiiYTCYmTJhASkoKn376KTt27ODQoUPk5+dTUFDAkiVLuPLKK0lMTDyt+1QXeXl5KIpCZGTkGTvneUVR4JFHYN8+WL/+6PYtW+Dvf4enn/bvc7axWgMhT31VSoE/lAKOC6WKiooCvaB27drFoUOHarUy3pmwbt06/vOf/+D1emlj1OnTKo7dRWWUaRqWavoNuuvY5Nyr6Xg0fyDVJz5Cpu0JIc47EkoJIYQQ5Y6dvudwe9mZX4yiUO+BVAWTquLz+dieX0yTYDNW03nwhWPZsqqX16w5Y6GUw+Fgw4YNrF27lp07d6LrOkajkR49epCSkkL37t3rfaqbRbVwie0SVjpXstu9G4/uwW63A3WvkvLpPpy6E6NipIelB32C+mBQqj4nmjVrxiOPPMIvv/zC7NmzKSwsJDs7G4fDwWuvvUZSUhJXXnklzZo1q7f7WJP8/HwiIiJqNd1R1MBkgn/+E26+2b8qX4X58yEhAW68sfHGVhPr0V549VkpFRkZSXx8PKmpqXTq1ClQDVXdyniVQ6jGqNTTdZ0ff/yR2bNnAzB8+HCmTJmCqqrYLC625jlwebXj/qBR2ybnenl1FArE2sx0iwmTQEoIcV6SUEoIIYQoV3n6nq7rpOU48Gg6QYaGnRJlUVVcXo1teQ56x0c06LnOiPJpawENHFiUlJSwceNG1q5dy/bt2wPhYlJSEikpKfTo0aPaZsP1yayYGRY8jFbGVizNX4pm0bDarKjGkz93dF3Hh48yvQwdnUhDJAODB9LMWHOopCgKw4YNo0ePHnz55Zds2LABu92O3W5n06ZNpKenM3jwYMaPH1+n3jx1oes6+fn5tG7dukGOf0GJjISXX4Zbbqka8Lz+OrRtC4MHN97YqhMVFQildE07rWmGuq6TmZkZqITasGFDYHW8kJCQ01oZr6H4fD6++OILli9fjqIoXHnllVx00UWBx6RZaBCRwSbScxwcKS3D4/WVL3ahlDc5V0+4qIbbp6EDFoNKp6gQWoQGyZQ9IcR5S0IpIYQQolzl6XsFLg8FLjemaqZf1Del/MvKkVI3DreXUPM5/s9zcXHVyw3wBdLlcrFx40ZSU1NJT0/H5/OhKAqdO3cmOTmZXr16YbPZ6v28J6IoCm1NbflixhcURxeTdHkSLt0VmGZkxIiqHA2pfLoPHz4AVFSiDFF0MnciwZyAWandl/yIiAjuuusuNm7cyBdffEFBQQFlZWUA/PLLL6xZs4bRo0dz8cUXn9Ly8ydit9vxer1ER0fX63EvWAkJ8Oyz/n5SFVPTdB0efxz++19o165Rh1dFx44oiYkoBw6g6TqMG1frm55sZTybzUZ4eDjt27d
nypQpp70yXn1zOp289957pKenYzKZuO222+jZs+dx+wUbDfSODyfX6eaA3UVOaRlOrw9DkBWLwYDbp6Pg/z3r+FdmBf9szWCTgZZhwTQPDSJYqqOEEOe5c/xTrxBCCFF/KldKHXS40HQwn6G+0UZFweXTOORw0jmqcasATlsDhVJlZWVs2bKF1NRUtmzZgtfrRVEUEhISSE5Opnfv3g1WFVRbmzdvZv/O/SSHJ3Ntk2s56D1ItjebXF8ueb68QAgF/ml/0YZoog3RNDU2pamh6SkHoD179qRz587MmTOHn3/+GZ/PR1BQEB6Ph7lz5/Lzzz8zceJE+vfvX28ha8XKe9LkvB4NGwZ33w1vvXV0W2mpf0W+jz+GU2yaX+8UBT74gMXXXYchOpqbb7+9xl0rVsarCKH27NkT6KsEVVfGS0hIoHnz5jzyyCOEhYUFekydLfLz83njjTfIzMwkLCyMP//5z7Rp06bG/RVFIcZqIcZqodTjY+POPWxL30i7xG6oij+MUgCjqhBlMRFe/hMdbMagSmWUEOLCIKGUEEIIUa6iUsrt08gqdmFQlDM2ZUJRFFRF4ZDDRYfIkHP7C4nDUfVySMgpH8rj8fD777+TmprK5s2bA19m27VrR3JyMn369CEiIuI0Blt/dF3nu+++Q1EUxo4di0Ex0NrUmtYm//Q2Tdfw6B58+DAoBsyY6/X5FRQUxLXXXku/fv349NNPyczMRFVV4uPjOXLkCP/9738DzdA7d+582uerWHkvKirqtI8lKvnDH/wr8v3ww9FtBw/CX/4Cb74Jp7BKZIMwm8mopqn+yVbGCwkJCYRQNa2M16FDB7Zt24bL5TprqqT27dvHm2++id1up2nTptx77711eu5bTQYKM3ayc8UShndpR5+2ndF0f76nytQ8IcQF7Cz5V00IIYRofBWVUjlFxTx1580c2LUDS1AwEVHRPPDcv2jexj995rO3XuHHWV9xKGMPT7/7XwZfOqZO53lwynhysw/zydI1VUKJMQlxzFy/g+KmEYRbTm9VuEZ1bChVx0opr9dLeno6qampbNq0CZfLBUDr1q0DQdTZGIRs2bKFAwcOkJycXG2TcVVRsSj122S9Ou3ateP//u//WLRoEfPmzSMrK4vo6GhCQ0PZu3cvr7zyCt27d+eKK64gPj7+lM8jlVINRFHgr3+F/fshPf3o9tRUmDULpkxpvLEdIygoiCNHjpCamlrjynhNmjQJBFC1XRkvKSmJ9PR0tm/fTo8ePRr6bpzUpk2b+OCDD3C73XTu3Jk777wTq9Va5+Ps378f8L+XKYqCQbIoIYSQUEoIIYSoUFEp5XB7GTXlBgaPvARVVfn24w946dEHefnLuQD0GTSMEZdPYvoj99f5HAf37uZQxh5CwiPYtHolPfsPqjoGXcfh9p67oZSun1KllKZpbNu2jdTUVDZs2BDoMdO8eXOSk5NJTk4mNja2IUZcL3RdZ968eQCMHTu2kUcDRqORyy67jD59+jBjxgy2b99Ofn4+PXv2JD8/n82bN/P7778zdOhQxo0bd0qNo6VSqgFZLPDSS3DTTVB55bm1axs1lNJ1ndzcXHbu3En28uUMfu01LCUlbNyyhbUJCUD9rIyXWF6BlZaW1uih1E8//cTMmTPRdZ2BAwdy/fXX19ik/GT27duH1WqVPmxCCFGJhFJCCCFEuYpKKY9qJGX4xYEpJV16JTPz/bcD+3Xu2fuUz/HD159z0cSriIqN4/uvPjsulFKAYrf3lI/f6MrKwOeruq2GwEPTNHbt2sXatWtZv349xeW9qOLi4hg5ciTJyck0bdq0oUdcL37//Xf27dtH7969q62SaiyxsbE8+OCDrFq1iq+//pqNGzcSHR3NRRddxMaNG1m2bBm//fYbl112GRdddBEmU+3DUKmUamAxMf5g6q67jq7IN2TIGR3CsSvjVayKB3DtihXElpSg6zqT9u2j3+OP06Zv33pZGa9p06ZERESQlpaGruu
NsvKcpmnMnDmTpUuXAjBx4kRGjx59ymNxOBzk5+fTuXNnWUlPCCEqkVBKCCGEKFdRKeWp1P8EYPZH7zHwktG1OsZHL/8DW1g4Q8dNJDg4GIPBgKqqqKqKDiyc9RXTP5tFRJMoPnntRYrtdkIqNefWAa+m13j8s96xVVJQJZTSdZ09e/awdu1a1q1bh91uByA6OpohQ4aQnJxM8+bNz6kvbZWrpMbVYRWyM0VRFAYOHEi3bt2YOXMma9asYcmSJaSkpNC3b1+WLl0aaJA+adIkUlJSavX45+XlERoaWu+r+olKEhNh5kz48Uf/6nuDBzfo6Xw+H/v37w8EUMeujGc0GgMVUH22bsVZXIyztJTI8HCigoPrbVEDRVFISkri119/JScn54xXSZaVlfHBBx+wefNmjEYjf/jDH0hJSTmtY1aeuieEEOIoCaWEEEKIcpqm+cOjSpnQZ2+9wqF9e5n+/KxaHeP6ex5k9+495OTkYDAYMJpMmIwmVFVl48pfiIyJA1MQRcUldOmdwuxP/8OoK68JVGWVuVzs3ZtBwbYiQkJCsNlsVX7qUsnSKKoJpXSbjX0ZGaSmppKamkpBQQEAkZGRXHLJJSQnJwd6rJyL0tLSyMjIoFevXjRv3ryxh1Oj0NBQbr31Vvr3789nn33G2rVrCQkJYeLEiWRlZfHLL7/w4YcfBpqhd+jQocZj6bpOfn4+cXFxZ/AeXKDi4/3T+Cp4PLB0KQQF+SunTuN1U9eV8dq0aXP0PejTT3EfPAiArmlHq7nqSUUolZaWdkZDqaKiIt58803279+PzWbj7rvvJqF8auLp2LdvHyChlBBCHEtCKSGEEAICjXkVRQmsfDfzvbdYsXA+L86YRVBw7ZraqgYDFosFS5AFVVHwaRqa5kNVVX794TuOHNzPX64djw64XS7ysg4zYvwVaLq/OqvM7Wbbzt3sWvlTtcc3mUzVhlWVf0JCQrBarYH9rFbrKfdAqbPyKXg64HG7sft8vPLUU+Tm5gIQFhbGiBEjSElJoV27dudsEFXhbK+Sqk5SUhJPPfUU8+bNY9GiRXz55Zd06dKFe++9l2XLlrFp0yamT59Or169mDx5crWBgNPpxOVyST+pxjB1Kqxa5f//kSPhH/+AY1avq0nFyngVIdS+ffvwVZpuW3llvISEBFq2bHncyngBVmvg9avpOoZ6DqW6dOmCoiikp6czYsSIej12TQ4dOsQbb7xBQUEBsbGx3HvvvfUWiEkoJYQQ1ZNQSgghhOBoKKWqKlajgW//8y4/fzebF2fMIiQsvNbHMRqMWCwWNM1H8xYtcJaW4igu5khmJr+vXc3L3ywgNr4pIaEhGA1GrhnQHbejiPaJSYD/S2GPQf25rE8SxcXFlJSU1PiTm5sbWJnuZCwWy3Fh1clCLZvNVvMX0hrk7t2LsaiIkpISvB4P9uBgXC4XQ4cOJSUlhYSEhDof82yWnp7O3r176dmzJy1atGjs4dSaxWLhiiuuICUlhRkzZrB161Z27drF5ZdfzogRI5g9ezYbNmxg06ZNDB8+nHHjxmGz2QK3l35SjaSo6GggBfDTT/Dmm3DffTXsXhSYhrdz5856WRkvwGoNvJYbolLKarXStm1btm/fjtfrbfBgPT09nX//+9+4XC4SEhK4++67qzznT1dFk3MJcoUQoioJpYQQQgj8U/fAH0o5crP44IWnadqqNdOunQiAyWzhrW8XAjDjjZf47vOPKcrPY++j23jjqUf597yfiIiK5qOX/0GQLYSBoy/H6/Fgs4Vgs4Xw06wv6TlgMLawMBwOBw6HA7PFzKBRY1kwcwb3Pv0PAAwGlXbN4om21u6v6T6fj9LS0iphVXFxMaWlpTWGWkeOHKGsrKxWxw8ODq6xAqvix+VysXv3brZt20bcpk1cUViIQVWxhYQQ0akTL7744nkVRFU4F6ukjtWqVSseffRRfvrpJ+bOncvs2bNp0aIFN9xwA0eOHGHOnDn89NNP/Pbbb4w
ZM4YRI0ZgNBpl5b3GEhYGrVpBeX8iAD75BNq3Rx8zJrAyXkUQdeTIkSo3j4+PDwRQCQkJp/f7O6ZSqr5DKfCvwrdnzx52795Np06d6v34FZYvX87nn3+OpmmkpKRw88031+tUaYfDQUFBQaD6SwghxFESSgkhhBBUnb7XqW0bvt+VjaqAsZow5YZ7p3HDvdOqPc4fpz5KSUkxubl5uFxlmM0WAK67+wH/edAD1VMup4uJt96Noirk5eXy3baDmEwmQs21/+fZYDAQGhpa5xWvvF5vjRVY1YVaRUVFZGZm4vF4AHC5XOTm5pKTk4OjvI+U0Whksqbh8/mnK5aVlXEoL49f3nrrhKFWxU9QUNA59YVt69at7Nmzhx49etCyZcvGHs4pU1WViy++mF69evH555/z+++/889//pORI0fyf//3f6xYsYLvv/+eb775hmXLljFp0iQKCwsBCaXOOEWBJ56AP/0J3efD4/FQ5nJRet99fD5rFtsqBSmKotC6dWsSEhICIVR9rIwX0MCVUuCfajpv3jzS0tIaJJTSdZ05c+awcKH/Dw5jxoxh/Pjx9f4+VDF1r1WrVvV6XCGEOB9IKCWEEEJQtVLKZjIQajFS5PJgPIUCn6CgIABcZS7CCKtynYKC1WrDarXh9XopLi4u/ynBg4orN5sVe93079+/XqeOHMtoNBIeHk54eO2nJhYWFrJ69WpWrlzJrl27CAkJISQkhNatW9OqVStiYmJosWgRoYsWoWkams+HXdPYunVrlb41NVFVtU69sip+zGbzGQ+zzocqqWNFRUVxzz33sG7dOr788kuWLFnC+vXruf7663nuuef47rvvWL58Oe+//z7FxcVomibT986gwMp4ubl4e/Sg29y5gfctgNGLFmG55RaaJqfQvH0HQqPjcGpQ5tNw6pBm92B1FhFm8Qff4RYTRvU0XjdWK2oDV0q1adMGq9VKWloakydPrtdjezwePvroI9atW4eqqtx4440MHDiwXs9RQVbeE0KImkkoJYQQQlC1p5SiKLQKC2azy4Om64EvXrVlMBgxmoyUuVz4W35Xf3uj0UhERAQREeGUOp043V7SN65h9Z4dzJ49m169ejF48GA6derUaBVEDoeDdevWkZqayq5du9B1HaPRyJAhQ0hOTqZbt25YLJajN8jLg40bAxebjR7N4Gefxe12n3RaYeVKrSNHjlBSUlLlS3dNjEZjnXplVex3OtNztm3bxu7du+nevft5Vf2gKArJycl06dKF2bNns2LFCt58802Sk5OZMmUKI0eO5JtvvmHmzJnk5uYyd+5crrnmGqKjoxt76Ocdt9vN3r17A9PxqqyMpyi4O3ZkwL59BFksmIODKU7qzsCErmQnppCrQ6697Lh3nzynB3ChKGAxqLQMDaZ5aBC2OlRnBlitKA1cKaWqKl26dGHdunXY7XbCwsJOfqNacDgcvP322+zZs4fg4GDuuusuOnfuXC/Hro40ORdCiJpJKCWEEEJwtFKqIvyJt1nYblRxezWCjIY6Hy8oKIhiRzFutzswha9mCqrJQozVytQ7buG3VatYsWIFa9euZe3atcTExDB48GAGDBhQp8qmU1VSUsKGDRtITU1l27Zt6LqOwWCga9euJCcn07Nnz0A12HE6dqx6uU8fFEXxr0hosdRpupeu67hcrlr3yiopKSEzM5PS0tIqzZxrYjKZajWt8NhQy2AwnHdVUsey2WzceOON9OvXjxkzZpCamkpaWhpXXHEF99xzD7t372b16tVs3ryZrVu3MmLECMaMGYPVWrtVKsXx6rwyXrNmqA88gGPfATZdfSMF7TuhGQ0Y8/MxR0ehKtWXeeq67l/906exs6CEvUWltAwLpkMTG6a69H47A5VS4J/Ct27dOtLT0+nfv/9pHy8rK4s33niD3NxcoqKiuPfee2natGk9jLRm0uRcCCFqJqGUEEIIQdXpewAmg0rHSBu/5zjwaTqGOk5zCbJYKHYU43K5Thp
KeTUNRYGOUSE0CQlizJgxXHbZZWzbto3ly5ezceNG5syZw9y5c+nevTtDhgwhMTGxXpuHO51ONm3axNq1a0lPT0fTNFRVpXPnzqSkpNCrV6/aBQ4jR8Idd8Cvv0KfPjB27CmPSVEUgoODCQ4OrlMljqZpOJ3OWvXKqvjJy8vD6XTW6vilpaX8/vvvNG/enNmzZ9c61DoXm7137NiRv/71r3z//ff88MMPzJgxg9WrV+NyuRg1ahSXXnop3377LYsWLWLlypWMHTuWYcOGNfhKaeeDuqyMl5CQQHx8fJWKSV3X2fvXv7Fz5348ZgvmYjuq2+2vjFKA6Jhqz6soCgpgMRjQdR2PprO3sJTcUjdJMaFEBZtrdweOrZQqKTm1B+IkEhMTAUhLSzvtUGrHjh288847lJaW0qZNG/785z/XW/VVTaTJuRBCnJh8YhBCCCGoOn2vQsuwYLJLyzhS4iZIUev0hSLQV8rlIiys5uomrfxLYfPQIJrajoZXiqLQpUsXunTpgsPh4LfffgsEVBs3biQyMpJBgwYxcODAU/7re1lZGZs3b2bt2rWkpaXh9XpRFIUOHToEgqg6N0ZWVX8odccdpzSm+lC5N1VdaJp20mmFJSUlzJs3D6PRSKtWrdi7d+8prWRYm15ZNpuN4ODgRg+zTCYT48ePJzk5mRkzZrBt2zZWrVrFkCFD6Nu3L3369GHRokUsXLiQmTNnsmzZMiZPnkzPnj3lS3g5XdcDK+NVhFCnszKepuuk5Tg4UOxFiY0laF8GSuW+bTk5YLFA6IkDF0VRMBsUNF3H4fay7nAhXWPCaBZaQyVkZcdWStUy1K2ryMhImjZtSnp6Orqun/JzavXq1Xz88cf4fD569erFLbfcgtlcywDuNMjUPSGEODEJpYQQQgiOn75X8f9J0WE4yvJxejWCDLUPpgwGIyaTkbKyMnR0lGr6Sum6TplPI8RkoHNUSI3HDg0N5ZJLLuHiiy9m165drFixgnXr1jFv3jzmz59PYmIigwcPpkePHhgMJ55q6PF42LJlC6mpqWzevDmwml779u1JTk6md+/eRERE1Oo+nm9UVT3pSobbt29n3bp1XHLJJdx7772AfyXD2vbKKi0tPW4lwxNRFAWr1VrnBvANsZJhs2bNePjhh/n2229ZvXo1O3fu5Nlnn+WGG25g3LhxDBkyhLlz57Jy5UreffddOnTowJVXXkmbNm3qdRznAl3XyczMDPSD2rVrV2DFQvD/Xlu1alUlhKptAKyXB1L77U5MqoLRaIHmzWH/Afw97MplZkIbM1hOHjCpikKQQaVM09iSY0dRoGnISW5XqVJKa8BKKfBP4Vu8eDH79++vc7ij6zrz58/nu+++A+CSSy5h8uTJZyzslSbnQghxYhJKCSGEEBw/fa+C1WSgd3wE67IKcXk1LAa11o3PLZX6SlmOmcKnlQdSVpOBPk0jatW3qqKKqUOHDkyZMoU1a9awfPly0tLSSEtLIzQ0lIEDBzJo0CDi4uICt/N6vaSnp5OamsrGjRsDlT2tW7cmJSWFPn361N8qam43fPMNFBfDFVfAedZDpbpeUkajkbCwsDpPA/J4PLXulVUxxXD//v11Wsmwrg3gT7aSoaIodOzYkT59+hAWFkZWVhbTp09nyJAhTJ48mZtuuinQDH3r1q288MIL9O3bl4kTJ57X/XQCK+NVCqFKK/VYMhqNgfCpQ4cOtG/fvua+bCext6iUAxWBVMX7lS0E4mIhO/vojpoGBw5A27ZgOPlHfkVRsKjlwdQROzaTgTDLCRYDqFQppWtag1VKwdFQKi0trU7hjtfr5dNPP+W3335DURSuvfZahg0b1mDjrE5GRgbAebUgghBC1CcJpYQQQgiOTt+r7gt5RJCJ5KYRbMgqosTjw6gqtWoIXNHsvMzlCoRSuq7j1XS8uk6o2Ujv+HBCTmHlK6vVyvDhwxk2bBj79+9n+fLlrFmzhoULF7Jw4UISEhJo0aIFpaWlbNm
yJdAvqUWLFiQnJ5OcnExMTPU9Z07LP/4B//uf///nz4c5c/xT+s4DO3bsYMeOHSQlJdG2bdvTPp7JZCpffTGi1rfRdR23213rXlklJSWntJLhiaYVbt++HafTyVVXXUVYWBhz585l+fLlbNq0iWuvvZZevXrxwAMPkJaWxjfffMOaNWtYv349F110EZdddhnBwcGn8aidHU64Mh5gsVhISkoKhFBt2rQ5rdUeKzjcXnbll6AolQKpCk2aQFkZVKrIwuOBgwehVSuoofF5ZRXBlMunkZbroF+zyJpD+MqVUrreoJVSHTp0wGQykZ6ezpgxY2p1m9LSUt555x127NiBxWLhjjvuoGvXrg02xprs378fm812XoeyQghxOiSUEkIIIai5UqpCuMXEwBZN2Jbn4JDDhdPrw6SqGJTqgyyo2lcqNDQMX3n/KIOi0DY8mA5NQjAZTi+wURSF1q1b07p1a6644grmzJnD//73Pz799FM8Hg9Go5GOHTsyfvx4Ro0aRXx8/Gmd76QWLz76/4cOwf79cJ5M3zobVtyrvJJhXarbjl3JsDahVk0rGWZkZLB//34+++wzQkND8fl8HDhwgOzsbJYuXUqrVq0YPHgwsbGxJCUlERISwrp16/jss8+YN28eY8aMYeTIkYSFhZ0zDdFLS0vZvXt3IISqaWW8ihCqZcuW9T49zD9tz45H0wmq9n1Dgfh4f7Vi5ZXwSkshPx+iardYgL/PlEq+08PewlLaR9bQm81qRSnfv6ErpUwmEx07dmTr1q24XK6TVpnl5uby+uuvk52dTUREBPfccw8tW7ZssPHVxG63S5NzIYQ4iXPjk4AQQgjRwKrrKXUss0GlW0wYcbYgdheUUFTmxaPpqAoYFAW1/Acob8irYgkOxqequHw+DIpKVLCZhEgb0db6abCr6zq7d+8mNTWVdevWYbfbCQ8PZ8SIEZjNZgoKClBVlZUrV5KZmcmQIUNISUnBYjnxioCn5Ngvw3DeVEnt3LmT7du3k5iYSLt27Rp7OHV2qisZ6rpOaWlplbDqiy++wGQyMWHCBHRdp7i4mO7du7N//37WrFnDzp072bFjB23atKFZs2aBvlh5eXmBJukvvPACbdu2pWnTpjVOLzxRA/iG7gd0spXxIiMj6dixY40r4zWEXKebApcHs3qC3naKCi1awN69/iqpCiUltQ6loOL9DDKKSmkdbsVY3eqj5RV+iqr6K6WMRtB1aKDHITExkbS0NLZt20bPnj1r3G/Pnj28/fbbOBwOWrRowT333ENkZGSDjOlkpJ+UEEKcnIRSQgghBNWvvlcdRVGIs1mItZopLPNyyO4k1+mmzKfh1jQqF5UoCpgtForycgkPs9KtVVPCLMbT/vKq6zr79u1j7dq1rFu3joKCAsC/hPyll15KcnIyrVq1QlEUPB4PGzduZPny5Wzfvp2MjAxmzpxJSkoKQ4YMoXXr1vX3Zbq4+PhtISH1c+xGdjZUSTUGRVGOW8kwPDyc1q1bc/XVVx/33PH5fPz444989913OJ1OoqOjGTNmDGFhYZSUlJCVlcXSpUtJS0vjyJEjeDweOnbsGJhmWNuVDIOCgurUK+tEKxnquk5eXh47duyol5XxGspBuwtNB4PhJK9XgxFatoSMDH9fKYA6rkQJYFJVXF6N7BIXzUOrmXLZrBn06oV66JC/UuriixsskAJ/X6mvv/6atLS0GkOpdevW8dFHH+HxeOjatSu33377Kffuqg+y8p4QQpychFJCCCEEtauUqkxRFCKDTEQGmQKr6DncXtw+DU0HVQGLQWX31kMs+uo/NJ0wgfCOp97oVtd1Dh48GAiicnNzAX9AcNFFF5GcnEzbtm2PG7/JZCIlJYWUlBSOHDnCihUrWLVqFStWrGDFihW0aNGCwYMH069fP6xW6ymPDwCH4/httQyldF2n1OPD7vbicHsp82pouo6qKJgMKqFmA2FmEzazodaN5uvLrl272LZtG126dKF9+/Z
n9Nxno/z8fJo0aVLta8VgMHDZZZfRp08fPvvsM7Zt28Znn33GJZdcwrhx4zCbzUyaNIn9+/fzzTffsH37dhwOB/369WPixImEhoY26EqGwcHB2Gw2fD4fJSUl2O128vPzcbvdmEwmjEYjJpOJVq1a0blzZxITE+natWudm9jXN6fHx5HSMoy1fe5bgqB9eygsArMJTmH8Fa+zA3Zn9aEUwJtvsrK4mBKfjz888USdz1EX8fHxREZGkpaWVl6JevSx0HWdH3/8kdmzZwMwfPhwpkyZcsZW2KuJhFJCCHFyEkoJIYQQ1L5SqjqKohBkNFS7gp6lYwcAtm/fXusGvZVlZmaSmppKamoq2eUra4WGhjJs2DCSk5NJSEio9ZhjY2OZPHkyEyZMYPPmzaxYsYK0tDS+/PJLZs2aRe/evRkyZAgJCQmnVj11bKWU2ez/OQG3TyOz2MWBIiclHh++Y/oXVWZQFIKMKi3CgmkeEkSw6eQrFtaHC7VKqjo+n4/CwkI6d+58wv1iY2N54IEH+O233/j6669ZuHAh69at44YbbqBLly60atWKBx98kC1btvDNN9/w22+/sW7dOi655BJGjRpFs2bN6jSuE61kaLfbOXDgABkZGWRkZLB06VLKysrQdR1VVYmPjyc2Npbw8HCCgoIIDQ0lNTWVl156iUsvvZR27doFmr/XNK3wxRdfZO3atURHR+N0OomPj+euu+7ipptuAiA1NZUXX3yRr7766pQe98IyD95qekn98M0XJPZKplX7DsffyGiCKlM19aOv0ZAQoOprfONvv/LYH66hZbv2aJqOJSiIO594FmPvZDw+rfr+dxYLe3v1Iisrq8Gn6iqKQlJSEitWrODIkSOBFUZ9Ph9ffPEFy5cvR1EUrrzySi666KKzoofTvn37sNls9be6qRBCnIcklBJCCCE4eaPzUxUaGkqzZs3YvXs3Xq+3Vo2djxw5QmpqKmvXriUzMxPwr7Y3aNAgUlJS6NSp02mN02Aw0KtXL3r16kV+fj4rV65kxYoVrF69mtWrVxMXF8fgwYMZMGAAoaGhtT/wsZVSJ7itT9PZW1TK3sIS3D4dRQGj4l/VsLovk7quo+lQ6vGxPa+Y3QUltAgNomM9NIs/kd27d7N161Y6d+5MQkJCg53nXFFQUICu67X6kq0oCgMGDKBr1658/fXXrF69mldffZX+/ftz1VVXERISQvfu3UlKSmL58uV89913LFiwgBUrVjB+/HgGDRpU6+d55ZUMK1bGy8zMPG5lvNjYWKZMmUJSUhLt27dn165dvPfee3z99deBUGv37t08/fTTdOjQgU6dOtGpU6dAwJWTk0NGRsZxKxnu3LmT9u3b061bN8DfaPu+++7jo48+YtiwYdhsNoYNG8bHH398wl5ZNpsNczVBrqPMG2gqXtmP33xJSFh49aHUsTIzoagIAM1mgxYtUNWqwW7Ldu15b8EyAL79+ANeeexB3v3hFxxuL02Cqw+Yg4KCaj3t8nRVhFLp6enExcXhcrn497//TXp6OiaTiVtvvZVevXqdkbGcjN1up7CwUJqcCyHESUgoJYQQ4oJU4vFS4PTgcHspLPNgdyr0v+52fOHh/HYon3CLiVCLkSZBJqym0/vnsmPHjmRmZrJ37146dKj+y2NeXl4giDpw4ADg/7LXv39/UlJS6Ny5c4OsVNakSRPGjRvHmDFjSE9PZ8WKFWzatIlZs2YxZ84cevbsyeDBg0lMTDz5F6tahlJFZR5+z3FQ6PKgKgpBhhM0bi6nKAoGBQyqAb18FcOMIn8/r6ToUKKtDdC4HamSOlZ+fj5AnXoqhYaGcsstt9CvXz8+//xzfvvtN7Zs2cLVV19Nv379MBgMDB8+nH79+vHDDz+wePFiZsyYwU8//cSVV15JUlLSCY9/Oivj/fe//8VkMgVWZtM0jUcffZQZM2Ywbdo0Ro4cycSJE6ucr7qVDA8dOkSbNm0YPXp0oFIrNjaWGTNmMGjQINa
sWcPSpUu54oorcDqd/PTTT5SWlqIoCtHR0QwfPpz8/HyWL1+Oz+dD13V69OjBmDFjsNls7Cly8dWH/0ZVFTSfjxvvfxhHYQHbt2zk7Wef4JNX/8WtD/8f/UZcwsz33mLZ/Ln4fF4io6J58O8vEde8OR+//Sp79+7B6XJyJOcI/3rrQ2K69ajxce01aCjv//NZNB0cbi9rf1nKs88+i9PpxGAw8M9//pMRXbrQ4dNP+WjzZnp89x1aeDgTJkzgueee4+mnn2bLli0UFBSQmZlJhw4d+O9//0tUVBTFxcXcd999rFmzBoCrrrqKp556CvBPvUtOTmb16tVkZmZyySWX8O677wLw22+/MXPmTObPn09YWBiDBw9GURS8Xi87d+7kjjvuoKysjDvuuIN77rmn1s/RhlDR5LzNebL6qBBCNBQJpYQQQlwwNF0nt9TNgfLm5F7NP1VMwd8P2BRkRTeayHd6yHP6e9QYVYUYq5kWocHEWM2n9BfvTp06sWzZMnbs2FEllCosLAxMzdu7dy8AZrOZlJQUkpOTSUpKwmQynf4drwVVVenatStdu3bFbrcH+k6tX7+e9evXExUVxaBBgxg4cGDNK1nVIpTKLilj85Ei3D4ds0HFcAqPp3/JegVN1yl2+0jNKqJLVAitw0+zJ9Yx9uzZQ3p6Op06daoxTLzQ5OXlAXULpSokJSXx5JNPMm/ePBYvXsxHH33Eb7/9xvXXX09MTAzBwcFMmjSJoUOH8u2337JmzRpef/11EhMTueKKK2jRogXgr0CpCKBOtDJeRQhV3cp4N910E0uXLgVgwYIFge0vv/wygwYNok+fPseN/8knn6RZs2bcddddx61kGB0dTUJCQpXwctKkSbz77rvcd999pKens3PnTl5++WVeeuklvF4vjz/+OCUlJRw+fBiDwUBubi4jR47E4/FQUFDAO++8Q3R0NBEREcxftITb/u9ZWid0RNM0XCUltA8NpfU3X3LJldfRe8hw1P9n77yjoyjfNnzN9mx6J9SQQgKhhFACCEgXAkgXkGJXVFQUKzYU9adir6igdJXeOwYILRAC6Z1QQ0lvm+3z/bHJSqgJRf10rnNySHanvDM7E/Lecz/3IxNY/stPZCQn8uac+SiUSvZuXs8nrz3Puz8uRJTLSU1PYc7Xc/Fwr3a6lRSD29Xv5z2b1tNr6HAEIOf4cWbOnMnWrVtxcXEhOzubHj16cOKhh1idkUF/hYL/eXkhX7OGokty5GJiYkhMTKRBgwY89dRTvPbaa/z444/MmjULg8FAYmIiVVVVdO/endDQUMaOHQvYHIrR0dGYTCZatWrFgQMH6Nq1K6+99hpTp07l7NmzuLu7U1paSuPGjVm3bh2//fYboaGh6HQ6unTpQmRkJJ06darr5XnbOXHiBABNm958lqCEhITEfwFJlJKQkJCQ+E9QbjSTkl9Okd6IKNrEpksdOkarGZO+CsFBbc+GEkURsyhyrsLA+UoDnhoVYd7OOKnq999nixYtAFuuVI8ePYiPjycuLo7s7GxEUUSpVNK+fXs6depEmzZtCiOz1wABAABJREFUrlq+81fi4uLCPffcw4ABA8jKyiImJob4+HjWrVvH+vXrad26NT169KBNmza1y6suz5S6LOT8QqWBYxdKsVRn49xqSUuNy8poFUktqEAE/G+jMCW5pK6kRpS62YwctVrNqFGj6Ny5M4sWLSItLY133nmHoUOH0q9fP+RyOZ6enjzyyCP07duXZcuWER8fT3R0tD33qaysrNY2b6Yz3sKFCwFYsGABr7zyCps2bSI5OZmVK1eyZ8+eq67z7rvv1utYxavkozk6OtK/f3/mzp3LL7/8Qs+ePRk0aBBqtZqLFy8yffp0jh07hkwmw2g00rt3bx5//HHGPvYUS7/8mO73RNG+a3f8Q1pisVpRKBQ4OGhwdNRisVqJ37uL42nJvDZ5DIhgtVoQRSvHjx+nzGKlU8fIPwUpgPPnbblvWlt3vtP
Hc3g8qhdF+RexWCx8u3oLCLBnx3ays7Pp2bOnfVWZTMappCS6OTjwVkEB8rNn6b1wIf1mzLAvM3jwYBo0aADA448/zsiRIwHYsWMHn376KTKZDEdHRyZPnsz27dvtotTYsWNRKBQoFArCw8PJycmha9eu9O3blxUrVlBWVkb79u3p378/PXv2ZPbs2YwbN86+3/LyclJTU/9WUarGKSWFnEtISEhcH0mUkpCQkJD4VyOKtjKvrKIKTFYRlUx21ZbqNdPHS98RBAGlIKCU2TKQCqqMHDhbRLCHE81cHOrVqc9sNrNq1SrS0tKQyWTI5XLatGlDx44dadeu3d/atvxaCIJAixYtaNGiBePGjSM2NpaYmBiSkpJISkrC1dWVbt26cdddd+Ht7X2lKHWJU6rMYCLxok2QUt8GQerSMarlAgaLlfTCCrQKOT6Ot17Kd+LECVJSUuzH/1dgtopUGM1UGM0YrVZEsVp4U8hwVilwVMr/9myamynfuxpNmjTh1VdfJTo6mrVr17Jq1SoOHTrExIkTUavVZGZmkp2dTUFBARUVFZw6dYqMjAx7HtrAgQNp1aoVQUFB9cs9u4wHHniAKVOmUFhYSExMDCdOnLC74s6fP8/jjz/OuXPnePLJJ+u97cOHD+Pj44OPjw+pqan217t27cqxY8fYsWMHq1at4s033+To0aPMmDEDLy8vjh49ikKhYOTIkej1emQyGdNmfkBichJphw7wxesv0XfYKMZNeQaFQoGTszNeXt4AaNRqJk2dzpD7JyMioqus5MLFi+irqjBYrYgOWsxmM3K5AkEARBHOnIHmzW2fS3WmlNlk4ss3X+aDaU8ye9kGQKR///4sXbq09kE+8ghezs6Emc0kqdV8s2YNX+zfX8t9dinXun4vf/3S34dyuRyz2QzA1KlT+eijjzh69Cj79+9nwoQJaDQaPDw8OHbsWD0+nTuPFHIuISEhUTckUUpCQkJC4l+LKIqkFpRzsrQKQeD67pzrdH0DkMsENIIMo9VKakE5OpOFlp5O19yeTqfj2LFjxMXFkZaWxsmTJykqKsLX15eoqCjCw8PRam9vudmdxNHRkT59+tC7d29yc3PZu3cvcXFxbN68mc2bNxMaGsronBwaX9qqvdopZRVFUgrKMVpuj0PqaqhkAnqL7bNx0yhR3WL4+fr164E775KyWEUuVBo4XV5Fqd6EpTrQ/dIzJGLrPKiQCfg42kpJ3TXKv0WgKiwsRBAE3NzcbnlbMpmMXr164e7uzty5c9m6dSvLly/Hx8cHf39/5HI5CoWCLl26cP/991NaWsqxY8fsZV/NmzfH0dGxXvssKSlBp9PZu/utWbMGT09PPDw8ePLJJ2uJT7169WLatGlXZErVhcTERKZNm8Yrr7xyxXu5ubk0atSI++67j4EDB+Lj40NFRQXFxcW0bNkShUJBRkYG27dvtzuTLpzIoWlwKCEtw5ArFMTF7AJA6+RMZfmfzrG7BkSxfO539IwairObG4WFBZzNyaJzj7tRq1WU6iopkctxMRmRyxXI5TKwWOD0aVsNczUKpZKn336fB/p0Yf/2zQzsP4DvZn9IYmIibdu2BeDQoUN0dnQk12TCSxCY4OpKt9Gj6fbxx/btbNq0iQsXLuDr68vcuXPp168fAP369WPevHncfffd6HQ6Fi1adNVzVYPVauXXX39l7dq1NGvWDK1WS0FBAUeOHGHs2LG4uLjwyy+/8NBDDwGQnZ2Nh4fH3yYI1YSc1ymLT0JCQuI/jiRKSUhISEj8KxFFkbTCCk6UVqGQ2bq61YnrTCBsrhw5JquVEyU6BCD0EmHKYDCQkJBAXFwcKSkpmM1mu9uoY8eO7Nu3j8jISLp163YbjvDvQRAEAgICCAgI4L777uPw4cPs3buX9PR0kg4fhrNncXRywsnJCVW1gyW3REdRlQnVHRKkasallsuorO7O18bH5aa3deLECZKTkwkODr5jLimrKHKiVMeJEh16sxURW/d
BhSBDJqvtHBFFEYtoc1KdLtNztlyPi1pJCw9HvO9QwPu1KCwsxM3NDblcfuOFr0JNZ7yaPKiazniiKOLj44NOp0On01FaWsrkyZMZOHBgrVw1nU7Hpk2biI6OZsGCBezcuZPRo0fTsmXLOu2/tLSUMWPGUFVVhUwmw9vbmw0bNtTpurw0U+pqzJ49m/nz56PT6fDx8eG1115j8uTJVyy3a9cuPvvsM7sDaPbs2bi6uvLGG28wadIkFixYQGBgIH369LGv88PHs0hPz0ClUqF2cOC5WbMBGDx+MnPef4uV8+bwyEuv02/4aMqKi5g+fgRWqwWj0Ui/EWPo0e8enJ2d0VVUUKxUoRRFHMxmrFYBhUKBYDBAYUGtcWoctDz4wmv8+vUnvPTwUZYuXcoTTzyBTqfDaDTSvn17ljZtylqdjl9LSnCorIRvvrGHkgP06NGD+++/n7Nnz9qDzgHefPNNnn32WXu3wjFjxnDfffdd9bxarVaio6NRq9Xs2bMHBwcH+zXzww8/oFAo2LBhA9OmTePzzz/HYrHg5eV1pavrL+TkyZOAVLonISEhURcE8WoF7xISEhISEv/POVNWRVJ+GTKhboKU3qDnwvkLeHi44+x8Y0HDZLFiRaSlh5ai3CwOHz5MUlISJpMtID0wMJBOnTrRoUMHXFxcqKysZPr06QQFBfHiiy/e8vH90zhz5gyVjz6Kw9GjiNWOi5TevXF5/nmqfAMxiyLqmxQy6oPJYkUUoHtjj3pnf9XwzTffkJSUxPPPP09oaOhtHqGtlDGloJziKhMCoJTLkNVRrKsRqExWK3JBoImLhhYeTihv0RlW131PnToVf39/XnrppTqtU5fOeDVZUMHBwXh6erJ69Wr27t0LQIcOHRg7diyurq61tltQUMDq1auJi4sDoHXr1owaNcrugPq3cbHSQNy5ElR1vFasVgtn8/JAhEaNGiKT2e49g0HPhQsXwGrFu7ISeXVZnFwutwmNXl7g7WPfjtFiRS6DPs28kcuust9336Vi6VIKCwvx9vZG++ij8MILAMycOZOSkhK++OKLmz7u0tJSvv32W3sp3FNPPUVQUBD79u1j4cKFPPjgg3Tt2vWmt3+n2LBhA+vXr2fKlCm0b9/+7x6OhISExD8aySklISEhIfGvQ2eykF5kC76us0OqHs9oRETMRj06k5nd5/I4smophspy/P397ULU5R3qHB0dady4Mbm5uZhMpr+sq95fRePGjaFhQ6z5+eh0OioqKjhdXMy5g0cIudsLJVYEJydUqjvr7FFUl/GdLdcT4ul04xUu4+TJkyQlJREUFERISMhtH9+5Cj1JF8v+zDe72kT/OgiCgEIAuSDDbLXlpRVWmejQwBXHmxTh6kppaSlms/m6eVK3ozPepEmTiIyMZPHixRw5coTU1FRGjRpF9+7d7ct6eXnx2GOP0bdvX5YvX05ycjIpKSn06NGDoUOH4uJy8065fyLu1SWpZquI6iqZeJdTWlqK1WLFw8PdLkgBqNUavLy8yC8ooNTFBa/ycqwmExaLBavViiI/H0GtBhebCGgRRXwc1Ne+TrVahOrfsVZRBJ3u1g+2mry8PL7++muKiorw8fHhmWeewcfHJpi1atUKgJSUlH+kKFXjlJI670lISEjcGEmUkpCQkJD415FWUI7BbEVzU+6Rq0++RET0ej26ykp0Oh1WqwgIOLq50nf8A9zV3M/eHv5atGjRgtOnT5OTk3NHHDh/OxUVyAQBJ0dHnBwdGfPII0QHtcWISHl5BeXlFahUKpycnXB0dEQm3H53jyAIyAQ4U15FoLsjinqKPpd23LvdpYZ55XqS8suwiLeerSUIAkq5gFwUKTeaOXyuhE5+bndUmKoJOa/J6RFFkcLCwloi1MWLF2utU9MZ71InVF1o0aIFb775Jlu2bGHz5s0sXryYgwcPMnHiRPz8/OzLBQQE8PLLL3PkyBFWr17Nnj17iI2NZdCgQfTr1+9fI/4q5TIaOqn
JLa1CvDS37SqYzCbKy8tRKm0h6Jej1Tri7mahuLiYUmdnPMorMJvNiKIVk8mE7NRp5IFKrGoHZAI0cXG49sC0WrtzS7Raa4lSM2fOvOnjTU1N5YcffkCv1xMYGMhTTz2F0yXdPN3d3WnYsCGpqalYrdbaXUD/AZw6dUoKOZeQkJCoI5IoJSEhISHxr6LMYCJfZ0QpE+o16a8xc9RexSZEVVbn3FgttrI0pVKJi4sWraMjyOSIOKN2cbvhPkJCQti5cyeZmZn/TlHqMpeEQ1N/nDxsQp1Jo6aiooIqvZ6iwiKKi4tx1GpxcnJCrVZzLTHwZlDIZBgtVsoMJjwcVHVe79SpUyQmJhIYGHjbP5/CKqNdkFLLbl+2lkwQ0FRnaR05X0qXRu63HPJ+LQoKCqisrOT06dPMnTuXrKwsSkpK7O8LgkDTpk0JDg62C1G30hlPqVQydOhQOnbsyKJFi8jOzmbWrFkMGjSIQYMGoVAo7Pvt2LEj4eHhREdHs2nTJtasWcPu3bsZPnw4kZGR/4qw6UYuDpwq02MWRZTXOZ7i4mJE0SbcCNe4r1xcnDGbzTbxytERF10lFqsVi9mM1WLGnJODOSAINyctXte7h+6AU2rv3r0sWbIEq9VKp06deOCBB64qLoaFhbF9+3ZOnTqFv7//Le/3dlETch4WFvavuO4kJCQk7jSSKCUhISEh8a/ibLkeiyjWvWzPzp8lRgaDnspKHTpdJRa7EKXA2ckZR0ctSuWfkzRRFG3lYmVVhHpdfwIeHByMIAhkZGTUc2z/TwgKgvPnbd8rlZQHBWMpM6OSy1BqHTEZDLz+wFisVlupkEGvJz/vLN9ujKZBo0Y4OTrWKjW6Gs+PvZeCC+dYGH2o1oSvb3Nv1iZk4+TiigywilBuNNdLlLpTLimTxUpyfhlm653pPlgT8l5uNJNZVEFr79tTumaxWDh9+jSZmZlkZ2ezY8cO0tLSMBqNuLu7o1Ao7A6o4OBgAgMD0Wg0t2Xfl+Ln58dLL71ETEwMK1euZMOGDcTFxTFx4kSCg4PtyykUCvr370+3bt3YuHEj0dHR/PLLL/zxxx+MHj36joXW/1W4qBQ0dFZzukyPXBCvmi2l11dRpavCwUGDg8N1HE4IeHi4Y7aYKdZVodRocNDrkSlVWCxmLCo11qJCzqQfRefZ99pdDq/jlKovoiiydu1aNm/eDEBUVBT33nvvNe+XGlEqNTX1HyVKSaV7EhISEvVDEqUkJCQkJP41mK0iZ8v1yIT6uaRAxGg0YTDoyc/PtwsjCoUcV1cXtFotKpWKq7l5bOViAmcq9AR7OF03I0ir1dKkSRNyc3MxGo3V2/wX8corYDJBYSE88gjlSg0iFfZJq6u7Bz9t3lW9sMiSb7/g2MF9aLRaiouKKSkuRlvtnrKJG7XP5ZncHM6eOI6TqxsJsfsJ73LXVYdR89mXG8x1Hvrp06dJSEggICCgzp3c6kpmUQUVRgvqO9h9UCYIKGQCZ8r0+GjV+DjWP7vLZDKRm5trF6FycnIwGo329ysrK3F3d2fYsGF07twZf3//v6w8ThAEevbsSdu2bfn999+Jj4/nk08+oUePHowcORKtVmtf1tHRkfvuu49evXqxatUqjh49yqeffkq7du0YNWoUvr6+f8mYbzeCIBDi6UxhlYkq09WuJ5Gi4mIQqM60u9G1JuDt5cX5CxfINxjwU6lQGo3IlSqMLq54/bGVU5tW8fb+PYwbN44OHTpcef3eJqeUyWRi/vz5xMXFIZPJmDRp0g27lAYFBaFUKklJSSEqKuqm9nsnkDrvSUhISNSPf1YBtoSEhITE347BYGDq1KkEBwfTpk0bJk6cWOd1RVGkefPm9O3b94bLrlmzhoMHD9p/jouLY+zYsfaff/jhB0JDQwkPD+fs2bP06NHjhtscFBVFbnbmDXOE7u8eQXZqIkajgZKSYs6ePUtBQQHnz5zmmaF9cXFxpoFfAxo1aoSbm3t1OHftba78eQ5
F+RcAUAgCJouVcuPVRZAHH3zQ3oEqISGBmJgYcnJyWLduHc8///wNj+v/DX5+8O238Ntv0L8/BosVELGKVqxWCxaLGbPZjMlkwmgysX3VMqLGTcTT0xOt1gGrKFJaWsqZs2fJzT3BxYsX0OkqMZtNiKKVLcuX0nf4GKLGTmTz70tuOBx9tcutLmzcuBG4/S6pcqOZM+V6FNXi5Z1EIQhYRJHMogrq0lxZp9ORlJTEqlWr+Oijj3juuef49NNPWb9+PWlpaSiVSsLDwxkzZgwzZswgKiqKNm3acN999xEcHPy35DW5ubnxxBNP8NRTT+Hm5kZMTAxvv/02cXFxVxyzj48PU6ZM4cUXX6RZs2YkJCQwc+ZMfv31V8rLy//ysd8O1HIZrbyckckEjNba13dFRQUmowlnJ6dabs7rIQgyfHx8kCsUnJcrMCsU6N3ccSq4SMS29XQRRcrLy/npp5/4/vvva5VrAjanVI0odZNOqfLycj7//HPi4uLQaDQ899xzNxSkwFbe2aJFC44fP05VVVW993unkEQpCQkJifohOaUkJCQkJGrx6quvIggCmZmZCILA+ZpyrDqwc+dO3NzcSExMJDc3l+bNm191ObPZzJo1awgPD6dLly4AdOzYkd9//92+zBdffMEvv/xi76wUExNzw/3PW7aKpItl133iYjIZsVqt5F/MR+lo6zAll8vQOmrRaDTIZALu7jcOp13584+063IXHt6+yASwWm0ChJvm+hP1mqDnjIwMhg8fzr333nvDfdUVURSxWCx24cdsNtf6qnnteu9d77VLf67L8v6de9AwrD2GiisFgOzkBEqLi2kW2oaCgkIA1vw8BxcPT+4aOBSdoZKyslJboLdSiQBsWraU1776ETdPLxZ88TFnTp3Exc3N1sqems/WgkwmQ6DauVEHzpw5w9GjR/H397d39bpdnC3X28v27jSCIKCS2cr4ivQmPC8rXaxLZ7yaUryrdcYrKSnB2dn5HxEe3q5dO0JCQlizZg27du3ip59+IjY2lvHjx18RLh0cHMxrr73G4cOHWb16Nbt27eLgwYNERUXRp0+ff8Tx1AdfRzUtPZ1IK6jAYLGgkskQESkpKUEmE3B1c6vX9uQyOT4+vpy/cIFiZ2c8C/KJ+OkrtBXlBEyezPMDBrB48WISEhLIyMhg1KhR9OjRw3ZtaLX2a0S8CafUhQsX+Prrr8nPz8fT05OpU6fSsGHDOq8fFhZGSkoK6enptG/fvl77vlOcOnUKJyenKzqwSkhISEhcHUmUkpCQkJCwU1lZybx58zhz5ox9otGgQYM6rz9v3jwee+wxMjIy+Pnnn5k1axYAu3bt4umnn6ZLly4cOXKE559/nnXr1rF9+3bmz5/P1KlTCQoKYtq0aRw7dozRo0eTk5PDgw8+SJs2bfjkk08IDw+3P6U/cOAAL730EuXl5YiiyKxZsxg2bBhdW4fy6jc/E9a2Hcvnfk/0+lWYTSZkCgUPv/g6jYNCMJlMWK1WLKJo6wKntYlROp2OvMvyjPo29+bhF2ewb9tmSosKmfTsdAaOuZ+FX31C4cXzzJr6GGqNhhdnf0WD5sG8+dorJOyPwWg00rx5c959910cHR0pKSnh7NmzxMXFUVVVhU6nY/Pmzezbt4+9e/cyY8YMTCYTS5cuZe/evWi1Wlq1asWBAwd49913MZvNpKSksG3bNkwmEwC9e/emcePGHD9+nJ07d+Ln58e5c+cQRZFevXrh7e0N2CZIR44cwWKxIAgCPXr0wMfHh4sXL3Lo0CGMRiOiKNK+fXsCAgJu6fpxrqqi7alTVDg5kRUUhFKhQCYTUCoVUF1SWfO1f+sG7h58Ly6uLvb3Jk97GQEBQYBz586j11ehVKpQKJUc27cHDx9fnNw9sSAQEt6BbauW0XfEGGpcbOfPXaCsQocggMbZhQvHzxG3JhlXV9erfrm4uCCTyexZUkOHDr2tLimz1crZ8qqbKCe9eWoE0tNlVYiVZdf
tjOfr60uLFi3q1BmvptPepZ3v/m40Gg3jxo0jMjKSRYsWkZiYSEZGBsOGDaN37961OrIJgkDnzp1p3749f/zxB5s2bWLVqlXs3r2bESNG0LFjx/9XodTNXG1ZTqkF5egtVvQV5VgsVtzd3ZHfIJftaghyOW7e3hSey+N4zDa6dO8GgYEwfDihMhlvvfUWGzZsYPv27SxZsoRDhw4xadIkfGtEKUGot1MqKyuL7777Dp1Oh7+/P08//TQuLvXLQ6sRkVNTU/8RolRpaakUci4hISFRTyRRSkJCQkLCTk5ODh4eHnzwwQfs2LEDBwcHZs6caS/He/TRR7n33nuv6u4pKipiy5YtfP/995w6dYrBgwfzzjvv2CeGaWlpfPfdd8ybNw+A6OhowsPDmTZtGmATrmpYsWIF/v7+/P7774SHh3PixIla+xk+fDgrVqygR48eWK1Wu1h1qeuj99AR3HPf/VRWVpJ2NJ4v33iJD5eswsnJEblcjq+vD54enoCIyWSiorKSKr0Oq1XkxIlc+3bKKyp46Ys55J06wftPPkRgeAfuGjKCDUsX8NArb9EkKAQEWPrtZxTnnSMkJARBEEhJSbFPmJOSksjLy6OgoICEhAQqKirYs2cP3t7enDt3jg0bNnDq1CliY2MZNmwYSqWS3bt3YzQaSUxMpKysjD/++IOoqChUKhWlpaWsWrWK+++/H71eT3FxMf3792fQoEEkJSWRkJDAgw8+SElJCTExMUydOpVGjRohk8mwWCwoFAo+/vhj3njjDXx8fNDpdEyfPp2HHnqIBg0aoFAoUCgUKJVK+/dXe63W+2YzqlGjECoqoKICRowgZ9BAMgor0FzmGKiqrCD2j218t3Y7Xl7eV1xLlbpKQESjcUCrdcDJ2ZmEvdEUnMtj5iPjEUURk8FA0cXz9B4+GmV1FzatoxYnJ0csVisyQUZlaQnJiYnXLGUTBAFRFImPj6dBgwbExcWRnZ1tF6wuFbBuJv+rWG/CYLGi+kva1duuY73BgNFsIbkgnzkLvkO0WgDbsTZp0sQuQgUFBdVLANDpdBgMhn9ki/vmzZvz+uuvs337djZs2MCyZcuIjY1l0qRJNGnSpNaySqWSe+65h27durFhwwb27NnD3Llz2bFjB/fddx+BgYF/01HUnyYuDjgq5RzNK6TMKqJxcsLJ2ale27CIIiaLFUGApm5O+JbLiM2/wGy5wCsDB6KVyUAUUSUmMtLPj46vvsrCRYvIysri3Xff5b6ICHpiyzQTRRGqqmytTG8gyMTGxrJw4ULMZjPh4eE88sgjN3WPNWjQAA8PD1JSUhBF8W8XgqTSPQkJCYn6I4lSEhISEhJ2zEYjJ0+epJXRyIcPPcTRnBz633svKW+/jW/79sx95x24RmnFkiVLGDRoEG5ubri5ueHr68vWrVsZNGgQAAEBAdx99923PMYDBw4QEhJiz5iSyWT2ibIoihhNRs6fP8eRvXvYsPhnKstKUSgUnD99Em9vLzQaLYIA+io9hYUFVFXpsVgsVFXpqKrSI4oiJSWlyKvLrdrddTc6nQ43Lx9kMhkX8vJw9/Kx7ctoxGAwIAgQt2sHpQX56KtL1URRRKPRcOrUKSorK5HJZJw+fZqysjKsVisVFRVYrVb0ej0XL14kJyeHRo0aYbHYys9CQ0M5d+4cWq2W3NxcKioq2Lx5s13kU6lUNG7c2H6uo6KikMvleHl5cezYMcLCwti6dSudO3emV69eyOVyFAoFcrmcQ4cOUVBQwOeff253LsnlcoxGI02aNEEul9da/vLvL/2yu1H27IFL82Z278Z5nC2PzCrW7hQWvWENgS3DaBr4Z+c0AKNBz6xnHuN4eioyuQJ3L28eevkNyoqLOHZgL4Etwyg4fx5HZ2esosj506fIP3OKBk39AdBV6pArlWg0GpQqFb27deGpUUOoqKigtLSUESNGcP78eX744QfKysooKytj69atmM1mnJycOHDgAAB5eXlYLBa7oFF
ZWUl0dDSPPPIIrq6u5ObmsmbNGtRqNe+88w7vvfce69ato1GjRmgvKWcqN5ht83Pgk1em0Xf4aNp37X7d6/uFccMY+fATdB9wZXDzpR0GRURMRiN6vQGDQY/eYMBanaG1b9tGQsI7ENq2Hf5+vgQHBxMQEHDVbmwzZ86kpKSEL774gvnz57NmzRrWrFlDXFwcs2fPtpfUFhUVAVzXTfV3IpfLGThwIBERESxZsoT09HQ++OAD+vfvz5AhQ64QPJydnRk/frw9DD0xMZGPP/6YiIgIRowYgY+Pz990JPXDw0FF1s4NlMjUhN3VG4NFRBAsyAUBuSAgQC2hRhRFrIDVKmIWRQQBHJVygj2c8HNSI/h2oDh/OGvWrOG7775j2rRpKL76CpYuBaBpt2689vHH7Ni9m/Xr17MxOprgc+ewiqLNKVUjTF0SPH8poiiyceNG1q9fD0C/fv0YNWpULVdbfRAEgbCwMGJiYrh48eLfHmJ/6tQpQOq8JyEhIVEfJFFKQkJCQgLS0mDZMpquXIkMmLB0KchktAeam0wkffQRvmo1ODiAry+MGQOjRsElf3jPmzeP8+fP21tzl5eXM2/ePLso5eRUvyf4daWsrIwjR45w+PBhDAYDRqMRXaWOb996iQ9++Y12nbtQWVHBsLaBFObnI1OoMJstlBSX4OJVCYBSpcRglCNUB6QLMsHe2r5Bw4Z4+zawdTdTKmnU0A+fho1RKBQ08POjSZMmiIjIBBnPzHibDgGNsVgs9mwni8XC999/T+PGjenbty9r1qzh4sWLKJVKZDIZSqWSLl26kJ+fj0qlIiAgALPZjNFotAtGRqORRo0a0bdvX9vEr5pz586Rl5eHyWRi3759gE08qKysZOvWraSkpHDx4kVWr15d65zVZJ707t271usHDhywCzN1pUbQCj91ioGnT9tL8fINBtZ/9TnBA0aA1YpoMdvLfNYtmU+f4WPIz8+3GSoEAQGBpd98htbFjTd+WITVauXwzi3M+/BdQiM6EdYxkqrKSh56cQZd+t6DIMC8j99j35YNPDFjJgAaBzUiUKHTobLCnPnfITdWERoaipOTE2fOnMHd3R2lUsno0aO5cOEC8fHxdOzYkVdffRW9Xk9hYSEfffQRBQUFjB49mtLSUkpLS+nQoYMthP3MGdatW0dISAhBQUHExMRw99138+mnnwKgUCjsLqvGnXrg4NMIo9XMlDdnVQt/Brugd+PuaH9S4/YqKy2lUq/HaDBgtf7pAJPJBBwcNKjVGg5t30yDxk14cuJkGjlfKUTVhcsz3goLbblf/1RRqgYfHx+mTZtGbGwsy5YtY+vWrRw5coQJEyZcNS/Mz8+Pp59+mvT0dFasWEF8fDwJCQn06tWLwYMH4+jo+DccRd3JyMjg6JE4QkJCGBDckPOVBk6XVVFpsmC0Wu2iaA0ithJPmSDQQKumsYsD3lpVLdF44MCBFBYWEhMTw/yff+aRNWv+3Mb+/cg/+YR7Xn+d9u3b89vcuRi3bUNXWYlapcLKtbsomc1mFi9ezIEDBxAEgXHjxtGrV69bPgetWrUiJiaGlJSUv12UkpxSEhISEvVHEqUkJCQk/sukpcH778P+/aDT4aVW01erZaujI1Hu7uQaDORevEjLRo1AJgO9Hk6fho8+snVZi4qCV1/lyLlz5Ofnk5eXZ3/iXVJSQpMmTcjPz7/qrl1cXCgtLa33kLt160ZmZibffPMNRqORjIwM9Ho9Tk5OyGQyNGo13t5eWMxmPH0bkF9QwPIfvwWgrLQcrbMzggAODho8vTxx0GhAEKiqqsJBo7GJLNVlbgB6nZ7ioiKcnJyq19Pi6OiEo7MrotmMs7OzbVwDoohev5qX161Bq9Wi0+nIzc0lLCyMbdu2ER4ezqOPPsqZM2coKCjAYrFQUFCAp6cnr7zyCmFhYcyYMYMXX3wRR0dHHn30Udzd3fnss8/Izs6mW7duPPnkk7Rp0waLxcLBgweJiIhg165dnDh
xgvfffx+LxUJKSgoHDx7ktddeIycnh7FjxzJ06FCaNm2KXq9Hp9NhMpkYP368vbuhxWIhMzOzVolfjaB2+feXfl36uk9+PnKFwiaeiCJ6lYryokJ0xYVo3b0wVlUBIqIIr31tK+HUXZY/c++Dj6GrqsJkNCKTyWgSHMqmpQt5+v1PsVosfPP6dMrLK+wCybCHnwQgP7+An3fFAWC1WFFpHNBXVpCReIz8C+fZunWrPU+poqKCBx98kO7du5OXl8f58+c5deoUq1at4vTp04SFhXHo0CGsViu7du2iVatWRERE8O233zJr1ix27tzJhQsXqKioICsriwceeID33nuPKVOmIIoiZ8+eZdWqVVRWVuLk7smAsZPoOmAQn01/mr4j7yP8rl4c+mMrO1cvw2I2gygy6rGn6Xh3H+RyOSazicrKSsrLywAwWyw2sdVgAKC0tAytszMvjRvK3YOHkXw4ltLCAqLGTmDiM9PZ+NsislISmfvB26z67nM++ehDoqKi+OSTT1i2bBlmsxkfHx9++OGH606cd+3aZc94A1iwYAG///47e/bs4f777+ett96yC2WHDx/mlVdeoaysDIvFwowZMxgzZgwnTpwgPDyc5557jg0bNlBaWspXX31FVJTNBXatbLisrCymTZvGxYsXMRgMPP7440ydOrXOvyMEQaBLly6EhYWxYsUKDh48yJdffklkZCRjxoyx37OXEhoayuuvv87BgwdZs2YNO3fu5MCBAwwePJhevXqhUPzz/mS2Wq0sX74cQRAYM2YMaoWcZq5amro4YKjuBlpuNGO22NxRMgE0CjnOKgXOKjmKa7iTBEHg/vvvp6ioiMNHjtBPrcb/0nt1zRoICsJn3Dieee018uLjEbZsQW8wcFgUcT11itDQ0Frb1Ol0fP/992RmZqJWq3nsscdo06bNbTkPoaGhyGQyUlJS6NOnz23Z5s1y8uRJKeRcQkJCop788/6HlZCQkJC48xiNMHcufPkllJaCqys0agSCwBwXFx45cYJX8vKQCQI/NGtGo+rSl0cLCrjXzY17vb2hrAyWLYO9e5nn48O4sWNrlWC4ubnRv39/Fi1aRERExBVDmDRpEg8++CBr1qzh6aefJigo6IbD3r9/P3FxcXTr1o333nsPk8mEg4MDzz33HA899BCrVq/BbLFSVlHBiIenMH3cMJxc3YjsMwAAVzdXvHx8kMsVuLm74+Roc29VVFbYxCiF0iY8abV2EUCltnXmKisrx2q1UlRUjIePLyMefJRPX3sBjYMDL87+ijGPTyV6/rdERkbay2VqxKbLUSgUNG7cmPT0dLuDbMiQIcTGxhIeHo6bmxt33303btVdtIKCgli6dClPPPEEOp0Oo9FI+/btWbp0KVqt1l62B5Cfn49cLsff3x9/f38WLlzIW2+9hclkQi6XM2fOHDp37sy2bdt48cUXmT9/PiaTiaZNm7JmzRq7Q6zezJkDx4/bf2zUrx9dP/yQk6U6kvPLUXu4V7sxREQA0SZQiYj27/X6Kqz5+SjkCuQKOQc2r+OuAYNo6OdHQWEhMkFg9c/fsX7hTzQOCGLycy/h27gJIrD0m8/x8PZhwJjxoFCi0ZfxyEMPYjabKSoqYsaMGfTp04eSkhL27dtHVlYW586dw9HREZPJhI+PD0OGDEEURcxmMzqdjt69e2OxWCgsLMRqtZKTk0OrVq3IycmhTZs2+Pv728WuqqoqlEolu3btokOHDgQFBdFh1CQsCCjkcqrrqBBFkdCITkT07IMIFJzL49MXnqRJSEsUChUGg5H8ixfJyspCpVIhk8lRKBWo1WoA3D3c8fT2QSFXIJrNfL92my2Ev1cn7hlzP4PHTWLnmhUMffAxJt83mmAPJ5YuXUpGRgYHDhxALpezaNEinnrqKTZu3FinjzY5OZn58+czdOhQ/ve///HTTz/Z3yspKeHxxx9n06ZN+Pn5UVBQQEREBN26dQNswc9t27blnXfeYcuWLTz33HN
ERUVdMxvOYrEwfvx4Fi9eTGhoKDqdji5duhAZGUmnTp3qdUk6Ozvz0EMPERkZyZIlS4iNjSU5OZn77ruv1n1agyAIdO3alQ4dOrBjxw62bNnC8uXLiY6OZtSoUbRv3/5vzyy6lAMHDnD69Gm6d+9eKztLEAQ0CjkahRxvrfqmti2TyXj88cf55JNP+LmkhOnnz+OqvmRbn30GzZsjREbSaMkSDk+eTFJiIufDwzF//jl33XUXo0ePRqvVUlBQwNdff8358+dxc3Nj6tSpV2R93QparZbmzZuTmZmJ2Wz+2wTEkpISSktLpZBzCQkJiXoiiNdK/5SQkJCQ+HdSXg5PPw07doBSCV5eNwylvSYWC+Tn29afMAHeew9u44RAr9eTkJBAXFwcKSkp9g5yISEhRERE4OHhwYkTJ0hNTSU3Nxe1syudxjyA1WxGhojGwQEHjQaNRoMgXDuzJD//IpWVlYCASqVEq9VSVFSM2WxCrVbj5uaOIAiUl5dhNtscVBqNGhcXVxwcNJitNidCzyaeaJV163y1evVqtmzZwjPPPEPr1q0BW8mjs7Mzoigyffp0qqqq+P7772/1NP41zJ4Nl5R7MWIEvP46RouVPacKMVlF1PLr58ZcuHgBfVUVVlFk62+LSD50gE+WrETjoOXixQucPXmCVm3boVKpWbtwHmsX/8Iv2/fV2obJYkUU4K7GHjirbNfi+vXr+d///sf+/fsRRZHBgwej1+vtwd2bNm3ikUcewdHRkZCQEA4ePIhKpeKHH35AEAS746ekpMTe3XDq1KlERUVhsVhwdXUlOzubM2fOcP/993PgwAEsFgtZRgV6UUCOyKuT7+PeSQ/Tte8AMpMSWPz1pxReOG/LGjuezTvzluLZoCGfTn+a7kNGENa5K3K5HEdHRxRyOYIg8HCvTszffRgf3wY81Lcbb3z9I63adwTgicG9mTrzf7Tp1IUXxg1j6AOP8cC4MQS5O3Lfffdx+PBhXF1dAewuwKSkpGtmSl3qlPrqq6/47bffaNOmDZ9//jlFRUW2slVRZNOmTYwfP57mzZvbP4OioiLmz59PQEAALVu2RKfTIQgCpaWleHp6Yjab2bhxIx999BF79uyp9fmlpqbSoUMHQkJC7K+VlpYyc+ZMHnjggXpelH9iNBrt3eOsViuhoaFMmDDhutlRZWVlrFu3jr179yKKIoGBgYwZM6bWsf5d6PV63nzzTYxGI7Nmzap317q6UlJSwocffohfSgqPHz9eO5fM2RkWLICmTfnuu+84duwYTzzxBL/99htlZWW4uLjQs2dPdu3aRUVFBY0bN2bq1Kl3xEW0ceNG1q1bx/PPP3+FS+uvIjExkW+//ZaoqCiGDRv2t4xBQkJC4v8jklNKQkJC4r9ERQU8+ijs3g2enraMqFtBLocGDWyuqYULbQ6s2bNtr98kRqORpKQkDh8+THJyMiaTCYDg4GCCg4NRqVScPHmSVatWodfr7etptVpCgwJw06hA445WpazT/kTRSlWVHo1Gg8FgxGKx4urmhl6vp6oKLNUODldXFxo2aoSuspKysjL0egN6vS0bytHNHR9nLQ6Kuof1hoSEsGXLFjIyMuyi1OTJkzlx4gR6vZ6wsDDmzJlTjzP3N1NeXvvnageYSi7D301LRmEFFlFEfg0B1GA0oK/So1Aq2bDoF47s+YPPf1uLxsEWmOzm5kZVVQNKSkrw9fVl+AOPMueDmZQWF+Hqbgu6t4q28OYmzhq7IAW2vLPMzEx73plNgITnnnuOsWPHsnv3bgYPHkx6ejrHjh0jKSkJo9HIa6+9RsuWLWtN+GtyvpRKZa28IU9PT/R6PXK53B5ynJ9XTL7OiINCjlwmQ61Wo9Vq+eTlZ7l/6nTC7+qJyWTmmaF9MBgMtmwtmQyVUomrqyse7h64urmir6qisrp8qqKsAqt4AYvFTJVej15fhUajQSaX28UmsfpLUZ2RJooir732Go8//nh9P9U/Px+DAY1Gg1a
rpbi42P66KIqEhYWxf//+K9Y5ceIEarXa7hqRXzLGayGKIh4eHvaywduFSqVi5MiRdOrUiUWLFpGens67777LkCFD6N+/f3XGV21cXFyYOHEiffr0YcWKFaSkpPDhhx/SsWNHRowYYXcn/h1s2bKFsrIyRowYcccEKbDdd88++ywfffQRaysrGVlQ8GdofHk5PP88zJ9fLfwLtG7dmpkzZ7Jy5UrWrl3Lpk2b8PDwYMiQITz33HM378S8AWFhYaxbt46UlJS/TZSqyZOSQs4lJCQk6ockSklISEj8V7Ba4YUXbIKUlxfczsmBi4stc+q332xB6K+8Uq/VTSYTKSkpxMXFkZiYaJugA40bN8bHxwe5XM7JkyfZtGmTfR2ZTEZgYCCtWrWiVatW+Pv721wnZVUkXSy7ouPbtdDrbR33tFpbGVdNkLiXlzfnzuVhtVqRyWSUlpZhsVjx9PTA0dERvV5fLU4Z0VdVEbNrC2XNG9OzZ886hSMHBgYik8nIyMiwv3Z5IPn/Kyoqav98SW5Pc1ctFyoNlOhNaOSyq5a2lFXni21fvpTYP7by4fzfcXJxtb8vl8kx6m3CTJVez+HoHbh7edsFKVEUMVqsaJVyQjz/3PeFCxfYuXMnp0+ftpdD/vjjjzz//PMEBwfbA+eHDRvGsGHD0Ol0vPXWW8TFxaFWq9m/fz/l5eVUVVXxzjvv0LJlSyorKzEajVccQ0hICFqtll9//ZXx48fjqlaSc+Ycam8vLFYL5eVlnD5zhrKSEhxd3bFYLBzZvYPK8jJkgoBapUYul6NUqaqFKRdkggyt1hGt1nZNeXp5IlMoEbF1G7xw4SIyuQyz2YzBaEBEROvkjK6izC7MDR8+nE8//ZTRo0fj4eGByWQiOTmZ9u3b1+mj7d27NzNmzKBv376ATeSroVu3buTm5rJjxw769esHwLFjx64aKn4p3bp1Iysri5iYmFrleyEhIbi4uPDLL7/w0EMPAZCdnY2Hh4e9y+at0KRJE1599VWio6NZu3Ytq1ev5tChQ0yaNOmaDqiGDRvy7LPPkpqayooVK4iLi+PYsWP06dOHQYMGob1Gp7k7RUFBAdu3b8fT09P+mdxJGjZsyJNPPslXX3xBg4oKuhuNf5bInTwJL75I57w82qamYlm4EO0jj9CgQQMUCgUqlQqVSsXZs2c5fPgw3bt3vyOlbU2bNsXR0ZGUlBRGjRp127dfF2o679WI3xISEhISdUMSpSQkJCT+K6xYAZs2gbv77RWkanBysjmlfvwReveGzp2vu7jZbCY9PZ3Dhw9z7Ngxuzjk4uKCt7c3VquVvLw8zpw5Y1/H09OTsLAwWrVqZRcALqeBo5pMhQyD2YpGcWPHlk5XBYBW60B5RTlWqxmrxYJCocDT05P8/AJkMhkKhZyKigqsVgteXt5oNA5oNA7ojEaqKso5m5FMbkIcmzZt4q677qJfv37XdVKo1WqaN2/O8ePHbSHrt+pa+7u5XJS6pNuiXCbQ2suZ2LxiDFYralltYcpkMqLTVVFZVsyiLz7Gu2EjXn/kfgRAqVLz7ZqtGI0GPn1xKrrKSuRyOZ4+vrz30yL7NuZ99iGePg14/YVnapUJLliwgAEDBtgFqfPnzxMfH0+7du3Yu3cvkZGRtYat1Wp5+umnGTlyJGvXriUqKoqBAweyZs0aKisr2blzJ2fPnuXnn3/m5MmTdleGpfqaWbt2Lc888wyzZs1CbzTRd/wD3NV/ECajCX2VAZkg45GXXueHd2fg6OxMi3YRePraJvC+vj6AiICAi6srMtmV169Wq8XJxRWFXIGHhztOTo7odFVYLFZKioo5c/oMd987ksWff8SOxfP48H//Y8KECRQWFtq7LZrNZh5++OE6i1ItWrQgPDyc+fPns2PHDgYOHGgvBXR3d2fjxo28+OKLTJ8+vVY+2fVwd3dn9erVTJ8+nfLycmQyGbNmzWLo0KFs2LCBadOm8fnnn2OxWPDy8mLp0qV1Gmt
dkMlk9O3b157LlpSUxEcffUSvXr0YPnz4Nd08rVq14o033uDAgQOsWbOGbdu2sW/fPoYOHUrPnj2v6ra6E6xatQqz2czo0aNRKuvmCL1VQkNDmfTAAywxmfA8dIgwmezPDMHNmwmorCRfJkP5zTdsKC5mQ04O7u7ufPDBB1RWVrJjxw4WL15sFwCvVzZ5M8hkMlq2bElcXBwlJSX2+/2v5MSJEzg7O/8t+5aQkJD4/4yUKSUhISHxX+DMGRgyBAoKbOV2dwpRhLw8CAuDdevgMtHIarWSkZFBXFwc8fHx6HQ6DAaDPTvHaDRy6X9LarWa0NBQuxvK29u7Tk/ZT5bqSMkvRymTIZddb3mRM2fOIJPJadiwIecvnMegN+DX0A+V0laiUlhYSEVFBU5OjlgsFqqq9Kg1any8fbAiYLZaae3tjItoZNeuXezevdueodO+fXsGDBhwTQfGmjVr2Lx5M08//TRt27a98fn9JzN+PGRl/fnzrFkwaFCtRc5X6Em4WIZFFGsJUwUF+VRW6vDx8aagoACZXE6jho2uupuaz8Pb2wut1tHmkLLaCtZCPZ1p7nZ918rPP/9MbGwsU6ZMqbMoU4Moipw/f560tDTS0tLIzMy0l5CqVCo8PDyQyWSUlZVRXl6Og6s7nUZPRqFQoFEqcHDQolQqsFpFiouLqKioBMFWJubm6kplZSWFhUXI5XIaNWp43Ry0WuNCtHdWrNLpUDhoKbt4nrQtq2jTpg0dOnQgLCzsz7KrenLu3DlmzJjBgAEDGD9+PF9++SVbtmxh8+bNN7W9fxKiKHLkyBF+//13ysrKcHd3Z/z48bRr1+666xkMBrZt28bWrVvtQfmjRo2iXbt2dzTkOjs7m9mzZxMcHMz06dP/8kDtDRs2sPv335l66BBNnZxs+z91CnNZGYUqFRYnJza1bMmxli155JFH7PfYyZMnWbhwIWfOnEGpVDJ06FD69et3W4W8/fv3s2DBAh544AF70P5fRUlJib2xxbPPPvuX7ltCQkLi/zuSU0pCQkLiv8BPP8H589Cw4Z3djyCAtzekpto68z34IKIokpWVZReiajoUWa1WVCoVcrkctVqNwWBAEAT8/f3tIlRAQMBNTVqaujhwodJAvs6IRrh6uRjYJpYWi9XeAU9e/eTfYrFAtQHBw8Mdg0FPRUUlXl5eyGRyKisrOX/hPG6e3vg6a2ji4oAgaBk+fDiDBg1i37597Nixg/j4eOLj4wkKCmLAgAG0bdu21lhCQkLYvHkzmZmZ//9FqWtkSl1KAycNIpB0sQy9xYpaLsNqMVOp06FS264Fq1VEe52OYa5urlRWVlBSUoLGwQGDxZZTFeLpjL/r9d1mFy5c4NChQzRq1Ijw8PB6H6IgCPj5+eHn50efPn3szqno6Gji4+MpKCjAarUil9tEzo4dvXBRCMhcXNBWO1p0Oh1FRYVYLFZUKiWenp6oVGpE0Va+BuDm5lpnQQpAQMBB44CDxgGruzuVBhMywYxKpeLw4cMcPnwYlUpF69atiYiIoE2bNvXK9iksLOTQoUPs2bOH9957j4YNG/LDDz/U69z9UxEEgY4dO9KqVStWrVpFTEwM3333HREREYwbN87uCLsctVrN0KFD6dGjB2vXruXAgQN8//33tGjRgtGjR9OsWbPbPlZRFFm2bBmCIDBmzJi/pcPb4MGDKSwsZEFVFVOOHcPbzQ2h+vemc2UlhYCLQsH06dNrifHNmjVjxowZbNu2jQ0bNrBq1SoOHz7M5MmTb1sGU03JaGpq6l8uStWU7t2Jz11CQkLi344kSklISEj82ykpgVWrbKHmsrpPdG8alQoRqJo7lw0aDXHx8Zw9e5aSkhIMBgNKpRIPD1sukyAIuLm52UvyQkND7QLRrSAIAmFezhzMK0Zvtl4zx0hXZSvdc6h2dNUIYNZLwpgFQYaXtzfnz52jqKgIPz8/5HIZRisU51+gjXODWttWq9X06dOHXr16ER8fz/bt28n
OziY7OxsfHx/69+9P165dUSqVBAYGIpfLa+VK/b/lclHqkkypS/Fz0qBRyEnJL6PUYKZKVwUIuLq42rPE1OprCyYKuQJnZ2d0BiMVVQbcHB1o5eWMj+O1hawaNm3ahCiKDBky5KYn9EVFRSQmJpKYmEhGRgZmsxmwZSTVhKLr9XqysrI4c+YMB7duoPWAYRSbjJiNBsxmCwqFHDc3N1xcXRGwjaOsvByLxYpSqcDxFu4BkxUc1Sqi+vfmvgG9ycnJ4ejRo3aBND4+HoVCQVhYGBEREbRt2/aGmUhFRUV0796dxx57jI4dO9702P7JaLVaJk6cSGRkJIsWLSI+Pp60tDRGjhxJjx49rnm9uLm58cADD9jD0NPT0/nggw+IjIxkxIgRt7XTXGxsLCdPnqRr165/m/ghCAITJkzgm+JilldUMCEnBydRxGI2I4giXno9gyMicLyKO1QulzNo0CAiIiJYtGgRWVlZ/O9//6N///4MGTLkpp18Nbi5udGoUSNSU1PtWYB/FTUh55IoJSEhIVF/JFFKQkJC4t/OunVQWAi3OcPjckRsnfN0lZXoDQYU8fHEfPghh9Rqe06Uk5MTKpWKFi1a2IWoBg0a3JEn/o4qBREN3DhyrgS95erCVJVOZ3NqVU+GZNWi1OUdwlRKFe7uHhQVFVFQWIirpycynY59q1dzVFfOs88+e0W4rUwmo2PHjnTo0IHs7Gy2b99OQkICS5YsYe3atfTq1YtevXrRvHlzcnJy0Ol0f3lg8m3DaoXq7nB2riFKAbhrlHRt5EFyXgGJ+ZVoXVyRqdRUltlK4TSaKwUmURSximAWraidXNCbijiTcoz+QwfgeJXlL+fixYvExsbSsGHDepXtiaLIyZMnSUhIIDExsVbGmb+/P+3ataNt27Y0atToiuurvLyc1LQ0UkuLkTs5U1FRgUwuRybTUFVVhSiKaDQalEqlPejdzc3NLlTVF6soYhVFmrpoUVbnatV0rRwzZgy5ubkcPXqUI0eOkJCQQEJCAnK5nJYtWxIREUF4ePhVQ/oLCwsBW6bbv53g4GDeeustNm/ezObNm1myZAkHDx5k0qRJ+Pn5XXO9Jk2aMG3aNFJSUlixYgWxsbHEx8fTr18/Bg4ceMtd5wwGA6tXr0alUjF8+PBb2tatolAoeOKJJ5hdVsbqc+e498wZHAEEAZVMhnzzZnjzzSvKt2vw9fVl+vTpxMTEsHLlSrZu3Up8fDyTJk0iJCTklsYWFhbGtm3bOHXq1F8aOC6JUhISEhI3jyRKSUhISPzb2b3bJhoobv+vfBFb57zKykrKysow6PWYLRZEq5UGVitdRRFDu3Y0adLEXpIXFBT0l4XzumuUdPRzI/58CVVmK0qZgKL66bnJbMJkMuPk7ATVIoC8OljaUt2B71KcnZ0wmIxYkGGoKKdPqD8+gwawaNEiPvvsM6ZMmXLVjmOCINiFgfPnz7Njxw4OHDjAhg0b2Lp1KyqVCp1OR1ZW1g1zbP6xVFba8sQu5TqiFNjCzzP2R3No/0GGT34YhdKVEqsVBxdXTMgwmS1XrCMTQC2X0cTdgdSsY+zZsYGDHto6dSDbuHEjoigyePDgG4qgBoOB9PR0EhISSEpKoqysDLDlRrVr14527drRpk0bXFxcrrsdo9HIwQMHOH2xkPB7x9KwSTMUAugNevRVegyGMkpLyzAaDVgsFjQaTXVXMxHqKUyJoojBYsVVrSDA/UphSRAEAgICCAgIYOTIkZw+fZr4+HiOHDlCcnIyycnJLF68mJCQELtAVXN8/yVRCmyiy9ChQ+nYsSOLFy8mOzubWbNmMWjQIAYOHHjN31+CINC6dWtatWrF3r17WbduHZs3b2bv3r32Ur+bde9s3bqVkpIShg0b9o8I0nZwcCA8PJy3V66ktSDQVhD+vK8uXoQ5c2zdXq+BIAj07NmTNm3asHTpUhITE/nss8/o3r07o0aNummBvlWrVmzbto2UlJS/TJSqEa6lkHM
JCQmJm0MKOpeQkJC4DL1ez7hx40hNTcXBwQEfHx++//57goKCAHjooYc4cuQIMpkMpVLJhx9+WK+23HfffTdnz54lKyvrupPjXbt2odfrGThwIAB5eXmMHTuWmJgYANauXcurr76KWq1m0aJFTJo0iZiYGJwvFQOsVujUyeaU8vLi0RMnmODpSe8bCAa9MjKY5uvL8Kv8gS0cOcKFsDBklZWUl5VhMBgwWywIgoBCobC3Ad9UVkbb9u0J27z5hpP3mTNnUlJSwhdffMH8+fNZs2YNa9asIS4ujtmzZ/P7779fd/0boTdbSCuo4HylHlEEpUxGZUUZxcUl+Pj42Dvf6XSV5OcX4OjkiJfnn53zLFYRk9UKiJxMPkba7m089cTjhIaGkpCQwE8//YTFYuGhhx6i8w26DoLNQbNr1y6io6M5e/YsSUlJdO3alddee43AwMC/JSvmlsjLg3vvrf3anj3XdEqA7Ry89tpruLi4MGvWLPLz8/n4i69p06kzXXv1xWC2YBVFZIKASi7DWaXAWaXASaVALhOoqqri9ddfRxAE3n///es6US5evMjbb7+Nr68vb7/99lXPb0lJib0sLz09HZPJBNicS23btqVt27aEhobWSVAVRZFdu3axevVqDAYDoaGh9B59P3kGUNiFURGj0UhlpY4LFy9gMZtxcHBALlcgk8vQaDQ4aDSXCFU3ENIsFmSCQOeGbrhr6l4GJYoieXl59tK+vLw84E8xNSIigj179pCfn8/XX3/9/+/avEVEUWTv3r2sXLmSqqoqfH19mThxIi1atLjhunq9ni1btrBjxw5MJhN+fn6MGjWK1q1b1+s8FhUV8dZbb+Hk5MSsWbP+MlH/WlitVlasWMHOnTupqKigZ0ICEzIzUVqtKBQK5G5u0KUL3KADYw01YfO//fYb5eXluLq6Mn78+Ho3IgDbQ5Lnn3+epk2b8vLLL9d7/ZuhJuS8devWPPPMM3/JPiUkJCT+TUhOKQkJCYmr8PjjjzNo0CAEQeCbb77h0UcfZdeuXQB8/vnn9qehR48epW/fvraOYXV4Ap6VlUVWVhbu7u7s3r2bXr16XXU5s9nMrl27KCkpsYtSDRs2tAtSAHPmzOGtt95i/PjxABw7duzKDZ05A8XFUD1hn3sLT44NRqM9iDknJwdnbNNkhUKB1sEBraMjDg4OOGg0KFUqfk1JwffMGbreQklax44db1mQAtAo5IT7unCuQk1OSSUVRjNGEVQOWpQqNaIoIghCrUwps9VWCmURRWQCOKsVBLo7EmQNJGGLjp9//pk333yTdu3aMW3aNL799lvmzZtHeXn5DUVKZ2dnhg4dyj333MPevXt5+eWXSUxMZPbs2fj7+zNgwADat2//l2ai3BIVFbV/lslsGWbXYefOnZhMJu655x7kcjk5OTlUFhfQzN2ZUM8bZyo5ODgwcOBAVq5cyY4dOxgyZMg1l928eTNWq7VWlpQoipw+fdpellcTVAzQtGlTe1lekyZN6iUgnDt3jkWLFpGTk4ODgwOTJ0+mW7duiIB4oYy8Cj0gopAJqFRqW5c+jQMajRpXV1f0ej1V1Z30dJW2kkiFQo6m+t5SazR2R1/NcRitVgQEWnk510uQApv41KhRIxo1asTQoUM5f/68XaDKzMwkMzOT2NhYvLy82LlzJ+3bt//POKbAdn569OhB27Zt+e2334iPj+fTTz+tk6NHo9EwfPhwevbsyZo1a4iNjeWbb74hNDSU0aNH06RJkzqNYfXq1ZhMJkaNGvW3C1IGg4Gff/6ZY8eOoVAoeO6552gYHU3h//6HZ3k5clG0PQyJiKjzNmvC5lu2bMmKFSvYv38/c+bMoX379owfP/6aYfNXQ6lUEhISQmpqKlVVVfYHDneSmtK92xXYLiEhIfFfQxKlJCQkJC5Do9EQFRVl/7lLly588skn9p8vteeXVufA1JWff/6ZiRMn4ufnx7x58+yi1IkTJwgPD+eJJ55g+/btTJ4
8mTlz5mCxWNi1axcjR45k8uTJhIeHU1JSwrPPPktMTAzp6el8/fXX7N+/H0EQKC4uxs3NjbS0NKZNm8a548fhzBme8vZmipNTLQfU0qIivrxwAaMoYgXea9iQoZc5o4wmE+fPn0dfVYWx2jkCoJDLcdZq6VFczAQnJ3ZVVXG+spJHvLx4w8WFuQUFxBkMPJ+Xx8zwcD74+GOioqL45JNPWLZsGWazGR8fH3744YfrZnDs2rWLadOm2QW3H374gU8//RQnJydGjBjBW2+9RY3h9/Dhw7zyyiuUlZVhsViYMWMGY8aMsZ/b5557jg0bNlBaWsobsz6gRKamYVAIKfGHmfvRu1RVVmC1iox8fCqRfe/hdG4OP77/JhVFhWA2MeWJx+nxzDPQIpghQ4awfv16FixYwNNPP01QUBAvvfQSX375JcuWLaOsrIzhw4ffUMxQqVT06dOHcePGcfDgQZo0acKJEyf48ccf8fLyol+/fnTr1g21+saZSX8rl4tSTk62TozXoKqqiujoaFxcXOxdsrKysgBbpk9d6d27Nzt37mT79u306tXrqiH5+fn5HDx4ED8/P9q0aUNSUpLdEVUjsiqVStq0aWN3RN1MCY7FYrF3FjObzbRr147777/fvi0BaOvjggicr9BjFQWwmKmorATA3cMDlVKFRuOAG2C1WtDrDej1Vej1eirKK6got51nlUqJRuOA2kGDIFcil8to6elEE5dbn4A3aNCAqKgooqKiyM/P5/Dhwxw7dgydTsfy5ctZvnw5/v7+RERE0L59e3zucFbdPwVXV1eeeOIJEhIS+PXXX9m7dy+JiYmMHTuWDh06XPde9/Dw4OGHH6Zv3772MPT333+frl273rAc7/jx4xw6dIiAgIC/PWS+rKyMb775hpMnT+Lo6MiTTz5pu1/PnqXEy4vTej3eMhnuwcHIX3ut3tt3dHTkgQceoHPnzixatIijR4+SkZHB6NGj6datW53F4VatWpGcnEx6evpNua3qS42g/VdmWElISEj8m5BEKQkJCYkb8OWXXzJs2LBar7366qssX76c4uJiVq5caXe0vPXWWzRs2JApU6ZcsR2LxcKCBQv4448/8PLy4p133qG0tNT+FLi0tJSwsDA++ugjwFYSUFPSBjbhqoavvvqKxMREpk2bdkXordlsZtiwYbzzzjuMb9wYxo2j4CodoO5xcWG8uzuCIHDCYKBLejonXVxQVx+LVRTJz8/n4sWLyKpL87SOjqDX06xZM3wcHJCVllIpCBwIDaXAbCYwKYmHPD151MuLxRcvMs3ZmeFbtkDjxixdupSMjAwOHDiAXC5n0aJFPPXUU2zcuLFOn0NycjIzZ87k6NGjNGjQgLffftv+XklJCY8//jibNm3Cz8+PgoICIiIi7IJHaWkpbdu25Z133mHLli12J1xLNw1Tpz7MjwuX0L5LN3RVeub//DPZe7aycsE8lixeTMuWLdHpdHTp0oUuXbrQqVMnoqKiSE9PJykpiT/++IO+ffvSsGFDXn75Zb788ku2bNlCeXk5EydOrJPbKSQkhKysLAYPHoyrqyvbt2/n6NGj/Pbbb6xbt467776b3r1718sx8Jdyeaettm2vu3hNaergwYPtzo+srCw0Gg2NGjWq826VSiVDhgxh8eLFbNmyhdGjR1+xzKpVq8jLy8PFxYUXX3wRo9EIgIuLC927d7eX5d2K8Hfq1CkWLFjAmTNncHZ2Zvz48URERFwxiZbLBMJ9XchWyckt0VGmNyDI5Gg1alTK2g4nmUyOVqu1O3HMFjP6Kj16vR69vopKvR6jCPryC1SdzMTq5YYlNBR/f//b5rDz9vamc+fOhIeH065dO1q0aEF8fDw5OTmcOHGCVatW0bhxYzp06ED79u2vGwT+b6Fdu3aEhISwdu1aoqOj+emnnzh48CDjx4+/oYOsWbNmvPDCCyQmJrJy5Ur2799PXFwcAwYMYMCAAVdcg6IosmzZMgDGjBnzt5ZO5uX
l8fXXX1NUVISPjw9Tp07F19fX9qZWi5OzM3K1mkJRpFinI0AmQw6QmmrLnOvQoc7dX1u2bMnbb7/NunXr2LlzJwsXLiQ2NpaJEyfWSQRt3bo1y5YtIyUl5S8RpSSnlISEhMStIYlSEhISEtfhgw8+IDs7m507d9Z6/cMPP+TDDz9kx44dvPzyy+zbtw+VSsW77757zW1t2rQJf39/QkNDAejXrx9Lly7lySefBGwT7IkTJ97ymDMyMtDr9bayvgMHQBDwksuvWC7XYGBCXh5njEYUgkCRxUKu0UhodalfVVUV5WVlyGQy25dcjkFv64529uxZTFotVouF4Q4OGE0mPJVKAtRqco1GGtW09hYEe8D6mjVrOHz4MB06dACu7HB3I/744w8GDhxIgwYNAHjsscfs53v//v0cP36cQYMGXXEuAgIC0Gg0jBw5EoCuXbty9uxZZDIZFRUVhIaEMPyefgCILg6UnznOicJC0lJT7aWRUN1JLTWVTp06IZPJeOSRR5g1axarVq2iRYsWNGnSBE9PT15++WW+/vpr9u3bR0VFBY899tgNS25CQkLYsGEDmZmZjB07lieeeIL8/Hx27NjB/v372bx5M9u3b6dz587079+fhg0b1uvc3XHc3WHmTPj5Z/DwgGnTrrmo0Whkx44daLVa7r77bsAmKhYUFBAWFlZvQaVbt25s27aN6Oho+vbti5ubG2fPniUxMZH9+/ezatUqHBwc8PX1pUmTJrRt25Z27drRrFmzW57km0wm1q9fz/bt27FarXTp0oX77rvvqh3sapAJAi08nNAXXuRE3lmcvX1wcHbBZLWiuDQs+jIUcpsorHLQ4iiKiFYLYlkRRWlHyEhNIcFgYO3atWg0GkJCQmjZsiWhoaG33N2yJuTc39+ffv360a9fP0pKSjh69Cjx8fFkZWVx5swZ1q5di5+fHxEREURERFy1G+G/BY1Gw9ixY4mMjGTRokUkJSWRmZnJsGHD6N2793WvYUEQaNeuHa1bt2bPnj2sX7+eDRs2sGfPHoYNG0a3bt3s6x8+fJjc3FwiIyMJCAj4qw7vCtLS0pgzZw56vZ7AwECeeuqp2q5ErRaZTIZKpUIURcqLili6dCkTTSaE776zLdO5M3zxBajqVl6qVqsZM2YMnTp1YsGCBWRkZPDuu+9y77330q9fv+ueYx8fHzw9PUlJSbGXZt8ppJBzCQkJiVtHEqUkJCQkrsEnn3zCqlWr7BPoq9GvXz+mTp1KUlKSXWy5FvPmzSMzM9Nu8a+qquLEiRN2UUpb/Yf9bcXNzSYKmc1wmTAyLjeXDxs1YnS1i8rj2DH0l3Sdq9LpcHR0xM3dHZ1Oh0KhwGwygV6P3mCgxGjEarWiKykhT6dDLpMhms2UlJejVyhs3djkcqgOORdFkddee43HH3/8thzapRMNURQJCwtj//79Vyx34sQJ1Gq1fXmz2YzVaiUoKOiKcGxBEHB2diY/Px93d/er53RV4+7uzgMPPMB3333Hjz/+yBtvvIFarcbJyYkXXniBOXPmkJCQwBdffMHTTz993eyZ5s2bo1QqycjIsL/m7e3N+PHjuffee9m9ezfR0dHs37+f/fv307p1a/r3709ISMg/Z+I/ZIjt6wbExMRQUVHB0KFD7c6Q7OxsoH6lezXI5XKioqL47LPPeOONN3BxcaGoqAiwZZ+5u7szYcIExo4de1uzkLKysli0aBEXLlzA3d2diRMn0rp16zqtK4oiW1av4HjuCXoPGUYDv84U6k3oLbb7T6Cm+lEARKxizXegkgs0dnKgkbMDbmo/hI6tMZvN5Obmkp6eTlpaGklJSSQkJAC2cuPQ0FC7SFXfiXONKOXh4WF/zc3Njd69e9O7d2/Kyso4duwY8fHxZGRksHHjRjZu3IiPj49doGratOk/5zq9jfj7+zNjxgx27NjB+vXrWbZsGbGxsUyaNOmGeVFyuZzevXsTGRnJ5s2b+eOPP1i0aBE7d+5k9OjRBAc
Hs2rVKpRKJSNGjPiLjuhK9u3bx+LFi7FarXTq1IkHHnjgSpHd0RFBEBCw/T8mVyrZu3cv98THY/c1HToE770H77xz3dLey/H39+f1119n27ZtbNy4kZUrV3L48GEmT558zXMsCAKtWrUiJiaGCxcu2B9k3AlKS0spKyurd3i9hISEhMSfSKKUhISExFX47LPP+PXXX9mxY0etSZzJZOLkyZP2TnyHDh3i4sWLN3yKfeHCBXbu3Mnp06ft27NarTRu3JiEhISrlmW5uLjYywLqQ0hICFqtll9//ZXxo0aBoyMFlZV4XRb4Wmw207z6qfXiwkKKL3EumS0WrIKAi6srTk5OnDt3DovZbCvPKS7G18cHuU4Hej1msxmD1YpMJsNitVJRUcEFoxEHk4lMi4Xf168nMDCQ/v37M2fOHEaPHo2Hhwcmk4nk5OQ6l1f07t2bDz/8kIsXL+Lj48O8efPs73Xr1o3c3Fx27NhBv34219OxY8do1arVFdtJT08HIDw8nIiICLKysoiJiaFHjx5YrVbkcjlubm44Ojryyy+/8NBDDwE24cTDw6PW5Lxdu3b07t2b6Ohofv31Vx588EHA9pT/6aefZsGCBRw6dIjZs2fz3HPPXVMQUCqVBAQEkJGRQXl5ea0Oio6OjkRFRdG/f38OHTrE9u3bSU5OJjk5mSZNmjBgwAA6dOhgD2n/J2M2m9m2bRtqtZrevXvbX6/Jk6q5r+pCeXk5ycnJJCYmkpyczIkTJ0hJSeHuu++mW7duNG3alKVLl+Ln58eUKVNum+Cr1+tZvXq1vfFBr169GDFixHW7/11OUlISOTk5qNVqBvXoiouLCxVGMyV6E+VGMyUGE0aLiFjdfVCrlOOqtnUf9HBQoZLXPhaFQkFwcDDBwcEMHToUvV5PZmamXaQ6ePAgBw8eBMDPz88uUrVo0eKGQdA1At+1BD0XFxd69uxJz549qaysJCEhgSNHjpCWlsaWLVvYsmULnp6edoGqefPm/6rJu1wu55577iEiIoIlS5aQlpbGBx98QL9+/Rg6dCiqGziDtFoto0aNolevXqxevZrDhw/z1VdfAVBZWcnYsWNxv0r59Z1GFEXWrl3L5s2bARg0aBDDhg27+mfn4FAtpAqIokgjT0/8/PxIqarC0WL50zm4aRMEBcHkyfUai0KhICoqivbt29ubCHzwwQcMGDCAIUOGXNWJGhYWRkxMDCkpKXdUlKr5P/p62YgSEhISEtdHEqUkJCQkLuPMmTNMnz6dgIAA+8RZrVYTGxuLyWTigQceoLS0FIVCgaOjIytWrLBPGq6VKbVgwQIGDBhQS5SQyWSMGzeOefPm8cILL1wxjhEjRrBo0SLCw8PtQed1QaFQsHbtWp555hk++OADZGfP8pQo8oSXV63lvmzShNHHj+Mml9PH2Zmm1ZMnqyhiMZuRqVQ4OzsjEwR8vL05f/48BQUFAHi4u+Pq5YW8tBQXV1c0ZjPiJS4rQRB4UC5nekkJ+qefplOnTjRt2hRPT0/atWuHSqVCLpfz+OOP11mUatOmDW+88QZ33XUXzs7ODBw40C7mubu7s3HjRl588UWmT5+OyWSiadOmrLlKS/Lk5GTAJii5u7uzevVqpk+fTnl5OTKZjM6dOyOTyfj+++/58ssv+fzzz7FYLHh5ebF06dIrtjdq1CiysrI4cOAALVu2JDIy0v45PPzwwzg7O7Nz504++ugjpk2b9mcOy2WEhISQkZFBVlYWEVfpXKVUKrnrrrvo1q0bycnJbN++nYyMDObNm8eqVavo168f3bt3r5c4cls5dsyWH9OlC1xDpD148CAlJSUMGDCgVolbdnY2CoXiukHBoihy7tw5e0j58ePH7SH3DRs2ZOLEiRw8eJD27dvzwAMPsHjxYmQyGVFRUbdNkEpOTmbx4sUUFxfj6+vLpEmT6u3uslqtrF69GoD+/fvjUu0kdFIpcFLdnj/LNBqNPbQdbG6O9PR00tPTSU1NJTo6muj
oaGQyGf7+/nYXVUBAAApF7TFczSl1LRwdHenWrRvdunVDp9ORlJTEkSNHSElJYfv27Wzfvh03Nze7QBUYGPj/p8PkDfD29ua5554jNjaWZcuWsW3bNuLj47n//vsJCwu74fqenp48+uij9O3bl4ULF7J8+XIUCgV9+/alrKzMfp38FZhMJubPn09cXBwymYyJEydy1113XXuF6ntZkMmwiiJyi4Vnpkzhp7NnabJxI43l8j9/L339tS2DrkePeo/Lz8+Pl156id27d7Nq1Sq2bNlCfHw8kyZNokWLFrWWDQ0NRSaTkZqaesNuqLeCJEpJSEhI3DqCWPMXnYSEhITEv5NPP4XZs6FRozqVTZSWlVFSXIybuzuul0yEKiorKSwoQKVS4dugAbJLtmW1WtHpdFRUVmLQ65FZrbjo9ewYNoyi3r0RRZETJ07YJ7g1qFQqmjdvTmBgIIGBgQQEBFy3zO1SF1FNoHjNk/y6YLVaeemll3B1deWtt9666jK//PILBw8e5PHHH79hSWYN586d44MPPkAQBN54441aYbyiKLJ161ZWr16No6Mjzz777FXFl+zsbGbPnk2vXr1qZVldj5MnT7J9+3aOHDmC1WpFo9HQs2dP+vTp89e6K/bu/TNHysEBFi+GyyZpVquVt956i+LiYv73v//ZJ9k6nY4XXniBwMBAXnrppVrrWCwWsrKySExMJCEhwS6KymQyWrRoYc+H8vLyQhRFZs+eTU5ODk8//TTff/+9vaHArQofFRUV9tIsmUx2XYfGjdi/fz8LFizAycmJ999//y8XEUVR5OLFi6SlpZGWlkZGRgZVVVWA7X4MDg6mZcuWtGzZkkaNGvHFF1+QmZnJt99+e9PnUa/Xk5ycTHx8PElJSbXC5sPDw+nQoQMtWrT41whUFRUVLF++3O5Oi4yMZMyYMbUckNfjl19+YcOGDbi4uKBSqVCr1dxzzz3079//hs6rW6WiooLvvvuOnJwcNBoNU6ZMoWXLltdfKT8fBg3i7NmzCIJgy7z74w9OFhez+vnnGbl/Pw18fVHV3C9aLcyff03xui4UFxezZMkSkpKSAOjRowejRo2q5fz7+OOPOXXqFJ9//vlN3at14euvvyY5OZmPPvpIypSSkJCQuEkkp5SEhITEv50RI+C776C83J7vdC2sokhZdbj55RMoJ0dHDAYDFeXlFBUV4enpSY0sJZPJcHJywsnJCbPFgikvjxIXF3ZpNBjj4tBqtXTs2JGWLVsiiiI5OTnk5ORw6tQpMjIyamUpNWzY0C5SBQYG4u3tbS8ZefXVV9m3bx8mk4mGDRvyww8/1OtU5OTkUFFRQc+ePa+5TI1YUlZWVuft+vn5MW7cOBYuXMjcuXN5+eWX7Y4TQRAYOHAgzs7OLFq0iM8++4wpU6ZcUVro7+9/Ra7UjWjWrBmPPvooI0aM4I8//iAmJoZt27axY8cOOnXqRP/+/W+YbXNb2LTpz++rquDgwStEqSNHjpCfn0+vXr1quT5ycnIQRdHuOKqsrKxVlqevDtfXarVERkbStm1bwsLCrig7EwSBESNG8Mknn/Dpp5+i0WgYPHjwLQkdoihy5MgRfvvtN8rLy2nSpAmTJ0++6S5bJpOJdevWARAVFfW3uNoEQcDX1xdfX1969eqF1Wrl1KlTtUSqlJQUAJydnUlLS8Pd3Z3i4uKbzuTSaDR07NiRjh07YjQaSUlJIT4+nsTERPbs2cOePXtwdHS0C1QhISFXOLb+P+Hk5MRDDz1Ely5dWLx4MbGxsSQnJzN69Gi6du163fLFkydPcvDgQTp06GB3BW3cuJF169bZw9BvtI2b5cKFC3z99dfk5+fj4eHBM888U7emCpc6pWocs5WVNGvWjD4zZxL93HP0TU/Hr0EDW5mxTgfPPw8LF8JNdhR1d3fn6aefJi4ujt9++42YmBiSkpK4//77adeuHWDrwpeTk0N2dvaNhbWbQBRFTp06hYuLyz+3M6qEhITE/wM
kp5SEhITEf4GHH7YJBzdwS13LJVWDKIqcv3ABo8GAh4cHzlarbXuXLmu1Ql4ePPccZyZNIjY2lkOHDlFSUgLYylQiIyOJjIzEw8ODkydP2kWqnJwcKisra+3T2dmZgIAAAgMDCQoKomnTpjf91HvFihVs376dGTNmXLPcYtu2baxcuZLBgwdz77331nnboigyd+5c4uLi6N+/P6NHj75imYSEBH766ScsFgsPPfQQnTt3rvX+F198QVpaGp988kmdXRWXotPpiImJ4Y8//rCf75YtW9K/f39atWp157J8pkyBuLg/f37xRRg3zv6jKIq8++67nD9/nvfee6+WuLFq1SpWr15Njx49KCsrIycnxz6x9fX1pV27drRt27bOpV4ffvghixYtolevXnz99dc3LUqVlJSwdOlSEhISUCgUDBkyhAEDBtxSdteOHTtYvnw5np6evPvuu/9I4cVoNJKdnU1aWhqpqan89ttvODs7065dO7y9ve0uqpCQkOt2GawLJpOJtLQ04uPjSUhIQKfTATYBsm3btnTo0IGWLVveMZfLX4HRaGTDhg32Do2hoaFMmDChlpuyBlEU+eSTT8jOzuall16yZ6xVVlayadMmoqOjsVgsNGnShNGjR9s7ud4OsrKy+P7776msFpOmTp1a95JBqxW6deP8mTOYTCabEL5uHVQLWruio6mcPp2ICxdqu2w7dIBvv7V3aL1ZLnemRUREMH78eIqKivjf//53zd/Ht0pxcTGvvvoqrVu35plnnrnt25eQkJD4r/DP+2tIQkJCQuL28/jjsGsXlJbaOvJdheu5pGoQBAFvb2/OnTuHePo0Vmwt7ikuhqZNbQJVfj54e8OECTRu3JjGjRszYsQIMjMziY2NJT4+nk2bNrFp0yaaNWtGZGQk3bp1Y+DAgfbSouzsbHJycjh+/Djnzp0jISHB3k1MLpfTrFmzWm6qukyeRFEkISEBNze36zpdarZVXl5+w21efm4mTpzIiRMn2L59O6GhoVd0Y2vXrh3Tpk3j22+/Zd68eZSXl9fKO2nRooXdrdKxY8d67R9sk/l77rmHvn37EhcXx/bt2+0OmIYNG9K/f386d+58+8WQy8/Vpe3igcTERPLy8ujatSuenp5YrVays7NJTExkzpw55OXloVKpUKlUBAUF2YWoq03cb4Sjo6Nd1LoZEU4URfbt28eKFSuoqqoiMDCQyZMn33JYclVVFZuqHWX33nvvP1KQAlsJX6tWrWjVqhV9+/YlNzeXxo0b26/NGmeTIAg0bdrUHpoeFBRUb/FIqVTas6/MZjMZGRnEx8dz7Ngxezi7Wq2mbdu2RERE0Lp16ztevna7UalUjBw5ks6dO7No0SLS09N55513ripyxsfHk52dTceOHWuF/js6OjJmzBjuvvtuVq9eTXx8PJ9//jlt27Zl5MiRtgYUt8ChQ4dYsGABZrOZdu3a8cgjj9g7Y9YJmQx69UJYuhSrKIK/P1wypl69e7P65Zc58dZbyPPz8fbxsblsjxyxlZa/9totjb/Gmda5c2eWLFlCfHw86enpjBo1Cq1WS0pKyh0RpU6dOgVIeVISEhISt4rklJKQkJD4r/DOO/D99+DrC1eZPJaVlVFcXIybm9sNSxH0VVUImZkI2CaWgiDYhAhPT5vw9b//XbPDktFoJDEx0V7SYq3u3FcTEh4eHl5rQlRZWcnx48ftTqrc3FxMJlOtbXp5edUSqRo2bHiFQ+bcuXPMnDmTu+++m/vvv/+ax5aSksJXX31FeHg4Tz755HXPw9XIzc3l448/xtHRkTfffPOq5/Ls2bN89dVXlJSU1OpqlZOTw8cff3zDMdYVURRJT09n27ZtpKamAuDq6kqfPn3o2bPndfO76sWwYXD27J8/f/op3H23fQwffvghOTk5jB49mrNnz5KcnIxOp8NqtRIbG0tAQAAzZswgLCzsltw3xcXFvPHGG3Yh5dlnn71CGLweBQUFduFArVYzYsQIevX
qdVscZmvWrGHz5s00btyY119//f9FflLN9RgVFcWwYcMA2zlKS0uzB6dXVFQAtmD/oKAgu0jVtGnTmz5Gq9VKZmYm8fHxHD161F5Kq1KpaN26NREREbRp0+bvC/W/SaxWK7t27WLNmjUYDAYaNmzIpEmTCAgIwGQy8fbbb1NaWsq777573VLJnJwcli9fTm5uLjKZjB49ejB06NB6uytFUWTz5s2sXbsWgH79+jFq1Kib+9wMBqKnTCEvO5uxK1eiuExQFkWRhZ9/TocvvsBPocDDw8Ne/s0rr8CYMfXf51WHYWDt2rX88ccfiKJISUkJGo2GL7/88rZnPq1bt46NGzfy1FNP2UsGJSQkJCTqjyRKSUhISPxXKC+He++FtDRbWcUlEw+rKHL27FkQRRo1blwrxPxaGDMzEauqkAkCCqUSQRRt2xw+3JYVUoeJTXl5OUeOHCE2Npbjx48Dtk6H4eHhREZG0rJlyysmSBaLhdOnT9udVNnZ2fZStRo0Gg3NmzcnKCiIwMBAmjdvTnR0NGvWrOHZZ5+9bjes06dP89577xEYGMjLL798w2O4Glu3bmXVqlW0bNmS55577qqiRmFhIV9++SUXLlzgrrvuYuLEiYiiyLRp0/Dw8OCdd965qX1fizNnzrB9+3YOHTqE1WpFrVbTvXt3+vbte9NZQXb69IFLM7h+/BEiIsjPz2fDhg38+OOPqFQqe7mRt7c3bdu2xdnZmVWrVtG3b1/GXVLud7P8+uuv7Nq1i6FDh7Jx40YaNWrE66+/fkNRyWq18scff7BmzRpMJhNhYWFMmDDh1s9LNaWlpbz++uuYTCaeeeaZegllfyeHDx9m7ty5TJw4kR5X6ZYmiiJnzpyxi1SZmZl2wVir1RISEmIXqXx8fG5K3LNareTk5HD06FHi4+MpLi4GbCJYWFgYERERtG3b9vYJrH8BRUVF/PrrryQmJiIIAr169UKr1bJx40YGDRrE8OHDb7gNURSJi4tj9erVFBYWotFoGDRoEH379q2TY81sNrNkyRL279+PIAiMGzeOXr163dJxLVy4kH379vHZZ59dVVw2mUwsfPVVei1ZgpeLy58l4jKZrYyvU6db2v+lHD9+nEWLFnH06FGysrJ47LHHePbZZ2+rGCyFnEtISEjcHv6Z3nEJCQkJiduPszPMmQMTJsCZM7byiurSkYrycqwWC25ubnUSpACUzZphysrCarFgMZlQiKKt81p4eJ0EKduQnOnVqxe9evXi4sWLxMbG1vpycXGhc+fOREZG0qRJEwRBQC6X4+/vj7+/P3379kUURYqLi2vlUp0+fdpetgbYXUhyuZySkhIKCgpsQe1XOdYat0F9gs4vZ8CAAfb9b926lYEDB16xjKenJy+//DJff/01+/bto6Kigscee4ygoCBSU1MpLS29reG5jRs35qGHHmL48OFER0ezZ88edu7cyR9//EGHDh3o37//VbsC3hBRhGq3jIjNqbBn/372r1vHuXPnSExMpLS0lOHDh3P33XfTtm1bGjRogCAIbNq0CZlMZg85vxWKi4vZu3cvXl5eDBo0iJKSEmJiYjhy5Mh1SyHz8vJYsGABJ06cQKvVMmHCBLp06XJb87c2bNiAyWQiODj4uoLoP42abpnXEucEQaBJkyY0adKEAQMGYDabOX78uP3aP3bsGEePHgVswdQ1eVShoaF1ziuquT6Cg4MZM2YMubm5HD16lCNHjtjLeuVyOS1btiQiIoLw8PBbzrq603h4ePDUU08RHx/Pb7/9xtatW0lMTKR169ZX/V1xNQRBoFOnToSHhxMdHc2mTZtYvXo1u3fvZvjw4XTu3Pma17BOp+OHH36wOwIfe+wx2rRpc8vHVeNw1ev1V/0MlEol4959l+XnztF7+3YUCgWOWq0tk+qVV2DBArhNjRkCAgJ4/fXXWbFiBbNmzWLVqlXodDomT55M48aNb3n7oihy8uRJKeRcQkJC4jYgOaUkJCQk/mskJ8M
jj8CJE+DlhVWjqbdLqgZrZSWWrCxkViuiVosiIABUKpgxA0aOvKnhiaJIbm4usbGxHD582B587ufnR2RkJJ07d76hg8VgMJCbm2t3UqWlpbFr1y57UDPYsqNqnFSBgYE0adIEhUKBxWLhqaeespd83CxlZWW8++67VFZW8tJLLxFwjfbnBoOBOXPmkJqaSlBQEMHBwWzevJlHH32UTrfROXA5er2effv2sWPHDoqKigAIDg5mwIABtGnTps6ijL6oCEv37lTpdFTp9VgtFr695x4Mnp54e3sTFxdH9+7defHFF69Y98svvyQ1NZXZs2fXPVT5Gvz2229ER0czadIkunfvbi/l8/DwYObMmVcElJvNZrZs2cKmTZuwWCz2cORbHcflXLhwgZkzZ2K1WnnllVeueR38E1myZAl79uzh3XffxdfXt97r63Q6srKy7CLV+fPn7e81atTI7qIKDg6udymeKIqcPn2a+Ph4jhw5wsWLFwGbiBUSEmIXqG7353m70el0vPDCC+zevZsWLVowaNAgxo0bV2/nTUVFBRs2bGD37t1YrVaaNWvGmDFjrhB8CwoK+Oabbzh37hxubm5MnTr19nToNJlIfuIJSvfto9Wzz+L+5JPXfDiRn5/PnnHj6JiUhK+vL5qacu3mzeGXX67IpLtVXn75Zfbv30+rVq2Qy+Xcc889DB48+JYC9GtCztu0acPUqVNv42glJCQk/ntIopSEhITEf5ETJ2zhsnv2oDeZuGi14uruXr8nvmYzXLyIxWSisrKSMkdHfBo2RKVUglZr6/Z3i5MLi8VCSkoKsbGxJCQk2EuDgoODiYyMpEOHDnUq29mzZw8//vgjPXr0QK1Wk5OTY5/E1qBUKu0B6qtXr0ahUPDjjz/e0sSlJp/K09OTN998EwcHh6suZzabmT9/PocPH8bBwYGSkhL69evHxIkTb3rfdcVqtXLkyBG2bdtmD+719fWlf//+dOnS5arHX1hYSGJiIomJiZw7downN2wAQK5QoHVw4OLvvxPUvj0//PADiYmJvPzyywQGBl6x32nTpuHq6sqsWbNu6RhKSkp4/fXXcXV1rdXVrqbb4uXlZydOnGDBggXk5eXh4uLC/fffT/v27W9pDNfixx9/5MiRI7Rv354pU6bckX3cKWrKk7755pvb0gGvuLiY9PR0e7lfaWkpYBOSAgIC7E4qf3//enU5FEWRvLw84uPjiY+PJy8vD7C5iYKDg4mIiKB9+/b/yBKr06dP8/777+Pg4ICzszMXLlxAo9EwcuRIevbsWW/H3oULF1i5cqW9MUR4eDgjR47E19eX3Nxcvv32W8rLy2ncuDFTp07F3d399hzIkiWUvvMOJcXFNGjQAPXPP1+3HO94dja5Y8bQIi+PBg0a/Hl9jRgBr79+e8ZUzcqVK9m6dSu9evXi4MGDGAwGfH19mTRp0k27NI8dO8b333/PkCFDGDp06G0dr4SEhMR/DUmUkpCQkPivYrFgnj+fgldewaGqCmdXV2Tu7qDR2LroXQ1RBJ3OFmYO0LgxzJxJbkoKso8/RqFU4ufnZ3NbrV0LjRrdtuFWVVVx9OhRYmNjycjIQBRFFAoFbdq0ITIykjZt2lyzo9nXX39Namoqn376qV3EKi8vtzupcnJyOHnyJGazGYC4uDh0Oh2DBw+mTZs2BAQEEBQUZC87qw8rV65k27ZtdOzYkUcfffSa64uiyPLly9m+fTtHjx6lZ8+efPbZZ/Xa160giiJZWVls27aNpKQkwFbK2Lt3b3r27ElBQQGJiYkkJCTYnHXYJv3hzs6MX7MGB632z9D72FjO5OUxa9YsgoODr+qSOnnyJB988AHdunXjgQceuKWx//777/zxxx9XiE8VFRW8/vrraDQa3nvvPURRZN26dezYsQNRFLnrrrsYPXr0HcsjqjlGQRB4++23b7lL2l/NzJkzqaio4JNPPrnt2xZFkfPnz9tdVJmZmej1esBWBtaiRQu7SOXn51e
v++78+fN2ger06dP21wMCAujQoQPt27e/bXlht4Ioinz22WdkZmYyffp0AgIC2LJlC5s3b8ZsNhMYGMjEiRNp2LBhvbedmZnJ8uXLOXXqFDKZjGbNmpGbmwtAWFgYjz/++O0Nin/jDcqXL6eoqAgfX18cnnoKnnrquqsc278f0+TJNKyspIGfH3KZzNYddseO2zcuID09nc8//5yhQ4fSrVs3lixZQnJyMgA9e/Zk1KhR9T4Xa9euZdOmTVLIuYSEhMRtQMqUkpCQkPivIpez29+fjYMH86CrK22TkiAnB6pLuZDL/yy/sFhsuR8AajV06AATJ8LgweDkRPOoKI6cPo3Hb79RUFCA96hRCDcxkboeDg4OdOvWjW7dulFcXMzhw4eJjY3l6NGjHD16FK1WS4cOHYiMjCQoKMg+iTUYDKSnp9OiRYta4oOzszPt2rWzTyjMZjOnTp0iJyeHgoICsrOzycvLo7y8nP379wO28OaAgAB7yZ+/v/8NW6cPGzaMzMxM4uLiaNWqFXfddddVlxMEgTFjxuDi4kJqaio7duwgMTGRtm3b3o7Td0MEQaBFixa0aNGCc+fOsXnzZrZu3conn3xiL4Nr1KgRrq6uhIeH07ZtW9q0aYPL8eMQHf3nhhwdQSZjy5YtAERFRV11f9nZ2QC3nCdVWlpKTEwMHh4edO3atdZ7Tk5O9O/fn/Xr17No0SL7Z+vp6cmkSZPspZx3ilWrVgHQrVu3/3eClCiKFBUV3bFxC4KAn58ffn5+9OnTB4vFwokTJ+wuqtTUVLs46uLiYs+iatmy5Q3dPQ0aNCAqKoqoqCjy8/PtAtXx48c5fvw4y5cvp1mzZnaByueyTnF/FQkJCWRmZtK+fXtatGgBwJAhQ+jQoQOLFy8mOzub9957j3vuuYeoqKh6udVatGjBjBkziI2N5fPPP2f37t3I5XIGDx7MlClTUKlUt/dgVCqE6v8vRKsVqqpuuEp4t27EvP8+pdOnI7t4EV9fX2R34PddUFAQKpWK1NRUhgwZwtSpUzl06BC///47e/bsITExkQkTJtTrd22Nq7RZs2a3fbwSEhIS/zUkUUpCQkLiP4rJZGLLli2IHh4Ef/CBLQvq2DFITbV9pafbXFGCYAtJDwuDVq3+/Pcy50L7L77gZ3d3TqWnc1e/ftxzG4OiL8fd3Z0BAwYwYMAAzpw5w6FDh4iNjSUmJoaYmBg8PT3tAennzp3DbDbf8Gm2QqEgICCAgIAAcnNz8fT0ZMKECWg0GrKzszl+/Dhnz54lOTnZ/pRdJpPRpEkTu5MqMDDwigmzQqHg0Ucf5b333uPXX38lICDgmhN9QRAYOHAgqampzJ07l/fee4+ZM2fSqlWr23PibkBxcTFJSUkkJCSQnp6OVqtFpVKhUCgQRRG9Xk9kZCQDBgz4sxyvvLz2RpyduXjxInFxcTRr1uyawk9WVhZgmzDeClu3bsVkMjFo0KCrOuXuuusuvv32Ww4cOEBkZCT9+/dn2LBhNxQTb5UacUWpVP6/LO/R6XQYDIa/zFEkl8vtYu+QIUMwGAz2PKr09HR78wOwlZfWiFQhISHXdbp5e3tzzz33cM8991BYWGjv4lfjjly1ahWNGze2C1R/lXhoNptZsWIFcrmcUaNG1XrPz8+PF198kb1797Jy5Uo2bdrEkSNHmDhxol28qguiKJKTk4OLiwv+/v6oVCqKioqYOXMmI0eOpEOHDrcv0N/R0Z5HaK1x1NaBHmPHsqasDOPPP+MbEECPd9/l9vXHs6FQKGjRogWpqanodDq0Wi2RkZG0atWK5cuXExsby7fffkvHjh0ZO3bsDXPILg05/yeWhEpISEj8f0MSpSQkJCT+o+zZs4eysjKGDh36Z9ZRhw62r5tAJpMx7qWXeP/991m9bh3NmjcnND4etm6F0FB45hmbuHWbady4MY0bN2b48OFkZmYSGxtLfHw8mzdvZvPmzRQUFGAwGOoVMO3s7IwgCCgUCiIjI4m
MjARsJYS5ubn2Ln/Hjx/n5MmTnDx5kuhqt5C7u7t9ch0YGEjjxo3x9vZmwoQJzJs3j7lz5/Lqq69e1/UwcuRI9u/fT3FxMV9//TUPPfQQnTt3vrUTdRVEUeTUqVP2srxLS52aNWtG27ZtadeuHd7e3hw8eJDt27fbs6QCAgIYMGAA7UpLa08inZxsYqcoMmjQoKtOekVRJDs7G1dXV7y9vW96/GVlZezZswd3d3e6det2xfuJiYksWbIEuVyOUqmkc+fO3HfffTe9v7oiiiIrV64EoHfv3rcvt+cv5Ead9+40arWa1q1b07p1a8D2WWdkZNjL/Xbt2sWuXbsQBMEufoaGhhIYGHjNe8vT05N+/frRr18/SkpK7AJVVlYWZ86cYe3atfj5+REREUFERASNGjW6rV0YLyU6Opr8/HwGDBhw1XtAEAR69OhB27ZtWbZsGXFxcXz66afcddddjBo16oYdBvV6PT/99BPJycmo1Wree+89goKCWL9+PTExMfz000/s3LmT0aNHX5H3dlM4ONR2StVRlAK495FH+MFsZuexY1zYtMl2j+bmQkkJtGtX526u16N169YkJyeTnp5OREQEYPs9//DDD9O5c2cWL15MXFwcaWlpjBkz5rodOEtKSigvL78tHQslJCQkJKRMKQkJCYn/JCaTiddffx2TycT7779/WzN1Tp48yccff0xgaSnPJif/6V5p2xa++86WWXWHMZlMJCQkcPDgQX766SdUKhUdOnSgVatWREZGEh4efl2nzMaNG1m3bh3Dhw9n0KBB11zOarVy9uxZu0CVnZ1tn8zXoFKp8Pf3JygoiKSkJHJzc+nXrx/jx4+/7naff/55TCYTarUanU7HfffdR9++fet/Mi7DZDKRlpZGYmIiSUlJlJSUALag95YtW9rL8q7mALBarSQkJLBt2zaOHz8OQO/8fAanpODo5IRMEDCEhfG8oyM+Pj68/fbbV53YXbhwgbfeeouOHTvy2GOP3fSxLF++nB07dnD//fdz9913218vLy/n999/5/Dhw8hkMvr378/BgwfR6/W8//77ON8BcfRSDh8+zNy5c9Fqtbz33ns3FBD+iRw9epQ5c+Ywbtw4evfu/XcPpxaiKJKfn18rNF1XLYIolUqCgoLseVRNmjS5obBUVlbGsWPHiI+PJyMjA2t1qbKPj49doGratOltE6jKy8t54403UCqVzJo165oNEC4lMTGRpUuXUlxcjLOzM2PHjqVjx45XHVNxcTHffPMNZ86cwdnZmaeeeqqWKH/u3DlWrFhhd3x26NCBESNG3JJAzIIFGD75hPPnz+Pm7o7roEFQj+6lRqORTz/9lBMnTjC1YUPabNhgyzDs2BE+/xzqcI6uR83vnO7duzNp0qQr3tfr9axZs4Zdu3YhiiKtWrViwoQJeHl5XbGsFHIuISEhcXuRnFISEhIS/0FiYmIoLS1l6NChtz3kuVmzZtx///0cef998vPz8W3QwFbWkZgIM2bA7Nm2vKo7iFKppGPHjri4uBAfH09QUBByuZyUlBRSUlJQqVS0b9+eyMhIWrZsieyyJ/E1okX55aVpl1FTvtekSRN69eoF2J6iHz9+3O6mOnXqFJmZmWRmZmKxWDh27BjHjh0jJyeHPn36EBgYiI+PT63JpUwmIzg4mKSkJB599FEWLlzIsmXLKC8vZ9iwYfWeHJeWltrL8tLS0uxdDF1cXOjevTvt2rUjNDT0hjkzMpmM9u3b0759e3Jycti+fTvG336jqKiIkpISnJ2dOXP+PBZ/fwYOHHjNcd6O0r2ysjJ2796Nu7u7PadLFEV7VkxlZSXNmjVj8uTJNG7cGF9fXxYuXMiWLVsYM2bMTe/3RpjNZtauXQvAwIED/18KUgBF1dly/4RA8MsRBAEfHx98fHzo2bMnVquV06dP2wWqmrI/AEdHR0JDQ+15VF5eXldcly4uLvTs2ZOePXtSWVlJQkICR44cIS0tjS1btrDl/9g777A
ozrcL37N9F5ZexYYgWBEr1thbTKIm0ZiYmGqipvfEX5oppvfejIlREzWx994VlSooAgIWpJdle5vvj5VVYi+p39zXpcDO7My7OzML75nznGfVKoKDg70CVXR09BUJVEuWLMFqtXLTTTddlCAFkJCQQFxcHEuWLGHDhg1899137Nq1i9tuu63BMTp69CifffYZNTU1RERE8PDDD58hrERGRvLwww9z4MABFixYwL59+0hLS6N///6MGDHi8n4nXIFTCjzi/UMPPcRbb72FddYszCqVZxx798K0afDmm+duwHERhIWFERwcTFZWFqIonnH8NBoN48aNo2vXrsyaNYvs7GymTZvGyJEjGTBgQIPfEUVFRYCUJyUhISFxtZBEKQkJCYn/Z9RnSWm1WgYMGPCn7KNXr14U3XwzFfv3o6qqOjVp2rLFM7n43/+uaIJxsaSlpaFUKrn99ttp2rQpZWVl3vyp+n9+fn507dqVpKQkrxuiPlPEYDBc8j4DAgK8k1fwvN+FhYVeoUoQBHbs2MGvv/5KXl4earUaX19fb7lfixYtaN68OfHx8WRmZlJXV8czzzzDxx9/zMqVKzEYDNx+++1nCGmnI4oix44d85baFRYWepc1adKEhIQEEhISaNas2WVPruvHW1dRgf3IEUxGI9U1NWxNTeWEWk2TJk3O+dx6UepKQs7XrFmDw+Fg2LBhKBQKqqurmT17NpmZmSiVSm666SYGDRrkfZ969OjB6tWr2bRpE4MGDfrTSuq2bdtGeXk5AQEB/ziH0aVQ7/gLCgr6m0dyYeq7yzVr1oxhw4bhcDjIz8/3lvqlpKSwb98+wCOy1buo4uPjz3DN+fj4eBsqmM1mMjMz2bdvH1lZWaxdu5a1a9c2uMZjYmLOey3+keLiYrZu3UqjRo3o3bv3Jb1OjUbD2LFj6datG7NmzWL//v288sorXuEkOzubb775BpvNRnx8PJMmTTqvwNS6dWv+97//sWvXLhYtWsS6devYuXMnI0aMoG/fvufsZnpWLjNT6nT0ej2PPPIIWatXU3HkCOHh4R5H67p1EBMDV+CqFASBtm3bsmXLFkpKSs6ZHRYTE8MLL7zgLf+eP38+e/bsYcKECUSd7CZbL0o1bdr0sscjISEhIXEKSZSSkJCQ+H/Gtm3bqK2t5brrrrvqLqnTGTthAt8eOMCA2bNRGY3ofX09CxYtgpAQmDTpT9s3eISZ9PR0AgMDvQJJWFgY1113HSNGjKCwsJDdu3ezZ88e1q9fz/r164mIiCApKcnbjetCTqmLQalU0rJlS68AU583NGvWLGw2G82aNaOkpIT09HTS09MBT+izn58fhw8fZvXq1bRu3ZpnnnmGTz/9lO3bt2M0Gpk4cWKD7Byn00lOTo43H6q6uhrwhPy2bdvWK0RdbZFBL5NBUBABAQGUnDiB+eTE9NVXXyUhIYHBgwfTsmXLBuJXXl4eWq32slrdg+e4bNq0iYCAAHr16sWWLVv47bffsFqttGzZkgkTJpzRUU0mkzFy5Ei++eYbli5dyoQJEy7/RZ8Dm83GsmXLAE8Xtave4ewv5J/slLoQSqXS644aPXo0JpOJnJwcb7nftm3b2LZtG+ARaevzqGJjYxuU9dYHYiclJWG1Wtm/fz8pKSlkZmayYcMGNmzYgJ+fH4mJiXTu3Jm4uLgLisXz5s1DFEXGjh17SWLW6TRv3pypU6eybt06li5dyvz585k7dy42mw0fHx969OjB7bffflGikkwmo2fPnnTu3Jm1a9eyZs0a5s2bx6ZNm7jxxhtJTEy8OOH6NKeU+zKcUvWEh4djfu89rOPHU1ZeTkREBEqFAr7+2iNMXcGNlDZt2rBlyxays7PPG2ivUCi4/vrr6dy5Mz/99BMFBQW8/vrrDBs2jGuvvZYjR47g7+8vhZxLSEhIXCUkUUpCQkLi/xH1LimNRnNV8onOh0KhYNzUqfxQUsKotWtRqVSo6yfp333nEaZuvvlP239xcTE
VFRX069fvjEmVIAhER0cTHR3NmDFjyM7OZvfu3aSlpbF48WIsFguHDh3CbrdjMpmuagmWIAjcdNNNlJaWkpGRQZcuXRgwYIA3QD0vL4/CwkKqqqooKSlh0aJFFBcXExISQtOmTTl+/Djbt2+nrq6Ou+66i/z8fDIyMsjOzsZmswEex0HPnj1JSEigTZs2f26nudNKWFxuN349e/L4I4+wceNGr1OrWbNmDBkyhE6dOmEwGKioqKBdu3aXPSmvd0klJSXx8ccfk5ubi0ajYfz48fTp0+eck+hOnTrRpEkTduzYwdChQwkPD7+s/Z+LdevWUVdXR3h4uLek8N9KZWUlWq32osvL/sn4+Pg0cC9WVlY2yKNas2YNa9as8XYArBepmjdv7j1HNRoNXbp0oUuXLtjtdrKyskhJSSEjI4MtW7awZcsWfHx8vAJVfHz8GaLQ/v37OXDgAAkJCefsSnmxyOVyhg4dSmJiIs8++yy7d+9GEARGjRrFuHHjLs3lhCdY/rrrrqNPnz4sWbKE7du389VXXxEbG8uYMWNo3rz5+TdwmlNKvEynVD3R/fpx4PXXcT77LGVlZURERCCXyeCll6BxY7iEDoSn06pVK2QyGVlZWRf1+69Ro0Y888wzbNq0iUWLFrFixQq2b9/O8ePHL9nlJiEhISFxbqSgcwkJCYn/R2zcuJFffvnlLw1oPXjwICueeYYxe/fSqH5yAZ7yvbffvqI73+djxYoVLF68mMcee+yiJ4BWq5WUlBS2bdvGzJkzUSqV9O7dm/bt25OUlET79u0vebJ3LoxGI6+99hq1tbU88cQTDVq9u1wujh07xgcffEBqaiodO3bEarUiiiJGo5HMzEzKyspQqVTExcURHBxMXFwcXbp0ISEhocFk+k/HZIJp06hYs4YdCgW6V19l0LXXIooimZmZrFmzxluuFxwcTFRUFKmpqdx8880MGzbskndXV1fH888/T1lZGSEhIbhcLtq3b8/48eMvqiQvKyuLTz75hM6dO3P//fdf8v7PN64XXngBq9XKpEmT6Nix41Xb9t/BE088QWBgIC+++OLfPZQ/FVEUKS4u9pb65ebmesVdjUZDfHy8V6SKiIg4Q/CsbxyQkpJCenq6N3Bdp9ORkJBA586dvbl106ZNo7y8nFdeeeWqCKJ2u50ZM2aQkpJCVVUVKpUKvV5PcHAw48ePp23btpe97WPHjvHbb7+RnZ0NQNeuXRk9evS5nXMZGYj33MORI0fQaDSEN2sGW7de9v4BMp57Dv/vv0etVhMWHu4RvcLDYdYsuEzH53vvvUdhYSEffvjheTug/pHKykpmz57N5s2bOXDgANdeey2vv/46mr+gcYeEhITEfx3JKSUhISHx/wSn0/mXuaROp1WrVhx56CGWv/ceow8cICw8HAE8nZVeeAE++wxOOhiuJmlpaWg0mkvKLdJoNPTs2ZMePXqQm5tLcXEx4eHhpKamkpqaik6no3PnziQlJREbG3tFYce+vr7cc889fPjhh3z//fe89NJLXkeWXC6nWbNmXH/99ZjNZpKSkrBYLGzfvp2ysjI0Gg0qlQq73U5RURHBwcFUVFSQmZmJyWSivLycmJgYgoOD/7SW9l58fLC99hpvulyIosibJ88tQRC8JYOFhYWsXbuWffv2sXv3bsrKyujUqRM1NTWXXALzyy+/kJycTFhYWINw4ot9nW3atKFly5bs27ePI0eOXLVcmJUrV2K1WomOjiYxMfGqbPPvwmazYTKZiImJ+buH8qcjCAJRUVFERUUxaNAgnE4nBQUFXidVfYMA8OTF1Qemt2rVioCAAJRKpfc8ry+hTUlJIS0tjV27drFr1y7UajVKpZLCwkJuuummqyJIGQwGPv/8cwoLC/H19eXpp58mMjKS+fPns2vXLj755BO6devG2LFjL6vbZOPGjXn00UfJyspiwYIF7Nmzh9TUVAYOHMjw4cPPdND5+CAAMkHwOKUsFnC74QrE8fbTp5O
Rl0fA5s1UVlZ6QupLS+Gpp+Crr+AyymPbtGlDbm4uubm5tGnT5qKfFxwczMMPP4zVaiU3N5fCwkJeeeUVxo8fT/v27S95HBISEhISp5CcUhISEhL/T9i0aRNz585lxIgR3HDDDX/pvkVR5JtvvsFn9mxGFBcTeLoQ4evrKee7gk5sf6SmpoZnn32Wrl27ct99913WNqZOnUplZSXvvfceBoOB3bt3k5yc7M1qCgoK8ubNnC+f5EIsXryYFStW0KFDByZPnowgCJhMJjIzM9m0aROzZ88mODiY+Ph4fHx8aN++PQkJCcTExLBgwQJ+++03bDYb0dHRZ7i4/Pz8vIHkMTExNG3a9Ko5vU5n/fr1zJs3j+uvv57rrrvunOtVVFQwZcoUDh48SFJSEkqlkm7dujF48GBviPC5cDqdLFiwgDfeeAOZTMaUKVO47bbbLmvCnZ+fzzvvvEPbtm155JFHLvn5f6SyspKXXnoJp9PJk08+2cD19m/kxIkTvPLKK/Tv359x48b93cP5W7FarRw6dMgrUhUXF3uXRUZGekWquLi4BkKN2+3m0KFDpKSksHv3bjZs2IAgCPTs2ZOOHTvSqVMn2rdvf1lOm+LiYj777DMqKysJDQ3l4YcfbiB0HThwgJ9//pmKigp0Oh1jxoyhR48ely1Qu91utm/fzpIlSzAYDPj6+nL99dfTp08f5PWdVIuL4YYbOHb8ODKZjEaRkZ7GFleYW+i22TjQvz++ubn4+fuf+t1x/fWecr5LfE1FRUVMnz6dQYMGXVYXzk8++YTU1FQ6derE/v37AY+L7JZbbrmszyIJCQkJCckpJSEhIfH/gr/LJVWPIAjceeedvFVczOaVKxliNJ4KWTca4eGHYcYMuAJx53TqnQ0dOnS47G3o9XoqKysxGAxERUVx4403Mnr0aA4dOsTu3bvZt2+ft0NT06ZNSUpKomvXrvj7+1/Sfq6//noOHjzIzp07MZlMCIJAXl4eoigiiiI+Pj4EBgby5JNPEhsb26As77777qN169bMmjULpVLJzTffjEwm83b6Ky0t9bq8wJPz1axZM2JjY72d/q50IuW02yn4+ms6Wq0MuEDOik6nIygoiHHjxtGpUyc2bNjAzp072blzJ23btmXw4MG0atXqjMlzfn4+s2bNYseOHcjlcqZMmXJFpXcxMTEkJCSQkZFBbm7uFXUBBFiyZAlOp5O2bdv+6wUp+Hd13vuz0Wg0XicUQG1trVegOnDgABs3bmTjxo3IZDKaN2/u7ewXHR3tDVsHKC0tJS4uDrvdTkpKCikpKd4mBJ06dSIhIeGiGk8cPHiQr776CovFQkxMDFOmTMG3vonESVq3bs3LL7/M8uXLWbNmDT/++CO7du3i9ttvP6MBwMUgk8no06cPXbt2ZfXq1axdu5a5c+eyYcMGbrrpJhISEhBOujy9Tinw5EpdoSglU6uJ/e03jvTvj6GyEoVC4WmasXSp50bG+PGXtL2mTZvi6+vrLUu8FERRpKioyCsEZmRkMGfOHPbs2UN2djZjx44lKSnpz3enSkhISPzHkEQpCQkJif8HbN++nerqaq699tqrGtp9KWg0GiZNnsyblZX479hBb4fjVKZHeblHmPr+e7hEUedspKenI5fLadeu3WVvw8/PD2jYgU8QBOLj44mPj+fWW28lIyOD3bt3k5mZyZEjR1iwYAGtW7cmKSmJjh07njdg3OVykZ+fT3p6OuXl5aSlpZGSkkKnTp1ITEykQ4cOJCQk8Ntvv5GWlkZQUNBZc6J69eqFr68v3377Lb/88gt33303d955p3fs9QJVfn4+hYWF3u/rCQsLa+CmioyMvKRJ1fFJkxi8cSN+/v7oXnoJPvnknOvWi21t27Zl2LBhDBo0iOTkZNauXUtWVhZZWVk0btyYwYMH06VLF1wuF4sWLWLjxo3Y7XYA+vXrx913333R4zsXI0eOJDMzk4ULF/L0009f9kTy2LFj3pDpG2+88YrH9U+
gXpT6N3be+7Px9/f3OiRFUaSsrMwrUOXk5HD48GGWL1+OSqWiZcuWhIWFsXLlSuLi4nj55ZeRyWQUFBSQmprKvn37vF035XI5rVu39l7/Z/uc3rFjB7NmzcLtdtOlSxfuuuuuc+YiqVQqRo8eTdeuXZk1axY5OTlMmzaNESNGMGTIkMtyTGo0GkaOHMk111zD4sWL2bVrF1988QVxcXGMGTmSpoAgk+FyOj1PuIKw89NRR0YSMXs2FaNGUVVZiVwuR6fVwscfQ3Q09Ox50dsSBIHWrVuzZ88eqqurLyqHrp7q6mqMRqNXoExISCAuLo6FCxeyadMmfvjhB5KTkxk/frx07UhISEhcAlL5noSEhMR/HKfTyQsvvIDFYmH69Ol/myhVT0pKCt9/8QX37ttHokzm7dgEQPv28OWXcAXhsVar1Rsc/thjj132dn766Se2b9/OfffdR9euXc+7rtFoZN++fezatYvDhw8Dnklhx44dSUpK8gYdm81msrKySE9PJysryxuKrNVq8fPzIzMz0+tyqBe0NmzYwK+//sqECRPO29EtLy+Pzz//HLPZzNixY8/qiHM6nRw5csQrVOXl5WEwGBqso9VqadGiBbGxsbRo0YLo6Ohzimtuq5XjMTGIDgdRUVGeUp4NG+CkoPdHfv/9d1avXs0jjzzSIIRZFEWys7NZs2YNBw8eBDyindFoxMfHh8jISCIjI8nIyGDMmDEMGjTonO/DpfD999+TnJzMgw8+6J1oukURo91Jnd2J2eHCLYIAyGUCvioFfioFGoXMK2J99tlnZGZmkpSUxD333HNVxvV3U3+cnn/++Qt3XZPw4na7KSoq8opUhw8fJi0tjaqqKpKSkujdu7c3jyo4OBhRFDl69CgpKSns27ePsrIywONMio+P9wpUer2eJUuWsGLFCgCGDx/OyJEjL1pIdbvdbN68mYULF2Kz2WjUqBF33HEHLVq0uKLXe/ToUebPn09OTg4KvYIHju6nLlSLQyEQGBiI6s578W/cmiBZEBrZlQeCV8yfj/mhhwAIDw/3dHP18YGZMz3i1EWyc+dOZs6cecHP1D+SmprKV199ddYy5fz8fH766SdKSkpQq9WMHDmS/v37/3UNJyQkJCT+xUhOKQkJCYn/ODt27KC6uprhw4f/7YIUQKdOnSi89lp+dLnwz8igBZ5JPwCZmfDhh/D885e9/f379+Nyua44bLreKfVH0eZs+Pr60rdvX/r27Ut5eTm7d+/2/tu0aRMWiwWtVovb7Uan0yEIAmFhYfTs2ZOEhARiY2ORy+XMnj2bLVu2eEUogPj4eABycnLOO4GKjY3lqaee4pNPPmHevHnU1dWdMXFVKBS0aNGCFi1aMGjQIERRpLKy0uueys/P5/jx417XEngmyI0bN27gpgoMDEQQBNI2bybYbsdXrz+VLXOee125ubkIgnBGgLYgCLRt25a2bdty6NAh3n77bXbu3AlA8+bN6d27N7t27cLPz49rrrnmgsfjYrnhhhvYu3cvixcvJio2nuNGG6UmG063iFsUOX3KL+I5T2UyAa1cRmM/LbbyE2RmZiKXy//ynLY/k6qqKkAq37tUZDIZ0dHRREdHc+2115KWlsb06dOJj4+nRYsW7Nmzhz179gAeh2J9HtXgwYMZOXIkxcXF3tK+emFr1qxZ1NTUYLVaCQ0N5d57770kIaV+XP379ycxMZE5c+aQkZHBO++8Q9++fRk9evRld5Dza+THNZOvIbA2kGpLNdvtkQiiCCLIFQrwPYRgLEAhKPCT+dFS1ZIYZQw62eWV9IWMGUPJoUPYPv6YsrIyIiMiUJhM8MQT8OOP5xTD/0h9wHl2dvYlvZdFRUUAZ22OEBMTwwsvvMCKFStYtWoV8+bNY8+ePUyYMIFGjRpd9D4kJCQk/j8iiVISEhIS/2GcTicrVqxArVYzePDgv3s4XkaNGkVRURFfOp08m5NDqNt9auGqVfDcc5ccYFvP1ciTArxZS6e
X710MwcHBtGrVCpvNxpYtW8jPz6e8vByHw+ENHh8+fDhDhw4lNDS0wXPHjh1LXl4e27dvp02bNnTp0oVGjRrh4+NDTk4Ooiie1x0RFRXFM888w8cff8zKlSsxGAzcfvvt57xbLwgCISEhhISEkJSUBHicZnkFhRw+foLiklJKS0sxWa0kp+9n0+YtiG4XAQEBxMTEcGjVKqa43V4BD/AE158Fh8NBUVERTZo0OeckODU1lTlz5qBQKLjhhhto1qwZBw8e5JdffuHo0aMMGzaMkpKSq9YxLzQ0lD5Dh2NQ+bGtqAyFUoVcEFAIAjKZcMZ77RY9YpXJ4eJgpZG6OhdxfQYTpYaQkJCrMqZ/ApWVlSiVSim4+Qpwu90sWrSI4OBgXnzxRRo1akRdXR05OTneTKotW7awZcsWBEGgadOmXpFq2LBhVFZWsn37dr799luKiopQKBRotVq2bduGxWKhY8eOl1wiFhgYyJQpU0hNTWXu3Lls2rSJtLQ0br311ksS8WtcNeyx7uGY8xhO0YlMIyNIE4T7mA2hstrzOaVQ4NAFoVVqcOGiylXFLssuUq2pxChj6KTphFamvfDO/kDE1Kkcz83FuXw5ZWVlhEdEID96FN55B15//aK24e/vT+PGjcnOzsbtdl+0m+nIkSMANGvW7KzLlUolI0eOpHPnzsyaNYuCggJef/11hg8fzvDhw/+UJhMSEhIS/wWkT0cJCQmJ/zD/NJdUPTKZjPvuu4833niDj10upubno3O5PAtbt75sQcrlcrF//36aNm16SVkhZ+NSnFJWq5WsrCwyMjLIzMzEZDIBngyWsWPH0rZtW5RKJRkZGaSlpbF161a2bt1KTEwM3bt3p3Pnzvj4+KBUKpk4cSLTp09n1qxZNG/enJCQEOLj40lJSaG8vPyCQcXBwcE8/fTTfPbZZ2zfvh2j0cjEiRPPmT0DnvK5GpuT4joLVRYHZnUQiuhAmjRvQxPA7Xbhcjhx2K3UlpdyIvcgm7dvx5iaisNu50RxMSq1GoVez4nsbFq0aHHG+VZQUIDL5TprqLjBYGDu3LmkpKQgl8u5/vrrGTZsGAqFgsrKSu677z58fX0xGAy88cYbxMfHM2TIENq2bXvZWVBOt5u8KhMB7bohGOowGgyEh4UiE849QZUJAjJBQCEDk8WMw+Ekqm0HQvz9KDFaCfdR/ydCjquqqggKCvpPvJa/ApPDicHmpM7mKft0uN2Ul1cQnJBEm0B/RH0gBpsDva8vXbp0oUuXLoCnG+WBAwe8ItXq1atZvXo1CoWC8PBwDhw4QFBQEHFxcXTv3p2ioiIOHz7M4cOHmT9/Ps2aNaNTp0506tTpogPMBUGgU6dOtGrVioULF7Jlyxa+/PJLOnbsyLhx4wg4vTPqH3CLbg7YD5BiTcEiWlAJKnwEn1PnSeOmGN1gNRqx6nS4qqqpqzMSGBiIj9YHt+jGLtrJtmdz1HmUHtoeNFU0vbTzTBCI+uorjl97LY7MTMrLywkPC0PYsAHcbrhIgalt27asXr2awsLCiypjrA85DwgIuGAzi8aNG/Pss8+yYcMGFi9ezLJly9i3bx8TJky44pJJCQkJif8ikiglISEh8R/F6XSycuVK1Gr1VcvguZro9XomTZrEu+++y3uJiTwdFoY2IADuu++yt5mbm4vZbL5il1T9+ODcTqnKykoyMjJIT0/n0KFDuE6KasHBwXTr1o0OHTrQsmXLBnfHu3XrhtVqJTU1ld27d3Pw4EHy8/P55ZdfaN++PUlJSbRv356xY8cye/ZsvvvuO55++mni4uJISUkhJyfnoiafer2eJ554gq+++or09HQ++ugjHnzwwTO6e4miSKnJRkGNmRqbE7coIsNToqaSyU6Vr8lluBVKXBoNOr0/EdGxNO/cg6rEbih++h7x+FFsVivlgsBnn30GQGRkJC1atCAmJobY2FgOHToEeMoMT9//rl27mDdvHmazmejo6DPKXXbs2EFYWBj3338
/QUFBrF27lpycHHJycoiMjGTw4MEkJSVdkgvB6nSRUlJLtdWBXJChkoHBasVsMuHrezHuIJHa6hqcDgc+WjV2N6SW1hIdoCM+yPdfLeY4nU5qa2u9XeMkzo7LLVJmtnHUYKHK6sDlPq1sVRQxuiCkeQw6vR9ppQbkgoBeraCpn5YIHzVKuYyQkBD69OlDnz59EEWRY8eOeR1Uv//+OzabDb1eT0BAAAaDgZ49ezJ69GiOHj1Kamoq+fn5FBUVsXDhQho3buwVqCIvooupTqdj/PjxJCUl8fPPP5OamsqBAwcYPXo0ffv2PeMcdogONpk3UeQoQkDAVzjLeS6T4fD3xyQIhIeHYzQaMZlMlJWVodFoCAwMRKPS4Bbd1LnrWG9eTztVO7pqul7aNaPR0GjuXIoHD8Z24gSVlZUEDxiAcAn5TW3atGH16tVknxTRL0RVVVWDkPMLIZPJGDRoEImJifz8888cOHCAd955h379+jFq1KjLLpmUkJCQ+C8iiVISEhIS/1F27txJVVUVw4YNO6Nl+D+F5s2bM27cOH7++Wc+0ul4+tFHT4kLBw7AwYPQqxdcpAsgLS0N4IrzpOBMp5QoihQUFHiFqOLiYsDjPIiOjiYhIYEOHTpcsHudRqOhR48e9OjRg5qaGvbs2cPu3btJS0sjLS0NrVZLp06diIqK4vDhwyxevJju3bsDcOjQIfr06XNR41er1Tz44IPMnDmTPXv28O677/Loo496nRBWp6cE7YTRilsEZb0QdY6xywWQn5SpLFY7INCk32AK2ncidtlvhG9cg9rfnw4dOpCfn8+JEyc4ceIE27dvBzyZWE6nk8LCQvR6PXq9nl9//ZXs7GyUSiVjxoxhwIABDUppzGYz69evx9fXlwEDBqBWq+nWrRs5OTmsXbuW/fv389NPP7Fo0SIGDBjANddcc0FHoNXpYu+JGmptTtRyT9C+ws8PY10dNbW1+PhcWFQyGk04HA7kchl+fn7IBBl2l5vD1WZEEVoF/3uFqepqT/nVf6kc8WpTbraRXVGHye5CxHPtKOWnrp3q6iqsxjoCgwLRKRWIoohLFKm1OsiwOjikkBEf5EuUXuN9jiAINGnShJKSEqqrq+natSuNGjWiffv25OXlkZaWRmpqKuApw2vdujWdO3fGarVy8OBBcnNzOXbsGEuWLCEyMtIrUEVFRZ33XIyNjeWFF15g1apVrFy5krlz57J7927uuOMOrzjsEB2sN63nqPMoakGNUji367L++pXJPKKbXq+nuroaq9XKiRMn8PX1JSAgAF+5LzbRRoYtA5fooru2+yVdM0J4OBFr17Lj/vs5UVaGz6BBjLjoZ3tet0qlIisr64zQ8rNxodK9cxESEsKjjz7qFd83btxIeno648ePv6LusBISEhL/JSRRSkJCQuI/yD81S+ps9OnTh4KCArZv386vv/7K+PHjYdcuePRRcLlAq4WPPoLOnc+7HVEUSU9PJzg4mKioqCsel16vx+VykZuby48//khmZqbXNaVWq+nYsSMJCQm0b9/+srN3AgICGDx4MIMHD6a4uNgbjr59+3acTifZ2dkUFhYSHByMXq+/qFyp01EoFNx77734+fmxfv163nnnHR599FGUfoGkldZicbpRygTU8kvrEGWorcVps+Ejl+HQ6si+5U4q2ibQIWUnU6ZMQRRFysvLveHp9TlZarWaVatW8cMPP1BYWIhWq6V169aMHTuWzp07n5Htsn79eiwWCzfeeKO3A6AgCLRq1YpWrVpRXFzM2rVr2b17N4sWLWLFihX06tWLQYMGnVVUcYsiaaW1DQQpAJlMjp+fPzU1NdTVGfDzO3d5jiiK1NTWAJ5smvpyP5VchsPlpqDWjEYhJzrg8sKc/24qKysBKeT8bDhcbnKqjBwzWHGJYoNzqB6n00FdXR1KpcL7uSCczClTyDznoM3lJqPcQKnJRptQPVqFHFEUWblyJYsXLwZg8ODB3Hzzzd5rwmw2c+j
QIW+p344dO7z7jIqKomfPnrjdbsrLyzl8+DDLly9n+fLlhIWFeQWqpk3PXiqnUCi47rrr6NKlCz///DO5ubm89tprDBs2jOHDh7PNsY2jzqNoBA0K4fxTh/rtu0/mBKrVaiIiIjCbzVRXV3vdU/7+/h7hX4AsexY6mY4OmktzuMrDw+n666+8++67HNu8mYBmzegVHQ2VldC27XlL+RQKBfHx8ezfvx+z2XyGi/SP1IecX6ooBZ73pEePHrRt25Zff/2VvXv38umnn5KUlMSYMWOk7DYJCYn/9wiieJ42ORISEhIS/0q2bt3Kzz//zNChQ7nxxhv/7uFcEIfDwbvvvktRURF33nknPTdtgt9/P7WCjw989x2cJY+onqNHj/L6668zYMAAbrnllsseS3V1tdcN9fXXXwPQq1cvgoKCSEhIICEhgfj4+PNmNF0JoiiSm5vLrl272LBhA8nJySiVSho3bowgCHz44YdnzWW60DZXr17NwoULiYyJo9uocYgyBRr5uZ1R58Jmt1FyogStVkOYXA5lZbhUKuy+fgRXldF5yDUo/yByFRUV8eqrrxIaGsqJEyc4dOgQFouFZs2aER4e7h1DcHCwt8Nfo0aN+Pzzz5HL5bz55pteUeps1NbWsnHjRjZv3ozZbEYQBDp27MiQIUOIPq1VfH61iYOVRlQyGXJZw9ctim6OHz+OiGeSf65sKYOhlurqGhQKBY2iGiHQcDs2lwu5INAjKgi9+t9372/Hjh38+OOP3HPPPd7wewmwOV2klNZSZXGcFJjODMIHKC8vw2y2EBYWilZ7bqHD6XbjcIv4KuV0CPVl8fxf2bFjB4IgcMstt9C/f//zjqe6utorUB04cMDr6KzvlqlSqTAajZSVlXkFouDgYK9AFR0dfdbxi6LI9u3b+e233zCbzTTu1ZjQa0PRKXXndUjVYzQaqaysJCwsDK22YZC5KIoYjUZqampwu93I5XICAgKQ6+TIBBkjfEYQqgg9x5bP/1689dZbRGZnc/+RI+iUSujY0XMz4zzOyY0bN/LLL7/wwAMP0KlTp/Pu4+OPPyY7O5t33323YWOHyyA9PZ05c+ZQU1ODr68vY8eOpVu3bv9ad6WEhITElSKJUhISEhL/MZxOJy+99BJGo5E33njjX3MXtrKykjfeeAObzcbLXboQ9vnnDVcICYEZM+Ac7bWXLVvG0qVLefzxxy8pD0cURY4cOUJ6ejoZGRkcPXrUuyw7OxtfX18++eQTYmNj//JJg8Ph4Msvv2TevHnU1tbidDpp2bIlAwYMoHv37iQmJl5SNsmmXckUuVSotDr8dFq0mkvvflVWXobFbCEiIhx1ba3HlQC4FArsoWGEhgXTJTKggYNk9erVfPTRR+h0OkJCQujQoQPjxo3DaDQ2cFNVV1d7n1NUVMSxY8fo1asX1113HS1atKBFixZnTHRPx2azsWPHDtauXet1/MTGxjJkyBCax7dhd3E1LpFzOsPq6gxUVVXj7+9/1sBnt9sjXLndbkJCQs5aKiiKIlaXm2Ctkm6NAs9w0vzTWbp0KcuWLeOpp566ZPHzv4rd5WbviRqqrY6zuqPqsVotlJaWodFqCA8LA85/7N2iiNXpwlBZTvJvP+O2WbjvvvsuOreoHlEUOXHihDc0/dChQ1itVsAjUvn6+mK326mtrUWlUiEIAgEBAXTs2JHOnTsTExNzhkvRYDAwd/FcqjtWo/BRoMGTCXWhTnUmk4mKiopzXh/guY5qa2upq6tDFEVUKhXaYC2N1I0Y4Tvigm6ss3Hs2DGODBtG4/JyIsLDUalU0KcPvP/+OR1TZWVlvPjii/Tq1YsJEyacc9uiKPLkk0+iVCp5++23L3lsZ8NisfD777+zZcsWANq1a8f48eMlh6KEhMT/S/59t/AkJCQkJM7Lrl27qKysZOjQof8aQQo8d/EnTpzIxx9/zMeHDvFKr14oT+YRAVBRAQ8/DN9/D2cRDNLS0tDpdBc1kbbb7Rw
8eJCMjAwyMjKora0FPC29691Q7du355NPPuH48eP4+/v/LXexlUolDz30EE6nk+TkZIqKilAqlV53hFKppGPHjiQlJdGmTZvzThhdbhFl4xgCjBaqy8ooN9YRHHzuiePZsDvsWMwWNBo1arUG3KdEJLnTicpup8Jsp6DGTEygZ7tHjhzh888/p7CwkAEDBnD33XfTuXNnBEEgKCiIpk2bel0h1dXV5Ofnc+DAAfbv349cLsflcrFs2TLAUwbTqFEjr5sqJiaGkJAQ77FRq9X079+fvn37kpqaypo1a8jLyyMvL4/O140hNCYevebcjitfvR6DwUBdnQG9nx65TN5gucFQi9vtRqVS4uNzdheMIHiyuaosDkpMNhr5/rsCjauqqgDP9SjhESQyywwXFKRA9IiqAic7f17488LtdlFTUY5crSZh2Cj6x0YR3azpJY+x/rpo1KgRAwcOxOVyUVhY6BWp8vPzvc4ki8WCTCYjPz+f0tJSNm7ciJ+fH4mJiXTu3Jm4uDhkMk9WWsJNCaSZ0zCVmzA6jVgsFk8nvfN8ZtR/Bp3vnrdMJiMwMBC9Xk9NdQ0mswlHmQO7v5295r10D+9+ye9B48aNUffti3n+fMrKyoiIjESxdSt8/rnn98ZZCA0NJSQkhOzs7POWRVdVVWEyma5KA416tFot48ePp1u3bsyaNYv9+/fzyiuvMHr0aPr16ye5piQkJP5fIYlSEhISEv8hXC4XK1asQKVS/eOzpM5G69atGTVqFAsXLuSrli15KDER4WR4OQBFRfDYY/Dll56sqZNUVVVx9OhRkpKSkMvlZ2wXPCVe9SLUgQMHcDgcgCcXqE+fPiQkJNCqVSvPHfaTnN6B72Jbrl9tZDIZ99xzD8eOHePgwYMEBwfz+uuvk5yczO7du0lOTiY5ORm9Xk/Xrl1JSkqiWbNmZ0xqDteYqLE68VGrUYWHUVZWTkVFBW63C73+4spRDCfFO29L9JMdB+uRC+AUBPKrTQSqZGxavZI1a9ZQVFREdHQ0b7/99nlD9wMDA+nSpQtlZWW0a9eOESNG0KpVK6+bKj8/n+PHj3P8+HGvw8DPz4+YmBhvp79mzZqhUCjo3LkznTp1Ii8vj/Vbt6ELi8RYW0tdlQO9Xo+v/kzRSUDAPyCAyopKamtrCQo85VpwuZwYDJ5MsYALiA5ymYDdDccMln+dKFVZWYlMJjurU+z/I8frrJSabShlwnldb0ajEbvdga/eF5VSdc716rHbbZSVleFyuREEgbCmzbFfpYYUcrncK9ped9112Gw2cnNzvSLVsWPHUCqVVFdXU1lZSVFREYcPH2bDhg34+/uTmJhIQucE8hrnoVao8W/kyVszGAxUVFRgMpkICgo6a8fLP2ZKnQ+FQkFIaAh6mycM3eF0sK5oHblrcxl5w8hLLpML/d//qNm9m9qiIo8wFRGB7McfISYGrr32rGNt06YNW7ZsoaSk5JxdC+vzpJo2vXTB8EK0bNmSF198keXLl7N69Wp++eUXkpOTmTBhwkV1UZSQkJD4LyCJUhISEhL/IepdUkOGDPlXuaROZ+jQoRQWFpKamsryAQO4zmCAw4dPrbB/Pzz3nKcs4+SkKD09HaDBnez6Fuv1ZXn1EwuAJk2a0KFDBxISEs4Z/gunOvDVB5z/XQQEBHD33Xezb98+9u7di9FoZMSIEVx77bUUFRWxe/du9uzZw4YNG9iwYQPh4eEkJSWRlJRESEgIZoeTwzVm5IJnYq1Wa4iICKe0rIyqqmpcLtdJEeLck26H04HJbEalVp0qGfzjxFMmRyUTMNrt/L5lN3tWr0alUhEXF8eQIUMuqguk1Wpl7dq16HQ6hgwZgkajIS4uDvAc05KSkgYiVWlpKampqd7OZAqFgmbNmnkn5S1atGDgqDEcrKjDZjRgNNqoqamlttaAr68Pej8/lIpTWTm+Pj4Yag0Y6+rw8/NDIfecYzW1tYiiiEajRnsRJZNKmUCVxYHB5sBP/ef
kj/0ZVFZWXlSZ1v8HLE4XOVVGABTneT/copuamhpkMuGixDyz2UxFRQWiKKLVaggJDcXphsJaC2E+GgI1V/d8UavVtGvXztvtzWAwkJOT43VcVlRUUF1dzeHDh3E4HGRlZZFckUz0zdEoXUpErUhAQAA6nY6qqiosFgvFxcUEBAScIRxdjFPqbOOLiIigzlKHLErG3vV72fPCHoYNG8agQYMa3Cg4L40aEfDNN4jjxmGorqa8rIyw8HCE11+Hpk3hLN3u2rZty5YtW8jKyjqnCHS5nfcuFqVSyahRo+jSpQs//fQThw8f5vXXX2f48OEMGzbsrOKfhISExH8J6VNOQkJC4j/C6S6pIUOG/N3DuWwEQeCuu+6iuLiYpZs3Ez15Mm3few9KS0+ttH07TJ8OL74IgkB6ejoKhYK4uDj279/vdUTVZxQpFAratWvnLc3zlNdcmHphrz5E+O+kffv29O/fn19//ZUvv/ySt99+G0EQaN68Oc2bN+fmm2/mwIED7Nq1i7S0NJYsWcKSJUuIiYkhYcAwnP5haBWnnEFKpYqIiAjKSkuprTXgcrkJDg7iXMKUodYAIvj7+Z9a5w9OKVEmUF1djdlmQxMUSv+hwwnx82X+/PkXnU+0ceNGzGYzI0eOPCMvSxAEIiMjiYyMpHfv3oDHoXL48GGvSFVYWOj9vp7ut9yNb0g4apWKsPBwrFYrxro66uqM1BmN6LRa/Pz8ToapCwQE+FNeXkFtTQ3BwSEeQc7oEScutjRLLgg43G4qLf8eUUoUPSVoLVq0+LuH8o/gqMGC1elGc4HulIbaWlwuN4GBAWe47xoiYqir83wuiaDX+57MEBJQyjxZZIU1ZgIjzt398Wrg5+dH165d6dq1q7dTZn1oenZ2NsXFxaiaqrDb7RiqDNTIa1Aqlfj6+OLn54fT6aS2tpbq6mpMJhPBwcFe4ehSnFJ/xFfji6AW6DGyB7t/2M3ixYvZsmULo0aNIikp6eJK2jp1IuDNN3E9+SQmk4nKykqCg4MRnnwSZs2CPzheW7VqhUwmIzs7m0GDBp11k1fSee9SaNy4Mc899xzr169n8eLFLF26lH379nHHHXdI16SEhMR/Guk2mISEhMR/hF27dlFRUUG/fv3+tS6pejQaDZMnT0atVvPNokVUvPwy/LGUY8kS+PJLSktL2bx5M+Xl5fzvf//j008/ZfPmzTidTnr27MnkyZP54IMPePjhh+nbt+9FC1Lwz3FK1TNx4kR8fX3ZtWsXu3btarBMLpfTrl077rvvPt577z3uvvtuWrduTWHREU6Y7NTWVFNRUY7ZbPK6GBRyBeEREajVnk5d5eXliKKI3WblxfsnMKF/EhOH9+Op22/icI4nw0qnO1U2mbo3mcHX9ee3RfNxu91UVldTV1eHTHTjFxBA4jUDvcHxsbGx3ufdeeed+Pn5YTKZGryG011S/fv3Jy0tjV9++aXBOomJid7jsWvXLnr06MGdd96JTqdjw4YNTJ48meeff54xY8bQqVMnAoODUWh9sJpNVFZW8dX0aaxe8AtKpRK1WoUAmMxmSkpKKSkpwWw2odXp+OGdV/l95rc4HA5qamoQRdD56FCp1NzWuxN52ZnnPVaCILBz7Uq27dhxUcd25syZjBo1CoBNmzaRmJgIQHFxMX369LmobVwptbW1uFwuKU8KcLpFjhksyATOK4Y4nU4MBgMKhQL9aZ9Ruzeu5YHrBnD/tf24d2gfVv82l6qqaqqrPIJUYGCAV5ACzz4UgkCZ2YbZ4aJv377ExsZe0HG0adMmVq1a5f35j+fL4sWLad26NYmJiWRmZja4fur3GxYWxjXXXMMDDzzAhx9+iEwuQ3SLaFVaVCoVoihiNpspLSvl2LFjVFRUoFAo+Paeb0ldmcqJEyeorq5GFEWvU+rB5g9iqml4fZ+NzbM3c/zQce9YAIJignj99dcZOHAgtbW1/PDDD0yfPp2HH36Yxx57DGh4vezdu7dBx1Xhpps
InDQJtUaDyWj0ZAZWVsKTT8LJAPh6NBoNMTExHDp0CLvdjtFtpNRZSrGzmBJnCZXOSoqOFp3VFfZnIJPJGDx4MC+//DKtWrWiuLiYd955h3nz5mGz2f70/UtISEj8HUhOKQkJCYn/APUuKaVS+a/MkjobkZGR3HnnnXzzzTd8tmIFU995B9UjjyDa7TgcDixmM+bp05kzcyYH6+po2bIlISEhXjfUuVqeXwr/JKcUQKNGjUhKSmLHjh3MmTOHFi1aEB4efsZ6Go2G7t270717dwrKKkkrN2ExGjBbbZjNFmQyAZ3OBx9fHzRqNeHh4ZSXl2M2WygtKyXAz4/rbr2Dbv0GIQgCs7/8hB/eeY13Z/9G/STaaDDw7Xef061LEi6XG6fTiVMU8ff3w98/ALvbTbHJk2Wj0+mIiooCPO/l0qVL6dChA/Pnz+euu+7yjnvjxo2YTCauv/56lEolaWlpLFq0iHHjxnnXSTstY+zHH3/ktttu4/nnnwc8pZ/1NG/enEGDBlFtsbPjWCUuux27SsUtkx7G7nBgtdZP8ERcLhdulwubzYrJZEKlUnsm16JIRUUFdrsdQeCSc5Z2r1tJbbsE7rj27A6Mi6FRo0Zs3br1sp9/KdR3LJQ6gEGZyYbV6UZ1AZdUdU014kmRSTh5bYiiyJuPT+H9uYuIad2W4qOF3DOoFx8t6oDOx5eQkGB0ujPDwhUyAavLzY70/eTm5hIYGMjmzZvp16/fWfftdDrZtGkTNTU1DBs2DDjzfPnqq6946aWXuPXWW4GG18/ZkMlkfDv7W34z/oYcOWH+YdhsNiwWC0ajJ+y8/meny4nNbsNsNmO3271ZU5fCljlb0PnriIrzfD7IkVPhqkCn1zF27Fj69evH77//TmpqKnv37sXPz4+SkpIG2+jSpQu//vprg8fkTz9N8OHDlC9bRm1NDQqFAt8DB+DVV+GNN+Dk7wa7aKfpNU2pa1vH7OrZoPGUY4qICAiIbpHw+8LR2rTss+4jRhlDgDzgkl7j5RAaGspjjz3Gjh07WLBgAevXryctLY3x48fTtm3bP33/EhISEn8lkiglISEh8R9g9+7dVFRUMHjw4L/kbu5fRefOnRk8eDCrV6/mrRUr6DloEPHffYf7ZEg5gsCovDxq4uK4+6OPrlqJwyOPPMKSJUsoKirixhtvbOAssNlsPPnkk6xevRqNRkOHDh34+eefL2q7oijSokULWrRowfr168+77qJFi4iIiKB7d08nqr179/Luu+8yZMgQjh07RnJyMu3ataNRo0asWLGCcePGnVW8EJUaVGonfrpwnr9rHHc8/gwBoeEYjUaMRiMKhRwfHx8CAgKQyQyYTGbuHNWD1775EUEQcLldNI6Jo7L0O+qqK7m1RweWZOTz6cvPctvY29m6bbO3c1VwWBjKk6Hpi2d+Q+/hIzHaHMTFxHgFwrlz5zJo0CBuvfVWPvjgA+666y7uuusu2rVrx9NPP01UVBQLFy5kxIgRLFu2DIPBQGJiIt27d+err75CEDwlgl999RW//vorWq2WX3/91esuWrRoEYmJiRw/fpxHH32UrAMHsbrc9Bo8nHuefJ63n3qImNZtuW78XezZspHZn36AzWbF6XAw6OZb6T5oGFar1Tv5Li8vQ6PR4Ourx26zYbfZEEU3FosFk8nI1LvGEdu2PYcy06gqKyOxR2+mvPwGe7dsZNe61aRs28zquT9y8803M3LkSFauXMn8+fNxOp3odDqefPJJWrZsSX5+PtXV1ezevZvs7GzMZjO7d++muLiYCRMmsG7dOgC2bNnC559/jlKppHv37ixdupSZM2fSqFEjjhw5wocffugJjXY4GDVqFGPGjAEgKSmJyZMns3nzZqqrq7nvvvu4/vrrASgoKOCDDz7g+PHjGAwGTCYTjRo1oqKigvfff58TJ05gs9m45pprmDx58kWd639V97Dz7cdmszF16lQKCgpQq9UEBgby/PPPewOrJ0yY4G144HK
5yM/P55dffqFly5ZUK32xK3S47a4ztjvn8w9ZPncWQaFhWK0WmsbE8fgb73mX17ubKsvLSNm5lcMHD+Cj90euUODv7wcImM3ms47ZKcj46auvGDx4MCEhIbz77rvexgLFxcWMGzeOm266id27dzNixAhmzpyJy+Vi5cqVDBgwgOuvv55bbrmFrVu38vbbb7N582YyMjJ46623+PHHH+nYsSNbtmzBz8+Pw4cP8+6771JRUQHA2LFjGTNmDHdNvItuD3ajy9AubPt9G+u+W4fT4UR0i4x6ZhSt+rbCbDYjCAJut0eQttlsmEwmb8n06TzS/hH6jOtD5sZMaktr6TehH6OfHs3GHzdyOPUwPz//M7+/9TtjXxpL28FtWfzJYj5Y8gFup5uwsDC+/vprBg4cyKRJkygrK2PatGmIoojT6QQ8brHHHnvMK7h9/fXXvP/++/hqtdzg68u03FwKBQG5XM7+hQt5duFCany0mBwmhj4xlDbXtcHhcPBg3IMMmTSE9NXpmA1m7nj7DuKuiUOmkFGUV8QnD3yCzWhDiZIXX32Ru2+8m9zcXB577DHKysqw2Wzcf//9PPTQQ+c7ZS8aQRDo1asX7du3Z+7cuaSkpPDJJ5/QvXt3xo4de0mdUyUkJCT+yUiilISEhMS/HLfb7XVJ/ZuzpP6IyWQiMzPTm3eybds2kmNiuLFLF27IyECr06HRaDh+/DiTiouJqqqCqyRK3XzzzTzzzDNeQeh0p9Rzzz2HIAgcOnQIQRDOuGt/PtavX09AQAAZGRkUFBQQHR191vWcTqdXXKkfQ70bYNu2bezevZutW7fSp08fbrvtNqKios7ppjHYPRM3QRB468d6N4GI9eQk0mwyUVtroLbWgEqlRKFUIIoiFZWVRJ/MUlq7YA7dBw6l3iW1cdki7HY7Hbv1YOu2TchkAkqVCnx8WDp7JhWlJaz9fR7xHbvhGxTSIE/q+++/59VXX2XgwIFMnjyZnJwcAPLy8gCPS2fAgAF89NFHzJw5k0WLFrFo0aIzXtdzzz3HwYMHSUxM9Jb0nM7tt9/OkCFD+OSHn0ktrcVaU+VdJggCGo2WxKSe9Bw4FLlcRlV5GVNGDqF7/0HeQHa3243T5cLhcGC326io8DiJXC43NdU1VFRU4nA4KMrP4/F3PsPlcvLCnWPZtXkjsW0TSOzdl6YtW+FnN1JZWcn06dNJTU1lyJAhyOVyTpw4wSOPPMKYMWPIycmhqKiIGTNmUFxcTFVVFTNmzKCurg673c6MGTOwWCzMmzePkSNHEhAQQE5ODrW1tcybNw8fHx8WLVrEgAEDaNeuHU6nkxkzZpCXl+ftHJmamkrv3r2pqanhrbfe4sSJEwDMnz+fzp070759ewoKCqirq+O7775jxYoVJCYm0rt3b9xuN6tWreLo0aP/mnwbp9OJVqtl6NChCILA/v37mTJlileM69Wrl3fdw4cPU1VV5W0W0HHkrejDIrGbjGds12Qy023AEEbdOxmnw8GP777ODx+9w7gpj3vXuf/FN3j9oftQabSYjXVMfOE1lEolNTW1LJzxFQHBIfQfefMZ25YrlaxZtIAhgwZSUVHBunXrCAsLQ6VSUVdXh9FopKioiGuuuYa6ujqaNWuG3W6nZ8+euN1ufv75Z+x2O19++SV+fn4EBATQrl07mjdvzpdffgl4rkGlUuk97v379wfg6NGjfPHFF5RXlGOz2aisqCS8fTgTZ070iMHHq/ni9i94dvWzKFQKZDIZGrUGtVqNw+E4eZ3YvaLc/v37CYkMweVyUVNew/NLn8dqsPJUl6foO74v/e/sz7Z52xg2eRhdr+sKwJZ5WyjJLWHD9g34Kf2YNWsWU6ZMYfny5fTu3ZucnBwCAgLYuXMnR48eZfXq1Q06ru7fv59XXnmF1NRUIiIiePmRRyA3F0EQyCstZaLRyFe39qXgtRs5YTXyRt83eKXbK4hmEYvBQnS7aG753y2kr0vnp2d/4sX1L1JXUsd
3d3/Hoz89SkzPGKwuK3WGOrbVbePRWx9l9s+zadXKI9R1796dpKQkunbteqWnrxc/Pz8eeOAB0tLSmDNnDrt27SIrK4tbbrmFLl26/GUCsISEhMSfhSRKSUhISPzL2bVrF+Xl5QwaNOhf7ZKq76xWH1Ken5/vndz07t2bgwcPotVq6fnSS4Rs2wZffYXFakV0u/FVqz15ITNnwjmEnkvhmmuuAU51kqp3SplMJr7//nuOHTvmnQhERERc9Ha///57Jk6cSE5ODjNmzOC1114DPHf6H3zwQbp3786+fft4/PHHWbJkCWvXrmXmzJk89NBDxMbG8thjj7F27VrWrVtHbW0tW7duJSMjA71ez6233kpNTQ0AO3fu5Omnn/ZMYu1Oxj/6DP2GjeC23p149ZsfiW3TnqWzfmDj0t9xOhwIMjkTHn+WJi3jEUURERGTycSRI0dYOedHyoqP8b+Pv6S2sgK328WPH7/Lsx9+zQ3X9yc2No70/RksWrGEOx57muvH38VPn7xHZVkJ7z0xGbfTQY8vv8DhcDBp0iTS0tJ49tlniY+P5+abb2bGjBm4XC6vONW3b1/v+1UvwNXz8ssvA9C/f39GjBjBggULvNlLq1evpqSkhFtvvRWdTkd6ejqrV69m6fpNPPHoI3To1oOsfXsoPX4UvwBPiZGhuoqp99xKXtZ+QMRht7N99QoG3ngLVpORHauWkbZtEwIw+p4H6DFo2MnzQkCn0+Lr64NcLuea4dfhH+Bxs0S3ao2hsty7TKlUkJTomaDOmzePuro61qxZ431Nbrebzp07e7OJunfvzsGDB0lPT6d79+5UVFSwaNEiunfvTmpqKtHR0d5SrW7durF9+3Y6duyIzWajtraWnTt3erctk8kIDg6me/fufPPNN9xyyy3ez4hly5YRHx+PxWJBLpczfvx4du3ahclkol+/fmg0Gr777juEk40EAOx2O76+viQlJZ33PL+UzmtX8pxLJTQ0lJycHLp06XLGsu3btzN48GDPMpkM/8goBEDpoztjXZVKiVUmQyaToffzo2PPPqTt2IKptppnx9/IgFE3s/a3X+gx+Fp0ej/Ki4/y8wdvMu+Lj3h0+vuMf/gJADYtXci+LRt48t1PWT57JttXr6CutgaL2UyLuHiaRzUiMzOTsrIyCgsLveWVERERBAcHM2PGDDp37ozVaqVTp06888479O7dG7lcTqdOnQBPSWyLFi281wlAQkLCyW6BMq+T7nQ2btuIXC5Hq9NSkVfBgv8toKakBrlCjsVgwVJpITwmHLlMjkarITAwEFH0fGZYLBasJ3ObnC4nNTU1uJwumnZvSlFREQqFgoDIAA6kHCBeFY/T6cRuNuOwWlGo1exbvo+ClAJ6d+2NDBmu05opCIJAREQEr776Ks899xxHjx7l999/x2g0YjabEUWRDRs2MGzYMO/n8sRnnuHVTz8lJCyMtSeOk4uLsTvTEQdlIJMrEASB0rxS1P5qFGoFnUZ43reWXVtSWlCKzWbjSPoRIltG0rpXawCUciXqQDXrs9aTmZXJzeNuRnFySlVXV0d2dvZVFaXqSUxMJC4ujt9//52tW7fy3XffkZyczG233XZJWYkSEhIS/zQkUUpCQkLiX8zpLqnT83T+LbhcLvLy8sjIyCA9PZ3y8nLAM5mOi4vz5kOFhYVx+PBh3nvvPb777jte+N//CKiowPzNNwBodTowmeCHHzyZIVeJelGq3imVn59PUFAQ06dPZ926dWi1Wl555RUGDhwIwH333ccNN9zADTfccMa2qqqqWLVqFV9++SVHjhxhxIgRTJs2zbuPAwcO8MUXX/D9998Dnsnk6S6gTZs2ARASEsLYsWP58ssvmTVrFsuWLWPBggXeSX1VVRWjRo1iwYIF9OnTh42FZZRXnllSM3j0GMbc5ynFyk7dyztPPcyMtdswm83IBBlyuZyls2aQunUjj7z5IaWlZRwvKsDldFJTUc7L93kyagoLC1BrtXT
t3otPX36e+C7d6Td6LMvm/sQDr77Lkf2prFmzhjfffJOCggIEQaCgoICcnBxcLhcqlYrg4GBvKc7u3btxuVw89dRT7N69m7q6Op5++mkOHz7Mli1bAOjTpw+LFy/GbrezdOlScnJyWLJkCWq1mp49ewKQkpLC1KlTqbE6OHY4j7ufeZGxUx7jlfsnsH3tSkbe8wCvPjSRo/m5PPfpd4Q1bsJbD92LQq2muqqCvKxMho65jZvum0xdTTUv3D2O6NZtCQoNRxTB4XBitztwiyKCXIHd4fD4yAQB+8ncM1EEl8NBaWkpgiBgMpno2LEjw4cPb+BuqKurw2QyYbfbqT4ZFu9yuaipqaGurg5RFKmtrfXk+DidXpHU7XZ7xQC73Y5Op+PZZ5/1brd+HxaLxbu+3W73LrPZbDidTm8plMFgQBRFdDqdt6xt6tSpKJXKBuP94/eXu+xsXy/02JXsc/bs2fTs2dMrWNQvKy8vJy8vj6lTpxIQEIAoV7B04Twqy0q5bcqjnnU5hUwmw+VyIpPLUSmU7N28npYJiYiA2ViHQqnCLzCYsVMeY82vs4iMakptRTnR8a1JXreK7td4nEk7Vi1jzMQHCQ0JZdTtd3PXo0/z4v13kLZrB++9OZ2oqCjMZjPZ2dn88MMP9OjRg8TERD799FNCQkLYvHkzQUFBBAcH061bN2QyGa+88gqLFy/mgQceADzlskOHDvWGgk+aNIl77rmH48ePM3PmTCZNmsQfmTV3FlqNlqDgIN584k3GvTKOpFEeIXJis4n46nwJDQ1FqVLi5+eHn58fFRUVyGQyAgMD0ek8Qp5CrvB+vokyEbvd7unKJ4ChxkBtRQWC1YqstAx3Tg4muRyn2cKgiX25Kf5mIiIbExERQWRkZIPxKZVK2rdvT05ODv369WPu3LlUVVXx1ltvnVEWWX+Mlc8/w9GUuURs2M/U7+5CZxdBpYLmzUGhoOhgEQqVAqvViq+vLzK5DLfLc73IZLIzzik1ajRo8An0YeqWqQzSDSJM0bCz35+BTqfj9ttvp1u3bsyaNYuMjAwOHTrE6NGj6du371V3TdldbhwuNyIgEwTUchlymeTMkpCQuLpIopSEhITEv5jk5GTKy8sZOHDgv8YlZTab2b9/PxkZGWRlZXknETqdjm7dupGQkEDbtm29E5t6WrRowbhx45g9ezZff/MNTzz+OPsXLiTWYkGpVHpWOilsXE3UajVms9kT5O10UlRURJs2bXjrrbdITU1l8ODBZGVlER4eznfffXfO7cyePZvhw4cTEBBAQEAA4eHhrF69muHDh3tf3+kuoXMhCALx8fG43W7kcjmjR4/mhx9+8E74du7cSXx8vLcDl0wmR3+WgO68rExmf/4hhppq5HI5Rw/n4bDb8fXVI5PJ2LtxLVn79vDAtLdRa3UY6uo8+1Qo+WrlZmQyGbcmtaNLvwG0bJfItbfeQfKmdViMdQSHR3hDn4OCgohQBbFo0SJKS0sJCQlBqVTidDoJCgrCaDRiMBi8Ack6nQ6n00loaCh6vR5RFAkKCmLnzp106NCB9evX4+/vT48ePcjJyUGtVnPs2DFqamqwWq0sXrwYpVKJXC5nx44dtG7bjvDGTYlo2hwQ8AsIpPRoESaTiYrSEprExhHWuAn5WRkcLzyMf0Agxfm52CxmNi75jX1bNgAgilBcWEBAcCgg4hbduN0uEEXcbrfH0SGC6BZxu1ye0jEfX6pLSyhXeMSjiIgIVq5cSYsWLfDz88PtdlNWVkZ4eDhVVVVYLBaKioooLS3FbrdTUFCAwWDA7XaTl5eHy+Xi6NGj7Nq1i8DAQA4ePOgVdn18fBAEgfnz59OqVSsAampqUKvVaDQaADIyMlCr1YCntC0rKwsfHx/cbje//vorNTU1OJ1OduzYgUajITIykm+//dbrLDKZPJ0b68sb/+mIougV3NLT0zl69Cj9+vVj1qxZuFwu77/6krAffvgBl8uF1i+Am596CYfdTvH
xYkA86SAERJHaWgO71q3mYGoKCBDbrgM9ht1AZUUFcoWCjn0HsmLuTxzOOYjZbKG6vJRjBYcZcecDfPvq8wy65Q7qqqsoyjtE45bxlJaWkLV3Nwu++ZxDmWk0jY2nKPcgBw4cIC8vj8TERNq0aeN9XSEhIQA8+uijTJs2jW7duvH5558zZcqUixYl4uPj0el0zJ071xuCXlFRQUhICApBgRw5LlyYakyENgsFYNuv287oqGcxW7wlzPXlgvVoNBr8AvyQnXSVne56UigU+AE6nQqz0QqiiMrpJHFgPBu/2swTpq3UhoezNiSEZJWKoDZtyMjMRC6Xs27dOo4dO4bb7eaWW25BpVKRkpJCYWEhVVVVrF27lqeeeoq2bdt6Rf4tIxsRkNCLyplbydmUQ7tuLVAKAoUb99G4b2fvdWGxWBqc3263m7hucSyctpCDOw7Sqmcr3G435hozUXFR6PQ61sxaA3fAcJ/hVBZUEhQU9Kc3C4iLi+Oll15i2bJlrFmzhrlz55KcnMyECRMuyb37R+wuNyUmG9UWO7U2Jxani9NNjDIZ6JUK/DVKQrQqQnQqZFL5oISExBUiiVISEhIS/yBEUcTmclNnd2Kyu3CdnAjJBQEfpRy9SoFGIfMGzC5fvvxf4ZIqKyvzuqHy8vI8d8uBsLAwevbsSYcOHYiJiWmQDXI2+vTpQ0FBATt27ODLr7/mYPv2PNSkCVGFhRAWBnfffdXHXh8mazQaadq0KTKZjPHjxwPQsWNHoqOjyczMPGsXvNP5/vvvKSkpoXnz5oDHHfP99997RalLmejHxcUBUFhYyP3338+2bdu8mT9/RCkT+GNllMNu5+XJd/H+nEW06tARU10dNyS0wG6zYjrZSWv1vNmENWrMVy89A4BCoeT+l95AEASCgoJQqTyTOJ1CiV6jJiwsHIVCQcrm9ThOugsUCgU9kroxun9vPv/8c2JiYsjNzW0wlilTpvDTTz/Rv39/CgoK6N69O1arlWeffRa9Xs/BgweZO3cuAIMHD2b9+vU8+eSTZGVlMWfOHIYMGYJSqUSv17Njxw5vFldxcTEPP/ww69euobKklE2L5jP67vsBEbdbRKVS0+Wa/mxftYwPnphMbNv2tE7sTHBIiKdTl58/t01+hJvuOdNJIpcrCAsLIyqqMWq1muDgYBpHNQZAq9USGBRE48ZNGDr2Nr6Y+jjHD2p48MEHef/99/nll1949913PWVLdjsjRozgvffe8+Znffzxx2zatIn8/Hw++eQTCgsLWbRoEZ999hkA119/PVOnTkWtVjN48GCSk5P57LPPCAgI4JlnnvF27HK5XISEhDBnzhyioqL45ptv+PDDDz1OIFHkt99+4/XXX6dZs2Y8/vjjPProoxQWFqJUKklKSmLixIm8+OKLPPPMM2zbtg1BEPDx8eHTTz8lISEBOCX61PPH78+37I9fRdHjpLFard6vNpvtjO9tNpt3HZvN5v13rmWiKJKdnU1RUREDBw5EpVKdse/i4mK6du1KaKhHfFH7+CKXy5FpNKgUcq/7ShBkiIjI5TK6DRjCxOdf9rpoZIJA+Qklao2W+DbtuPXhp5j9wXTqamtwu1zc9MAjNIuLp3FsPN+98RIOh53O/QZRZzRSXV3Nu089RNKgYWj0eu5/aToPDu1N7969UalUyGQybrvtNnr37o3NZuOHH34gLCyMZs2aUVVVxfbt2ykoKOCpp54641w9FwqFgsWLF/Pwww8zffp0ZDIZU6ZM4YEHHkBAwEfmg1N0MuHtCXw84WN0/jra9m1LSBOPIOZyubDb7RhNRuRyOcHBwWi12ob7UHqmGYIgEBUVhcpXhcViwS26cTgcOIF+NyTyy+cbWbNgHzdP7EO3kR1Rb87jzjVZCNXVuIDbfH25rbCQl4xGjiuVLPv5Z9IKCyksLOSRRx7xhq4nJiaSnp5Oy5Yt6dmzJ0FBQdx8883o/fUUOAoIatyYZz6/k5/fWcYvX2zE5XITEhH
AE706IJd5fvfUlx6eTkBYAI/PfpzZ/5uNxWhBJpNx8/9upvPwzjz969P89NxPrPtiHc+5n6N5WHPmzpl70cfhSlAqlYwePZouXbrw008/kZ+fz2uvvcaIESMYMmQICsXFT/OMdidFtWaKjTbsLs/vZ5ngcUfJBM8tBhFwu0WqrQ6qrA4Ka834KOU08dPSRK9FeYFOlRISEhLnQhKlJCQkJP4BWBwujhutHK+zYHW6PWKU2LBkBDxZNmq5jChfDWX5BykrK2PgwIHe7kz/FNxuN/n5+V4hqrS0FPBMTmJjY0lISKBDhw4XFHL+iCAI3HbbbRw7dowVK1ag0WhQvPMONG4MWq23zffJQXhu614hPj4+WCwW6urqaNKkCQMHDmT16tVce+21FBQUUFBQQOvWrc+7jX379lFeXk5xcbG3nKWmpoYmTZp4Sxb/iJ+fH7W1tWddFh8fD0BRURGCIDBmzBjefvttli5dysSJE8nNzfUGofsqZRwpqUZz0l0BYLfZcDochDXytGFf9OO3AJwoKUGt0aFQyHn7598JiWqMyWTCx8eXRo0iyTt4AFEUOXGiBN+TTrZH75mEr68fnCxxHDTqZiIaN2XLqmVY6mppmuhx7dxzzz1s2+YpD9TpdJjNZg4dOoROp6Np06bEx3vyrF555RXvxFCn03HNNdewaNEili1bxtSpU6mrq8PHx4cZM2YQEhLCY489Rl5eHtOmTWPdunVewaSoqIhp06bx008/MfvX+Yy8bwrmujpuvv8hPnrucSIiwpnw8OPs3byBFz/7jqYxLXE6HFgtFkTR40JrHnfquOZlZ9IsNh6lSsWcbSnexz/4ZXGDY/PKlz8A4HKLtOrQkZ0paYTo1N7l48aNY9y4cWcc0/ouhAD9+vXzdhJr3ry5NysMYNCgQYwePdpz3E6+L/XulJiYGJYuXXrWc+Z0UUgQBG/HNYA2bdqwcOFCnnzySTp37sz9998PQOPGjZkzZ84Z26ovAzxdFKr/Vy8gne2x8y07PSD7UlEoFF5HWEBAABqNJ3xbrVazadMmqqur+eCDDwgODvY+Xr9+SkoKmzZt4ueff0ar1aJWq1GqVGw+VoPLLaL6w0S7oqIchUKJ3teX8LCGn192swlBEAgNDeWmO++h74iRLJ75DW6nnZHjJ2CxWOh3/Wi2rVhM2bGjjH/4KQIDA7FZLbicTm6Z9Ah+QSEsOXk9qtVqLBaPCFJTU8OWLVto3rw506dP9woOLpeLwsJCAgMDSU5OJicnhxtvvJGnnnqK4OBg7rjjDmQyGcnJyV5HXr0AHh8f3yDfrJ5NmzaRbEkm3ZZOr7G96H1Lb++y29+4HYvFwokTJ7jvu/vQaDSEhISccUPh+5LvKSsrQ6VS8cyqZ9BoNISFhVFTU8OkWZM8nS1tNpr1asn0nrHIBQFRIcMmCNwdHMBzpzmu5HI5GpuNb/R6VEolrn37qI2M5Fj//uSoVGTqdFx77bWkpaUhiiJhYWE4HA4EQWD5uuU0SmiE3WZHLshpOqALL8aG4bBYEN1uBI0GpU5DaDMN72e+j8FgwGazofHV8EXRF9TW1qJSqWjZtSWvrHnljPcqvEU4T897GqfoxCpaSdImEaWOuviT9yrQpEkTnn/+edauXcvSpUtZvHgxe/fuZcKECd6bIOfCLYoU1prJqzLhcIvIBQGNXHYex92px12iiMnu4kCFkWMGC21C/AjRqa7eC5OQkPh/gyRKSUhISPyNWJwuDlUZKTHacLpFBEAhE1DJZAg0zENxiyJuUcTqdJFbbcQg86XdkBu4pl/Pv238p2OxWMjKyiIjI4P9+/djMnnKPDQaDV26dCEhIYF27dpdcRtrpVLJpEmTWL58OSUlJZ6yi9NL/Q4ehGeegYoKuPNOuP/+hmLVRfDAAw94tz9jxgwEQeCRRx4B4KuvvuLee+/l2WefRSaT8fXXXxMV5ZmEnCtT6vvvv2fcuHFeQQogICCAwYMHM2vWLG8o8encccc
d3HXXXSxatIgHH3yQ2NhY77L6SWBRUZG3nEqlUiGKIvPnz2fu3Lk899xznmwiEW566GlChwzzPt9Hr+fuJ57nwVFD8Q8KovvJAG+Xw4U20FPqZbFYUCgUaLRatFoNKpWaoKAgZDIZarUK10mhxOVyAyIYGgpoQ8eO59s3XmJxgB8//fQTzz77LDabjaSkJO95PWLECOrq6oiIiPA6WM7Fddddx+7du0lMTCQgIIC+fft6xZjY2FjmzJnDAw88QE1NDQaDAb1eT9++fSkuLsZpt6GUK/AND8NtNZ18DRqimsfwzLsf8+bjk3E6nMjkMh5//T1aJXbijRlz+Hr6K3z1xss4nQ7CG0Xx6jc/nXeMp+MQ3fgqFQRrr+4k7dNPP+XXX3/F5XLh5+fH7Nmzz7u+2+2+KKHo6NGjFBQUoNPp+OGHH867fn3e1OWgUqkaiEJ6vf4Moej0n09/7GzL1Gr1OR2Wx44dY8qUKbRo0YIPP/wQ8Ag9p4fnv/3220ycONF7Ddez7tdZHDl2jIlPTfU+ZrNZMZnMHkHoArk6Pj6+OByevC67zYF/QACBgUGMGHMrsz96m/AmTYmKboHD4UQmV3LjvZN55b7x+AUG0//6UQCsWLECvV5PSkoKjz/+OCdOnMDlctGzZ09atWpFZWUlJ06cYP78+TRt2hSDwUBlZeU5BT6ZTIZSqUSlUuHj44Ofnx+BgYHecrOwsDBCQ0OJjIxEHaZG8Bewy+yo5R5RVRRF7/UFEBgYeM6yca1Wi1wux+Vy4evri9FopLa2lsDAQPz9/amoqKCqqooyh4NyQUAvl+MToEFXbSEmrQx9o0Yel5zFgsVqxWQ0YjJ6OiEqVSq0ZjNti4vppFYzvlMnrO+8Q0ltLSUlJUybNo2KigqMRiO+Ub7c+sqtnCg8gVqlRqFQoFCp0IsiDocDg0yGf1UVfn5+aLVaDAYDFosFtVqNzWbznrMXQiEokIty0qxptFC2wFf215a4ymQyhg4dSseOHfn555/JycnhrbfeYuDAgdxwww3e8sTTsThcpJfVUmXxCHjnF6PORC4IyBVy3KJInd3F3hM1NPPXEh/sK5X0SUhIXBKC+Fe0O5GQkJCQaIAoihQbrRysNGJ1ulEIAgqZcNF/EBpNRmoNRnR6Pf4+OloG+dDUT/uXt4auqKjwdss7dOiQNzMkJCTEG1LesmXLSyojuNj9TpkyhfLycnr27MnUqVNPiV1TpkBy8qmV77oLHnrosvc1e/ZstmzZwt1330337t2vbOBXkZkzZ7Jz505eeOEFmjRpAsDChQtZtWoVnTp14v7770cQBGqsDnYer0Z+8hw7HYfDTkVFBXa7A7lchlKl8riURPD19SEwMIiSkhO4RdFbnuZBxFZ8AqGi3BO2hICg90XZogUgIIpuSiurcZoMTOh79i5Udrud//3vf9hsNt58882LEivr6uq8OVNPPvkkFouFL774guPHj5OcnExycjLV1Z5Qd4VCQfv27enWrRvt2rVjb5mRaqsDreL8JaJXA/fJMty2IXqaB5zZve1cOJ3OCzqKzvXYH5fX/+y8yJy1iooKsrOziY2NpVGjRgCXLRSd67H6UrR/Awcr68ivNp92vni6g9psdiIjI7zlq+dHpKKiApPJjFbrcQqd7jQRET3HymLBYrFgtzvQ+OopL8yjeM8W2rZtS9u2bWnVqtUZGXv1LFiwgC+//JL169djs9moq6ujpqaGsrIySktLKSsro7y83CsC1YtK9eKi3W4/6zkil8sZ+MpAIjtGYq2yIhNkOJwORFH05EH5+XmcqgoFCoXCU+4o8zRIqP89VF1djcFg8Dqk7HY7oaGh3tficDgoKyvDYDB4xKswX46uOYpyZx2tDAb6KJW0rKpCZTDgsNuxWK1YLRZvWSacDB1Xqzk2aRLhd95JVFQUgtUKW7dSq3WxoPUxjCYzdVV13hy+egHf6XRisVgQRdEropnNZm/zhaqqKhQKBU2aNLmo362iKGI
STXTVdCVRk3gR58efgyiKbN++nQULFmCxWAgJCeH2229v4Og1OZzsPVGD0e5CJbvy8HJRFHG4RVyiSJReQ0KYnyRMSUhIXDSSU0pCQkLiL8YtimRX1HHUYEGES747CSKG2lpcTid6TTAOl5us8jqqLA4Swvz+1M44brebwsJCb1lecXEx4JkYREdHe8vyIiMj/1SBLD09ncDAQPr06cP+/fv5/vvveeihhzwT3j9OsGbOhJAQOEvJ1MVQ7wSodwf8U4iPj2fnzp3k5OR4RakbbriBQ4cOkZKSwtatW7nmmmvwVyvwUyuotTpQyE5NsA11ddRUVyOKHieAy+XCarGiUHjyYTQaTz6MQqnEYvbkwMiEekFBQK1UgFLpDYs2W22YS0oICgzEjeec0DrMZ4y7nq1bt2IwGBg+fPhFu+cmTJhAYWEhVquV2NhYbrvtNqZNm8aJEyc8oxIEWrduTbdu3UhMTGwwkW8RoCO1tBan243iioURT3mtW3QjukVE0Y1bFBHdbtxuEacgIHM6OLgnkwyL+aKFpdODoC+F+ol5vRjk6+t71lK184lHe/bsYeXKlUyaNInOnTujUqn+cpH7n0SQRsVhwYzLLSKXeTon2mx2fHx9LlKQAhAIDg7B6SzBYrFSVVXdIABbQECj1qBRawgICMTpdGC2OwlQChTYbGzbto1t27Yhk8lo0aKFV6Rq2rQpgiAwbNgwDh06xMKFC4FTImJISEgDZ+UfEUURi8VCbW0tBoOBqqqqBgJWZWUllZWV1OypIaJ9BDK1DHOtGVEUEQQBURSprKw89ToEwZutVf99/Y0Ii8UThK731WM2mykpKSEsLMzrcIuKiiI4OJgKUwWmGhP7FuxDaVdSEhVFip8f8uBgejVqxEAfHyKLivBPScFd79o76aKyWq2s27OHw0eO4K/X88C2bYTX1nJoQmfcjdoRWuciWKej1uWi7uTnlU6nw8/Pz1NGWF6Oy+VCo9Egl8uxWq1UV1djNntccUeOHEGhUKBUKlEqlQ2+P92lJwgCMlFGjj2H9ur2yIU/XwA/G4Ig0Lt3b9q3b8/cuXNJTU3lo48+okePHowZMwa5WsO+E7UY7S7UctlVEY8EQUAlF3C63RyvsyITBNqH6v9ff4ZISEhcPJIoJSEhIfEX4hZFMssNHDNYUcgElJcxOTaZTDgcTvR+ehQKJQrA6XZTbLTiEkU6hvtfVWHKZrORnZ1NRkYGmZmZ3nb0arWajh07kpCQQPv27dHr9VdtnxciPT0dhULBxIkTmTFjBunp6SxbtsxTNjdxIjz4oCdTqp7334egIBgy5JL3Vf+66l/3P4X6sPOcnBwGDRoEeNwN9957L6+99hrz5s3zul6a+mnJsDo8JaBuF5UVFVitNgSZgEqpwG63gwB6Pz2BAQEIwqnzUqlUYsGC0+FoOBk/KaB4HBJynBo1drudkpJSlFotTrudlsFnzzpzOBysWrUKtVrtHfvFMHPmTPbu3UtycjKFhYVs2rQJgOjoaLp160aXLl3OKCeqD9DWumwEyEXKbS4UeLrhie5TYpIoegLQRdF96usf1jn9K+fwmctVKuQKBVlrl1JZlH/GcplM1sBl5O/vf0Xla/VulSud/KWkpKDVamnWrNlZS33+vxGqU+GrlGO0u5AJAtU1NQiCQOBZOlmeD0EQCA0Lo+TECerq6lAqFej1Zy95cwtyfLRyrh0xlNuvG0phYSFZWVlkZWWRn59PXl4eixcvRq/X06ZNG6ZNm0abNm0u+bNXEAR0Oh06nY7IyMhzrmez2ZiRNgNThAmFoMDP1w+1Wo3T6fQElZ/sSOp0OnG5XLjdbtxut3e5eLJEzm63ezs3ulwujEajV/SUyWQoNUqUPkpqttVgr7ZzovIExcXFREZG0rhxY1YYDKxTqWjWrBnD3niDbmo12n370CYnE2gwYOndm369ehGek0PJ9u0o8vOpAA52jUC02XGZrMgsFoIEAb1eT/lJR5TFYkGv1xM
REUFFRQWiKBIeHk51dTVarRa32+11+DkcDqxWKxaLpcF7VF8S6RWplHJqlbUUO4ppompyScflauPv78+kSZNITU1lzpw57Ny5k/379zP4zgdwav2umiB1OgqZDBE3x+osBGmUNPbTXvhJEhIS/++RRCkJCQmJv5BDVUaOGawoZcJlujVEamtrEQTwP23yrZDJEBApNdnIqjDQPtTviiap1dXVpKenk5GRQU5Ojre8IzAwkH79+pGQkEBcXBxKpfKy93G5mEwmcnNzadu2LRqNhrvvvpvp06ezfPlymjdvTkLXrvDCC/Dqq6eeJIrw8ssQGAhdz15Odi7+qU6p4OBgQkJCyM3Nxe12e8uiQkJCuOOOO/j222/59ttvef7554nwUZOnkFNrsVBbUY7bLSJXyBHdbux2B0ql4qSzRnPGfpQKzzF2OJwNRanTRD9BAJ2vL5EBAVTX1OAA9q5fhaplU9q1boVG03C79S6poUOHntF+/fRuazabjZqaGtLT00lNTeXw4cPeCbC/vz/NmjWjcePGqFQq9u/fz759+84boK3U6ug06jY0en9sxguLjIIAgswzcRNkMs91plR6f27wVZAhyGSIcjl6t41ON96A5izC0tUuZb1a1DtfgoOD/+aR/DMQBIEmfjqyK+qoNRhwOV0EBPgjl1/68ZPL5ISGhVFaUkJVdTUKhfKMTnWi6Cl9auyr9Yart2jRghYtWnD99ddjMpnIzs72ilS7d+9m9+7dCIJA06ZNvS6qFi1aXJUSyaNHj/Ltt99Sbign9v5YgqKC8JP78dOzP7Fv5T4qjlQwfet0mic0B8ButfPp3Z9yLOcYKrUK3xBfbnvjNgLDAjEYDOh0OgRBwGKxYLVacblcKBQK3LgRNAJH9x7lu+e+w+FwEBoaSnV1NSdOnCA9PR2TyUSTJk0QRZGff/4ZHx8fWrZsSceOHVm7di0FH39Ms0WL+Oijj+jeqRNBaWkYZTZsYXrkFidulwu3ywWCgFBdjW+TJri0WkwmEwaDAZlMhmgXmf38bE7knPC6wfrd248hdw9pcKycDid2h90rvNX/q8+fAlD4KZj42ETcJW5uuukmIiIiiIiIIDIykvDw8Aai7yuvvEJNTQ0fffTRGcdAFEU+//xzvv76axwOBzqdjpCQEJ5//nn69+9/0ceyY8eOxMfH89tvv5FbWkmlzYXcUYPK3x/kCl6ZfDfdBw5h2M23XvqJchaUMhkul4ucKiPBWhVa5d/jGJOQkPj38M/8y0hCQkLiP0ilxU5hreVkts/lTRpMJrPXJfXHyZFcJqBA4HidlVCdmkjfMwWGcyGKIkVFRV4h6tixY95lzZs3p0OHDiQkJHjyOv5mO/7+/ftxu9106NAB8ATqTp48mTfffJMZM2YwdepUwm64wRN0/sUXp57ocMCTT8K338LJ7nUXwz/VKQWeEr7t27dz9OhRmjVr5n28S5cuHDhwgG3btjF//nxuuOEGCpK3oI5ujaBQIHe5cDmdCIKAv78//gH+CGf0evSgPNnW3eH8Q7i1y+UxC538z3VyUq3T+3OsIJ/klYspSA5iwYIFdOrUiejoaOx2O2azmd9//x2r1UpoaKhXSDo9QNvtdlNZWUl5eTlVVVW4TwpgGo2G0NBQwsLC0Ol01NTUeDvTKZXKBo6icwVoK511uFQhaEPDkONGfk6BSTjne3I2XG4Ru9tNiFZF58ioq1Ai+NdSWVmJTqc7Q0D8/0xjvYbDVQYMLjdyhfycod4Xg0qpIiQkhLLycioqyomIiECpPBWgbXO5UctlRJ8jg8zHx4euXbvStWtXRFHk2LFjXoEqLy+PoqIiVqxYgVarpXXr1l6RKjAw8JLGKYoimzZtYsGCBTidTvr160e/Fv1Yb1vvyUu6oSvXPXod04ZNO+O5A+4eQOLgRARBYPU3q5nz/ByeX/w8DocDrVZLaGgoAGVlZZjNZvwC/JD5yNBatYTZPSV9/v7+DB8+HKVSSWpqKvn5+Zh
MJmpra/H390cURcrKyjh+/Djr16+nrq6Oli1bYrFYePjhh1GpVDRRqRgepcWhkqGus54yNbrdOAWB4pJSFEqFt/TO4XCw/N3laP21PPjrg4huEavRit1oPyPkXKFUoFCeOX1yuVynBCq5jeGThlOytISUlJQz1g0MDCQyMpKIiAgKCwsRRdHbnOH0368vvvgiGzZsYOXKlTRu7Mn027NnD/v27bskUQo8nUxvuW0863KPYzBbMNfVYTWZLvn8uFjUMhlWp5ucKiOJ4f+s7sASEhL/PCRRSkJCQuIvwOn25D653CIa+eVOVkVqa2vOcEmdjlImw+p0cbDSSJBGifo8oc52u50DBw54g8rrnUBKpdIrQrVv3x5//3/WH5RpaWkIguAVpQAaNWrEhAkT+O677/jyyy957rnnUN99t0eYmjfv1JPNZnj4YU/O1Mkw5wtRL0r905xS4Cnh2759Ozk5OQ1EKYCxY8eSl5fHwoULWbFiBSqViraD3DRqlYDFWIdCJsff3w+5XI7JaDxnyZrL5cJsMeNwODzrnXw8qK4O1Wn5XbUGAw4ERGDFzK/RaTxugNzcXA4cOICvry8tWrTAbDZTWlpKTEyMp+OTRkNAQAAqlYqqqiqOHz/O8ePHvV27IiIiaNu2LR06dCA2NvaMErbLCdCusthJLa3F6nRfccjv6QG/oToVieH+/zpBCqCqqqpB3pEEKOUyjqfuQtYkDv/AoAZlrZeDVqsjMDCQ6qpqysrKiIiMRC6T43J7ZJO4IB98ziJ4/BFBEGjSpAlNmjRh2LBhWK1WDh486BWpUlJSvGJIo0aNvALVhZpOmEwmfvzxR9LT09HpdEycOJHExEQABikGscG8gaY9m6IRzhQuVRoVHYd09P7csktLln+6HIVCgVqtxmKxeB2dISEhlFSUYMVKuDOckZEjmZ45nfvuu4/IyEhSUlL47rvvAMjLy6Nly5bExcVRXV3tbaKhUqn48ccfEUWRI0eOEBwcTGxsLEeOHGFfWRnL85S0nu5k2H198ZXJmPfGMgRB4ERxDcZqMw/MfcArdgNUHq8kqm0URqPR46wUQB+h99yMEGHRW4vI2pgFArTp04Y7pt+BQqXAXGvm5//9TN7ePGRyGdGJ0dzx6R2sWbqGSEskn3zyCZs3b+bpp5/GaDRiNptp3bo1rVu3Jjs7m8zMTOx2O08//TQ6nc7rqvL39+fdd99l/fr13sYDgFeYrOfdd99l5syZyGQyEhIS+OKLL/D398doNPLII4+QfLLpx5gxY7jrsacRFSrMlUW889Qj1BlqCY9qgtNhv+w8u3MhCAIKQaDUZMPscKGT3FISEhLnQRKlJCQkJP4Cjhqs1NmdqC851PwUJvNJl5T+TJfU6ajlMswOF0W1FuKCG7alrqmpITMzk4yMDA4cOOB1pgQEBNCnTx8SEhJo3br131KWdzE4nU6ysrKIjo4+w7XQtWtXCgoKWL9+PbNmzeLee+9FeOopqKyE9etPrVhV5enG9/33nnK+C1C/n7/CKVXfEepiO61VVlaSm5vLzJkzyc3NbbC+0Whk7969ZGRkIIoiYWFhpGXuZ+zjU2kS1xqn1Ux1dc35ByTgyRwRRVxuFzKZ3NslUmkyeYQgweMn0gYGo9JpqTq0H43Lxvj776dbt244HA42bdrEnj17AI8jJykpiY8++ghfX18OHz5McnIy+/bto66uDrlc7i3N6dq1K61bt24QJnw1CNKq6NYokP3lBqotDpyigOoSul/WU99lTy4TaOGvo2WQ7xkdDv8N2Gw2TCbTecOx/z9SWFjI9jUr6TxMRWD7zlclJN9Pr8fpcFBXZ6S8rIzQsHDsbpFwHxVNLjN/R6PRkJiYSGJiIqIoUlpa6hWoDh06xNq1a1m7di0qlYr4+HivSOXpBujh0KFDfP/999TU1NCyZUvuvffeBi6aCEUEw32Gs82yjRJniSeT7TwNvFd9tYrO13YGPC6vZR8sI7J5JMMnD8eCBX2
gnpJ9JWRsy2D4o8P58ccf2bBhAyEhIUybNs3rjKo/J99++20WLlxIWloaycnJREdH88ILL/D6668zffp09u/fz/79+7Hb7XzyyScEdAvgxcdeJHXlQQY9MAhRp+NI1lEm/zQZuVqO25sj58nA6ja2G7+/8DsHNxwkok0E0V2jiekZg91uJ3VRKjnJOdz1/V3I5DJ+efIXZr06i3739uP3l39HpVXxxMInUKqUWGot2G12rHYrtbW11NXV0alTJ5KTk73iXM+ePRk/fjyxsbG8/PLLlJSU0KtXL0pKSjhx4gSHDx+mrKwMURSZNWsWc+bMISwsrEEZYEREBGlpacyYMYOdO3cSEBDA/fffz3PPPceXX37Ja6+9hs1mIyMjA4vFQu/evSEkiq5Dr+edpx7mutvuZNDoMaQl7+LFe26l24Ah1Bpq8fPzuySH6PlQyASsLjfH6yy0DPK98BMkJCT+3yKJUhISEhJ/MqIoctRgQYArCBUVqa05mSXlf/4SEkEQkAlwtM5CdICOkuLj3rK8oqIi73pNmjTxOqLquzn90zl48CA2m62BS+p0brrpJo4cOcKePXuIjo5m4MCB8NprUF0Np5dRHDkCjz4KX30F52i1Xk99q3CDweDtPgWnArTP1VntQsLSHx+r/3q+id7ZqKmpoby8HD8/P7RaLWq1GrPZTGZmJlarlYiICKqqqnA6nbRr3YrGgg29QsAdHIpM9JSvyWRnlq7JBBmelypQUnICu91BZGQE3pb2J51jImD380eh0xEXHsCGTYWEhIQwZMgQQkJCAGjdujUjR47kzTff5Pjx4zgcDqZNm4YgCF4HmkKhIDExka5du3qdEH8mepWCpEaBFNSYya8xYXW5EfC4DWUC57weRFHEKYo43SLCye20DdUTrP1zx/tnUp8nJTmlTiGKIvNOuiz7t4/H6Kuh2GgFrlSYEggMCsLhdGK3O6g1mWgU6E+HMP+r8hksCIJXvBg4cCAOh4NDhw6RlZXF/v37yczMJDMzE4DQ0FBat25NZWUlmZmZyOVyrrvuOkaMGHFW52GgPJBrfa5lv20/AFasmN1m1IIaGaduuCx6bxElh0v435L/AZ7SsSGPDkGtV2MWzfjIfOim60aFfwU/lP7AE088QbNmzWjVqhUAgwYNYs6cOUyePNm7706dOtGnTx/WrVvHL7/8wqFDh5gzZw42m42wsDCeeeYZbr31Vlq1asXu3bsJVYbS47Ye7PxxJ2OfH4tGo6H3mN7EtvKIXG63G7fLjcvt6cYXGRlJq6RWHNh+gIKUAtZ+uJaC5AIGPz6Yw8mH6Xh9R9QaNaIo0nFkR/Ys2EP38d05sPkAd317FxWVFd7331Zlo6qqiuRVyeTn5+N2uykoKMBkMqFQKDCZTLz//vvccMMN1NbWolar6dq1K3q93uvMXbt2LevWrWPgwIGUlJRw9OhRXnjhBVwuFz4+PowYMYKdO3cSHBzMzJkziYiIoGvXrrz++uvU1NSwbt063n//fWQyGT4+Poy5dTybN2+kQ+8B5GXvZ+jNtyKXy+na+xpad+wCQE11DWaTieDg4EvoMHn+c1EAjtdZiQ30+Vf8jSEhIfH3IIlSEhISEn8yFRY7Rofzsjrt1eNxSTku6JKCk4G5djvVThcffLOYw2keh4pCoaBdu3Z06NCB9u3b/2lZEn8m6enpAOcUpeRyOffffz9vvPEGCxYsoGnTprRs2RL3e+8h3ncfYm7uqdK01FRM991HwSOPYHO5zisUZWdnYzKZeP7553GdXPf0AO1Lpb6kpb4ULTAw8LK7ry1cuJDdu3fz7LPP0qRJE5YsWcKaNWu8wcBqtZr8/Hz0ej033XQTo0aNwu5ys7/MQInZE86rlp3fwadUKrHZ7DhdLhT155/LhUupxK73R2kyEue20DQwim/y8ggMDDwjMFsmk1FSUoJCocBgMLB8+XKUSiW9e/dm3LhxdOnSBd0FBMKrjUwQiAn0IUqv4XidlWMGC2a
ny5vhfvo7Ip78WcST3xaq8zhbwnTqq9rt8u+gqqoKkELOT2ffvn3k5+fTtWtXWsbG4HKLyATPBNvldqG6AtcrIgQEBWMwGikryEd91IGy8fCr+wJOolQqvc6osWPHUlFR4Q1MT0tLY926ddTW1qLVahk2bBhqtZqSkhIiIyPP+vrkgpwOmg74yHxoo2oDMjC6jYh4OlKu/XQtyUuTeWrRU7g0LoxuIwig0WswHjfSq2Uv2vu1RyPTEJsUS2FBIc899xyVlZU0b94cAIvFQmFhYQNRqv61DB8+nF69erFw4ULmzZtHVlYWn3/+OfHx8SgUCkaNGkVcXBw7y3bidDqx2WyUlZXhcrnQ+JwqO5TJZMhkMhwWB3V1dVitVpRaJR0GdyC+TzxxveP45YlfGPLEENyi2xM07qPD39+fkpAS1Go1ERERyGQygoODCQgOwOXyCFwylQyZKEOtVqNSqcjKygIgJiYGp9NJQUEB27ZtIzs7m/LyctxuNwcOHEChUHi7+Gk0GsxmMwsWLCA6OpqQkBBef/118vPz+eWXX+jUqRM5OTm43W4OHTrEgQMHqKiooKamhnbt2lFSUsJNN91EcHAwjzzyCCcqKhFFkfuvvQaX08mk6wciALdOeRSlUklAQAAajRqr1caJkhL89H4EBPh7S1afGDeS0uPH8NHrcTqdhEc15ul3PiEoNIycjDTmffMZL3723RnnS71byupyoz1PnICEhMT/byRRSkJCQuJPpsJsRxRBLv/zXFIutwuL2YLFYsZitSK6RdR6P3zCIunVq5e3LO/f0Ord7Xaf1VFksVhYtWoVMpmM/fv3k5KSck4Hkt1uJzk5mdtuu43ExERkMhm+Gg13VlXhbzaf2llJCSU5OSzr3BnxHBNMpVLpbXvudrsJCQm5oFB0oceuZjlaQkICe/bsYceOHRw+fJijR49SW1uLQqHA19eXdu3a8corr/DZZ5+xatUqWrVqRatWregY4c9xo5WcSiNWp8clpDpHi/D6ck6nw+HJwBFFHP6BCKJIcE42bef9hP6TjzhRWorJZKJNmzZeF9S+fftITk5m+/bt5Obm0rhxY/r164dGoyE/Px+Xy8W6desIDAykXbt2f8vddI1CTkygD9EBOmqsDursTursTox2Jy5PvAxKmYCfWolepcBfrcBH9d/5E0rqvNcQh8PBb7/9hlKpZPTo0YBHiEwI8yNAoyS3yuOsU8g8uTkXe87Wd9hzuEUUMhlxQXpmz15DuqGWiNCQBllBfxYhISFcc801+Pn5cfDgQZo1a4a/vz8RERGUlpby22+/8dtvvxEQEEDbtm1p164drVq1OkMwFhBopW5FO307Sp2lVLmr+PLDL9n3+z5eWvQS+kA9SkFJiDyEIHkQZUfLmPvDXKpGV6EZekoc6tOnDydOnODWW29l8uTJJCUl4Xa7ady4Menp6We9AeHn58edd95Jy5Yt2bBhA8HBwaSkpGA2m3n77bf59ttvuX3k7Yy6aRStr2mNxWLBYrFQW1uL0WhEp9N5O+/Vd5Yt3FtI255tceDA6XRSVVBFUOMgAgMDiU2KJXNlJq0Htqa2upats7eSODiRwMBAulzbhZ2zd3LXe3chk8kwVBiQBcloHtGcVkNb8dxzz3H//ffTunVrbrjhBg4dOsSTTz7JwIEDSUhIYNWqVRgMBpo3b+5t9mC32zEYDISEhLB8+XIiIyO9n8FGo5Hy8nI+/vhjbz6fxWLBx8eHvLw8/P39vZ1xLRYLGo2GqVOn4hsYxE2THyNl2yaiolsw5OZbGXzjWE4UFZC5ZzcDRt5EeHg4RqOR6upqDAYDZrOZ4OAgNBpPWenkF1+j95BrAfjohaeZ/+0XPDD1FeITEs8qSIFH+He63NTZnJIoJSEhcU7+O39RSUhI/KcYMmQIJSUlyGQy9Ho9n3zyCR07drzgsgshiqK3zfb603OGzsKiRYuIiIige/fuAOzdu5d3332XX3/9FYCvv/6aDz/8EI1
Gw/Llyxk3bhxbt249Yzs1NofXcfH83eOY8sLrNIk5f3bLbb078eo3PxLbpj1mr0vKl/ITxdx/bX+WZORhdzi8QpTNbqe+xZBKpUSr1bFi3myuHX0zN3YdccH35a677iIxMZHHHnusQYvqJUuWsHHjRj788MOzPq/+TvSllqqdr9zNeVp49unU1dWRmppKkyZN+D/2zjsqivNtw9dsZ5deBQuKgAUQrGisKNbYa6oao6aYoomJGn+mmphiukn0iy1GUzT2XrBG7AUBRWyg9A4LbN/5/ljZhNiNLcle53hinN2Zd2ZnZ2fu937u57fffrvqa6pEH2dnZ6KiokhOTiYvL4/u3buj0WhIa9mS1t9/j1Kns5erxeh0NPP0pOLpp68ZoP3VV1+RnJzMmDFjCAkJueHxvJcEBweTkZHB7NmzqVOnDnq9Hnd3d9zc3Bg2bBitWrVCEASefvppPv30U+bPn8+0adNwcXGhlosT3k4KMv7kEqoyf0kFgapnbYlMgcJJjcEKosWKFBHvlCRqx++iRsIRJBYLuLhw5tQp+znxxRdfkJKSYs9tKSwspH79+nz55ZfUr18fgMrKStavX8+OHTuYNWsWjRo1YujQodWCfe8lEkHA00mB5z+4FO92cJTvVWfbtm0UFRXRq1evakKdIAgEuqnxdlKQUlhOvs6I3mJFIghIBQHpVco+RVHEKmLvTmk7x+Q08nLBXSXHeewYPv/8cxYuXIiXlxdBQUF3dd9MJhO//fYbO3fuRCaT8eyzz9KpUycEQaC0tNTuojp58iR79+5l7969SCQS6tWrR3h4OIsXL2b37t3k5OTQvXt3XFxcOHv2LGKGyHdTviMoKIiv+n0F2K7HBw4cAGDJL0s4k3qG/fv3061bN/txWrJkCd27d8fHx4cff/yRmjVrUqtWLR555BHmzZvHV199dc19CQgIQKlU8vbbb7N06VISEhI4fPgwQ4cORalSEtY7jP6v9UeKFJlMhsViISsrC7PZjFwmR66Q4+LigqurK0lZSXzY/0MsJgsSqQRnb2ce/ehRBEGg34v9qMivYOHohQDUaVqHhj0botVqeWLGEyyesphJbSYhk8uo17Qej335GBqZBoWTgnr16vHxxx/z5JNPsnPnTurXr0+3bt3o2LEj48ePRxAESkpK+PzzzzEajZSXl1NRUUFFRQVarZZFixaxdu1aysvLUalUyOVyYmJicHV1paysDIlEYndiKZVKfH19yczMRKFQUFxczMmTJ7FYLDTv3I2m7Tqx5PMP6TZsOBuXLmHVD3PxCahJUFgEOdnZpKamIpfLbcfKasGkM5KdbcTF1YU/e4ItZjO6igp8/G3X6eP79/Ltu1P5vw07sZjNvDHqUcpKijHo9dRvFMa49z6hwuTCmTNnGDlyJOXl5VitVvr168f06dPv8BnuwIGDfyKCeLu1Bw4cOHBwFykpKcHd3R2AlStX8vbbb9tLt6637EZs27aN1157jYyMDHtY6tUwm82MHj3aLtRcjUaNGjF//nzatGlzze1ZrCLb0/OxWG0ulJvlD1EqnKysbMxmEwEBAWSmX+CF/j2YvWk3ZrOtW44g2IJunZyccFKr7eVVj7ZrxluzF/J0j44opJLrBmj/73//o169evTp04eFCxdSVlbGo48+ekNh6XY79giCcMsuowMHDnDo0CFGjx5NSEjIFa9XKBTVHghFUWThwoXs37+fmJgYHnnkEduCxER49lkwGKoP6v/+D5o1u+p4FyxYwP79+xk7dizNmze/rX2+GxQUFLBw4UJ++uknSkpKaNq0Kd7eNsfFsGHD7PkkVaxbt461a9cSERHBuHHjqh0vqyiSX2mkRG+i1GCizGDGcvkWwWKxkH3pIi5yCS3CG+OtLcZ1YP9q702YNYvPv/uOI0eO0LRpUzQaDR4eHvYW9lu2bCE2NpahQ4desR95eXn89ttvJCQkIAgC7du3p2/fvleM38HdYe7cuRw6dIiZM2f+5495aWk
p06ZNQ6lUMn369Ou6S7VGM5llOjLL9Zgsov37UlXmWfVfiQAyiYQaGiW1XJ1wV8qqfff27dvHwoULcXFxYcqUKXfNsZadnc33339PZmYmfn5+jB07llq1al31tVarlfT0dHtg+oULF+zlyi4uLjRu3JiwsDAaN2580+fMDz/8QHx8PNOmTbtiu2fOnOGzzz7D09OTqVOn3nIpr9lsZufOnaxevZqzZ89SXFxMz8964lLLBbWgxmg0UlFRgclowmwx20vlnJ2dcXV1RSaTkZWVZXfD5ufno1ar0el0KBQKatSogVarJScnB51OhyiKKBQKnJ2d8fLyso/XLJoxikYedn6YGrIat7QPt4PZbLYLWeXl5XYn1fTp00lOTsZqtdKrVy86jngOtbsnE/p3ReWkRhRF6oQ2pM/w0Whc3RER+fXrmTRu2YbwVrZ7GpugaivL/L93JlNWVICLqxsFuTl4+vjxxdK1OLu6VhOlRFGkrKQYNw9PRFHky2mv4+lfk/+9MYWv336DGjVqMGXKFMDR8dOBAwd/4HBKOXDg4IGkSnQC20PCn2/gr7fsRsybN48xY8Zw+vRp5s+fz3vvvQfAzp07GTduHK1bt+bIkSNMmDCBNWvWsHXrVhYuXMgLL7xAcHAw48eP5/jx4wwePJhz584xcuRIIiIimDlzJlFRUZSUlAC2h4zXXnuN0jIt5UYTIyZMokP3h6s5oJbN/Y4da1dgNpmQyeWMe+sDwppVL9+w3WyWI5FKuXTpEnk5uYiiiN5gQKFQMLJ9Mx4ZN55DO+IoKyli4NPP0fHhfiyf+x2FudlMH/c07xj1tG/bFg8PDw4dOmS/8XZzc6N9+/YolUr7TXxVKK7RaGTPnj2cPn2a9PR0+vfvj1KpZN++fSQlJaFWqwkLC+Pw4cN88cUXKJVKkpOTWbZsGSaTCZlMxvjx42nXrh3Hjh3j3eyaTnUAAQAASURBVHffpW3bthw6dAiLxcIPP/xgL1VZv349b7/9NkajEUEQmDNnDtHR0Rw6dIhJkyZRVlaGxWIhJCSEevXq0b1796uG8P4VQRB4/PHHycjIYMeOHdSrV4/o6GiIiICPPoJXXoE/tQTn8OFrilL3sgPfzSCKIvHx8fz666/k5+djsViQy+V4e3vz/PPPXzNzq1evXqSkpJCYmMj27dttQfCXkQgCfholfhqlfRsW8fKDidXCy5++Q8OGDQlq3xJyLiICer3e9jCk0zF7wQISExNRKpX06NGD6OhogoODsVgsTJs2DYVCQffu3a86Ll9fX55//nlOnz7N0qVL2b17NwcPHqRXr1506dLlum3sHfx9CgsL7Q/o/3VWr16NwWDgkUceuWG5s4tCRkNvF0K9nKkwWdAazFSYzJitNvFGKgio5VJclDKc5bJrZo+1adOG3NxcNm7cyKxZs5g0aRIqleqqr70dRFFk7969/PLLL5hMJtq2bcuwYcOuu39V7qh69erRu3dvKioqOHXqlD0w/cCBA3YXVGBgoD23Kigo6JrX5+joaOLj4zlw4MAVolRISAiDBg1i2bJlzJ8//wrR/EbIZDJiY2Np06YNa9euZcuWLaRuTSXssTAKCwqRSqQ4OzvjU9MHJycnKisrKSsrs7uSrFYrVqsVLy8ve3l1VUh4eXk5er0eFxcXnJycyM3NpaysDJPJRElJCTqdDmdnZzw9PbEoLHhIPfCT+t302P8OMpkMd3f3avdFAD169ABsQuCvv/5KWFgY5UYzs5ZvxK9mTQwGAws+ncGq/5vFtG/nYTKbmfDBZxiMBvQ6HQajEYvZjGC1YrFYsFos9B35DAOfHIlCoeSHLz5m5qSXePu7hdW2K4oiy+fN5sCOrVgsFiq0ZTRs2gJRFOnQoQOvvfYa5eXldOzYkdjY2HtyjBw4cPDg47jLc+DAwQPL8OHD2bFjBwAbNmy4qWWjR4+mb9++9O3b94r1FRUVsWnTJr777jsuXrzIww8/zDvvvGO/gT516hTffvs
t8+bNA2DHjh3VnFI7d+60r+u3336jbt26/Prrr0RFRZGWllZtO/379+e3336jWXQbdqUXoNOWXTGergOGMGS0Lcj15LHDfDzxRRbG2cJZraKVwqIiBKUT5eUVyOUypFJbG2sAmVSK1WL7u9UKU79dQHZ6Gu89O5xWnbvRb+QYdq1bxcsffYlzZTGCvoK4uDiCg4P53//+h1Kp5LfffqO0tJTJkyeTnZ1NWFgYY8eOZdasWfbuQL/88gtr167l888/Z/369axbt460tDScnZ0ZNWoUzs7OPPHEE5w/f57PPvuMffv24erqytmzZ2nfvj1paWnk5eVx7tw5Fi1axPz585k9ezb/+9//2Lx5M6mpqTz11FPs3r2bhg0bYjKZqKyspKSkhLFjx7Jhwwb8/f1JSUmhRYsWTJ8+/aYEqSoUCgXPPfcc77//frXSENq1g2nT4N13QRRBLofLZZpXo8oJUNUp7n6i1Wr58ccfOXr0KBkZGSiVSoKCgigtLaVnz57XFKTA9pD19NNP895777F8+XJCQkKoU6fOVV8rCAKyy933kErw9va2tys/u3w5jTIysF52yhlUKsIjIigqLqZDhw488cQT9vXs27ePoqIiOnfujJub23X3rUGDBkydOpV9+/axatUqVqxYwe7duxk0aBBNmzZ1dG+6SxQWFuLp6fmfP74XL14kPj6e2rVrX9cB+1ckgoCLQobL38gZ69evH7m5uRw9epTvv/+ecePG3dK17lrodDoWL17M4cOHUalUjBgx4rayqzQaDS1atKBFC5vAkJmZaReozp49S3p6Ohs2bMDJyYlGjRrZRao/N9QIDQ3F3d2dgwcPMmDAgCv2r0uXLly4cIHDhw+zfv16evfufcvjFAQBDw8PNBoN6QfTCekXgtJZiUVnCyGvqKiwC7DOzs7o9XqKi4spKipCEATKy8uxWCx2V5irqyvl5eWUlZWhUqmQyWTUrFkTZ2dnCgoK0Ov1GI1GiouL0el1OPs4EyqEIrg8GN+lESNG8Oyzz/JiSTGikwt+NW1ioFKpYsjo5xjRuTVSqRSj0YjRaMBitiCXK5ArFCgVCswWCwa9HkEQMFvMZGZlEeDvT6fe/RnX79srthe3ejnH9v3OZ7+sQePiwooF/8eR+D1IJQKDBg3ioYceYuvWrcyaNYsvvvjiins7Bw4c/DdxiFIOHDh4YFm0aBFgm+mbNGlStZuXay2bO/fqYZtgy67o2bOnfVbRz8+PzZs307OnretRUFAQHTt2/Nvj3rdvHw0aNKB9+/ZUmsy27Cv3KzvdnU1OZMk3n1NWUoxEIuHS+bOkp50HwSY4mQwGpFIpKicVri4uuLm5oZTZOgb51ahhD6QeOPwpvHxrUCewDjK5HI1SiY9/AFKpFI1Gw2P9e+KhkrNw4UJKS0s5e/YsAEajkbp16xIdHY2npyd+fn7Uq1cPFxcXe9vpPwdyx8XFMWTIELtA8/TTT9uFwU2bNnH27Fk6dOhgf71EIuHixYuALfcoOjoasLkCZs6cCdjaXvfo0cPeClwul+Pm5saGDRs4f/68/bOpcihpNJpb/jy8vb15+umnmTVrFt99990fpSF9+kCDBrB/PzRvDmFh11zHg+KUSkhI4Mcff+TixYvk5+dTu3ZtateuzYABA/j+++9JT0+/4To8PDwYMWIE3377Ld9//71dpLweWVlZ5ObmcvjwYQoLC2mck0MDqxUntRqNWo0qNJSSVq1ISkqiQYMG9veZzWY2btyITCa7pkvqr0gkEtq2bUvz5s3ZtGkTW7duZc6cOYSEhDBkyBACAwNvaj0Obg6z2UxpaSk1a9a830O5r4iiyNKlSxFFkWHDht1zgU4QBJ566ikKCwtJSkpi2bJlDBs27G+t8/z588ydO9fe2W7MmDF4e3vfkbHWqlWLWrVq0b17d/R6PadPnyYpKYnk5GSOHj3K0aNHAVvuU5VAFRISQqtWrWwuptRU+3X/z+sdPnw4WVlZrFu3jrp
16xIeHn5TY8rNzWX79u3Ex8djNBpRKpWMHT4WUS2Src5GJ+owGo2YTWZ7MLit45xNaFKr1ahUKkwmE6Wlpeh0OiQSCa6urqjVaiorKzEajSgUtrw5Nzc3nJycyM/Pt5UFmkwITgIl2SXM/34+p1ucpl+/fvj6+v7t430rlJSUUFlZac/lW7VqFV5eXtT09eFMbiFmRFQaDbpKHWt+WkTt+qHk5eUDIJEIaDRqnJycUDk5YbVYyc7OQqlUIpPLkUgk6CorycvL48DObdQKqn/F9stLS3Dz8ETj4kJleTmbl/+Ct39NNHIZZ86coX79+gwfPpxWrVrx0EMP3dNj48CBgwcXhyjlwIGDB56qmb7CwsIrsjaut+yvzJs3j5ycHHvbaa1Wy7x58+zCx90oXZFJJAgCtnbZf2owbzQYeOvZkbw55wdq1qtPeVkZ4x7uhK5Sj7efL1KpBN8aNahdpw6ZGZlYrSJqjQa1ky23QqX8o7TDSa2xzzhLpFIslj+CwgUEFJdLRkRR5Ouvv6Zbt253ZN/+mt/UtWtXfvrppytel5mZWa0URSqVXjPM/M/rCwsLIz4+HoCZM2eSlpbGiBEjbmus4eHh9O7dm7Vr1zJv3jxeeOEF2/hDQ21/qigqgp9/BokEHn8cLotR99sppdfrWbp0Kbt27eLChQsolUpCQ0Pp2rUrffv2RaFQsG7dOs6ePYvZbL5huVtkZCQxMTHs2LGDn3/+mZEjR17xmsLCQg4dOsTBgwfJzMwkLS0Ng8FAQEAAHTp3plZWFtIqp0ODBnaxMzj4jxD//fv3U1hYSExMzBXlJTdCpVLRv39/2rdvz8qVKzl06BAzZsygdevW9O/f/5bX5+DqFBUVAY7Oe8ePH+fMmTM0a9bsvjUzUCgUPP/888yYMYPt27fj5+dHp06dbnk9oiiyefNmVq9ejdVqpXv37vTt2/eulcGqVCoiIyOJjIxEFEXy8vLsAlVqaipbt25l69atKBQK/Pz8yMrKYtu2bVeIUmAL63722Wf54IMPmDdvHlOnTr2mkCaKIqdPn2bbtm0kJiYCtvM4JiaGtm3b2sQkayWry1dTpCiissAmLFU11KioqECpVKLX63FycqJGjRpYrVaKiorQ6/XodXqysrKQy+VYLBbKSsvw9vljLAqFgoCAAIqLi9HqtIgSkVNLT3H28FmyLmRx6NAhOnbsSO/evW/oEr1TlJaWMmTIELuo5uPjw+rVq9GVFJKbmcnHL4+xO8F8Amry7Fvv4+bmipOTE1+/OZmHYnvwUNcegEhhQQGiaItMkMvlrJ73HZt/tk0IOru68dJ7H1+x/a4DhxG/bRMjOrfG3cuL8BbR5GZm4KKQMf+331i8eDEKhQKr1crs2bPvyTFx4MDBg49DlHLgwMEDx7Vm+jw9Pa+77HocOXKE/Px8srKy7AJOSUkJtWvXJj8//6rvcXV1pbS09JbH/9BDD3HmzBn27Nljy2ySCOQVFOLh6YEoWikuLsJy4RwmkxEnZ1cEQcLv61cBULNWAC6u7giCrTOcVCLFxdWFstIytLcoiDg5O6Ov0KKW29xO/fv35/PPP6ddu3b2md8LFy4Qdh2H0J/p3Lkzb7zxBq+++ioajYb58+fbl3Xv3p133nmHEydO0KRJEwAOHjxIq1atrrvO7t278+6775KSklKtfO+hhx7iwoULbNu2jdatW3P27NlqZSC3w8MPP0x6ejonTpy4dmnICy9Aaqrt75s3w/z54Ol5X51SZ86cYcGCBZw+fZrs7Gzq1atHaGgoI0aMsAusYCuN2bVrF+np6fbudtdj0KBBnDlzhn379tGoUSOio6PRarUcOXKEgwcPcu7cOftrAwMDadiwIQkJCfTt25cWzZpBTg78+ivUqgVjx3Jm8WLkcrm9HNBisbBhwwZkMpk93+R28PLyYvTo0cTExLB06VL27dvHkSNH6N69O926dbM7FxzcHlWi1H85cNhsNvPbb78hk8kYNGjQfR2Lu7s
7L7zwAh9//DG//vorPj4+N32NBptwPn/+fE6dOoWLiwujRo2icePGd3HE1REEAT8/P/z8/OjSpYs9p7AqMP3ixYtkZ2czZ84cMjMzadKkCWFhYTRo0MDu2PTz8+Opp57iu+++Y/bs2UyaNAm5XG7fhslk4uDBg8TFxZGZmQlA/fr1iY2NJSoqqlpZoFqiprVTa7Zbt6P2V6Mr06HVajGbzfZud2Bz4VqtVntX34qKCnvzDIPBgNFoJL8gH5lchpubm31SRhAE3D3cEZwFihOLMZ41olarEQSBEydOUFhYSHx8PF27dqVbt263HOB+qwQGBnLw4EG0Wi1JSUkkJiby448/Inf1IKrPMN5dsBSFXIqTkxontZO9MQrAxI++sP9dp9Oh0+lRKGyljl8uXcPFixfRarWoVE42gVOA8nItUa3b8n8bdgLg7OrKJ4uX29djsFhQyaQ4ySRMmTLFHnLuwIEDB3/GIUo5cODggeNqM33r1q2zt6y+1jK4dqbUvHnzeOSRR6rdrLq7u9O1a1d+/PFHml0l4PrJJ59k5MiRrFq1inHjxlVzgFwPDw8PVq5cyauvvkpxcTF6s5UhL7xCw6jmWCxW9DoDtd09eOyFV5jxwijcPL2I6T0AsDmb/oqrqyvlWi1lZWVIResVy69Fn+Gj+fKNV/nxw7dYuHAhkyZNwmAwEB0dbT9ekyZNuukHnt69e3PgwAGioqJwd3enY8eOdrdKcHAwP/30E88884y9zKFp06ZXdU79meDgYBYsWMATTzyByWRCKpUye/ZsWrVqxfr165k4cSKXLl2ioKCA0NBQ3nrrrZve/79SVR7zwQcfXL00RKf7Q5ACyMiAl1+GOXPui1PKbDazevVq1q9fz5kzZ1CpVDRp0oTevXvTo0ePK1wPDRo0YNeuXZw+ffqmRCm5XM6YMWN455137EH9GRkZ9twyPz8/WrVqRcuWLfHz8yM1NZWUlBRycnJsTrJXXoEJE0AUqdDpyM7OpkGDBvZxVbmkOnXqdEdcTfXr12fy5MkcPHiQlStXsnbtWn7//XcGDBhAq1at/vN5SLdLYWEh8N92Sm3fvp2CggK6d+9+R8rb/i61a9dm9OjRfPfdd/zf//0fkydPxt/f/4bvS05OZsGCBWi1Who3bsxTTz1lF9TvF3K53F6+B7bzbcGCBaxevZoLFy5QVFTEzp07kclkBAcHExYWRnh4OJGRkfTs2ZONGzeyZMkSRowYgVarZdeuXezatQutVotEIqFly5bExsZWE+j/Sl1ZXcKUYSQaEtG4a3BxcaG4uJjS0lKsVitSqZSKigr0ej1ubm52oVupVOLp6YnRaKSwsJCioiLycvPQarW4uLjg4uKCIBGoFCvxVfoyuMVgvLp5cejQITIzM/Hx8UGn03Hy5En72Hv27ElMTEw1ke1OIIoiFy9eJDExkcTERNLT0+25WF5eXkQ0CsXdWY1V7oGTTHr9dSFSXFwMgIenJ1Uubzc3N/R6PRaLGWcXZwx6A4WFRVitVlxdr3SC2br3QYCzynF9duDAwXURxKorlgMHDhw4+NtYrVbOnTvHiRMnSEhIIDc3F/9GTWjQoSuCxYzayQkntRqp5Po3hX+lpKSY0tIy3NzcbuoBXxRF9BYrDbycCfa49Ryma1F1My6KIq+++io6nY7vvvvujq3/asyePZvjx4/zySef3JF29RkZGXz44YfI5XLeeOMNfHx8/lj44ouwb1/1N7RujWXmTJ5/+WVUKhVffvnl3x7DzYxx3rx5HDt2jMzMTIKCgoiKimLEiBF2l+Bf0Wq1TJw4kYYNGzJhwoTrrt9kMpGUlMTBgwfZsmULJ0+exMXFhY4dOxIdHU2rVq2oXbt2tQeJsrIyXnvtNVq2bMno0aOrrS8hIYFvv/2W3r1706dPHywWC2+++SYlJSVMnz79b7vc/orRaGTr1q1s2rTJno02dOjQmxLjHFRn7dq1rFu3jokTJ963srX7SVlZGdOmTUMulzN
9+vQ72vXu77JlyxaWL1+Ot7c3kydPvub1z2w2s2rVKrZu3YpEImHAgAF07dr1gRUCioqKmDJlCuHh4XTv3t3uorp06ZL9Ne7u7jRq1IjExEQyMjKoX78+paWlmM1m1Go1HTp0oFOnTjd9bbGKVuJ18aQYU5AiRS7KuXTpEnq9HqVS+UcTkcuiuslkwsPDo5pYm5mZSWVlJUqlElEUEWQCKncVXjIv+nr2xVXqiiiK7Nu3j19++YXCwkIqKipwcXGhoKAAiURCQEAAXl5e9OnThzZt2vytMHudTsepU6dITEwkKSnJPmkikUgIDg4mIiKCiIgIatSogSAIXCip5GSBFpVUct1zo0xbRnFRMWqNGh/vP34fLRYzGRkZVFRUoNE44+3tRVFREVariJu7G+5ubvw5psBktSIC7Wp54vw3mgA4cODg34/jCuHAgQMHfxODwcDJkyc5fvw4iYmJVFRUALYcorZt2xIeGUWBixtWUUAhvb0bUFdXV7RaLVptGa6uLkhuIGqZrSIyiUCA8519wBo+fDhpaWno9XrCwsLueiaEyWQiOTmZoKCgOyJIAdSqVYsnn3zS3glw0qRJf5SAvfUWPPUUZGf/8Yb9+5G+/z7OajXllZWYTKY7PstdhdVqZevWrfz666+cPn0auVxO8+bNGTx4MJ07d77uA4yLiwsBAQGcO3fuqrlSVquV06dPc/DgQY4ePYperwegTp06uLu7U15eTqtWrRg8ePA1169yUpGnyyPHnIM1MwNJTh7qhlGknrE5zKrchAcOHKCgoICOHTvecUEKbFkuDz/8MG3btmX16tXs27ePjz/+mBYtWjBw4MD/tOvnVqlySj0IDqH7wZo1a9Dr9QwePPiBEqQAunbtSk5ODnv37uW7775jwoQJV1x78vLymDt3Lunp6Xh7ezNmzJjruoYeBDw9PQkNDeXUqVOMGjWK0NBQBgwYQGlpKSdPnrSLVOvWrePixYucP3+enTt30qpVK5588kn69u17y5+VRJDQ1qktToITJ4wnKNYXI5FJ8Pf3RyaTUVxcjNlsxmKxYLVY0Rv0lJSU4OTkZC+58/DwwGy2uYQElYDBZKD0TCmpa1PJ9M6kS5cuNGvWjIceeojQ0FDmz5/P2bNn0Wq1+Pj4IJFIyMvLo6ysjEWLFrFlyxb69+9PVFTUTQmIoiiSk5NjL8s7c+aMXUxzcXGhTZs2hIeH07hx46uWCQY4qzhXXIHBYkV1DbeUxWqhtKQEQeCKa7dUaguEN5lMl0vtdfj5+ZGbm0dpSSmi1Xr5PQKiKGK2itTQKB2ClAMHDm6I4yrhwIEDB7dBSUmJ3Q2VkpJiD+729/enXbt2REZGUq9ePbuIkJxfRlqpzja7ehuz1xKJFBcXW8ZVWVkZ7lfp5leFKIqYRZEAjcqeJ3WnWLly5R1d3404deoURqORqKioO7re6OhoLly4wI4dO1i8eDFPPfWU7XPx9oZZs2DUKPhzntjGjXTx9mZtUH2KSkrx8vJCKnBHnQgFBQXMnz+fPXv2kJGRQVBQEO3ateOJJ56o7ua6Dg0aNCArK4sLFy4QEhKCKIqkpaVx8OBBDh8+bJ9JVygUtGzZklatWtG4cWNMJhPTp09n27ZtNGrUqFpZY7m1nHPGc+RYcqj7Ql0sEgvrC1cgGksR3EUkOScojZAT4BaAsY6RSnMlGzZsQCqV/q0sqZvB3d2dESNG2POmDh8+zPHjx+natSs9evR44ESGB5HCwkIkEsk9C2J+kMjIyOD333+nVq1atG3b9n4P5woEQeCxxx6joKCA06dP8+OPP/5xrcIm/i5ZsgSDwUDLli15/PHHcXJyus+jvjmio6NJTU3lyJEj9q6tbm5uNGvWDL1eT1paGnK5HE9PT1xcXEhPT+fixYts2rSJAwcO2EsCGzdufNMTFoIg0NypOU5lTqy8uBInfyekzlKUEiUBTgGUlZVRVlaGSTQBtkmR/Px8lEqlrVOfkwqFiwK9oMdL6UV
7t/ZIy6XsqLuDEydOcP78eTw8PIiJiaF9+/ZMnDiRzZs3s2bNGgwGAwqFAl9fXwwGA+Xl5aSlpTF79mzq1avHwIEDCf1z043LVGVyVZXlFRQU2JcFBgba3VCBgYE3/D1SyiSEeGpIztdisYpIJVe+vqSkxOZ8cnOrljdVhbOLC5WVlegNBiorK3BxdaFGDT9yc3MpK9NiFUW8PD0xWqwopAINve58AxkHDhz8+3CU7zlw4MDBTSCKIpmZmSQkJJCQkEB6ejpgu8kNDg4mKiqKJk2aXLP9s9ZoZl9GERYRlLfplrJarWRmZgAQULPmNUsAjRYrCNDK3x1Pp392CPSPP/7I77//zrvvvoufn98dXbfZbOazzz7j3LlzPPLII8TExPyxMCkJ8dlnKalRk6KQhpTVCiS/Zm1KPb1Q+/khk8uRSQRclTLclHLclXK81QoktyFSiaLI3r17WbBgAUlJSchkMpo0acLjjz9O27Ztb0n4Onr0KHPmzKFt27a4ublx8OBB+0OMRCIhPDycVq1a0aRJE3uocBUXLlzg448/RqPRMG3aNEzOJpIMSaSb0jGKRgB05Tp05ToCLCISnQ5RELBKBYrUSkS1EmdnZ6x6KxnxGYQQwvCBw2/5eNwuoihy7Ngxli9fTkFBAa6urvTr14+HHnrob5XI/NuZOnUqoijywQcf3O+h3FNEUeSLL74gJSWFCRMmXLUT3INCRUUFH330Ebm5ufTr148uXbrw888/s2/fPhQKBY8++iht2rR5YMv1rkZlZSWvvfYagYGBvP766xQVFbFjxw727NmDTqdDJpMRHR1NbGwsAQEBbN26le+++w6FQkGtWrWqNZ0IDAy0i1RBQUE3/L5//fXXJJ9Opt9L/dD76ymyFGHF5jiSiBLKissoKSlBJpOhUCmQyCQIMgG5VI7MKiNrXxatvFvRr3M/+zpzc3PZsWMH8fHxdgGqTZs2dO7cGYPBwPz588nJyUEmk+Hk5IRWq7WXDoqiiEQiISwsjAEDBqDRaOxuqFOnTmEy2UQylUpF48aNiYiIIDw8/LbywkRR5FB2CfmVxivK+IwmI9nZ2UilUmoGBCAIVzuOIpmZWegNegRBwEmlwt/fH7PZTG5uLmazBY2LCxpXN8J8XKjrdneD3R04cPDvwCFKOXDgwME1MJvNnDlzhoSEBHsXHbCFn4aFhREZGUlERAQazc1lNp0vruBUYTkKieSqM5Q3Q2lpCSUlpbi6ueJxFbeUVRQxWKzUd1fT0PvOlLvdL0RR5PXXX0etVvPOO+/clW2UlpYyffp0ysvLmThxIvXr18diFcku13MpLZOSsgqsl7sMYTCA0Yjo6Ync3R1RBMvln1BBAI1cSm1XJ2q5ON10mWZZWRk//PADGzZsIDMzk7p169K9e3cef/zxWw4HLywsZM+ePbz//vt2YUsQBEJCQmjVqhXNmjW74bm6efNmVq5eScN+DXFv7Y5RNKIQFChQ2BsNlJSUUMtoRHrZHWi1WilQKJB7e+Pm7kZOUQ5IwcvZi1aaVjRUNERy1Yebu4PZbGb79u2sX78evV5PrVq1GDJkyAMtOtwvrFarvYnDq6++er+Hc085ceIE33zzDZGRkTz//PP3ezg3JC8vjxkzZpCXl4eTkxNSqZRatWoxZswYatSocb+Hd1vMnj2bXbt20bJlS3spmqurK506daJDhw5XOKAWLVrE3r176dq1K61btyY5OZmkpCTOnj1rL2NzcnKiUaNGdpHqryVoSUlJfP3114SEhNjP+WxLNpnmTArMBRRaCqk0VdqCvkVb8xFjkRFTronyi+WUny4n/Ww6QUFBfPnll1eUU1ZWVrJ371527Nhhv2cIDw+nffv2nDx5kl27dgG2sum8vDz0ej0SiYTKykqys7MpKipCo9EQGBiIk5MTNWrUsLuh6tevf0VZ9u1QabJwMKuYCpPlT8KUSG5uLnq9AW9vLzSaazucyspKKS4uxmK1IpVI8fLyxNnZBbP
FTH5BARK5AnNxPsPat3R0R3XgwMFN4Sjfc+DAgYM/UVlZSXJyMsePHycpKcmeu1PVbS4yMrJah7Fboa67mrxKA4U6E0pBcluuGhdXV8rKtGjLtLi6ulZzS4mXBSlXpYxgzzsXbn6/uHDhAmVlZbRp0+aubcPNzY1nnnmGTz/9lDlz5vDypCmk6URK9CZQuyBHgiIzAwEwmy1YrRakBj1SpQL+dNNusYpUGC2cKignvVRHIy9n/DTK6zoXjh07xrfffsvx48eRSCS0a9eOUaNG0bx585t2PGi1Wo4cOcKhQ4c4e/as/d9FUaR///60bt36ljKd2sS24ZT/KcxuZioqK/BQe1QbS9UDmGixVNuWVRBQKpXoKnWYyk1onDVYJBbidfFcNF2kk7oTKsm9KaWTyWR069aNNm3asGbNGvbs2cPnn39OZGQkgwcPvqab8b9IVfcxT0/P+z2Ue4rZbGbZsmVIpdJrZqg9aPj4+BAVFcUnn3yCKIo888wzPPfcc3ct3+5uYrFYOHr0qD2LsaSkhLZt2xIbG0uLFi2u+fv66KOPkpGRwdatWwkKCqJ79+50794dvV7P6dOnSUpKIjk5maNHj3L06FEAAgIC7AJVvXr1WLZsGYIgMGzYMPu1LUAWQIDM1kDCKlo5ee4k333/HdEto2kU3IhVa1dRUFCAKIp2QXDnzp288847vPbaa9VKX9VqNV27dqVLly4cP36c7du3k5SURFJSEv7+/rRt25bjx49z7tw5BEGgoqKC5ORkzGYzTk5OtvJ7s9l+TB599NE73kFRLZfSrIYbR3JKqTRZUEol6HWV6PUGlErFDScvNM7OlJSUXL6HESkpKUGt0YAgwc3Ti9xzpzm89jcKThzk+eefv8KV68CBAwd/xSFKOXDg4D9PQUGB3Q2Vmppqn3GtXbs2kZGRNGnShDp16vzt0giJIBDp68bB7GLKjbYbwVsVpiSCBFdXV0pKSigrLcXDw/YwWdVtz0kmIcrPDdm/oFzp+PHjAHc8T+qvBAcHM3jIEPanpvF7Wh7Obm4opVLbZ+PqCiZfyMvD/lGJImRkQGAgqGz5LVKJgFQiRRRFdCYLx3JLCXBWEebjcsVnodPpWLJkCUuXLiUzM5PAwEAGDhzIo48+elOuO71ez/Hjxzl06BAnT560n6++vr52R9TRo0epX7/+LQlSZZYyNldsxqm2E2V5ZWjLtGhqaKo9UMhltgdgs1SK7E9OKbNUilKpJDc3FwB3N3dkEhlm0cwl8yU2V2ymm6YbTpJ7l3fj4uLC448/TkxMDMuWLSMhIYHExERiYmLo3bv3VYOA/2sUFRUB/OeC4Xfu3EleXh6xsbH/CJGyvLycH374gRMnThAWFobFYuHSpUtotdp/lKBYWVnJnj172LFjB8XFxYiiiL+/P/Xr1+eNN964YdmdXC7nmWee4f3332fhwoX4+/vj7++PSqUiMjKSyMhIRFEkLy+PpKQkTp48yenTp9m6dStbt24lNzeX/Px8OnbseE2hRCJIUIpKzKVmZCYZLZu2JCo8iu3bt7Nhwwb0ej1169YlJyeH9evXU1RURLdu3ejWrVu1a4pEIqFZs2Y0a9aM9PR04uLi2LlzJwcOHLA7Ti0WCzKZjKCgIBQKmxvVz8+P4OBgMjMzOXr0KElJScTGxtKtW7c7mhfmqpTT0t+d47mllBpMlFVUAuDh6cmfO+hdDalEilqtpqKiEplMhtliobSiErWTE3Xc1HRo14K8Y/tISUnhiy++4MUXX3Rcbx04cHBdHOV7Dhw4+M8hiiLp6en2fKjMzEwApFIpoaGh9nyou3WzX2EycyS7FK3RjFwi3LKAZBWtZGZmIlqt1KxZEwQpRqsVtVxK8xpuuCr/eTPnV+Ott96isrKSjz/++K5mpVhFkeT8Mk7nFKHX6XBSyOxinw0RcnOxFBRgMZuRSKXIpFKQyaBuXZBfWZ5gsloxW0W8nRQ0reFmL+dLTU3l008/5fDhwwiCQKt
WrXj++eerBYtfDbPZTFJSEocOHSIhIcGeMeLu7k6LFi1o1aqVXTg9fvw43333Hb1796ZPnz43dQwqrZVsrNhIkaUItaDGoDeQl5eHTCbD39/f/rAoiiIXL17EWS7Hy2AAk4kCQcCo0eDm5kZBQQHOzs7VRA6LaEEn6qghq0EPTQ/kwr0/P0VRJDk5mWXLlpGTk4NGo6FPnz506NABqfTONgP4J3Hw4EHmzZvH8OHDH8ig77tBeXk506ZNQxAEpk+f/sA/LKempjJv3jxKSkoICQnh6aefZseOHWzevJlatWrx2muvPfCB/rm5uWzfvp34+HiMRiNKpZK2bdsSExPDli1b2LNnD2+88QaBgYE3tb5Tp07x5Zdf4ufnx5QpU667/yaTiTNnznDkyBG++eYbKioqaNmyJXK5HB8fH7uLqkGDBnah6syZM8ycOZOOHTvy2GOP2del1Wrt7ssLFy6Ql5dHo0aNcHFxQa1W07NnT2JiYuzuNb1ez6lTp0hMTCQpKYn8/Hyys7PJzs5GpVIhlUqRSCTUrl2b1q1bU7duXeLi4jAYDNSoUYPAwEBOnDiBTqdDo9HQq1cvOnbseEfdcWaryPr9R9FKVajUGlzUamQS4Ya/uXqDnry8fJRqNYJEgq6sjDb1a9Ggpi+CIKDX6/nmm29ITU2lVq1ajB8//o510HXgwMG/D4co5cCBg/8EJpOJlJQUuyOq9HJnNbVaTUREBE2aNCEsLOyedS7Smy0k52vJrTQAoJRIbkl4sWU6lOLi6YHaSY23WkGYjwsa+b/DAJubm8ubb75Ju3btePLJJ+/adkRR5GSBlrRSHTIBCvJyMRpNePt4o1H/2bUkYkpLRywtQSKR/FFeolDYhKmrdCmyiCJGixVvtYImXhpW/LaMBQsWkJWVRc2aNRk+fDhDhgy55gOV1WolNTWVgwcPcuzYMSorbTPZarWa5s2b07JlS0JCQq5wF1RUVPDqq68SHBzMxIkTb+oY7KrcRaopFY2gsec/FRcXU1ZWhkatwdvH2/76KhG3Zs2aGAwGcnJycHZ2xmAwYDKZqFmz5hXlN1XCVKQyklZOrW44pruFxWJh9+7drF27loqKCmrUqMGQIUMICwv7R4VE3yk2btzIqlWrGD9+PI0aNbrfw7kn/Pzzz+zcuZNHH32UTp063e/hXBOr1cratWvZuHEjAL1796ZXr15IJBJEUWT27NkcP36ciIgInn/++QcuzF8URU6fPs22bdtITEwEbI68mJgY2rZtaxcDqwSgLl26MHTo0Jtef9W526xZM8aOHXvD7+9PP/3Erl276NatG76+viQnJ3Pq1Cl7ib5MJiM4OJiwsDDUajWLFi2iU6dO1USpKjIzM1m8eDE//vgjTk5O9OnTh5KSEioqKpDL5QQHByOKImfOnMFyudRZo9EQHh5OREQEISEhJCcnExcXx7lz5zh9+jSiKBIcHMzo0aM5e/Yshw8fBqB58+ao1Wr27duH2WzGw8ODvn370rp16zvymZeUlDBt2jRcfGrQb+QYSsw2oQpAKghILneZtRXq2SZxbH+gXKulUluGyqAlfv1qGoUGV8tnMxqNzJkzh6SkJPz8/JgwYcItuXcdOHDw3+Hf8fTiwIEDB1dBq9WSmJhIQkICJ0+exGi0dRDz9vamS5cuREZGEhwcfF+cEiqZLdMhs1zP6cJy9GYrggByQWK/Cbwa4uWbQblag5PFSmVZGY29nGnk7/6veqhOSEgAIDIy8q5uJ6tcz8UyHTKJgFwiwcfHh+zsbAoLC1HI5cjtLigBaw0/jBXlqCzWP1ZgNMKlSzZh6i8lD1JBQCGRkKfVMWvLJpZ88QmCINCpUycmTJhASEjIFeOpcvEdPHiQw4cP28VTuVxud0SFhYVdN9NMo9FQq1YtLly4gMlkuuGs+gXTBc6ZzqEUlNUCyd3d3dHr9VRUVqAqV+Hs7Gwfi06ns2WYGQz2cZtMJpydna86NqkgRYaMZGMydeR1qCG
7P8HMUqmUmJgYoqOjWbduHTt27ODrr7+mcePGDBkyhICAgPsyrvvFf618Lzs7m927d+Pv70+HDh3u93CuSVFREXPnzuXcuXN4eHjw9NNPV7teCILAqFGj+OSTT0hMTGT58uUMGTLkPo74D0wmEwcPHiQuLs4uYNevX5/Y2FiioqKuEFKCg4Px9PTk0KFDDB48+KaFlh49enDhwgWOHj3K1q1b6dat2zVfm5mZye7du/H19aVfv37IZDLat2+PxWLh/Pnz9sD0lJQUUlJSKC0tJSUlBalUSoMGDWjUqFE1R13NmjV5/fXXkUql/PTTT/z+++9IpVKMRiN5eXnExcWhVquJjo6mZ8+eNGnShLp161bbt7Zt2/LQQw/Zyws3btzI8ePHmTBhArGxsYwcOZKNGzdy5MgR1Go1vXr1Ij8/n/379/PDDz+wZcsWBgwYYG9ocbusWLECo9FI+5bNiK7jS6XJQna5nmK9kVKDGZNFRBT/+M2TCAJquRQ3pZzs3HR2/rqIppFN8HRzISEhgVOnTtkFboVCwXPPPce8efM4evQon3zyCRMmTMDHx+e2x+vAgYN/Jw6nlAMHDv5V5Obmcvz4cRISEjh//jxVl7h69erZMyf8/f0fKAHHaLGSfVkcKTeauTxJiQD2HCNRtM1SAkgEW1BpeWYam379idYtm/P444/fj6HfNT7++GMuXbrEZ599dteCfHVmC/EZRRgsVlR/EiZ1ukry8vKRyWX41/ijdM1sNpGdkYl3ZQVOfz1/AgNB/dc8KFsAbKm2HIvVyvZFc+jbtTODBg26Yp+ys7M5dOgQBw8eJD8/H7BlkjRu3JhWrVoRFRV1S2Gxy5YtY9u2bTdsdW8STazQrkBr1aKRXJlnZTabyc7Otme/yOVyu4MqwD+AkpISKnWVSKVSLBbLVV1S9qMhilSIFfhIfejn3O+B+A7m5uayfPlyEhISEASB9u3b07dv3/9MmclXX31FcnIys2bN+kcGZt8qVfv70ksvERYWdr+Hc1WOHTvGokWLqKysJCoqiuHDh18za66kpIQZM2ZQUlLC448/fl+FtrKyMnbt2sWuXbvQarVIJBKaN29ObGwsdevWve57V61axcaNG3n55Zdp3LjxTW9Tp9PxwQcfkJ+fz/jx4696rRNFkS+++IKUlBTGjRtHkyZNrrsPJ0+eJC4ujl9//RUfHx+Cg4MRBIGgoCB7qZ+bmxtJSUkcOHCAH374AbPZjEwmQxRFatWqRYMGDaioqEAqlVK3bl0GDBhww+6feXl5LFu2jEWLFtkcqhoNw4YNIyAggH379mEymahbty6xsbEcOXKEY8eOARAUFMTAgQOvOslxI86fP89HH32El5cX77zzzhXXAFEUqTRZMFlFREQkCKhkEpQy2+9lRUUFr7/+OqIoMmLECObPn09AQADTpk2rJsBZrVZ+/PFH4uPjcXNzY8KECfj7+9/yeB04cPDvxSFKOXDg4B+N1Wrl3LlznDhxgoSEBHvQslwup1GjRkRGRhIREVGtO86DiiiKlBjMlBlMaI1mygxmzFYREZAJAq5KGS5KGa4KGe4qOWaTialTp1JRUcF77733r3E7aLVaXnvtNaKionj22Wfv2naO55aSqdX/qSX2H5SWllBSUoqT2glfHx9AQBStXLx4CbkAAUYTmIx/vCEoCJR/lOGZzCays7Mp12oB8Krhj7eLhi7BAfZw++LiYrsQdenSJft7Q0JC7IHlVe6kWyUhIYFvv/2WXr160a9fv2u+7qzxLDsrd6ISVEiFqzsGKyoqKCgoQKFQUKNGDcrLyykqKsLHx4fCwkLMZjMSieSKLKmrYRbNGEUjPZ172rtdPQikpKSwbNkyMjIyUKlUPPzww3Tu3PmOtF9/kHn77bepqKjgk08+ud9DueskJSXx9ddfEx4ezosvvni/h3MFJpOJpUuXsnv3bmQyGUOGDKFjx443FG/T09P55JNPsFgsvPTSS/e8DDMjI4Nt27Zx6NA
hzGYzarWaDh060KlTp5su1crOzubtt9+mdevWPPXUU7e0/aysLGbMmIFSqWTq1KlXbLMqY69Ro0a8/PLLNyWGp6amMnPmTMLDwwkKCiIpKYkTJ05QWFhIUVERBoMBDw8PPDw87GXLL7zwAjqdjt27d2O1WqlXrx4uLi4kJSVhtVpp1KgRAwYMuGFuVllZGR9//DHr16/HYDBQu3ZtOnTogF6vJzc3F4lEQocOHWjatCkbN27k9OnTAISHhzNgwABq1ap1U8dNFEU+/PBD0tLSeOaZZ2jWrNlNve+vLFiwgP379zNw4EBSU1NJSkriscceo2PHjlds79dff2XHjh1oNBpefvnlm84Qc+DAwb+ff/fdlgMHDv6VGAwGTp48ac+HqqioAGzdttq2bUtkZCQNGzb8x7UhFgQBD5UcD9XNORYUCgU9evRg6dKlbNiw4a5mL91LTpw4gSiKd7V0r8JkJrfCgEy4eqCrm5sbBoMBXaWO0tJS3Nxs5ZGCIGBChMA6kJMDJhO4u/9JkBIpLS0lJzcXs8mETCbD188PVzc3TFaRi4WlXDx5goMHD3LmzBn79mrXrk2rVq1o0aLFHQnYDwkJQRAEUlNTr/kaURQ5bTyNiHhNQQps5YB6vZ7y8nKKi4vtZSw6nQ6r1YrFYkEikeDmemPhV4oUK1ZSjakPlCjVsGFDpk6dSnx8PKtXr2b58uXs2rWLwYMHExUV9UC4uu40oihSUFBw0w+x/2QsFgu//fYbEomEwYMH3+/hXEFWVhbff/89WVlZ+Pv7M3r06Jv+XAIDA3n66aeZPXs2c+bMYfLkydSocXfLY0VR5MSJE8TFxdlFET8/P2JjY4mOjr7l315/f39q167NsWPHeOyxx27p/QEBAQwfPpy5c+cyZ84cJk6caBeTzWaz/XMfOnToLX2PzWYzZWVl5OXlkZeXh0qlQqPRYDabEUURtVqNq6urPTtr/fr1vP7663Ts2JFVq1bZ3ZdNmjTBarWSmJjIqVOnaNGiBX379sXPz++q23V1dWX69Ok88sgjfPLJJ5w+fZrVq1fToEEDNBoNFRUVbN++naNHjzJw4EC6devGqlWrSEpKIjk5mVatWtG3b1+8vb2vuv4qDhw4QFpaGqGhoTRt2vSmj8tf6dChA/v372f37t288MILnDx5ktWrV9OyZctqJY+CIDBs2DBUKhUbN27ks88+48UXXyQ4OPi2t+3AgYN/Dw5RyoEDB/8ISkpK7G6olJQUzJfb0fv7+9OuXTsiIyOpV6/eAxf2erfp0KEDW7ZsIT4+nh49evwrshqqbuYjIiLu2jaytHrMVhGV9Frni4C3tzfZ2TmUlJaiUCpxUjkhlUowmy1YpFKktetUe4fFYiY3I4Oy8nIsCLi4uODv749MJqOyshK9xcq6E8dI3LQSAB8fH1q1akXLli3veCmDWq2mTp06XLhwAaPRiEJxZYfAMmsZeZY8FMKVy/6Kp6cnBoMBrVZrX5dOp7N/D52dnZHdRMi+IAjIkJFuSscoGm9q2/cKiURCu3btaNGiBZs2bWLr1q3Mnj2b0NBQhgwZQp06dW68kn8Q5eXlmEymf43D8nrs3r2b7OxsYmJiHqiyIVEU+f333/n1118xmUy0a9eOoUOH3rKo07RpUwYOHMiKFSv4+uuvmTJlym27LK+HwWAgPj6e7du3k5eXB0CjRo2IjY39280CoqOj+e233zhx4gQtW7a8pfe2bNmSCxcu2MvuqsrZ4+LiyM/PJyYm5oZ5caIokpmZSWJiItu3b2f//v2kp6cTHByMu7s7MTExRERE0KhRIxQKBZmZmSQnJ5OcnMzFixf5/fff0el0eHl50ahRI9q3b2+fPFOpVLRv3578/HwOHz7M0aNHadu2Lb1798bd3f2q4wkPD+fbb79l8eLF7N69m6ysLJydnfHx8SEvL4/MzEwKCgoICwtj5MiRZGdns3r1ag4cOMDhw4fp0KEDvXr1wtXV9Yp16/V6VqxYYRe
K/s7nFhQURM2aNcnMzKSkpISYmBji4uJYt27dFcH1giDQv39/VCoVK1eu5IsvvmDcuHH/mSYLDhw4uDYOUcqBAwcPJFU3iAkJCSQkJJCeng7YbmqCg4Pt+VC+vr73eaT3F7lcTs+ePfn555/ZsGEDI0aMuN9D+lsYjUZOnjxJcHDwXXmoAtu5laHVXzdQHkAikeLj60NOdg4F+QX4+/sjlUoxmy1YLRakkj/cRdqyUsxpaXiYLbhLBKy+vkg8PCgtLaWy0hYKLlUo8KwVSJcevWjVNJLAwMC76sAJDQ0lPT2dc+fOXfWmv8haxOLXF3Ni4wkKLhXwwZ4PqNukrn35sS3HWPbeMkRRxGK20OP5HgR3Cqa4uBiwPaAajUZUKtVVXVLv9nqX4qxiPjv2WbX9HOU+iq/SvqJYU4yf7OpugfuJSqWif//+tG/fnhUrVnD48GE++OAD2rRpQ79+/a75EPlPoyrk/E448x5kKisrWbt2LWq1mj59+tzv4diprKxk8eLFHDlyBJVKxciRI2nRosVtr69bt27k5OQQHx/P7NmzGT9+/B0rPy0qKmLHjh3s2bMHnU6HTCajbdu2xMbG3rHmAC1btmT58uXs37//lkUpgEGDBnHx4kV2795NvXr1CA8PZ8OGDdf93A0GAykpKSQmJpKUlGS/tpWWluLi4kLz5s158cUXqVWr1hXX6lq1alGrVi26d+/Oww8/zNSpU3FyckKj0djznkRRtOfyZWdnExQURPfu3Tl9+jR79uxh//79dO7cmR49elRzFVWh0WgYO3YskZGR/Pzzz5SVlSGRSAgMDKS0tJSzZ89y/vx5jh8/Tv/+/Zk8eTJHjhyxN3GIj4+na9eudO3atVqX102bNlFaWkqHDh3+tlNSEAQ6dOjAzz//zK5du3jyySfZv38/O3bsoGPHjld1hPXo0QOVSsXPP//MrFmzGDNmDFFRUX9rHA4cOPhn89+yFDhw4OCBxmw2c+rUKX755RemTp3Ke++9x5o1a8jJyaFZs2Y89dRTfPrpp0ycOJGuXbv+5wWpKtq1a4eHhwf79u2zz17fDV566SXq1q2LIAgcP37c/u+FhYVERUXZ/4SGhiKTyewPvTdDx44dCQ4O5uTJk5hMpmveoO7cuZNNmzbZ/z8rK4v27dvb/3/16tU0atSIqKgoEhMTiYqKQns516mKSpMFg9mK9PJDxsxJ4zm27/erbk8hV+Dl5YnVauXloX04smcHgL3Nt9VqISPjEv0jgzGXliKRCCikUhT5BZRlZVFRUYkg2JxEXh7uHNq+hRp16tqP4/V4++23GT9+PAALFy6kf//+ABw+fJhhw4Zd970ADRo0ALCX1/yVYksxzfs2563Nb+Fdp3qphyiKfDvmW5797llm/D6D1359jR8m/oBKpsJqtWI0Gu3ley4uLle4pLLPZZNzLgeZUsap309dsW0rVoosN39+3A+8vLwYM2YMr7/+OoGBgcTHx/Pmm2+yfv16eyfPfzKFhYXAv7/z3rp166ioqKB3797XDAy/15w/f57p06dz5MgR6tWrx7Rp0/6WIAU2ceDxxx8nJCSEM2fOsHjxYv5ubOz58+f5v//7P6ZOncqWLVuQy+X07duXDz/8kOHDh9/RbpXu7u40aNCAkydPXnHNvhmkUiljx47Fzc2Nn376iXnz5qHX6+nbt2+1zz0/P58dO3bw1Vdf8corr/Dtt9+yZ88eDAYDLVu2ZNSoUUyaNImoqCiaNm1K7dq1b3itbtCgAW3btkUqlTJ+/Hjeffddhg0bRkREBGq1Gl9fX0pLS1m/fj2ffvopaWlpdOjQAS8vLzZv3szUqVPZvHnzVa8rgiDQunVr3nzzTbsbTSqV0rlzZ7p164aPjw/JycnMnDmT559/HpVKxXvvvUe/frZmEuvWrWPq1KnExcVhNpspKChg69atODk50bdv31s+zlcjOjoahUJBQkICJpOJvn37YrVaWbZs2TXf06lTJ0aOHInFYmH
OnDkcOHDgjozFgQMH/0wcTikHDhzcVyorK0lOTiYhIYGkpCR0Oh1gu0Ht2LEjkZGRhIaG/ic6Q90uMpmMXr16sWTJEtatW8eoUaPuynYGDx7M66+/Trt27ar9u5eXVzWRaubMmezateumHRhnzpzhzJkzeHh48MsvvwBcNU/KbDazc+dOSkpK6NGjB2DLE9mzZ4/9NbNnz+bNN9/k0UcfBag2riq0RjMWUUR+udRz4kdfXHd8Go0zBoMR0WrFaDACIharlfJyLZmZmZhMJgAkElvmlNVqa5/tpdNhqeWF0t0dAdtDzbYVvxIS4EuH5refl9WiRQt+/fXXG76uKlfqWqJUiaWEkLYhOEuu7kgTBIGKUltem06rw9nTGU9vT4pKitDpdJhMJpycnK7aRGDXj7toN7Qd7jXc2fnjThq3v7KjVpm17Ib78CBQv359Jk+ezMGDB1mxYgVr1qxhz549DBw4kJYtW/5j86b+C06p3NxcduzYgZ+fH506dbrfw8FqtbJ582bWrFmDKIr06NGDvn37IpVeO9PtVpDJZDz33HPMmDGDffv24efnR8+ePW9pHRaLhaNHjxIXF8eFCxcAW+ZdbGwsLVq0uKvh/9HR0aSkpHD48GFiYmJu+f2urq4888wzvPXWW/z444/06NGDtm3b2t1QiYmJ9mYoYHM7RURE2APNq8r/r5fFdy169OhBSkoKW7du5fHHH8fPz4/OnTtjMpk4c+YMycnJHDp0yJ6/tHv3boKCgoiIiLB33ouLi6N37952gevPeHl58corr7B161ZWr17Nvn37aNOmDQMGDGD79u2sX7+ew4cP88wzzxAdHc2kSZPo2LEjGzduZMeOHSxdupRt27ZhsVgwmUwMHDjwjnUZdXJyolWrVvz+++/s3buXnj17smvXLhITE0lOTr5mp8s2bdqgVCqZO3cuCxYswGAw3NcOkg4cOLh/OEQpBw4c3HMKCgrs+VCpqan2h/jatWsTGRlJkyZNqFOnzj/2Ye9+8NBDD7Fp0yYOHjxIz54970puys3eLM6bN48ZM2bc9Hrnz5/PE088QY0aNZg7dy7Dhg3Dx8eHtLQ0oqKieOaZZ9i6dSvDhw9n9uzZWCwWdu7cycCBAxk+fDhRUVGUlJTw0ksvsWfPHlJSUvj666+Jj49HEASKi4txd3fn1KlTjB8/nvSMTAwWK/2fHEWfx0fyyiP9GDjqGdp160Xc6uWsWPB/mE1GrFaRp16dwkOx3fHw9EAikWA2mzGabJ31KisrEa1W+0OaTqXCVSJhxOjH6NqlO0eOHaa4uJiejz7BEy+/zvpffuRsUgLvT53MnJkf8u6779KtWzc+++wzVqxYgdlsxsvLi48//piAgABKS0spKyvj3Llz5ObmUl5eTnJyMvv37+fjjz9myZIlWCwWli9fzs8//4yTkxNt27Zl4cKFbNq0CYvFwsWLF1m2bBlz5sxBFEX69etHy5YtycnJYdrb0+gwpgPJW5Ipyizi8JbDqP3UiIhcOHoBZy9npveZjkSQIJFJGDlrJEmHklgzYw2XTl4CAdqPbE/tV2ojlUrtD3RWi5XdP+9m6tqpuHi5sOKjFVSWVqJ2q16eYsZ80+fI/UYQBKKjo2natClbtmxh8+bNzJs3j+3btzN06FCCgoLu9xBvmf+CU+q3337DarUyePDgOyb83C4lJSUsWLCAlJQUXF1dGTVq1F3J0tFoNLz44ot8+OGHrFq1Cj8/v5vqrFZZWcmePXvYsWMHxcXFCIJAZGQksbGxdoH7btOsWTN++uknDhw4cFuiFNgyjuRyOYWFhRw9epSJEydiMBgAW3OQJk2aEBERQURExE13B7wZGjZsaHdU9u7d2y7Wy+VyGjduTOPGjRkyZAiFhYVs27aNX375hbNnz5Kenk7NmjWRy+WcPXuW5ORkgoODeeKJJ2jRokW14y6RSOjevTuNGzdm3rx57Nu3j9TUVEaNGsVjjz3G8uXL7RlU8fHxdOnShQk
TJtC5c2fWrVvHhg0bSEhIwNfXF09PT0RRvGOfa4cOHfj999/Zs2cPPXr0YODQYfy0fCVxx5KQ+NYGQUAiCKhkElwUMlwUMuRSCc2aNeP5559n9uzZLFmyBL1eT7du3e7ImBw4cPDPwSFKOXDg4K4jiiLp6en2fKjMzEzAZrdv0KABUVFRNGnS5F89Y3+3kclkPPzwwyxatIj169czevTo+zKO+Ph4iouL6d27t/3f3nzzTQICAnj22WeveL3FYuGHH35g+/btlJWVMWXKlGrdeEpLSwkLC+Ojjz4CbA92JSUlfPHFFwCkpaXZX/vVV19x4sQJxo8fby91q8JsNtOvXz/eeecdmnbrzfniSgxlJVeMp0WHGDr3HYggCORkXOSFAT1o0f4YCqUSuVyOIBEoKysDEQSJgFwuR6GwhRJXapwxCAIitrben334FaVlpYx6djiRbTsR8VAH6jUMo32P3lCQyYYNG/jqq6/IysqiXbt2SCQSUlNTGThwID179uTw4cMYjUY+/vhjTp8+TVpamv31hYWFfP/99xQVFbF+/XoGDRqEWq3m8OHDAKxYsQKDwcDvv/+On58fUVFRqNVq5s6dS0FBAVarFV25Dt9QXzo91Ynpnaazfe52Wg5pia5Mx7zn5uHq58rYeWOp17we5w+c5+fJP+Ps5cyTnz2Ja4Ar2hItC8csJKhZEHUi6qBSqXByciJldwo+dXyoGVoTgPCO4exdtpeuo7tWO9ZV7rF/EgqFgt69e9OuXTtWrVrFvn37+Oijj2jZsiUDBgz4Rwk8/3ZR6tSpU5w4cYJGjRrd1aYJN0NSUhILFiygvLzcHkx9tQDqO4Wfnx/PPPMMX375JfPnz8fLy4vAwMCrvjY3N5ft27cTHx+P0WhEqVTSuXNnYmJi7nmJvEqlIjIyksOHD5OXl3fT2xdFkbS0NJKSkti0aRP79+9HoVCQnp6Oq6srAwcOJCIi4qZd11Vlj7ci2AiCQI8ePZgzZw5xcXEMHDjwqq/z8vJi2LBhDB06lP3797NgwQLS09MpLS3F3d0do9FIXFwcu3btIiQkhGHDhtGzZ89qJYi1a9dm6tSprFy5kri4OGbOnEmPHj0YPXo0w4cPZ/78+SxevJjNmzeza9cuunbtyuOPP05iYiKZmZn4+voye/Zs6tevz8CBA+9IB7zAwEDqBYdgUGrYcjoDicqTZn2GYTKbSckvQXn5d1IEJAJILncbru3qROOwMF5++WVmzZrF8uXL0ev19OnTxzEx6cDBfwiHKOXAgYO7gslkIiUlhYSEBE6cOEFpaSlg6wrWqlUrIiMjCQsLw8nJ6T6P9N9D69at2bBhA4cPH6ZXr153NO/jZpk3bx7Dhw+vVuLx7rvvXvP1GzZsoG7dujRs2JDly5dTs2ZNe8kI2GaZn3jiib89rtOnT6PX63n00Uc5VWDLK3HzvPJhPOfSRWaMf5b8nGykUina0hJyMi5Sp34IYAs/l0qkWCxmZFIZgiBgNtvK9wxGA4Wu7oBATMfOCAK4u7nh71cDQ9p5VLVs2SRqtZqQyEikUilHjhyhoKCAzZs3IwiC/WGoW7dulJSUoNPp6Nu3L9u3b8doNDJ06FASExNJSUlhxIgR/Pbbb/bZcKlUSn5+Pl27duXVV19lz549LFu2jIKCAvbv34+bmxvu7u4MGjSI+vXrs3zlctoObYuL1AWVWkVRVhGBgYEc23wM79reGCoMdBzQEYA6deqw4dMNZJ/N5tfJv2KxWLBYLJgNZsqzy5E3k6PT6dDpdGz+fjNZqVm8EPYCEkGCUW8k/2L+FaKU7B98C+Lu7s7IkSOJiYlh6dKlHDp0iOPHjxMbG2sP8X3QKSoqQq1W/yPGeqtYrVaWLl2KIAgMHTr0vj3cms1mVq5cybZt25BIJAwePJjY2Nh7Mp6GDRvy2GOPsXjxYr755humTJlidwaJokhKSgpxcXEkJiYCNrEkJiaGtm3bXjV0+14RHR3N4cOHOXDgwHW
D6SsrKzl58qS9REyr1WK1Wjly5Aienp6MGzeOY8eOUVpaSuPGja9ZQnYniYqKws/Pj127dl0zvLwKQRBo06YNzZs3Z9u2bWzatAmtVotKpbIfg+TkZKZNm8Znn31Ghw4daN++PWFhYQQGBiKXyxk6dCgREREsXLiQjRs3kpyczKhRo3jhhRcYOXIks2bNYvXq1axdu5YNGzYglUrp1KkTzz77LBs2bCA1NZVPPvmEJk2a0L9/f2rWrHlb+222ipwrLqdRryFU6A1oTWbcVeCidiInOwtTZQU1awYgkUgRRRErYLWK5FcaKag0opZLCa5Ri/HjJ/DVV1+yfv169Ho9Q4YMcQhTDhz8R/jn3hE6cODggUOr1ZKYmMiJEydITk62h3Z6e3vTpUsXIiMjCQ4Ovu9lFP9WpFIpvXv3ZuHChaxdu5Znnnnmnm6/vLzc/oB+s8ybN4/U1FTq1q1LSUkJJpOJVatW8b///Q+wiZhVZWF3Cqnk2je5018aw+jXp9Gxly0Atn9UCEaDHrPFjNFoREDA19cXvUGPSqVCqVBguHyeyyQyEAQsgoBU5YQg2DKmJFIpMosFX6MRiVRKgH8Nnh9lE9q2bdvG2LFjGTt27BVjSUxMpKSkhIcffpj8/HzOnTtHly5dkEqlaDQaHnroIQ4ftpUIVpXnVLkQQ0NDOXPmDBERETRt2pTAwEAmT55sX3daWhoKpQKJRGK/6Retf4QiyxVycs/nknk6k5oNapJzLofCzEKcPZyZ8fsMKisryc/Px8vLy94l0Ww2k3sxl3MHzzFp8yScXG2Cs1Qq5f2Y9zlz5AwhzUPs23CXut/aB/cAEhgYyMSJEzl27BjLly9n48aN7N27l/79+9OmTZs7fu7eSQoLC/+1Lqnff/+drKwsOnTocF/EeYC8vDy+//57Ll68iI+PD6NHj6Zu3br3dAzt27cnJyeHbdu28c033zB+/HgSEhKIi4uzXyvq169PbGwsUVFRD8T52rhxYzQaDQcPHqR3795/XJ9EkezsbHs21Llz5+yl/66urrRt25bi4mKsVis9e/a0C4AffPAB8+fP54033rjrzq+q8rpFixaxa9eum8rzUigU9OrVi4ceesieFZWbm0vfvn0JCQlhzZo1HDhwgDVr1hAfH09gYCC+vr52oa1x48a8+eab/PTTTxw+fJj333+fQYMG0alTJyZPnszjjz/Ol19+yS+//IJOpyMxMZHvvvuOjh070rp1a7Zv386JEydITEwkOjqavn373tJ1oUhn5GSBllKDGYVSSUlxEQarFXdnDQq5HBcXF8rKtJSUluLp4WkLagekUgE5YBVFKs0WTuSVUUPtzkuvTOTbr74gLi4Og8HA448//kCclw4cOLi7OEQpBw4c/C1yc3NJSEjg+PHjnD9/3u70qFevHpGRkURGRuLv7++Y7bpHREdHs2HDBo4ePUpGRsbfbvd8K/z6669ERkbSsGHDm3p9bm4ucXFxXLp0CZ1Ox9tvv027du2YNGkSCQkJVw3QdnV1JT09/ZbH1qBBA9RqNT///DPtH+6PCJQUFuDuVb3rnLa0FP/atjKXrSuXoS0twWK2kJuTgyiKOLtoCAjwJycnB4PBiKenJ15Km9PE188HuVKFRCKg1ThjBGQWC4giZpMZU0kJTkoVOZfSuXTpEgEBAfTv359PP/2UwYMH4+npiclkIikpiaZNm97UfsXExPDhhx/aS13mzZtnX/bQQw+Rnp5OeHg46enp6PV6UlJSaNzYFjouILB4/GIStyZSmmtzMk6ImsB7O96j4FIBvcb14quRX112g5kZ+clIVn+6mp2Ld5J9NhupRkrz7s2hFjh7OiOTyTi65ihNOjchNCwUnU5HZWUlOp2O8O7hbJq7CSc/m1BlNVlxFe9e+dK9RBAEmjVrRkREBNu3b2fDhg0sWrSIHTt2MHToUEJDQ+/3EK9Ar9dTWVlJSEjIjV/8D0On07FmzRpUKtUd6y52q+zfv5+ffvo
Jg8FAq1atePzxx++bI23QoEFcvHiRrVu3snPnToKCgpBKpbRs2ZLY2Nh7LpTdCJlMRsuWLdm5cyepqakYjUYSExNJSkqyl5wKgkBgYKA9G6pOnTqUlJTw5ptv4uHhwcMPPwyAr68vo0aN4ptvvmHOnDlMmjQJhUJxV8cfHR3NmjVriIuLIzY29qabtLi7uzNixAhiYmJYtmwZx44dIzExkS5duvDiiy+yatUq4uPjycjIoKioiMLCQg4ePAjYnKyNGzcmNjaW3bt388svv3DixAlGjBhB7dq1admyJWfOnLFnax06dIjs7Gy8vb1p2rQp4eHh9hD2Q4cO0bFjR3r16nXDIPRLZTpOFmgxW60opVIkgoBGrUGr1VKuLcfd3R03N3cqKioo12pxcXa54nhIBAGVVIrZaiW7wkCJTMFzEybyf199zu+//45er2fUqFGOyUwHDv7lOEQpBw4c3BJWq5Xz58/b86GqOtnI5XIiIiKIjIwkIiLiqoKCg7uPRCKhd+/ezJ8/n7Vr1/Lcc8/dsXU/88wzrF+/npycHLp3746Liwtnz561L583bx5jxoy54n3XypT64Ycf6NatG+7u7uzfvx+Apk2b8sgjjzBv3jxeeeWVK9Y1YMAAfvzxR6KiouxB5zeDTCZj9erVvPjii0x//310FpE+jz9FvydGVnvduDff553nR+Hs6kpUm/b4BtSksKgQlZsHMrkcJyc1IODl7U12VjaFBQX4X3ZiKJUqnF3dkEikeNfwQxUUDGlpIAgIEgGrCN37DGDeB2/xw1ef0bpNG9q3b0+TJk3sLbUBRo0addOiVEREBP/73/9o27YtLi4u9OjRw/7d8/DwYP369YwYMYKsrCy2bNlCSEgIq1atAkCChKe+fAoAsUJkVM1RfH78cwAmLJnAkqlLQLBlZz36zqM079mc0FahLJqyiPz0fIxGI8lbk3l54cv28fQZ/0e5jUajQaPRIIoioz4ZRWVlJZWVlXx89mOMRiMfvvEh4Y3CiYqKIjw8/B9fRiaXy+nevTsPPfSQvUPfp59+SlRUFIMGDbrn+TzXo6rz3r/RKbVhwwa0Wi2DBg26Y93Fbha9Xs/PP//M/v37USqVjBw5ktatW9+3SZmMjAy2bdvGmTNnKCwsRK/X06xZs2qlfA8ahYWFiKJIUlISL730kl00c3JyokWLFkRERBAWFnbFZ7ty5Up7ifOfYwGaNGnCww8/zPr161m8eDFPPfXUDT+Pqsm120Emk9G1a1eWLVvG3r17b7nrY506dXjllVc4fvw4y5cvZ/PmzcTHx9OvXz/69OnDqlWrOHfuHAaDAV9fX5ydnUlPT+fixYv2dRQUFBAXF8fp06fp168fO3fuJDg4mMmTJ9tzpvLz88nMzESn06FUKqlduzZ16tQhNTWV7du3s3fvXrp27UrXrl2vem2+WKbjZL4Wqyiikkrtx9TFxdkmSpVrcXN3QyKR4O7uTmFhEcXFRZevg1cef5lEglQQ0ZktnEfKc69MZO7XX9qzFceOHevowuzAwb8YQfw7V14HDhz8JzAYDJw8edKeD1VRYWsT7+LiQpMmTezuGKVSeZ9H6gBswuE777xDTk4Ob7zxxjUDbh8kPvroIzIzM/nss8/uastxAItVZHt6PhYrKKTXLgswmU3k5uZiMVvw8vLE2bn6Q5BWW0ZRUTHOzhq8/uK4sqPXQXo6WK1YFArMShVN3/8fl0KC2B0YSHp6un32GmzlioGBgdStW9f+x93d/br7o9Vq7Q9oX375JZs2bWLjxo325SdPnuTLL7+kW7duDBo0qNp7t1RsId2UjrPE+brb+CtGo5Hs7OzLM+E3L0CLokipuRRFuoLzy89TUFAA2B7kGjZsSFRUFJGRkXc1BPpekZmZybJlyzh16hRSqZSYmBgefvjh+5rXU0ViYiKzZs1iyJAhxMbG3u/h3DHy8vJ4++238fT05O23377r15I/k56ezty5c8nLy6N27dqMGTMGPz+
/e7b9KkRR5MSJE3ZRAmzB59HR0cTFxVFRUcGTTz5Ju3bt7vnYrobFYuHcuXP2srzs7GxEUeTQoUM4OTnxyiuv0KRJE+rXr39Nt8z58+f56KOPqFWrFlOnTr2i3MtqtTJr1iySk5N55JFHbtjZLyUlhc8//5yYmBgeeeSRW94ng8HAlClTUKlUvPfee7ft8jGbzezYsYP169ej0+moWbMmgwcPtpe5Z2Vl4eTkROfOnQkICODMmTMkJSXZBacLFy6g1WpxdXVlwoQJjBgxArlczvnz5/npp5+4ePEier0eNzc3DAYDgiDg7OyMq6ur7bfPYsHFxYVevXrRoUMH+/cpr8LA0dxSRFFE8acS8CpycnMw6A34+HijVmsAW+ml0WjC19f3ulmioiiit1hxkkmIcJPz3ddfkZmZScOGDXn++ecd95kOHPxLcTilHDhwcFVKSko4ceIECQkJpKSkYDbb2rf7+/vTrl07IiMjqVevnqPW/wFEIpHQp08fvv/+e9atW8e4cePu95CuS1lZGRcuXKBp06b35CFSKhHw1SjJKNNfsyW2yWwiNycHi9V6VUEKbKJspU5HeXkFarX6sovqL6icoFYtuHQJk9oZl6wM/ApyqZGfQ8sePbBOmEBOTg5paWmkp6dz4cIFUlNTOXXqlH0V7u7uts5G9epRt25dAgMDqwkbkydPZu/evZhMJgICApgzZ061IdSvXx+JRGJ/QP0zDRQNuGi6iEW0IBVu/sHpzzkvt4IZM0qZkj5N+uDd1JusrCyOHz/O8ePHSUpKIikpiSVLllCvXj2ioqLswcH/RGrWrMnLL79MUlISy5YtY9u2bezbt4++ffvSoUOH+3rtrCqD+rd1PF2xYgUWi4VBgwbdM0FKFEXi4uLs2+7cufM93X4VBoOB+Ph4tm/fTl5eHgCNGjUiNjaWsLAwBEEgLCyMmTNnsmTJEry9vW+61PpOo9VqSUpKIjExkZMnT6LT6QCb2zA8PJyIiAg6dOjAnj17aNiw4XVLYEVR5NdffwVg2LBhV/1eSSQSnn76ad5//32WLl1KnTp1qF+//t3ZOUCpVBITE8O6des4fPgw0dHRt7WeKtdV69atWbduHbt37+bLL7+kSZMmjB07lrS0NNasWcP69etxdXWld+/eDBkyhKKiIpKTk1m+fDlLly4lLy+PWbNmsXPnTqKjowkPD2f06NGcPHmSNWvWoNPpcHd3JyAggAsXLpCVlYUoiiiVSgoKCvj111/Ztm0bffv2Jap5C04WaLFYRVTSKwUpABdnZwx6A9ry8suilICHhwe5uXkUFxehcgq4ZvdVQRBQSSXozFYuGeCVV17h66+/JiUlhS+++IIXX3zxgRD2HThwcGdxOKUcOHAA2G7sMjMzOXHiBMePH7fn9giCQHBwsD0f6kEqQXFwbURR5N133yUrK4spU6Y8cLkhf2bPnj0sXryYUaNG3fbN+61SpDNyIKsYmSC5IvjcZDLaZomtVluQt+baLiKLxUxWVhaCIOAfEIBUcnVhRywtRa83ELZ0EXV3bbP9o1QKX3wBbdpUe63ZbCYjI4O0tDQuXLhAeno6OZczrarw9fWlXr16dldVnTp1rlva8PHHH3P+/Hk+//zzarPUFtHCqvJVFFmK0Aiamy4zMpvNZGZm4urqetNlQKIoUiFWUFNWk56anldsq7Cw0J5Pd+bMGXuIsb+/v12gCgwM/Efm01ksFnbt2sXatWuprKzE39+fIUOG3JOOYFdjxYoVbN68+R/jpLwZUlNT+fTTTwkNDeWVV165J+eJVqtl4cKFJCUlodFoGDlyJE2aNLnr2/0zhYWF7Ny5kz179qDT6ZDJZERHRxMbG3vVkPejR48yZ84c1Go1kydPvieiryiKXLx40e6GSk9Pt1/PvLy87NlQDRo0sF/HcnNzefPNN2nZsiWjR4++5rr379/PggULaNas2Q2be1y8eJGPP/4YtVrN//73v2s6Mv+uUwqgoqKCyZMn4+3
tzZtvvnlHzsfs7Gx+++03kpKSkEgkdOrUiR49enD48GE2bNhAeXk5Pj4+9OvXj6ioKN577z0yMzPx9fW1N8Xw9PQkMDAQiUSCt7c39evXJyMjg4sXLyKVSmnWrBn+/v4cOHCAgoIC9Ho9JpMJs9mMu7s7zXsNoEbDCDRKBRLh6sK6KIpkZGZgtVipWTMAmcz2mebn51FZqcPT0wMXl+u7Yc1WK2ZRJNLXFS+5wDfffENqaiq1atVi/Pjx97w014EDB3cXhyjlwMF/GIvFQmpqqr0sr2r2XKlUEhYWZs+H0mg093mkDm6HqoePsLAwXnrppfs9nGvyzTffkJSUxMyZM+/ZuSaKIvGZxZToTdVme/8sSHl7eaG5jiBVRUVFOQUFhajVTvj4+HC1vAyD2YJMW0bH8WNQVJT/sSAgAFatghu4ZvR6Penp6aSlpdn/VOUCgc0JULNmzWplfwEBAXbXwOrVq9mwYQPjxo274qE5y5zF5orNCAgohZsrjbBYLGRkZODi4lLdbVNSAgYjuLnCX3JIdFYdMkFGH+c+eEivL2RVVFSQmJhod1CZTCbA5hqLjIwkKiqK0NDQe+5G+btUVFSwfv16duzYgdVqJSwsjCFDhuDv739Px/H9999z+PBhZs6c+a94uLNarXzwwQdkZGQwdepUateufde3mZKSwvz58yktLSU0NJSnn376hqW2dwpRFDl//jxxcXEcO3YMq9WKq6srnTp1okOHDjf8TDdu3MiqVavw9fVl8uTJd+W6q9frOXnypD2kvKysDLBdq4KDg+1CVI0aNa4p2MyYMYPMzExmzpx51Vwjg8HAtGnTqKio4J133sHb+xpl1H8iPj6eH374gZCQECZMmHDV0roqUapz584MGzbsFvf8D5YuXUpcXNxVr7t/h+TkZJYtW0Z2djZqtZrevXvbO+lt3boVg8GA0WikoqKCzp078+yzz5KSksKCBQvIyclBLpcTHBxMdnY2er0esHXPLSwsRCqVUqNGDR599FG8vLzYsWMHp0+fpqKiAotUTo9nJiARBKQSAQ93d5TKq2cBFhcXUVamxdXNFQ932/XebDZdnsSRULNmAJJrTOJUobdYUMukdKjjhdlkYs6cOSQlJeHn58eECRMe2Fw0Bw4c3Dr/rLs5Bw4c/G0qKytJTk4mISGBpKQku23e3d2djh07EhkZSWhoqCNQ8l9A06ZNqVWrFsnJyZw7d+6ulivcLgaDgVOnThESEnJPxU9BEGjk5czB7BJMVhGFVMB4WZCyWq14e3nf9Hg0Gg2VOh2VFZVUVFRcIWRZrCIiEBJUE0X/frBkyR8Ls7NBr4cblCOoVCoaNGhAgwYN7P9WVlZWTaRKS0vj0qVL7NmzB7C1Gq9Tpw5169ZFFEV0Oh0pKSlXPBwFyAJopGhEoiERKVJkwo1vDa5avldUBJcbH1BcBHXr2oUpk2jCipWmqqY3FKTAdkxbt25N69atMRqNnDp1iuPHj5OQkMCuXbvYtWsXKpWKiIiIf1RQukajYejQoXTs2JHffvuNEydOcOrUKTp06ECfPn1wdr61bK/bpaioCLlcfs+2d7fZt28fly5dom3btnddkLJaraxZs4ZNmzYB0LdvX3r27HlPyjEtFgtHjx5l27ZtpKWlAVC7dm1iY2Np0aLFTYu0PXr0ICcnh/379zN79mxefvnlvy3wiqJIbm6u3Q31Z7eji4sLbdq0ITw8nMaNG990+VWrVq1YunQpx48fp3Xr1lcs37RpE6WlpfTs2fOmBCmwdSW9cOECu3fvZsWKFQwZMuTmd/IW6dq1Kzt37mTjxo1ERETcMfdeWFgYjRo1Ys+ePaxZs4alS5eya9cuBg8ezPTp01mxYgWfffYZFouF8PBwzp8/T8OGDXnrrbf46aefOHToEJcuXaJfv37UrVuXkydPkpycTHp6OpmZmRw4cIAjR45Qv359hg8fTp8+fYiPjydPVCBTKikryEeQSNDrdGg0Gtzd3ZHLq3c
1dHZxoaxMS3m5rQufgIBMJsfV1ZXS0jJKSkrw9Lx+owW5REKl2UJepYEaGhXPPfcc8+bN4+jRo3zyySdMmDDh8kSQAwcO/uk4nFIOHPwHqCqLSUhIIDU11X6jWLt2bSIjI2nSpAl16tT5R5bFOLg+CQkJfPvttzRq1Ijx48ff7+FcwbFjx5g9ezZDhw6lS5cu93z7KQVazpVUIljM5OflIYpWvL2rwllvHqvVYs/h8A8IQCa1PeBVhbb6qBW09HdHEEWYMQNWrrS9sU8feOutO7IvoihSWFhYrewvPT0do9GIxWJh3759uLu7M2zYsGoZVa6urhhFI1srtpJlzsJJcLphvlRVOY5Go/njYfDSJSj/kwtMo4E6dTCLZvSinnryesSoY24pu+qvWK1Wzp07Z8+h+qcHpZ86dYply5aRmZmJk5MTDz/8MDExMXfdAfb666/j5OTEO++8c1e3cy/Q6/VMmzYNg8HA9OnT7+pnX1hYyNy5czl//jweHh6MHj2a4ODgu7a9KiorK9mzZw87duyguLgYQRBo0qQJsbGxhISE3NZvt9ls5vPPP+fs2bO0bduWJ5988pbXYzKZSE1NtQtRVd9HgMDAQLsb6nbLbsvKypg0aRINGzbk5ZdfrrassLCQN998E41Gw3vvvXdLAdhms5lPPvmEtLQ0xowZQ4sWLaotv1NOKbB1mY2Pj2fixImEhIT8rXVdjcrKSjZu3EhcXBwWi4WGDRsilUo5cuQIrq6umEwmRFEkKiqK/v374+/vz8GDB/npp5/Q6XQ0atSIkSNH4u7uTllZGSdPnuTgwYOsWbOGzMxMwFZC3aVrNxr2HIRVkKDTlmEwGjEajEgkAnKFAldXV9zd3e2/fQC5uTno9Qa8vf+Y5LGKVrIyM7FYrQT4+18hZv0VndmCn0ZBC3/bZIbVauXHH38kPj4eNzc3JkyYcM+dpg4cOLjzOEQpBw7+hYiiSHp6ul2IqrqxkEqlhIaG2oWof2M7cAfVEUWRDz74gIsXL961m+K/w8KFC9m3bx8ffPDBfTkfzVYru85mkV+hx1ChxcvT85YFqSp0ukry8vJRqZT4+fkhiti7CEUHeKBR/EloOHkSzGYID79h6d7fwWq1kp2dTVpaGt988w1nz56lSZMm1UpWPDw8qFu3LrXr16Y0vJRyVTkqiQq5cH23ZHp6Omq1+o+Z6j87pQARMAYGYFJJCZQHEqOOueE6b4WqHLwqgerSpUuAzcUVFBRkz6F60HPwrFYre/fuZfXq1Wi1Wnx8fBg0aBBRUVF3ZaLAbDYzbty4B76s92ZZtWoVGzdupH///vTs2fOubefo0aMsWrQInU5H06ZNefLJJ++6uzM3N5ft27cTHx+P0WhEqVTStm1bYmJi7sh5XV5ezowZMygoKGDQoEF069bthu8pLi62i1ApKSkYjUbA5uZs3LgxERERhIeH3zFx8KuvvuLkyZN89NFH1Tp9zpkzh6NHjzJy5Eja/CWX72YoLi7m/fffx2g0Mnny5Gr5W3dSlMrJyeHtt98mLCyMF1988W+t63rk5+ezfPly9uzZw7Fjx6hXrx4LFy7EaDSyatUqTpw4gSAItGnThj59+gCwYMECUlNTUavVPP7449XEOavVSlxcHPPmzePChQv41q3P0Ff/h8WgR0BAIhEwm82YTCaMRhOCIKBQKvBw98Dd3Q2JREplZQX5+QWXfxNr2NddXq6lsLAIlZMKP19frlbyXoXJYgUButT1Rnb5t7Iq3H7Hjh1oNBpefvnlf002ngMH/1UcopQDB/8STCYTKSkp9nyo0tJSwNZiPjw8nMjISMLCwq7bitfBv5Oq9u+hoaG8+uqr93s4dqxWKxMnTsTDw4Np06bdlzGcP3+eb//vexp0fhjfOnVRK5RXBJ/fCoVFhZRry/Hw9EDupEElk9C8hjvuquuIMaIICxfCvn3QsiWMGAGK688e3w5VXZrGjBmDp6dntbK/3MtikkQpwa+XH66NXZH
JZChFJUqFEoVCcYVAcvHiRVQq1R8Px1YrnDsHZjNWiYDOQ4UECaH+bWitfuimygL/Dv/0oHS9Xs+GDRuIi4vDbDYTGhrK0KFD73g5Wl5eHtOmTaN9+/Y88cQTd3Td95oqt4ybmxvvvPPOXSk7NxqNLF26lD179iCTyRg2bBjt27e/a+eRKIqkpKQQFxdHYmIiYAsDj4mJoW3btne881h2djYfffQRer2eZ599lqioqGrLrVYr58+ftwtRVZNcADVq1LCLUMHBwXfF4XfgwAHmz5/PkCFDiI2NBf4Ita9bty6TJ0++7c+iqqObr68vU6ZMsd8fVYlSXbp0YejQobc99m7dupGTk0NxcTFms5kFCxbQo0cPwFa6/uqrr7J582ZUKhWRkZEsXrz4ptYriiJBQUEEBQURFxdX7d8nT57M9u3bqVmzJnXq1MHNzY3u3bvj7+/PihUr2LdvHydOnGDGjBn06NGD//3vfyxatAipVMqUKVNYsWIFe/futa/TbDazZcsWjpzPoF50B/TaUr564xUeGfcK/nUCsVisWK0WTCYTFqsViSCgUCrx8vRiXJ/OjHvvE2oFhRAQEGD/fuZkpDOmZwyz1u3A19fn6p1rL7Ns3mzaPdyPXlEN8fjT76goiqxevZqNGzeiUql48cUXmT59OlFRUYwfP563336bkpISvvjiC9asWcOOHTv4/PPPb+nzc+DAwb3DkSnlwME/GK1WS2JiIidOnCA5Odk+Y+nt7U2XLl2IjIwkODj4qkGeDv47hIeHU7duXVJTUzl9+nS1XKL7yblz56ioqKBTp073bftfffUVZrOZFjXcMLhoKNIZMYsCColwWw867u7umMwWdCYLGicrLf09cVXe4EF52zb45hvb348ehfPn4f3377iDqkGDBqxfv57z58/TokULgoKC7MsqKyvtQeoXLlwgNysXp1ZOmFxNlGnLsOqtyKVylEqbQKVUKhEEwS78ACCRYPHzQV9ZjCiV4Jqtpc0PCdTpEQyD7v7thpeXF507d6Zz585XBKVv3LiRjRs34u7ubheoQkJCHqigdJVKxcCBA+nQoQMrVqzgyJEjvP/++7Rp04b+/ftXc4n8HaoaWvwbnLIrVqzAbDYzcODAuyJIZWVl8f3335OVlYW/vz9jxoyhZs2ad3w7YJtYOnjwIHFxcXbhp379+sTGxhIVFXXXMqv8/f155pln+Oqrr5g3bx6vvfYanp6eJCcnk5iYSHJyMpWVlYCtTDYsLMxelnezOU5/h6ioKBQKBQcOHCA2Nhar1crSpUsBGDZs2N8SBxs2bMiAAQNYsWIFCxcu5Nlnn0UQBO7UfP3SpUtxd3cnLS2NsWPHMmbMGLujs0pMS01NRRAEcnJybnq9cXFxuLu7c+LECS5cuEC9evUAWzl8SUkJAwYMoGPHjqxcuZKlS5eyb98+PvzwQ1599VWSk5NZuXIl27Zt4/fff2fFihX83//9HydPniQ1NZUOHTpw5swZu6taJpPRq1cvPNNyuFRWialSxoSPvkKtdkKuUGAwGDDo9chkMoxGIwaDkcqKCnSVlZjM5ssjFtGWa/H0qGqK8cfva3FxMSonJ4RruKVWLvg/Grdsg9YYXE2UEgSB/v37o1KpWLlyJV988YV9Mvav9O3bl759+9708XXgwMG958G5G3PgwMFNkZubay/LO3funP3mqV69ekRGRhIZGYm/v/8D6wZwcO8RBIG+ffvy1VdfsWbNGiZOnPhAnB/Hjx8HIDIy8p5v+8yZM3z99ddYLBaee+45wsPDsYoiF0oqOVdcgd5iRSYIyG5SnBJFEYsoYrKCi5sbpw/Gk56TTudXJtx4MFlZ1f9/61bw8oJXX4U7+DkFBQUhk8k4ffr0FcvUajWNGjWiUaNGfwyrOIujxUe5JLuEwc2AxWJBZ9VRUVGBWCaiM+qQi3IKtYVI5BKkMilSZzmuOVYarE+iwc40VFojpM2GHj1sGVP3iOsFpe/cuZOdO3fi5OR
kD0oPCwt7YILSvb29GTt2LGfPnmXp0qXEx8dz5MgRevToQdeuXf+2+FLVtbFa18R/IOfOnePw4cPUr1+f5s2b39F1i6LInj17WLp0KSaTifbt2zN06FAUd8HBWFZWZg/v12q1SCQSWrZsSWxsLHXr1r3j27saDRs2pEuXLsybN4/hw4dTr149+756eHjQokULIiIiaNCgwS1lN90JlEolTZs25cCBA2RnZ3Pu3DkuXbpEq1atqgnrt0u3bt24cOECx44dY/PmzXYn052gqhtj3bp18fLyQqfTkZ+fj1qtZt68eWRkZNh/X2rUqHGdNVVn3rx5jBkzhtOnTzN//nzee+89TCYTn376KRs3biQ2NpalS5fywgsvMGfOHDIzM+nRowedOnVi8ODBrFixgjlz5jBixAiysrJ45plnCAsLY+jQoUycOJHi4mK6deuGj48PU6ZMQavVUmE08+jLr9O6YwwjO7fmhfdmUq9hI3atWc7vm9ZjMhqRSCU89tJEagaFotfrEK0iJSUlaIqLMRgMuLm6Iv1T5pRao6ayopLYer6MmvgGe7dspLSokCdfepUeQx5j0VczKczL4aPxz/CtRs3PPy4iLCyMadOmsX37doxGI6GhoTz55JOsW7eOU6dOXTXjbeHChaxatYpVq1YB8NZbb7FkyRI8PDzo3r07ixcvtjcO2Lx5M++99x46nQ6pVMpHH31ETEwMO3fu5IUXXqBDhw7s3bsXs9nMDz/8YC95XL9+PW+//TZGoxFBEJgzZw7R0dEcOnSISZMmUVZWhsVi4Y033rir4foOHPxTcYhSDhw84FRZ56uEqKoyG7lcTkREBE2aNKFJkyZ3bBbdwb+Txo0bExQUxNmzZ0lJSakmPtwPRFEkISEBDw8P6tSpc0+3nZqaytdff40oijz//POEhYUBIBEE6nto8FErOF9SSW6FAb3FinB5mVQQ+HNlnxWwWm1ilAhIBQFvJwVBHu4YkyRsOXuGDRs22PM7rknPnjBvHlx2IwDwyy/g42Mr5btDyOVygoKCSE1Nvdwl8PoiUYBHAAEeAZhFMxdNF8k155Khy6DIXITJbMKCBavVSmV5JYY8A/psPeRD+EUjgSsOYVUosCiVSIuLbeWJ48bdsX25FRQKhV2w/3NQ+rFjxzh48CAHDx58IIPSg4ODmTJlCgcOHGDlypWsXr2aPXv2MHDgQFq0aHHbwvK/wSlVlSkDf98t81cqKyv58ccfOXr0KE5OTjz11FN3XPQCuHTpEnFxcRw6dAiz2YxarbYLB/ei1b1eryclJYXExESSkpIoKSnBZDKRl5eHSqXi+eefp3nz5gQEBNz3SYzo6GgOHDjAnj17OHjwIAqFgoEDB96RdQuCwMiRI8nOzmbVqlV3PJto+PDh7NixA7PZTExMDFu2bCEiIgJPT08++OADtm3bhpOTE2+//ba92cfo0aOv6e4pKipi06ZNfPfdd1y8eJGHH36Yd955h61bt1JWZutqN27cODp27AjA77//TmhoKDVq1ODAgQMsWbKEoqIiQkNDSUpKolatWnTr1g0nJydSUlKQyWS4ubmxevVqli9fzo8//siAAQPYc7GArLwCnJzUSKUynF2csVqtNGnbidhBj+Dp6cmZpBN8PPFF5m7ejU6nQyKVIJVKsVis6HQ6zl+4QK0/OQ093D3QVbnw5HK+Xb2Fi+fO8Hy/rnQdMJThL01k09KfmPTFHLq1jaaBlzMffPABGo2GgwcPAvDee++xadMmRo4cyc6dO4mPj+fAgQPX/DzWr1/P8uXLOXbsGM7OzowaNcq+7Pz587z99tts3rwZV1dXzp49S/v27e2CVUpKCvPmzePbb79l9uzZTJ06lc2bN5OamspTTz3F7t27adiwISaTicrKSkpKShg7diwbNmzA39+fgoICmjVrxkMPPXTXHJcOHPxTcYhSDhw8gBgMBk6ePElCQgKJiYmUX+5o5eLiwkMPPURUVBQNGza85zOWDv65VLm
lqvIVGjZseF8fNLKzs8nPz6dTp073dBwpKSnMmjULgHHjxl1VnHNVyonyc6PSZCGrXE9+pQGt0YzZakW02gK8ASSCTaxyVcjwclJQ00WFi0JmP9ZJSUls2LCBiIiI6zsefH3hk0/g5Zdt4edVfP21zTHVu/cd2/8GDRqQmppKamoqTZs2van3yAQZQYogghRBoAaTaMIgGvj8188x6o088eQTXOISaYY00orS2F1WhI+TEyHZ2QBIZTJkn31GipcXAc2aUadOnfvmSpJIJISEhBASEsLgwYOrBaUnJSWRlJTEkiVLHpigdEEQaN26NU2bNmXLli1s3ryZuXPnsn37doYMGXJbTpF/gyh14MAB0tPTad269R0VEc6dO8fcuXMpKioiKCiI0aNH39HjJIoiJ06cYNu2baSmpgLg5+dHbGws0dHRd/03PS8vz54NdebMGcyXrzcajYbo6GhGjRrFnj17OHPmDFlZWfTp0+e+C1IAjRo1wsXFhV9++QVvb2/69u17R4U7lUrFs88+y4wZM/j+++/vqJNl0aJFgM2tM2PGDHx9falVqxbp6ek0btyYDz/8kGPHjtG1a1eSk5Px8/Nj7ty511zfkiVL6NmzJ+7u7ri7u+Pn58fy5cvZvXs3MpmMunXr2gWpKtRqNaNGjaJz587MmDGDffv2MW3aNLp164ZSqWTcuHEUFBTw22+/YTabUSgUmEwmXFxc2LZtG66urjg1aIqL+x/H3MXFBf+AAE4fP8K3b02ioqwEuVzBpfNnsZjNuLi4IpNKcXV1RS6XY7aYMegNZGVlYaysAGylgVUTAC062fLC6tQPQSqVUZSfh4//H+HzVafhqlWrKC0tZfny5YAt861u3br/z955hzdVtnH4PmnSpOkelLZQKLNldLD33jJFcKF+ICrKEsGBeyKoKKiogAxxoIIisvcGgRY6KZRVCl10JV3Zyfn+CD1SWV0gau7r6nU1JznvGTk5531/7/P8Hjp16kTTpk0pKChg+fLl9sis60zW7ty5k9GjR+Pu7g7A+PHj2b17NwBbtmzh7NmzdO/eXfq8TCbj4sWLgH2ioEOHDgB06tSJuXPnArB9+3YGDhxIWFgYYJ8A8vT0ZNOmTZw/f/6aAgwpKSkOUcqBg7/gEKUcOLhL0Gq1JCQkEB8fz6lTp6TOYmBgIF26dCEyMpIGDRrcNl8JB/9+wsLCaNKkCWfOnOHEiRO0bNnyb9uX+Ph44M6m7iUnJ/Pll18iCAJTpkyhadOmN/28WuFEY29XGnu7YrWJlJgtGCw2RFFEEAQUMgF3ZzkKp2t/kwqFgscff5zZs2ezbNkyXnvttZun/nToAG+9Ba+9Vn75O++Ajw907lyFI76WsmNOSUmpsCj1VxSCAoWgQG6UU1JYQkhwCCHBIXTr1g2wDxKyDh/Ga8IETHq93WekqIjSDz/k43btEASBgIAAGjRoQP369WnQoAF16tS54/5OgiBQt25d6taty5AhQ64xSj937hy//vorQUFBkkBVr169v2WQrlQqGTp0KF27dmXt2rUcPnyYDz74gHbt2jFy5MhKpeIVFBQgk8n+sdG1RqOR3377DWdnZ+69994aadNms7FlyxbWr1+PKIoMGjSIoUOH1pgfo9Fo5NChQ+zatYucnBzALrT07duXFi1a3LZrymKxcObMGUmIKts2QHBwsOQNFRISIvUtIiMj+fDDD4mNjeW3336rsYik6iCTyWjatCmbN2+mVq1aFaoSWFkCAwP53//+x+LFi/n555/L++XVAGPHjuXpp5+mpKSE1NRUZDIZY8aMAaBVq1Y0aNCAxMREateufdN2li5dSnZ2tjTRUVxczAcffECbNm3o0KEDqampN1w3JCSEBx98kN27d+Ph4cHGjRvRarWcPHmS//3vf9SvX59Vq1ZRUFCARqNBJpOh0+lYtWoVnUc541O/MXDVb8ImMv/l55i17Ef86tajpLCQSYN7UqjV4O0nx2q1G6Arlc54uXi
i0+sxGIzoSkoQRRt6gx6PK/chk9mCyWzCWeGMzMkJq9VSbt+dr6q89/nnn1/3GvD19aV58+aUlpYSHR1doSjsq397oijSr18/Vq5cec3nMjIyyk2mODk5Sf30GyGKIi1atODQoUO33A8HDv7rOEQpBw7+JsrKmZcJUWXhwYIg0LhxYynd5G4vZ+7gn0NZBM/HH3/MunXrbutg6FbExcWhUqluKQzVFElJSXz11Vc4OTkxZcoUycS1ojjJBDyVCjwrEcgQHBzM0KFDWbt2Lb/99tutS4sPHAj5+XB1hSCbDV58ERYtgitphtWhYcOGKBQKKUqjOpTNpl9vef3u3WHcOLhiSGy12fA3mXBv1Yokm43U1FQOHTokddblcjl169YlJCREEqsCAgLu6PX5V6P0hIQE4uLiOHHiBJs2bWLTpk3ljNKbNm16x4tIeHt7M27cOHr16sWqVauIjo4mLi6Ofv36MXDgwApF2uTn5+Pt7f2PneDYtm0bWq2WoUOHSp491UGr1bJs2TJSUlLw8PBg/PjxUsRDdcnPz2fPnj3s378fvV6PXC6nS5cu9O3bl6CgoFs3UAW0Wi1JSUkkJiZy8uRJjEYjYBc2o6KipGp5Nzp3KpWKSZMmMWfOHLZu3UpAQACda0gUrw7Z2dnYbDbq1KlzW7y9ANq0aUP//v35+eefycvLo1+/flVuS6vVotPppO957dq1+Pn5Ua9ePWJjY+nZsydbt27lnnvuITU1ldTU1Fum1R87dozc3FwyMzOl329cXBzt27ena9eutG3blp9++qncOh4eHuUMwAVBQK1W8/bbb7Njxw5+/PFH1q9fT0ZGBp07d0ahUPDOO+/w888/s2/fPrKzs3FxceHciURMchWBtf/sk5qMRixmM/UaNsbLz49lK+1RYbm5uWiKihFFu3ijUqnw9fXD1WhAo9GgyTMiipBzOQc3dzd7YyJoCgquEeXUbm7oSopwd7YPWUeMGMG8efPo2rUrarUanU5HamqqlIYfGBjIPffcQ3R0NGfPnmXdunXljOt79+7NK6+8wowZM3B1dWXZsmXSewMGDODtt98mISGBiIgIAI4ePUr79u1v+r0MGDCAd955h1OnTpVL3+vcuTOpqans2LFDqhwZFxdH8+bNb9s17MDBPxWHKOXAwR3EarVy5swZyR+qLI1CqVTSunVrIiMjCQ8Pv6XXiwMHVaVp06aEhoaSkpJCYmKi1PG6k2i1Wi5cuEDbtm3vSHRMQkICixYtQi6XM3XqVBo1anTbt1nGgAEDSEhIYNeuXURGRt56sDtmDOTlwXff/bnMYLCn9i1bBtX035LL5TRq1IhTp05RXFwspTBUBYVCIVX8vC5PPQWbNkFJCU4yGS4qFd2PHaP74sWI/Hkd/PVvz549gH1wXL9+fUJCQqQ/b2/vOyJUubq60qlTJzp16nRTo/SIiAiioqJo3rz5HU1JDAkJ4YUXXuD48eP8+uuvbNq0iYMHDzJixAg6dep0w3Nks9nQaDTXNQP+J6DRaNi6dSve3t41Ei2TkJDAN998Q2lpKS1btmTs2LHV+k2AfcLp/Pnz7Ny5k9jYWGw2Gx4eHvTr14/u3btXu/2/YrPZuHDhghQNVVbdDcDf31+KhqpMtUlfX18mTpzI3Llz+f777/Hz87tjEwjX4+TJk6Snp1OnTh0KCgqwWCy37dlx7733cuTIERITEzl16lSV2yksLGT06NF2byWZjFq1arFhwwZKS0v59ttvGTNmDB999BEvvfQSMpmMRYsWSSldN/KUWrp0KQ8++KAkSImiyObNm6lTpw4ymey65+TRRx9l7NixrF27lkmTJkm/fYVCwaBBg/D29qZVq1ZcunSJr7/+GpPJhM1mY+LEiQQHBzNt2jS0Wi17Dxzgfy++ibxdB2w2KzabiKu7O+Omv8ykEQPw9PGh1xB75KLJZAKZEyKiPZpIEHBRu6B2VWM2mVBJ4rlISbHdnkKQyexRVDp9uf0f9r8n+Pz
V51n54dus+OYbXnrpJYxGIx06dJDucy+99JIkSoG9n9OpUyf27t3Lxo0by00eDBkyhCNHjhAVFYWXlxc9evSQBNrGjRuzcuVKJkyYgE6nw2Qy0apVq+tGTl1N48aNWb58OY888ghmsxknJycWLlxI+/bt2bhxI88//zwzZszAbDZTr149yXDdgQMHfyKINVX31IEDB9dFp9Nx4sQJ4uPjSUpKQq+3P3C9vLykAU3Tpk1vSzlrBw6ux5kzZ5g7dy7BwcG8+uqrdzxaat++ffzwww+MHz/+ljOQ1SU+Pp5FixahUCh49tlna6RaU2XJycnh3XffxdXVlTfeeAO1Wn3zFWw2ePNN2Ly5/PKgILswVc0y7Bs3bmTdunVMmDCB1q1bV7mdr776iri4OBYuXHjja+jbb+Gzz8ov++gj6NXrmo+KokhOTg6pqamSQHXp0qVyKRIeHh7lRKqQkJA7KuLbbDbOnj0r+VCVTSzI5XKaNWtGVFQUERERd9Qo3Ww2s3PnTjZv3ozBYCA4OJj777//uiKCRqNh5syZdOrUibFjx96xfawpli1bxpEjR6p977BYLKxZs4adO3fi5OTEyJEj6dOnT7XuhVarlePHj7Njxw4p8jk4OJi+ffvWuABfWlpKcnKyZFJeWvqnR0+TJk0kIaq6kdYxMTF8/fXXuLq6MnPmzL8lcttms/Huu++SlZVFhw4dOHz4MBMnTrytqd/R0dE8+eST1K5dm8WLF9eob5nFYuHVV1/FarUye/bsavX9Dh8+zPLly2natCnTp0+v1vWbnp7O6tWrOXXqFE5OTvTu3Zt77rkHtVrN+fPnWbthI75tumOxWDEb9bio1dSqVQuV8k8xvqy6oCjaULm4YNAbKCkpxtnZmbp16+LiosZqtZCVlYXVakWpUmE2mewFQ6xWLBYzaldX6tSpg4D9WAwWK54qBV3qVr5aaEZGBvPnz6eoqIiuXbsyZswYZDKZNCEjiiIzZsxAr9fz1VdfVfncOXDgoPo4IqUcOLgNlHmTxMfHc/r0acmbIDg4mMjISCIiIv42bxIHDpo0aUKzZs2k6I+qegtVlfj4eGQy2W33tIqNjWXx4sWoVCqeffbZO1Ze/a/4+/szatQoVq5cyc8//8y4ceNuvoJMBm+8ARoNHD785/LMTJg6FRYvBje3Ku9PaGgoYPeVqo4oVTaYslgsNx5YPfgg/PKLfd/L+Owz6NoV/rKOIAjUrl2b2rVr07FjR6ntjIwM0tLSuHDhAqmpqSQmJpKQkCCt5+fnVy7tr169erfNMLrM26Zp06aMHj26nFF6WaSKIAh31ChdoVAwcOBAOnfuzLp16zhw4AAff/wxrVq14r777qNWrVrSZ//JJuepqakcOXKEBg0a0K5duyq3c/nyZb7++msuXbqEv78/TzzxRLVEh9LSUg4cOMDu3bvRaDQIgkBkZCR9+/alSZMmNfKcL0v3L0vLO3funJSS5OXlRdeuXQkPD6dZs2Y1eu23bduW7Oxs1q9fz4IFC5g5c+atRfUaZt++fWRmZtK5c2fuueceDh8+zOHDh2+rKOXq6kqzZs0oKCiQqqy5VeOeezVyuZx+/fqxevVqDh48SM+ePavUjtFoZM2aNQiCwP3331/t66xu3bpMmzaNhIQEfvnlF7Zv384ff/zB0KFD6d69O89Nmczuk6nkmmxocg0UFRVh0Bvw9fXF28cbnU5Hfl4eIvb7i5ubOxpNAUajAZlMRk5OLmq1C94+PvjVqkXO5csYjUZ8fXwpLS3BYDBiNpsoKixCpVLh5+uH7co1Xs/DpUrHVKdOHV544QU++eQTDhw4gMFg4PHHH+exxx7jwoULGAwGWrRowcKFC6t17hw4cFB9HKKUAwc1gCiKpKWlER8fT0JCAunp6YA9lz40NFQSov6JAwEH/06GDRvGyZMn2bBhA1FRUXdMIC0rQ960adPbOrg5duwYS5YsQaVSMW3atBov811ZunfvTnx8vDSYuqUYpFD
Ahx/ChAlw8uSfy0+fhhkz7JX5quhJERISgkKhICUlpUrrl1HmiWE2m28sSjk7w+TJ8Morfy67dAl+/dUuWN0CuVxO/fr1qV+/vlQRyWg0cvHixXIpfzExMcTExAB2cSsoKEiKpGrQoAFBQUE17v90PaP0MoHq7zBK9/Dw4JFHHqFnz56sXr2a2NhYEhISykU8lIlSlTFGvxsQRZFVV/zJqjMAP3z4MCtXrpTSfx5++OEqp11evnyZnTt38scff2AymVAqlfTu3ZtevXrViBBpNBqlNOvExEQ0Gg1gv+4aNGggRUPVrVv3tt6/Bw8ezOXLlzl69CiLFi1i6tSpd8xLrbS0lHXr1qFUKrn33nvx8PCgYcOGJCQkoNfrcXGpmlhRETw8PAgLCyMzM5MlS5YwderUGvNh69atG5s2bWLbtm1069atSudz8+bNFBYW0q1bN4KDg2tkv8oE1RYtWrB37142bNjAjz/+yJ49exg9ejSRDZtwNFODi7Mz+Xm5lJaUkJ2dRX5+PnK5E05OTtTy80OtdgVE9Ho9KpUK/9q10Wo06HR69PpMPD098fLyQqPRotFoCAwMQK/XU1AgUlJSwuXLlxEAN09vnJ1kBLhWXWj19/fnxRdfZN68ecTExGAymVi1apUjO8GBg7sMhyjlwEEVMZvNpKSkSEKUVqsF7KV327dvLz3Yb2enyYGDqtKwYUNatmxJUlISsbGx1YqYqQzJyclYLBaioqJu2zaio6NZtmwZLi4uPPfcczXWYa8OgiDw2GOP8fbbb/P999/TuHHjW6d4qdXw6afw+ONwRegG4NgxeyTV++/bo6oqiVwup3Hjxpw8eZKioqIqp5qVdeqvZ3Zejn79YOVKSEr6c9n27RUSpa6HUqmkSZMm5czqi4uLpWiqsr+DBw9y8OBBaV+Dg4PLpf35+/vX6GDe19eXPn360KdPH0pKSkhMTLzGKN3b25vIyMjbapReFvGQmJhYLuJh2LBhlJSUSPv6TyImJobz58/Trl27KqXgGgwGVq5cyZEjR1AqlYwbN06KxqsMoihy6tQpduzYQdKV69nX15devXrRpUuXagvteXl5kgiVkpIipa6q1WratWtHeHg4LVq0qLGonYpQdu/Ky8vj1KlTrFy5kkceeeSOTGSsX7+e0tJSRo4cKd2nOnTowPnz5zl+/DhdunS5rdtv1qwZ9erV4/Dhw6xbt44RI0bUSLtKpZJevXqxYcMGYmJi6NChQ6XWz8vLY/v27bi4uDB8+PAa2aerkcvl9OnTh44dO7Jhwwb27NnDZ599RosWLYgcPJoSZyXBwcEUFxeTmZFBaWkJgiDg5+eHy5XfgNlsxmy24KJ2QaVUERAQQElJCRqNFq1Wi0Ihx9lZgclkJjc3l4CAAFxcXEhPT6e0tJSCwkJsMjkhbgoUTrVuscc3x8fHhxdeeIH58+eTkJDAggULmDhx4m2LqHXgwEHlcYhSDhxUgrKBRnx8PMnJyVJVGz8/P/r06UNkZCSNGze+4xWZHDioCkOHDiUpKYl169YRFRV1R6pxxcXFAdw2g/UjR46wfPlyXF1dee6556hbt+5t2U5V8PLy4uGHH2bJkiV89913TJw48dYDOx8fWLDALkwVFPy5fMcOaN4cHnusSvsSGhrKyZMnOX36NG3btq1SG1dHSt0UQYDp0+3HUEYNi/Xu7u60bNlSSgkVRZGCggLS0tIkj6q0tDTOnz9/1S64SAJV/fr1adCgQY1UcwNwc3O7pVG6Wq0mPDycqKgoWrRoUaMDJEEQiIiIoHnz5lLEw8qVK8nNzcXJyekfFSllMpn49ddfUSgUjBw5stLrp6Wl8fXXX5Obm0twcLDkFVQZzGYzR48eZefOnWRkZADQqFEj+vbtW617p8Vi4ezZs5I3VHZ2tvRe3bp1admyJeHh4TRs2PBvrZaoUCiYOHEis2fP5sCBAwQGBkrVxG4XWVlZ7N27V+pfldG2bVt+/vlnjhw5cttFKUEQeOS
RR8jIyGDz5s00aNCgxtIGe/XqxbZt29iyZQvt27evlMj3yy+/YLFYuPfee2vcOP9qXF1deeCBB+jZsye//PILCQkJXMjIovND48HVDZvVhlqtRiaTYbFYpIqDgYGBWKxWexuSUCvg5uaOWq1Go9FeEchFzGYLomgjv6AAP19f6tWvz4XUVBRqN/LSL7Jny69k9OnD0KFDqxXd5OHhwYwZM/jss884deoU8+fPZ8qUKXc8HdWBAwfXxyFKOXBwCy5fviz5Q13t41DWOYmMjCQwMNDhD+XgH0dISAgREREkJCRw7Nixavm0VASr1UpiYiLBwcG3JVLj0KFDfPvtt7i5uTF9+vTbVnK9OrRr1464uDhiYmI4dOhQxQZVdevafZieegp0uj+Xb9hQZVGqzAQ7JSWlyqJU2QDhphX4yoiIsJu3L18OXl52keo2IggCvr6++Pr6SlGANpuNy5cvl4umOn36NCevSo/09PQsl/ZXv379ag9anJ2dpWfFX43Sjxw5wpEjR5DL5TRv3lwySq+pgebVEQ9lvkD5+fmsXLmSBx54gMDAwBrZzu1kx44daDQaBg8eXCkxTRRFduzYwW+//YbVaqVPnz6MHDmyUobjRUVF7N27l71791JcXIxMJqNdu3b07du3yh51RUVFkjdUcnIyBoMBsF8nERERUlqet7d3ldq/Xbi7uzN58mQ++OADfvnlF2rVqnXbfJ3K0jVtNhujRo0q9525ubnRsmVLKaXxdpynq2tAKRQKnn76aWbNmsWyZct45ZVXKi1qXg83Nze6devGzp07K1UJNyUlhdjYWGrXrl1lP6rKUrt2bSZNmsTJkydZvXo1J/Zuo0HHHlhMJuROTjS4Er1oj5oqJe3iRWSCDJVKiYtL+funTOZ0xXPKlYKCAmyiiE6nx2yx4OzsjLubO34BQRRqC/hj7U+onWDr1q0kJCQwbty4aqXil01WffHFF5w+fZqPP/6YadOm3VZhz4EDBxXDIUo5cPAXbDYb58+fl4Soy5cvA/aOSXh4OBEREURERODp6fk376kDB9Vn2LBhJCQksH79etq0aXNbZ+PPnj2LTqejd+/eNd72gQMH+P7773F3d2f69Ol39WD74Ycf5syZM/z888+EhobiV5FqemFhMHeu3ei8rBpdo0ZV3oeQkBCUSiWnT5+uchsVTt8rY+hQ+9/fhEwmIzAwkMDAQDp16gTYI1XS09PLCVUJCQnEx8dL6/n7+5dL+6tXr16VZ+yvZ5QeGxtLXFwcCQkJJCQkIAgCjRo1IioqisjIyBrxJ3J1deXBBx8kOjqa+Ph4Tp48yTvvvEP37t0ZOnToHU0HqwxarZbNmzfj6enJgAEDKrxecXExy5cv58SJE7i5uTF27FjCw8MrvP6lS5fYuXMn0dHRWCwW1Go1AwcOpGfPnpUWQco8J8vS8tLS0qT3/Pz86NSpE+Hh4f+IKrxBQUE89dRTfP755yxdupQXXnjhtqRHlwl2YWFh10317tChAwkJCURHR9O/f/8a3/5f8fPzY/z48SxYsICFCxcyc+bMGols7NevH7t372bz5s2Eh4ffcnLTZrOV81aryYqOFaFZs2bMnDmTV155hWMlelr3HoBMJmAxW1Cr7ZGnWq2Wy5cvo9frsVot5Obm4O3jg7OivAeiUqkiMDCQ4uJi8sQ8dDodGenpBNULwcVFxaXTSaidIDw8nFOnTpGVlcWcOXMYOHAggwcPrvKxq1QqpkyZwqJFi0hKSuKjjz7iueeeu+tEYAcO/ms4RCkHDrAbiiYnJ0uDgjLfDXd3dzp37kxUVBRhYWGO/HMH/zqCg4Np1aoVsbGxREdHV9rbojKUDfRr2k9q3759/PDDD3h5eTF9+vQamcW+nbi6uvLYY4/x+eef88033zB9+vSKiYHt28PSpfDdd/a0vkmTqrwPTk5ONG7cmBMnTlBYWFglkb1SkVLX4+RJMBrtUVR/U2qSXC6XxKYyDAYDFy9elNL+Lly4wNG
jRzl69ChgF5bq1KlTTqgKCgqqtKB7tVH60KFDycvLIz4+XjJKP3v2LL/88kuNGaWLoojRaKRfv34MGzaMVatWsWfPHo4ePcrgwYPp2bPnHR/k3orff/8dk8nEww8/XOHn76lTp1i6dClFRUWEhoby+OOPVygt02azkZiYyI4dOySxtnbt2vTt25cOHTpU6vmv0+lITk4mMTGREydOUFxcDNivnbCwMMLDw2nZsiW1a9f+x0VZt2jRggceeICffvqJL774gpdffrlGJ+ksFgurV6++aVW5iIgIVCoVR44cuSOiFEDLli0ZMmQI69ev59tvv+WJJ56o9nfn7e1Nx44dOXToEGfPni3nk3c9Dhw4QHp6erlU5TuJ0Wjkyy+/pLi4mFouLij1hdg8/Cgs1VFcXIS3jw9eXt5YrVZpQjc/P5/S0lI8PD3x8vT6yz1GwN3dA7XalaysLGxOcvJyc5DlXKJ7q5Yk/bGPrKwsXnzxRb7//nsuXrzIpk2bpKipqqbnOzs788wzz7B06VKOHz8uCVNXVyp14MDBneXu6n04cHAH0Wq1khHtqVOnJEPRgIAAunTpQmRkJA0aNPhbfRwcOLgTDBkyhNjYWDZs2EC7du1uyzUviiJxcXH4+PjUqM/Tnj17+PHHH/H29mb69Ok1ElVyJ2jZsiXdu3dn37597Ny5k379+lVsxRYtYM6c8stOnYK0NOjWzW6OXkGaNm3KiRMnOH36dJVSNyvsKXU9vv4aFi2y/9+/P8yaZfeeugtQqVRSNFMZRUVFki/VhQsXSE1N5dKlS+zfvx+wC3T16tUrl/ZXq1atSg1ay7xzyozSExISiIuLIzk5uZxReplA1aRJk0r5F5aUlGA2m/Hx8aFZs2a8/vrrHDhwgHXr1rF69Wr27t3LfffdR2Rk5G0TSkRRRG+xYbLasIkiMkFAKZehcpJds820tDQOHTpE/fr1K2RKbrVaWb9+PVu2bEEQBIYPH87AgQNveT8zGo0cOnSInTt3kpubC9gjQvr27UuLFi0qdC5EUSQrK0vyhjp79iw2mw2we9l07tyZ8PBwmjdvXuVqf3cTvXr14vLly+zevZsvvviC559/XrofVJc9e/aQk5ND9+7dqVOnznU/4+zsTOvWrTl06BAZGRk3/FxNM3jwYFJTU4mJiaFhw4blvK6qSv/+/fnjjz/YsmXLTUUpnU7H2rVrkclkjB49utrbrSylpaV8/vnnpKamEhISwtSpU1Gr1SRn5nIiW4PVSUlObh4uSmcMBgOurm64u7tRWFiI0WikoKCA0pJS3D3c8fT0xElmv3eJoogVAd+AQPIyL7Fh0Xyyz58hvkMHPD09ycvL48SJE8ycOZPNmzezceNG0tPTmTVrFkOGDGHgwIFV8nGVy+U8+eSTfPfddxw6dEgSpu7mKGsHDv7NOEQpB/8ZRFEkIyNDSs24cOECYJ+tbty4MZGRkURERNz1URYOHNQ0devWpU2bNhw7dozDhw/TuXPnGt9GRkYG+fn59OrVq8YGvDt37mTVqlX4+PgwY8aMiqXB3UWMGjWKkydPsnbtWlq0aFE1D6zt2+HVV8FmAz8/u9BTQc+N0NBQwO5RUhVRqtLpe1fz889//r9tG/TsaRen7lI8PDyk1G2wP0/y8/PLpf2lpaVx7tw5aR1XV1fq169fLqKqohElbm5udO7cmc6dO2M0GiWj9ISEBHbv3s3u3bsrbZRecMUov8zPTSaT0b17d9q1a8fmzZvZuXMnX331FaGhoYwePbrG0rIMFitZJUYK9Ca0RjNmm1jOs0cQBJROMjyVcnxdnAl0U6GQCVKa0ujRo295z8jPz2fJkiWcP38eHx8fnnjiCRrdIr01Pz+f3bt3c+DAAfR6PXK5nC5dutC3b98K/RbLKvCWpeXl5+dLx1O/fn3JG6o60W13M/fffz85OTmcOHGC5cuX89RTT1X7OIuLi1m/fj1qtZphw4bd9LPt27fn0KFDHDlypEoG+BXhr8c
jCALjx49n1qxZ/PLLL9SrV++W0U23IjAwkKioKGJjY0lPT7/hpM2GDRsoLS2lT58+BAQEVGublUWr1fLpp5+SmZlJWFgYzzzzjCSutqjjT0gtH46kZpIrgNlmw2C2onCS4e3tg9rVlfy8PIxGI0ajAZvWSmlJCe6enqjUrogIyGUCDTxc6BbQjNMN62MuLCAlJYXAwEBOnTqF0Wikc+fODBkyhMjISL755hvS09NZt24d8fHxjB07tkrPT5lMxmOPPYZSqWT37t189NFHPPvss9XyrXLgwEHVcIhSDv7VWK1Wzpw5I/lDlXUalUolrVu3JjIykvDwcFxdXf/mPXXg4O9lyJAhHD9+nI0bN9KhQ4caryBZlrpXU8a4O3bsYPXq1fj6+jJjxox/XIl7QCpN/9FHH7Fs2TJmzpxZ+fSpzZvtghRAXh5MngzLlkEF0hDq16+PUqkkJSWlCntfTVEqOBi02j9fL1hgF6ZqKNridlNW/tzPz08yirfZbGRlZZUTqk6dOkVycrK0nre3dzmRqn79+rjcohKhUqmUoqOqY5Re9vz7q1m4i4sLI0eOpHv37vz6668cP36cWbNm0blzZ0aMGIGHh0eVzlGx0UJqoY7sEgNmm12EkgkCTgJS9JKIXeAzWGzozEaySoycKShFKNGQfjmXNm3a3HLQf+zYMb777jv0ej2tW7fm0UcfvaE5vSiKnD9/nh07dhAbG4soinh4eNCvXz+6d+9+S8Pj/Px8SYRKSUmRrn0XFxfatm1LeHg4LVq0+E8YJ8tkMp588kk++OADjh8/zrp16xg+fHi12vz9998xGAzcf//9tzyHoaGheHp6cvToUe699947Jvyp1WqeeeYZ5syZw+LFi3n11VerXbVz4MCBxMbGsmXLFp544olr3s/Ozmb37t24ubkxZMiQam2rsuTk5DB//nzy8/Np1aoV48ePv8b7zNVZTq+mwRToTazbexBR6YZcrSZXq0WlUuHjH4BOr8NsMiMiAgJ6k5mSomx8FSIdwpvhpbaLXJMmTWL27NkUFRXh7+9PdnY20dHRvPjii8ybN4/g4GBmzpzJxu07OJaYDF6+rNp3hIaNmhAcXBdnJyfcnZ1wV8pxd5Yjv0WkpCAIPPDAA6hUKjZv3swnn3zClClTaNy48e06pQ4cOLgODlHKwb8OnU7HiRMniI+PJykpCb1eD9jLsXfv3p2oqKh/hKGoAwd3kqCgINq2bUt0dDR//PEHXbt2rdH24+PjcXFxKZcSVVW2bt3KmjVr8PPzY/r06f9IQaqMRo0aMWDAALZs2cLGjRsrP6hr1gz27fvzdVaW3Qz966/hFubVMpmMJk2akJSUhFarrfTAqixdp0qeUo89Bi+88OfrzEz46acqVxO8GyjzmqpTp45UVdFsNnPp0iXS0tIkj6rY2FhiY2Ol9WrXrk2DBg0koapu3bo3fD791Sg9PT1dEqiuZ5QeFRUl+aSUiVI3+r34+fkxYcIEzpw5w+rVqzl48CAxMTEMGjSIvn37VviZaRNFUrU6zmlKMdtEnAThuul5EoIgdUZtoojZaqPQKqPNvWNoExKIKIrXXddkMrFq1Sr279+PQqFgzJgxdOvW7bqftVqtHD9+nB07dkhR0sHBwfTt25e2bdveUAy2Wq2cO3dOSsvLzMyU3gsKCqJly5aEh4fTqFGjGhfy/wm4uLgwefJk5syZw6ZNm6hdu3aFUi2vx6VLlzhw4ECFq8rJZDLat2/P9u3bOXPmTI08WypK3bp1eeSRR1i+fDmLFy9m+vTp1fJjCwkJISwsjJiYGIYPH36Nt9Hq1aux2WwMHz682tVAK0N6ejqffvopRUVFdO7cmUcfffSG6bCCIOCrVnIp+gCX8wvoNXAwZy9loPL0xtvPnzrBdbHIRFJOJKLNzsRQpEVXkItBp2O/tzfDhg2jY8eOBAUF8b///Y+vv/4anU7Hs88+y7vvvsv+/fuZ8cKLjHr8SVxqB6MKa0O7RpHodTp
sokipKHLmcj4uLmpkMhkyARQyGUFuSup4uODhLL/hPUgQBEaMGIFSqWTt2rXMnz+fSZMm0axZs9t5eh04cHAVDlHKwb+C/Px8yX/j9OnTkpdD3bp1pZnjf2sIvQMHNcWQIUOIiYlh48aNdOzYscZMjzUaDWlpabRr167aA7fNmzezdu1a/P39mT59+r+iYs7QoUNJSkqSKjA1vFJeu0KMGQP798NV0TicOQPTp9ujj24ReRQaGkpSUhIpKSmVNrmvVqRUz57QqhVcJc6wbBkMGwbVjDq4m1AoFDRs2JCGDRvSq1cvwD5xcvHixXIRVYcPH+bw4cOAfbBdt25dSaRq0KABAQEB1wwGBUEgODiY4ODgChmlX7x4EVEUbyniNmnShJdffpnDhw+zdu1a1q5dy/79+7n33ntp27btTZ+jeouV+MuFFOjNCLcSo66DTBAw6krQFxbi6etLml5En60lwt8TZ6c/jz8jI4Ovv/6arKwsgoKCePLJJ6+bvlNaWsqBAwfYvXs3Go0GQRCIjIykb9++NGnS5Lr7VlxczIkTJyST8rKJLYVCIYlQ4eHh/2gxvCbx8/PjmWee4ZNPPuHbb7/F19e30iltoiiyatUqRFHk/vvvr/BzokOHDmzfvp3Dhw/fUVEKoGPHjqSmprJnzx5++eUXHnzwwWq1N3DgQE6dOsW2bdsYM2aMtDwpKYmkpCTq1q1b45NFN+PcuXMsWLAAnU5H3759GTVq1C1/y5cvXyYjI4PmzZszalA/tFota9eu5dCqTQC0bduWoX36sG7dBTLT03B2dqZ58+akpqayYsUKtm3bxogRI2jTpg0XLlxg+/bt7N+/n3ffe4+1uw/QontfcgQ18nwtrmoXXJ0VuDp7UliopbCoGETQFRbi6e2Fm5sbFptIaqGei0UGaqmdaebnjlpx42tr0KBBuLi48OOPP7JgwQKefPLJGi/M4sCBg+vjEKUc/CMpK7EcHx9PQkIC6enpgL0zHxoaKvlDOTqNDhxUnICAADp06MDhw4c5dOgQ3bt3r5F2a6rq3oYNG1i/fj21a9dm+vTp1U6ZuFuQy+WMGzeO2bNns3z5cl577bWKV/pSq+HTT+Hxx+HSpT+XHz8Or78Os2fftLJd2UCuKqJUtYzOBQGee658ZFRJCSxeDC++WPn2/kGo1WrCwsIICwuTlhUWFpYTqS5cuMDFixfZdyUKTqlUSkbqZX++vr7lBom3Mko/ceIEJSUl7Nixg/bt29/UKF0QBDp16kTr1q3Ztm0bW7duZcmSJezatYv777+fBg0aXLOOzmwlJktLscmCs0yGk6zyk0BWm5XCwkKcnGR4uKqxiQKXS00cy9bSJsALhUxg3759rFq1CovFQvfu3Rk9evQ1JtuXL19m586d/PHHH5hMJpRKJb1796Z3797XRKGIosjFixeltLy0tDTJ88rX15cOHToQHh5OaGioI8L6BjRq1Ij//e9/LF26lK+++oqXX365UpXMYmNjOX36dKWrytWtW5fAwECOHz/OQw89VOPfz61EmNGjR3Px4kV2795NgwYNqlW9NiwsjPr163Po0CGGDBmCp6cnFotF8la7//7771jhnRMnTrBw4UJMJhPDhw9n0KBBFRKXjx07BkCbNm0Ae4bC2LFj6dWrF6tXryYmJoa4uDj69u1LWFgY69evJzk5mYiICHx8fDh48CBfffUVDRs2ZPjw4aSlpXEpO4cMXOh638OYzWYsBh1Gi5XSIi3u7u54eXnh5eWNi1pNfl4eZrMFbYEGvU6Hn68vCic5VlEku9SIxmAm1NeNuu6qGx5Pz549USqVrFixgkWLFjF27NjbWpXYgQMHdgTxardJBw7uYspMRcuEKO0VPxK1Wk3Lli2JjIykRYsWt/TncODAwY3JycnhzTffxNPTk3fffbdGOvmffvopKSkpfPLJJ1WqPCWKIuvXr2fjxo0EBgby3HPP1WgJ8ruFsrTEnj178tBDD1Vu5YwMGDcOrph
ZS9x/vz1N7gYdcJvNxnPPPYe7uzvvvfdepTZ58eJFZs2axfDhw7nnnnsqt79lvP663RerDCcnWLWqwmbt/1ZEUSQ3N/cakepqAdDNza2cSBUSEnJdH54yo/R33nmHCxcuSKb2arWaiIgIoqKiaN68+U2FUI1Gw2+//caRI0cAe4TKvffeK0UqGq02jmZqKDJaUDrJkFUxKjm/IJ+S4hJ8fX1wc7Mfi00UMVpteDnLSNyylthjMajVah599FFat25d7pydOnWKHTt2kJSUBNhFpd69e9OlS5dyfQODwUBycrKUlldUVATYJ7YaN25MeHg4LVu2JDAw0BFhXQnWr1/Phg0bCAgI4KWXXqpQqpnZbObNN99Eo9Hw5ptvVtrEuyx6dsKECeWuh+qQlJTE559/Tv/+/bnvvvtu+lmtVst7772HwWBg5syZ1aoue/z4cRYtWsSAAQMYOXKk5J3YqlUrnn766Sq3WxliYmJYtmwZNpuNBx98sEKplGW8++67ZGZm8tFHH+H2l/RxURSJjY3l119/JS8vDw8PD7p06UJiYiLp6el4e3szcuRITp8+zcGDB7HZbDRv3Q7P5q1Rurojw4auuAQnuRPe3t5oNRosFitOTjK8vb1xdXVFFEW0Wi1FxfaoKUEm4O3ljbu7G6Jov08hQH0PF5r5ud/0PnX8+HGWLFmCzWbj4YcfrrFJOgcOHFwfR6SUg7uakpISEhMTiY+PJzk5GaPRCPw5IxwZGUnjxo3/k14ODhzcDvz9/enYsSOHDh3iwIEDUspRVdHr9aSkpBAaGlplQer3339n8+bNBAUF8dxzz1XZePlup1+/fsTHx7Nnzx4iIyNp3rx5xVeuUwc+/xyefBJ0uj+Xr1plr8r3+OPXXa3MoyghIQGNRlOpdMhqpe+VMWkS7NwJZb5UVqs98uuTT6re5r8AQRDw9/fH39+f9u3bA3YBMTMzk9TUVMmjKjk5WRJgwC7C1K9fX/KoqlevHiqViqioKOrXr09UVBQPPPCA5ENVljaoUCho1qzZDY3Svb29efzxx+nduzerVq3iyJEjHD9+nP79+9O/f39SCo3VFqRMZhMlJSU4OyvKDWhlggBWM5fyDWgEJY0aNWL8+PFSJLTZbObo0aPs2LFD8nxq1KgRffv2JSoqCplMhiiKZGdnSyLUmTNnsFqtALi7u9OpUydatmxJ8+bN76hnz7+NIUOGkJ2dTUxMDIsXL2bKlCm37J/t2LGD/Pz8KleVa9++PWvXruXIkSM1JkpVBi8vL5566inmzZvHwoULeeWVV6p8DUVFRVG7dm327t1Lly5d2LBhA3K5nFGjRtXwXl+f/fv388MPPyAIAo8//rh076kIOTk5pKen06xZs2sEKbDf01q3bk1ERAS7du1i48aNbN68mcDAQFq0aEFycjJLly6lb9++vPrqq2zdewB5/VDkKhe0+bk4KxQonZX2aCmzmaCgIAoLCykqKiIvL5+SkhJ8fHztFf/UavLy8rFYLBQUFKDT6/D19UUll2O22Ugr1CMCLfzcbyg6t27dmokTJ7Jw4UJ++OEHDAYD/e/iCrEOHPzTcYhSDu46Ll++LFXLO3funBRG36BBAyIjI4mMjHTMXjpwcBsZPHgwhw8fZvPmzXTt2rVa0VInTpzAarVWqeqeKIqsWbOGbdu2UadOHSmi59+KTCZj3LhxvPvuu6xYsYI333yzcoOb0FD4+GOYMgUslj+Xf/mlXZi6QYn1MlEqJSWlUibFZddFlYzOywgIsPtiLV/+57J9++DYMbiSAuLATpnXVN26denWrRtgP/eXLl0qF1F1/Phxjh8/DtgHggEBAdSpU4ezZ8/Svn17GjVqRGhoKPfff/8NjdIbN24sGaX7+flJ+xASEsILL7zAsWPHWLNmDRs3biTpQjrN+w7BRaWssiAFIpqCAhDB28cHEKTlhYWFaAsLUShVhHbsRo9GQfiqlRQVFbFnzx727dtHcXExMpmMdu3a0bdvX0J
CQjCbzZw8eVJKy8vLy5O2Vr9+fckbqn79+o7+RA0hCAJjx44lPz+fkydP8tNPP/Hwww/f8PxqtVo2b96Mq6trlavKlXlYJSYmUlpa+rdUU27atCn33Xcfq1evZvny5UycOLFK15RMJmPAgAF8++23zJ07F71ez6BBg8r9Bm8XZZG6CoWCCRMmEB4eXqn1/5q6dyPkcjn9+/enU6dOrFu3jv3795OVlUVgYCAajYYdO3aQcu487e57FKMNSgs1OAkydKWlmE0mFApnCouKcHNzw8vLG1c3NwoKCjDoDWRlZeLh4YGnpydBQYFoNFqKi4vt72Vm4u3tLQlmFwv1qBVONPS68fXSsmVLpk6dyoIFC/j1118xGAwMHTrUcb9w4OA24BCl/oGYrDaKTRaKTRYMFhs2UZSqTLg7y3FXyittLvp3YrPZOH/+vCREXb58GbAPeMLDw4mIiCAiIuJfma7jwMHdiJ+fH126dGH//v3s27ePPn36VLmtuLg4gEqLUqIo8ssvv7Bjxw6Cg4OZNm3adWdf/23UqlWL0aNH8/333/Pjjz8yfvz4yjXQrh288w688kr55e+9B97ecEXMuJrQ0FCASotSZT4+lqsFsKowdiz8/nv51MN58+Dbb2/qh+XA/h00atSIRo0aSctKS0tJS0uTRKrU1FT27t3L2bNn0ev1ZGdnlzNSb9u2LUOGDCE/P18SqM6ePStV4atTp44kUAUHByMIAm3btiUyMpLtO3dyWeWHTq+ntLAQHx9vlMrKR0Tq9XoMBiNqtQuqK+tbrRby8vIwGIw4OTnh7ekBcmfiMvK4dHA70dHRWCwW1Go1AwcOlNKMEhMT2bhxI6dOnZIEU5VKRevWraW0vH9rtOXdgEKhYOLEicyePZt9+/YREBBww2fI2rVrMRqN3HfffdWKUOvQoQNnzpzh+PHjkmB7p+nTpw+pqanExMSwefPmKqc0d+jQge+//55t27bRt29fBg0aVMN7Wh5RFPntt9/YunUrKpWKyZMnV9qoHuyilCAIFfaOdHd3Z8yYMZLfVHJyMjabDblcjrp+UwqKS1HJZdT298fTw4OMzEwMej0mkwlnZ2cKCgqoVcsfhVxBbX9/SnU6NAUaCguLKC0txdvbGx8fb9RqF/Lz87FYrOTnF6DT2aOmbIKMswWl1HJR4q688XC4adOmTJ8+nU8//ZSNGzdiMBgYPXr0P2aM5cDBPwWHKPUPwWoTuawzcqlIj9ZgxiqKlLmB/TmfaP9fJhNwVTgR7OFCkJuqXMWauwWj0UhycrI0M1tSUgLYH1KdO3cmMjKSZs2aVdzs14EDBzXKoEGDOHTokBQtVZXfotVqJSkpifr161cqLUwURX7++Wd2795NvXr1mDZt2t8y+/130bVrV+Li4jh69CiRkZG0bdu2cg307w/5+faoqTJsNpg5ExYuhL/MgNetWxe1Wk1KSkqlNlMjkVIArq4wYYLdlL2MU6fsXlODB1ev7f8grq6uNG/eXEr/FEWRgwcPMm/ePCIiIvDw8JBEqzJUKhX16tWjQYMG9O7dm9GjR5Oeni6lzm/cuJGNGzfi7e0tCVRNmjQhqktPjmdr0ZcUUWI2kZ19GbVajbe3F3J5xSIsRUQKNBoEAek+odfryMvPx2a14eKiwtfXD5PJSLGmAC2QfOGS5Bfl7+9PSkoKCxYskIqegL1wQ5kI1bhx4xqrJurg1nh4eDBp0iQ+/PBDVq9ejb+//zWRNxcuXOCPP/4gKCio2kJSmzZt+Omnnzhy5MjfJkoJgsBjjz1GRkYG69ato379+rRo0aLS7Tg5OWGz2TCZTDRq1Oi29oNtNhsrV65k//79uLu78+yzzxIcHFzpdnJzc7l06RJhYWGVjmYOCgpi6tSpnDhxgtWrV2NVe+LfKBSDrhSD1YJBr8fXz4+GDRpw6VI6JSUlGAxGcnJykclk+PjYCz64ql1xcXGhUGuPjsrNzcPFRYWPjw+BQUFoNBpKikvQ6w1kZmb
i5eODXOnCibwi2gd53zTKMyQkhOeff5758+ezc+dOjEYjY8aMuWPG8w4c/BdwPKHvckRRJL3YwFlNKXqzFRGQywScZTIErq0MYhNFbKJIscnCidxizhSUUM/DhUbebsirUAmnJtFqtZI/1MmTJ6XZ9YCAALp06UJkZCQNGjRw3OQdOLgL8PX1pWvXruzdu5e9e/dWyUvh9OnT6PX6SkVJiaLIjz/+yN69ewkJCeHZZ5/9z3m8lA1u3n77bVauXEmTJk0qHyn60EOQlwcrVvy5zGiEZ5+FZcsgJERaLJPJaNKkCfHx8eTn51e4ammNeEqVMWIE/PQTpKb+ueyLL6BPH6iCF5mDPxEEAYvFgp+fH8OGDaNVq1aIokhOTo4USZWWlsb58+c5ffq0tJ67uzsNGjSgT58+WK1W8vLySElJYffu3ezevRu1Wk2HUY/i4uOPj5cX7m5uaDQadDoder0O9ytpNDLh5s/04qJiLGYLHh4eODnJKdAUUFxUjCCAl7cXMkEg+3I2FrO9z+Dq5UXb3gMoTInn999/R3fFQ00ul9OiRQspLe9OpDw5uDF169blySef5IsvvuDrr7/mxRdflEzAyyYeAB544IFq9/vUajXh4eHExsZW6h5W0yiVSp555hnef/99lixZwquvvlrp6zA2Nhar1YqPjw+XLl3CarXeFt9Ui8XCsmXLOHbsGD4+PkybNo3atWtXqa2ydOFbpe7dCEEQaNmyJWFhYWw6cYFSq4hJV2r3j7JaMZlM+Pj4EFQniOysLEpLSzGZzOTk5qLXG/Dy8sTNzQ2ZIMPb20dK6SsToDw9PfG54jWVn5+P1WKlIC8fFzdXEL3I1Zmo7Xpz8a9OnTq88MILfPLJJxw4cACDwcDjjz/u8LR14KCGcIhSdzE6s5XkvCJydCYQwbkCBqIyQUAmCMixP/TNNpGzGh05OhMt/NzxcXG+6fo1iSiKZGZmSml5ZbOyZX4VkZGRREREVPkh6MCBg9vLoEGDOHjwIFu3bqVHjx6VnrGNj48HKp66J4oiP/zwA/v376dhw4ZMnTr1P1tN09PTkzFjxrB48WK+/fZbJk+eXPl0gcmT7cLUxo1/Lisqsi//7jt7Ot8VQkNDiY+PJyUlhc6dO1eoeScnJwRBqH6klL0xu2A2bdqfy3Jy4IcfoLIpjA6uIT8/HwAfHx/A/hyuXbs2tWvXlsqdWywWMjMzy/lTJSYmkpCQILXj4+ODj48PBoOBEqMZi1xJoUaDxmLGxUWFq9oVV1dXigoLKSosorSkBC8vryupt9dev1ablcJCLU5OMtSuarKzszCZzDg5yVAqlRQVFmKz2W0KFHI5giDDaDAgKlTExifg4eZK27ZtCQ8PJzQ01BFdfZcRHh7O6NGjWbVqFV988QUvv/wyHh4eREdHc/78eaKioggLC6uRbXXo0IHY2FiOHj1621Pebkbt2rUZO3YsCxcuZOHChbz00ksV9mU0m8388ssvODk5MXbsWGJiYoiJiZF+ozWF0Whk4cKFJCcnExAQwLRp0yoVzfxXKpu6dyO0JhsKN3d8gBJBpLi4GJPJhE6nw2Kx4Obujo+PL1arDVEUpedPfn4BRUVFeHl5oVarcVY4E1C7NiWlpWg1GrTaQkpKSu3CVmAQBZoCSktK0ZeUIgpOxJ4vZkDLxrd8xvr7+/Piiy8yb948YmJiMJlMPPXUUzVSpdiBg/86jpCUuxSNwcwfGQVcLjUhF2So5E6VNhAVBAFnJxlKJxlFRgvRWVouFelv0x7bsVqtnDp1ip9//plXX32Vd955h99//52srCxat27NuHHjmDt3Ls8//zz9+vVzCFIOHNzFeHt7061bN0pKSti9e3el1hVFkbi4OHx9falTp84tP2+z2fjuu+/Yv38/jRo14tlnn/3PClJltGnThvbt25OUlMSBAwcq34AgwOuvw19Fpuxs+PHHcouaNm0KUKkUPkEQcHZ2rplIKYAuXeCv1Z6++caeivgvYer
UqYSEhCAIguS3VsaWLVto27YtERERdOzYURJ1K0qPHj1o3LixVBzkagqu+HX5+vqyZ88etmzZIr2XmZlJt27dkMvl1KtXD41Gw+zZs1mzZg1PPvkk+/bt45577qFt27bIZDLS09PJy8vD1bcWgpMcs8nA0jlvcXTvbnJzc8jPy0fm5IRSpcRqs5GfX0BmVhYGg57pDw7nwLZN0rYLtVpsNhvOSiX3hNYlPy8XUbRhsVgpKirGYDRisVoRELBabVisFg5uWkthXi7TXpzJ7NmzGTNmDBEREdcVpN566y2mXRE6v/nmG0aMGAHYy94/8MADlTq/DqpG79696d69OwUFBXz55ZeUlJSwZs2aGq8q17JlS1xcXDhy5Mh1fwNVoaq+Qa1atWLAgAFcunSJlStXVnh/tm/fTn5+Ph06dOChhx7C2dmZLVu21NjxAOh0OubPn09ycjL169fn+eefr5YglZeXR1paGk2aNKm2V1t6sR6bCHKZPeIpKCgIT09PnJ2VGAxGNAUF5Ofn4+rqilKpxGq1Ipc74eHhjsViJTc3j+zsbAwGPSDg5upGUFAQ7u5uWKwWcnJyyM/Pw8vLC3//Wjg5yTAbDeTpTCxZ8R3FxcW33EcfHx9eeOEF6tSpQ0JCAgsWLJAqgztw4KDqOESpuxCNwcyxLC0Giw2Vk6zaaXcyQUDlJMNmEzmRW1zjwpROpyM6OpolS5YwY8YM5s2bx65du7BarXTv3p0pU6bw8ccfM2HCBDp27PifMCt24ODfwsCBA1EoFGzbtg2DwVDh9dLT09FoNERFRd2yY2+z2VixYgUHDx6kSZMmPPvss6gcKVsAPPTQQ3h5ebF69Wpyc3Mr34BcDh98AH/1NvnLd1m3bl1cXV1JSUmp1ABILpfXnCglCPZIqauvF70e1q2rmfbvAkaNGsWBAweoX79+ueUajYYxY8awYsUKEhIS+OijjxgzZkyF2z1z5gxnzpxBqVSyd+/ea97Pz8/H2dkZpVJ5jSgVFBTE/v37pdcLFy7kjTfeIC4ujjZt2nDy5EmGDx/Ok08+yaxZs/j444+ZOnUqHbr1wNlZgUIu5/GX3qRF2/ZYrFb0eh1ajYaiwkLMJhNms5nSkhKysrIwmU1YrfY0PLPZRHFxMWazhaKiIsDuT2YwGNAb9IiIOCsUODs74+buhl8tP4Lr1uWPrZu4nH4Jlad3lUWDtm3bSuljDm4vgiDw4IMP0qxZM1JTU5k5cyYFBQX06dOHWrVq1dh2FAoFbdq0ISsrq5y3WFWoCRFoxIgRhIWFcejQoXK/rxtRVonQ2dmZkSNH4ubmRrdu3cjMzCQxMbHa+wNQWFjI3LlzOX/+vGTgXd2KtjdK3ausyC6KIvl6E/NfepY1yxcBIJcrqFXLn8CgQDw9PREEgaLiIjQaDTKZDCe5nNLSUgRBoE6dOri7u2EymVg0510+mvkcJpMRmcwJHx9fAgMCcFY6s/HH71g6dw4ms5nEQ/v4/OVpOCkUHI1PpG3bthw/fhytVsucOXNuuK8eHh7MmDGDkJAQTp06xfz586U0YgcOHFQNhyh1l6EzW4nN1mK02mq0gl5Z1JSIyIm8YnJ11VP18/Pz2b17N/Pnz2fGjBksWbKE6OhofH19GTJkCK+88gpz5sxhzJgxtGzZ0hHa6sDBPxQvLy969OhBaWkpu3btAkBvsZJZYiAlv4ToTA37LuazNy2PfRfzic7UkJJfwvEzqShd3W+Zumez2Vi+fDmHDx8mNDSUKVOmOFJwrkKtVjN27FiMRiPLly/HZrNVvhEXF/j0UygzTA8MhL9EKAiCQNOmTdFoNFKqV0VwdnaumfS9Mpo2haFDyy/7F0VKde/eXfLVuZpz587h6+srGSN369aNixcvSgO+W7Fs2TIeeeQRnnjiCZYuXSotv3DhAl5eXqxZs4ZffvmFL774goULF/LDDz8QFRXFO++8I30G7JFc+/fv55VXXpHSOAVBQKvVAnDy5Enuu+8+Hnr
oIZ5/ahx71v5C3bp1mf/iFM4lxlLLz4/ko4f4ePozvD9xHO889Sgx+3ZhMpkoKSnBZDKTnZVNVlYm2dnZFBYWodOVoistlfbZ2VnJW+MeZPvP3/PhtAnMfGgEW1auwFXtyuZVKzmdGMeS99+gf5eObNpkj7qaO3cu7du3p3Xr1gwcOJC0tLSbnq89e/aUSzVatGgRTZs2pXXr1rz77rvl+l7R0dH07t2btm3b0qpVK1avXl3u3L755pu0adOGxo0bS/sD8Mcff9C1a1fJquD3338H7ALi4MGDadeuHRERESxYsKBC3/E/GScnJ5566im8vLzYvXs3eXl5Va5OdzPK0twOHz5c421XFplMxhNPPIG3tzc//fQTqVf75V2HNWvWYDKZGDRokPR77NevHzKZrEaipfLy8vjoo4/IyMggIiKCqVOn1sjkT1nqXuvWraVlVRHZdWYrZqt4nSRfUClVBAUFEhgYhJurq13oLi3FarFgtdrIyclBRMTHx5egoCCcnRVYLFaysrLJy8vFbDHj7KwkMCCAUY8/xT0P/w+tRkthYSFyuRwXFzUt2nWkZ8+eLFq0iIULF95UlAJ7QYnnnnuOpk2bcv78eT7++OMKRVo5cODg+jg8pe4iRFEkOa8YvaVmBakyBMFukG602kjOLaZTXUWFK/OJosjFixclf6iyWSiZTEZoaKjU6fq7zCUdOHBw+xgwYAB79+7lSGIy/pEdyDdasdj+7CDbgzkFQKTYBDk6EwafOrR/YBw6z9rk6Uz4uiiuLcxgs7F06VJiYmJo1qwZEydOxNn5zvne/VNo1qwZPXv2ZM+ePWzfvp0BAwZUvhEvL/jqKygosP9/HXPWpk2bEhsbS0pKSoXNeRUKRc1FSpUxdaq9+t7p0+DnB/fdV7Pt34U0adKE/Px8Dh06ROfOnVm3bh3FxcVcuHCB1q1b88YbbxAUFMTTTz99zbpWq5UVK1awa9cu/Pz8ePvttyksLJTM8QsLC3F1deXll19m6tSpaLVatFot8+fPByhXhe+zzz4jISGBadOmSaluZVgsFoYPH87bb7/Ngw8+yJGMAs6lZ2E2WxARkQkynJ2VdOrTn74jRmG2WMhMS+WNJx4hvH0nBEEGoojRaCQzM1OqGCwIglQZT+nsjMJZAYgUagp49YtlFGs1zLh/KFHde9Gqex9CQlcy+NHxNGlQn2PHYli+fDmpqamMGDECJycnYmNjGTBgAOPGjePgwYMYDAbmzp1LTEwMZ8+eZd68eZw9e5bc3Fw+/fRTsrKy+Oqrr3jxxRfx8vJi4xUPtq+++gq9Xs/8+fOZPHky3t7eFBcX8/TTT5OWlobNZqOwsJC8vDymTp1KQkIC48eP58MPP6S0tJSXXnqJqVOn0rx5c2w2G7m5ufz444+89tprTJkyhTp16mA2m3n55ZfR6XQ0adIEQRCk+2TZ/3/9u9F7lV0HkAzG79R7zs7OWK1WSkpK2Lp1K61bt67QehU9rsDAQFxdXTl06BCDBg2SfO8qci5qus8N9mIBTz/9NB999BGLFi3i1VdfvW5k0vnz5zly5Ai+vr7069dPWu7t7U3Hjh05dOgQZ8+epUmTJlXaj8zMTObPn09hYSEdO3bkscceqxGD7vz8fC5cuHBN6t7NRParxaurKTZZsIpiuUDZFfM/5OLZ0xj0ejIvXsCnlj+vf7EEDw8P4o4e5odPP8JkNGAxmegycAiTXn0buVyBWu1KVloqc2dMpCDnMrXr1uPZ9z6kbv0Qfl3yFcWFWh6aPB2r1YbRYMRsNmETbWzatIl77rmHL774gqKiIkJDQ3F3d2fhwoU88sgjnDx5UrpOOnfuzOuvv86UKVNYtGgRSUlJfPTRRzz33HPVSod04OC/ikOUuotILzaQozOikNW8IFVGWcRUidnK6YISWta6cf632WwmJSWF+Ph4EhISpJlStVpN+/btiYyMpEWLFv953xcHDv7tOLmo6fXoUxhkzlzUluCqcrmpcG6
xWtAVFeLm4UFmiZHsUiN+Ls40r+WOq8L+2LFarSxZsoTjx4/TvHlzJk6c6IiovAkjR44kOTmZ33//nRYtWlw32uaWCAJcPXEQHw9vvgk6HUyeTOiV9IuUlBS6dOlSoSadnZ1r3k/Dy8vuJZWRAQEB9kivfzmenp788ssvvPzyy5SUlNCpUyeaN28uiTXvvPPODdfdtGkTISEhkmF03759WblyJc888wxgFw6bNGmCj48PJpMJo9GITqcjLS0No9HI2bNnsVqt7N27F4PBQF5eHvv370er1Uopu5988gm5ublkZ2dz+PBh9u7dS4v+w/Gt34jMzExMRhNabSG5ubmknkrmt6Vfocm9jMxJTmlRIZrcy/jUDgSuDP4FQQrVl8lk2K5Egeh0ehQqPaIIHfoMQBRF3Dy9qBVUh5yMDLz9/EEUEa+IW4WFhSQkJJCdnc1nn32GeOU9gIyMDIqKijAYDFy4cIHc3FxKS0s5ffo0GRkZGI1GkpOTSUpKIiAggIsXL3Lx4kVpQBkXF8fFixfJzs7mww8/lM630Whkz549V6oFOmE2mzl06BAmk4nLly9z4MABLl68iFqtJicnh5ycHGldjUZDWloa7733nrTMZDKxefNmzp07V+Xr559AYWEh8fHxKBQKMjIyeOWVV4iIiKi2D9FfSU1N5dKlS2RkZFRaHCgTqfLz8zlx4gRpaWns2rWr2qJccXExhw4d4tixY3To0AHZlX5+2ToHDhxAq9XSrl07Pvjgg3LvFRcXExcXx4wZM+jatWulhcb8/Hz27t2LyWQiNDQUg8HAkiVLakRoTEhI4Ny5c9SuXZvVq1dLy3U6HVlZWcyZM4ewsDCio6MpLi5m9erVZGZm8v333+Pr68vgwYOldSxu3pi8AzGbzRgMRoqKizAajZyIjeHjn37Hw8uHuS9O5bcVSxj9xESahUfw0vyF6A0GdKWlfPHqdFp16krn3n0BOHEsmq8370Ht4cm8V2bwzbwPGffCa1f8psDX1w9PTw9kMnvxhJy8ApydnXn88ccxm8388MMP9OzZk44dO9K8eXN8fX3Zvn07/fv3JzY2ltzcXAYOHIggCDzzzDMsXbqU48ePS8JUTaamOnDwX8AhSt0lWG0iZzWlIILc6fYIUmXIBAG5IJBebKC+pxp35z8vg5KSEhITE4mPjyc5OVkabPj6+tKnTx8iIiJo0qSJowSqAwf/AURR5FKRntMFpSh9aqHLy6W4QIdHnTr2qIcboNfpQBRxcVagcpJhFe3RU4XpGpr6uBKoVrB06VJiY2Np2bIlTz/9tEOQugVKpZJx48bx4Ycfsnz5cl5++WVJsKgys2ZBmffKO+8Q9OqruLm5Sb5SFZkcUSgUlJSUVG8/roezMzRo8Odrmw2qWTb+bqdXr1706tULsAsfAQEBNG/eHIvFgsFgkP70ej1GoxG9Xo/BYGD27NkkJSXh7+8viTVHjx6luLiYzMxMBEHgjz/+ICMjg/3790tVo95//30AqcLVypUrAbspelJSUrnvNTc3F7PZjCAIBAUF4eLigo+np92nysMduVyOm5srvr4+vDLrdcZOf4mOfQZQXFzMhAHd7FX4/EUEmYCrqyuNGjbE09MTk8lEUXExpSVX0vcEMJtMiKIN65VjcXFRI5crcFW7EBAQgFKpxMXFhf59+xD2wAjS0tKYMWMGTz311DXn9K233pKiwr755hvWrl3LV199xZ49e7h48SKLFi3i008/JTY2lq+++gqbzUZGRgYrV67k888/Z9OmTWg0Gvbs2SMJXmV/Fy5cYNOmTcydOxdRtFcK++abb5g9ezZbt25Fq9Uya9ascuucOnWK/fv3c/z48Wvau1pQq6n3ACnl9+98z2azsXr1aurVq8eIESMoKipi586d6HQ6+vfvj4eHR41tr0mTJqxbtw5XV1cpKqey5zEzM5NLly5Ru3Zt6tevf8v1btVevXr10Gq1XLp0iZMnT9K0adNy4mlubi7e3t6o1Wo
0Gs01bbq6unL+/Hn8/PxQq9U3Paar0Wg0JCcnY7VaqV+/Pk5OTtcUWKgOcXFxFBcXc/HiRS5fvlzuvZ49e/LZZ59hNpvx9/fH29ub48ePU1BQgKenJxaLRUppBajTohWNu3hhMtqr7WkKNOj1elq06YDFZr8vBTcOJe3MGXu1PU0BP305j4tnTiGKoM3PJSUxnvphzTEajXTo2RefWvZiSqPHT+CNCWORy50wGIzoS0soKrKn7ilVSlxdXUEmo7CwkG3btjFq1Ch++uknwJ4KeurUKe69914WLFhA//79+eKLL5g4caL0jJTL5Tz55JN89913HDp0SBKmAgMDa+xcO3Dwb8chSt0l5OiM6M3WCqfTVRe5TMBgtZFRbMDbUipFQ509e1Z6qIWEhBAVFUVkZCSBgYG3LXrLgQMHdx+iKJKSX0JqoQ5RBBe5HJOLC4WFRRQVFUueF9dDp9cjCKBycbGn5gjgJNhTh5Nyi9mVkkhsbBwRERFMmDCh+uLKf4SGDRsyaNAgNm3axPr167n33nur1+BfzM6F2bPp0b07G7VacnNz8ff3v2UTCoWiZj2lrse338LChfYIqg8+gPDw27u9GsBms5UTkq4nJu3fv59z585Jry9fvoxSqcRgMLB582b8/PyYN28eFovlhtvR6XTExMTw8MMPS15soijyww8/EBcXJ0WhuLi40LRpU0JDQykqKiIrK4tBgwbh4uKCRqNh/fr1TJw4EZVKRXx8PA899BAjR45EpVKxePFiZs+ejZubGwcOHKBevXo89NBDnNWUcvRMGgH+/nZfFrULzkolJUVa5C5q0tPTObJzK7qSYhQKObVq1cLFRY1/7dp4edkjWJRKFbWUKmr52aMK6terBzInBEGGANL5sljM5Oblc/HiRQS5HE1+HudOnkDm78PgwYP57LPPGDVqFD4+PpjNZpKSkmjVqlWFvqvevXvzwQcfkJeXh7+/PytWrADsUYA9evTgmWee4cCBA/Tta4/AiIuLo3nz5lLRlrJzXDZZ5+Pjw8CBA5k+fTonT56kW7du2Gw2tFotXbp0wcvLiy1btjBu3DgAzp49i4+PDz4+PhW+vv5pHDp0CLVazahRo3jyyScBCAsLY9OmTaSlpfHSSy/VaNS9Xq8nNzeXcePGVSklPCEhgaKiIgYMGMDIkSNrZJ/MZjMfffQRaWlpjB49mtatW2M0Gnn99ddxdXXl1VdfJTg4+LrrXrhwgdmzZ9OuXTueeOKJm26nTKSKjY1lyZIldOrUidGjR9OzZ88aFT4LCgqYNWsWISEhUlTmjdYxGAy0b9+el1566YYin0aUcxkVKhcV7u5u+Nf2t4tFNiv+/vb7g4enBwqFAi9vL1bMnYWrhycvfrYEBIFl77+JxWLGYrag1+sp1ZWi1Wpx93AHBGQyJ4KCglCr1Rh0pWg0WjRaLVaLFZVKRS0fX1xcXEhPT+fkyZMAjB49mg0bNqDVaikoKOCPP/7g8OHDrFu3jrlz55Y77zKZjMceewylUsnu3bv56KOPePbZZ68pauHAgYPr4xgJ3CVcKtIjYo9iuv2ImExGjCYzMbk5HPphMVazCYVCQXh4OBEREUREREh+FA4cOPhvIYoiKQUlnNfqcBIEFHK7WO7h4UFxcTHFxUV4eLgjk10bMWmz2TAaDKhUKmRXRVMJgoDSSUaeRoNz7WA6D7uPh/v3dAhSlWTw4MEkJiaydetWIiIiaNSoUdUbe+IJuDotzGajx5YtxIWFkZKSUiFRytnZueY9pa4mJwcWLLBHSuXkwIwZsHYtXIkWqElEUcRkMkki0Y3EpKuXX094MhgMNxTq9u3bx6VLl9DpdLz00ksoFAoefPBB6b3s7GzAXg3xgQcewNvbG5VKxdatW/Hz82PIkCGoVCpcXFxQKpX8/PPP9OzZkzfffFNarlKpcHV1xWq1Mn36dL7//ntatWrF5MmTadSokTTQfuuttxg5cqTkL1NWlEClUuHl5XVNWpVcLuf
3339nypQpvP/++9gQ6H3/owx58BEsFgt5eXmkpqZy7xOT+PLNmajd3GnRpj21AoMIDq5Hbf/at+zjqNWuuHl4IpfLCQoKIiioDjq97krKnz3VrfPAoaxZvICf5n9Am9ataNiwIYGBgURFReHs7IyTkxNPPvlkhUWp8PBwXnvtNbp06YK7uzsDBw6U+j/e3t5s3LiR559/nhkzZmA2m6lXrx5r1669aZve3t789ttvzJgxg+LiYmQyGe+++y5Dhw5lw4YNTJs2jXnz5mG1WvHz85Oi1P6NGAwGfvvtNxQKBfdd5Q83bNgwsrOzOX78OF9//TWTJ0+W0sSqS4cOHfj1119JSEigbVmBh78ZhULBhAkTmDVrFt988w1BQUEcPnyYwsJCunXrdkNBCpDSc2NiYhg+fPhNU8MEQeDQoUN89913CILA+PHj6dixY40fT0JCAmq1ml69ehEUFHTN+1lZWVKk0GuvvUbfvn3p3bv3DdvLLjGQn12Ik5MTCoUCF5ULCoVCMiIHEUSwmE1oNVpKirSEhNktRLS5OZxJOE6XPv3w8fFGJpORcPggF1PP4+Xry5pvlhDVqSsg2Kt5urrh7eMt3fMLi4qwmAz4+vry3HPPsXTpUoxGI+vWraN///6cO3eO5ORkGjZsSP/+/cuZ0V+NIAg88MADqFQqNm/ezCeffMKUKVNo3LhxDZ11Bw7+vQjiX2M9HdxxzFYbu9LyQATFbYqUEkUbeoMBvU6HXq/HarUhyGQo1a5YL5ykZaP6NGvWzFH1yoEDB2QU60nIKUKGcM09SVuopVBbiKenhxTtcDWlpaXk5eXh6+uDm9ufhq6iKJKbm4Neb8DV0xN3d3ci/D2p41796j//NTIzM5k1axZeXl688cYb1btvf/EFLF8uvTSZzZzPyyNu8mQefPnlW66+cOFCYmNjWbhw4e2JptVo4CrjXwDGj4erZubL0tuqKyYZjcZr0l8qgpOTUzlB6Oq/vy4rE5Out1ylUtXYoPxqyqpbzpkzp8YMeDUaDUeOx6LxDMKgN6ArKcJmsyGXy1EoFHhe+Y07O9dMn8Jms9r7MHo9FpuI2WRk0+cfUKjVoFQqEQQBd3d3XK5EZ6pUKkJCQmjQoIH0dzPvouLiYsmA+tNPP2XLli1s3ry5Rvb9v85vv/3Gli1bGDx4MMOGDSv3nslk4uOPP+bChQv07NmThx56qEa2qdVqmTlzJuHh4UyaNKnS6yckJPDFF18wcODA6kek/oXk5GQ+++wzPDw8KCoqQqlU8t57713XAP1qTp48yfz58+nevftNK9nt2LGD1atXI5fLeeqpp25ZAbeqfPDBB5w/f/6G95Unn3yS/fv3Y7FY6NSpE59//rkk5FyvcEOpycL+SwV88uJUmrRoyX2PP82K+R9SXKjlsedeori4mK2rVnIh5SST3p7N2ROJLHr3NWROTtRr2BhRFInq1OXKeh9wNvkE2oJ8yeh8/Mw3qRUQwPpvl2DQ6Zj0xiw2r17J3k3ree7jL9iz6jvWrVjCrl27aNKkCUOGDOHIkSPI5XKmTZtG06ZN2bZtG0uWLGHUqFHce++93HfffTd8/m7evJm1a9eiUCiYNGkSzZo1uy3fgwMH/xb+caKUwWDgwQcfJDk5GRcXF/z9/fnqq68kFfro0aNMnToVo9GIwWBg3LhxvPjii7ds99KlS0yaNEkq2erk5MQnn3xyU1W/umi1WhYuXMhTz07nSKYGuUyG05VOfUpCHB/PnAZAcaGW0uIiAurWA6DP8FE8MGHyLdu3Wq3o9Tp0ej0Gvd04FEChkKNWq1G5uCA6KWju504Dr5qfdY6Li+PUqVPSLPDNKDsXM2fOlJY98cQTjBkzRvLYcODAwe1Hb7FyKL0Ao9WG6jrecXbPlXREoE6dOjj9JVoqLy+X0lIddevWwcnJHgUlijZycnMx6A24uqrx9fPDaLWhdJLRpa4PKrnDo66ybN++nV9++eWWA5RbIor2aKn16+0vgfT
0dErUasL++AOhdu2brr506VKOHj3KggULKuULZrVabygS/VVMarlkCX4pKdhsNkSbDbNMxrf33ku+kxN6vV7ylKkMZcJFTYhJd3u039y5czl37hxffPFFtUSv4uJijh07RnR0NHFxcWRlZdH1gbE0jmqLoaQItYsL7u4euLq5louSrElEUcRgteIpmtGciiMpKYkLFy4giiI6nT2iqiytzmKxlDteX19fGjRoQMOGDWnQoAHBwcHSNTtp0iQOHjyI2WwmKCiIRYsW0bBhw9tyDP8lcnNzeeutt3Bzc+Odd9657gC+sLCQ2bNno9FoePDBB2uszzdv3jxOnz7Nhx9+eEvB56/cTlEK7MUJZs+ejSAIvP766+Uq7t0IURSZPXs2GRkZvP/++9dkM4iiyLp169i0aRMqlYpJkybRtGnTGt93sIvSM2fOpFGjRhUaY1UEURTZlZaH2Sri7CTDbDFTXFRESWkpos3uR+fm6oarmyuaggKMRhMqlZJa/v43ud9cKYhQVIRep5eWOjsr8PDwQO3qCiLoLRZKz51gz++/YrPZCAsL495778Xd3Z1Vq1YRFxeHIAjYbDZ27dpFnz59APDz82Ps2LE3rIq4e/dufvrpJ8lzKioqqkbOlQMH/0bu7p7UDXjqqacYNGgQgiCwYMECnnjiCfbs2SO998477zBs2DAKCgoICwtjyJAhNG/e/KZtPvPMM/Tp04d169YBkJeXh06nu23HYLFY0Gq1zJkzh4eemYpNhKtvqaERUSzetAeALb/8yMFtm3l38bfXtGO1WHCSOsQiZrMZnU6PXq/DaLySPiCASqnExUWNi9oFhfzPgYPeYqXIWPOpFxaLhbi4ONauXVthUWrOnDnlRKklS5bU+H45cODg5pzKK8FgsaG6QdSmTCbDw8MDrbaQosKicjOkIiJ6vR6l0rm8IJWTg8FgxNXVFT8/X0BAKZNhsNg4mVdCqwBHqnBl6dOnD/Hx8ezbt4/IyEhatmxZtYYEAV59FQoK4OBBBOzpW7biYoxPPYXqu+/gSoSJzWaTJnzKxKPc3Fzy8vLYt28fgiDcMq2t7K8yKX9xPj48odMhE0UEmQyZINA5NpbDgwZVWUxydnb+z/gkFhQU4OPjUyVBSqfTERcXR3R0NCdOnCAnJ4esrCxkMhm1atXCkp+N2sUFXy8PXJRK4PaeU6soIhNktKgbgHeToQwdOpSioiKSk5NJTEwkOTlZ6rs5OTnh6+uLm5sbMpmMvLw8YmJiiImJkd4PDg6mQYMGPPbYY7z55pvUqlXrP3Nd3Al+/fVXLBYLI0eOvGFEiaenJ5MmTeKjjz7i559/xt/fnxYtWlR72x06dODUqVMcO3aMnj17Vru9mqRhw4aSkGq1Wiu0jiAIDBw4kEWLFrFz585yXleiKPLjjz+yd+9eXF1db7uX0fHjxwFoc6Via00gCAJ+Ls5c0JSgzdei19s9D+VyJ9w93XFzc0cURS7nXMZsMuPiorrye73ZfU1AqVThX0uF2WKmqKiI0pISTCYzeXn5yLVa3Dy8UKldGNK7J73bRLJu3TqOHj3K7Nmzad26NSNHjqRr166MGDGC3Nxchg8fTnh4OKdOnSIvL4+PP/6Y3r17M2LEiGv8y3r16oVKpWLFihUsWrSIsWPH0qFDhxo7Zw4c/Jv4x4lSKpWKe+65R3rdsWPHcmZzgiCg1WoBexqJs7Nzhcwj09PTqVOnjvTaz89P+v+tt94iOTkZvV5PSkoKTZs2Zc6cOcyYMYPU1FTatGnDDz/8gEwmY+XKlXz66aeYTCZsNhvvvfceQ4cOBeyVKCIiIoiOjsbFxQVnZ2eKi4sZ2LUjFgQWrd95y/3MTr/IU/f0YsjD/+PYgT30H3k/dRs14ZuP52Aw6LGYzfQf/TA9h96L2lXN4vdex8VFTebFC+RkZtAgtBmvfbYYhbMzh3ZsZencWSicnHAWRGbNmsXw4cPp2bMn4eHhHD58GI1Gw/Dhw5k7dy6CIHD
27FmefvppcnJykMlkvPXWW4wYMUI692+88QabNm2iZcuW7Ny5k8LCQqKioujYsSMLFy5kzJgxpKSkYDKZCA4OZunSpQQEBPD0009TXFxMVFQUcrmcmJgYevbsybRp0xgxYgQ5OTk8/fTTnDlzBlEUmTJlChMmTADsufaPPfYY27dvJzs7m/Hjx/Paa6/d8lw6cOCgPMUmC5d1RuQy4aYDM3cPD4qKiikuLpbKooM9ktVmE3GRqgNdJUi5ueLnaxekwH6/kMsELuuMlJgsuDn/4x5HfysymYxx48bx9ttv8+233/Lmm2/aTWH/Qplnxq3S2kydO9MyLg6PtDQMej06vZ7sQ4fI69mTX3v2pNRikaqxXs25c+fIyMjgu+++u+GgU6FQSGKQl5dXlSKT3D7/HPm6dZLkUVeno9e990JYWE2e1n8dNpsNjUZTKU8Tk8lEQkIC0dHRJCUlScboer0eLy8vmjVrRr169ejevTsdOnYktsBIscmCKNo1ztuFKIqYbSK+Ls54Kf+8X3h4eNCxY0c6duyIzWbj/PnzJCUlkZiYSHp6Onl5eQDUqlWL9u3b4+7ujtVq5dKlS6SlpXHhwgV2794NgKura7mUvwYNGkjVzhxUjlOnThEbG0vDhg1p3779TT8bHBzME088wZdffsnixYt56aWXrutTVBlatWrFypUrOXLkyF0lSpVVIgwNDZV82ho2bFihqKaoqChq167N3r17GThwIGq1GqvVyvLly4mOjsbb25tnn332tld9O3bsGIBU3bC6GI1GDh8+zOGEE9Tt2BuL2YJKpcTd3f3K70/AYrVw+fJlLGYLalc1fn5+CJUQwRVyBb4+vnh5eVFSbO+/WCxW9CYTmedOURpbQp8+fRg/fjz9+/dn7dq1HD9+nLi4ODp37swff/zBkSNH2LJlC4mJidSuXRubzUZubi47d+4kMTGRcePGXRNh2alTJ5RKJUuWLGH58uUYjUa6d+9eI+fNgYN/E//4UcCnn37K8OHDpdfLly9n+PDhvPbaa+Tm5rJo0SICAgIAu/dFZmYm71xt7HqFl156ifHjx/Ppp5/SsWNHhg8fXu6mERMTw7Fjx/Dy8qJnz5488cQTbN++HRcXF9q2bcvmzZsZPHgwAwYM4KGHHkIQBC5cuEDHjh1JS0uTOuunT59m3759KBQKLly4QFRUFL/tOch5TcWjskqLiwhpEspTM98AICsjnRfnL0KhdMZqMvLSmPsYMGIUtfxqIZfLOXfyBB//+BsKZyXPPTCU/Vs20HvYSJZ/PJvJ735Ep06d6BjkRVFRkbSN5ORkDh06hNlspnv37vz44488/PDDjBkzhscff5wJEyZw5swZOnbsSKtWraQZGScnJ6KjowGk8stXG4LOnz9fMmicM2cOb731FgsXLmThwoVERUXdsFTtlClTCA0NZc2aNeTk5NCmTRsiIyMl80atVssff/xBXl4ejRo1Yty4ceVERgcOHNyajGI9Vpt4wyipMmSCDA9PD7QaLUVFhXh724V//ZUIBbWLC7YrgpTRYLxSKv5PQaoMuWCvAppebCDM1+22HNM/FYvFUiGPJD8/P3bv3s2ECRNo167ddSOTKpqlv6tBA/538SKeFgtWqxWT0UhAVhZDjxzhwNChuLi6XiMmxcTEEBcXx5gxYwgMDLxuZJLTddJAK83kybBjB1wdwTxvnr0qnyOy5YZotVpsNtuV39+NsVgsJCcnEx0dTXx8PHq9nry8PEpLS3FysletcnV1pXXr1nTv3p2mTZtKwnVzmYKYrEIsNhGF0+37Lkw2EYVMoLmf2w1Fc5lMRuPGjWncuDEjRoxAq9WSlJREUlISycnJHD16FLAb9IeFhXHffffh4+ODVqvl/PnzpKamSp8vo3bt2uVEqrp169bMNf0vxmazsWrVKgAeeOCBCkWfRUREMGrUKFavXs2CBQt4+eWXK512dzUuLi5ERERw7NgxcnNzb2oOfiNuR9TcgQMHSE9PJzI
ykpEjR/LBBx+wePFiXn311Vt6vslkMgYMGMC3337L3r176dOnD4sWLSIpKQl/f3+mTZt2y996ddFqtZw7d46GDRtW26MuPz+fPXv2cODAAXQ6HXK5nJB23XD3q4Wb8s+oI7PFzOXLl7FarLi5ueHr60NVozKdZE54etoLOZSU6tCbzGSciCfh4nl27txJ27Zt6devH1OmTOHMmTOsWbOGAwcOcOTIEXr16sXzzz/P+vXrOXHiBIIg4O/vT25uLjk5OXz44Yf079+foUOHlktnb926NRMnTmThwoX88MMPGAwG+vfvX61z58DBv41/tCj1/vvvc/bsWXbu/DPCaM6cOcyePZuHH36Y8+fP06NHD9q2bUvz5s3LGer9lYceeoiBAweye/duDh48yPDhw3nllVd44YUXAOjfv790823dujVKpVJ6WLZq1YozZ84AkJqaypgxY0hPT0cul1NQUEBqaiphV2ZzH3nkkWt8Nyqj9APIFQr63jtaem0xGlg2520y0y7g5OREcaGGC2dO4R9kF2W6DrgHlYt9pi8ssjWZaXbfrFZdurH4vddIHTIc9egR5XKdH3vsMRQKBQqFgkceeYQdO3YwdOhQjh8/zsGDBwFo0qQJXbt2Zf/+/ZIo9fjjj99031euXMl3331XbkBVEXbs2CHNzPj7+zNy5Eh27NghiVIPP/wwYI9wa9iwIampqQ5RyoGDSmC1iWQUG5AJN4+SKsPd3Z2ioqJy0VI6vR65Qo6Tk5ycy5cxGk24ubvh63P9DqQgCMgEgYxiPU28XXGS/bPFBZvNViOG2waDocIpHaJoT9s+duwYOp2O4OBgSRDy8vKqtGeSWqtFPXkyGQkJgN03rK7RSGebzS4MXefayM7OpkWLFtWObLgpPj4wbpzdmL2MY8dg/35wzDrfkPz8fIDrDlRtNhunT58mOjqa48ePo7tSCKW0tBSLxYK7uzv+/v74+vrSvXt3OnfufF2zcD+1kmAPFRcK9chEUfLGrEmsNnvZ+EY+bngoK+5d5uXlRdeuXenatSsWi4WzZ89KolNCQgIJV67zwMBAwsPDeeSRRwgICCA9PV0SqVJTU+1RHIcPA/bIv3r16pXzp/L29nak/V3FgQMHyMjIoGPHjoSEhFR4vT59+pCdnc3+/fv58ssvmT59eqW86v5Khw4dOHbsGEeOHGHIkCFVbqem0Ol0rF27FplMxujRowkMDOSxxx5jyZIlLF68mBkzZtzSo65Dhw6sW7eOLVu2EB8fT2pqKsHBwTz77LPVEvEqSmxsLFD11D1RFDl37hw7d+4kNjYWURTx8PBg6NChdO/enVKZM3GXC7HYbMhlMkwmIzk5OVitNjw83K+MxWrityYgV7kQ4u1B2JCBbN+2jbNnz3L06FGOHj1KWFgY/fr144UXXiAxMZG1a9eybds29u/fT//+/enQoQO//fYbOTk5yOVyBEHAZDKxdetWEhISGDt2bLlrv2XLlkydOpUFCxbw66+/YjAYGDp0qOO+4cDBFf6xotTcuXNZs2YNO3bskEKr8/Ly+O233/jpp58Ae852x44dOXjw4C09pcBewnfkyJGMHDmSdu3a8f7770uilEr1Z4Wosko7V7+2WCwAPPjgg8yZM4dRo0YB4OPjg8FgkD5bZsB5Nc5OMkTsN+qK3JyUKpdy3hCfvfES7Xv25Z1F3yIIAhOG9MZ0VZqF81UpFTKZkzTYmfjau5w6eYK02KP873//Y8yYMTc0LLzRfv11+fWOr4wDBw7w2Wef8ccff+Dv78+6det44403bnm8Fdnujb4PBw7+Sv/+/cnOzkYmk+Hu7s5nn31Gq1atbllEoSL06NGDjIwMzpw5U+4aFQQBjUZzTQnhdevWsXv3bubNm1fhbaxatYoPP/xQ8kUpY968eezevZs33nhD8uaoDMUmCyarDflN/Bmu9reTCTI8PTzQaLQUFhbh5uaK1WLlo+lPU3D5Mu9/vwYPD/cr6dPX3j/iDh/EZDT
QpltvTFYbJSYLnqqqDz4qS9u2bZk7dy49evSQfJL0ej1nz56lZ8+eNG7cGIvFgtlspnnz5tx33324urreVEy6kU9SaWkpO3fuvKbyVBlKpVKKLvLz86u0AbfRaOTjjz9GEATeeuut65aqrjD+/vDFF8iHDMFUUIDZbMZZoYA1a6BWLXjyyXIfLxswmkymqm+zojz8MPzyC1y+/Oey+fOhc2e4y83G/y4KCgoAJBsDURRJTU0lOjqamJgYioqKEEVRSs20WCz4+voik8mIiIigR48eNG/e/JZ9k6a+bhSZLBTozSid7L5fNYXVJmKy2QhwVVarKItcLicsLIywsDBGjRpFfn6+JFCdPHmSbdu2sW3bNlQqFc2aNSM8PJxu3brh6elJTk6OJFCV/Z07d05q28PDQxKoGjRoQP369cv1S/5L6HQ6fv/9d5RKZaVNwgVB4KGHHiI3N5dTp06xYsUKxo8fX+WBe4sWLXB1deXIkSMMHjy4wu3crhpQGzZsoLS0lD59+kgpdu3atSM1NZWdO3eyevXqW1YglMvldO7cmffee4+AgAB69OjBpEmTcHFxuS37/FfKJohbtWpVqfUsFgsxMTHs3LmTixcvAlCvXj369OlD27ZtJTHOXRQJclORXmzAYjaQm5ODzSbi5eV5xdy9Zu4tJpsNhUygRS0P3OtEEhUZSWpqKtu3b+f48eOcOnWKU6dOERgYSL9+/Zg5cybHjx9n3bp1/P7773h4eNC/f38KCwvZuXMnVqtVEqaysrL44IMPGDhwIIMHD5aOrWnTpkyfPp1PP/2UjRs3YjAYGD16tEOYcuCAf6go9cknn/Djjz+yY8eOcp1vb29vXF1d2bVrF7179yYvL48jR44wffr0W7a5YcMGevfujVqtRhRFYmNjadSoUaX3TaPR0KBBAwC+//57NBrNDT/r4eFhNwXGiiDYqx5V5bZUXKildp26CIJAwpFDnD95okLrXTx3hvpNwhjQsS0NfNzZtm2b9N7333/Pww8/jMViYeXKlTz33HO4u7vTunVrli9fzpNPPsnZs2cloelGx1dYWCi91mg0uLu74+vri8lkYtGiRdecC5PJdI1RIEDfvn35+uuvmTVrFrm5uaxZs4bVq1dX9BQ5cCCxatUq6b7x22+/MXbsWOLj44GbF1G4FWfOnOHMmTN4e3uzd+/eCnlYDBs27IZCxY0YMWIEEydOJDExkfDwcGn5smXLeO+992jbtm2lBSmwi1I2ESrjhVwWLVVSUoyISPalNLLSLqB29+DSmZN06zeQ693VrBYL8YcPUlJUSLvuvbHZ7NuvqChVFh1U0ept1/tceno6n3/+OT/++GP581BcjFwul74/q9VKbGwszz33HKNHj5buT3K5XIou8vT0pHbt2jcVk6ZPn37dyCSVSlWtimhljB07loULF7JixQqmTp1avU5ukyZkv/ACnq++itFgsItSAIsWgZ8fXDXQLBOlKmNcXmWUSpg0Ca6ezLh40S6Y3X//7d/+P5D8/Hzp9/Lbb78RHR0tRU85OTmhVqulsvSCIBAQECBFFlUmNUchk9G6thfHsrVoDGYUMhnyGoh8NNtsWGwi/mpnImt71KjY5evrS48ePejRowdms5nTp09LXlSxsbFSREhwcDAtW7YkPDyc9u3bI5PJMJvNXLx4sZxIFRcXJ1kQCIJAUFCQJFI1bNiQgICAGvmt3+1s3LiRkpIShg8fXiWB3MnJiQkTJjBnzhyio6MJCAiocpSTXC6nbdu27N27l7S0tEpFbdU02dnZ7N69G1dX12uO57777iMtLY09e/bQoEEDKQvgeuTn53P48GEMBgNGo/GOClKFhYWcPXuWkJCQCqcJFhUVsW/fPvbu3UtRURGCINC6dWv69OlDo0aNrnlWCYJAmK8bGZoiCnV2n0pvH2883K+N0qwqZpsNEWji44b7VX6WDRo04KmnniIvL4+dO3dy8OBBsrKy+Pbbb1m7di29evVi5syZxMT
EsGnTJn755Rdq1arFsGHDOHHiBGfOnMFms1FSUoKLiwubNm2SoqaCg4MBuw/u888/z/z589m5cydGo5ExY8b8J+4NDhzcjH+cKJWens6MGTNo2LChVDZWqVRy5MgRnJycWLVqFS+88II0wz1t2jQ6deoE3NxTau/evbzwwgvI5XJEUSQ0NJQFCxZUev8+/fRTRo0ahZeXF71796ZevXo3/KyPjw+PPfYYvTq2Q1So+Pz3bVXqcD3x4ut89saLfP/5xzRq3pKwqIqF1C758D0unT+Lh9oFDzdXvvrqK+m9Zs2a0aVLFwoKChg+fLhUQe+HH37g6aefZsGCBQiCwJIlS254jH369GHu3LlERETQuXNnPv/8c77//ntCQ0Px9fWlb9++ZGRklDsXERERuLm5XRMF8tlnn/HMM88QHh6OKIq8+uqrjgoWDqrE1Z3kwsJCqUN0qyIKt2LZsmU88sgjBAYGsnTp0gqJUlf7ru3Zs4fJkyfTvXt3Dh48iMViYcWKFbRt27bcOs7OzjzyyCMsW7ZMirA6evQoubm5DB48mD179jBt2jTi4uLIzc1lzJgxZGVlIQgCbdq0Yfny5df4vW3YsIF3Zn/Amyt+QZOXw3tTJ6ArKcZkNBLVsQuT35p93Q6TIMjw8PREU6BBo9GwZ/0aOvYbRO2guhzYtI5u/QYBVxdoeIxjB/bSZ/go1v/wjV3wOXSADv0GUTx8GB+99Jw0iK5Xrx6PPvroNWLStm3buHTpEjabDYVCQffu3aXvdPHixbRr144LFy5gMBho3bo1oaGh9n3IzpZSj8vSzPz8/GjRokU5oaiwsJB169bxyCOPlBOYHnjgAcLCwpg8eTIajYbp06dz4cIF9Hq95GNos9mYOnUqO3fuxNnZGblczsGDB8nOziYqKkoqwvH7778zc+ZMnJ2dGThwIEuXLiUmJoaQkJAqF25o1aoVHTt25PDhw+zbt48ePXrccp2bETR0KCvXrmVMUlL5lJDZs6F9e7iSHl0m0t2RSCmAgQPhp58gOfnPZYsWwaBBcAdSV/5J5OTksHPnTo4dO4bBYJAKrAQFBWEymcjNzcVqtaJQKGjevDk9evQgPDy8yoMjpVxG20AvEnOKuKwzYrGCUiarkkAqiiJGqw1BgGAPFc39PGpE5LoRCoWCFi1a0KJFCx544AFycnJITEwkKSmJ06dPc+nSJTZv3oxaraZ58+aEh4fTokWLcpOXRUVFkkB1/vx5Lly4QEZGBgcOHADsfdWQkJByEVXXS4f8J5Odnc2uXbvw9fWlX79+VW5HrVYzefJk5syZw/r166lduzbt2rWrUlsdOnRg7969HDly5G8VpVavXo3NZmPEiBHXmOc7OTnx1FNP8d577/H9999Tp04dScS4mqysLObPn49Wq6Vnz54YDAYSEhLuWH+4LN2uIql7Fy9eZOfOncTExGCxWFCr1fTv35+ePXveUtBKTkxgz2+/07zfMLxq+ePuUnNRh2arDYsoEuLpQojn9cU8Pz8/HnjgAYYOHcq+ffvYtWsXhYWF/P7772zatIkuXbpI/azt27ezdu1a6tatS69evYiJiaGwsFCalBdFkffff5/BgwczaNAgnJycqFOnDi+88AKffPIJBw4cwGAw8Pjjjzu86hz8pxHE2xWj6qBSHEwvoNBgRiW/czckk9WGTIAe9f1QXmVsfHXVOwcO/o089thjUrWlTZs2lYs4KuPRRx/Fx8eHTz/9FIA33niDoKCg63rTWa1WgoOD2bVrF35+fjRu3Ji0tLQroeY3Tt/7qyjVt29fDh48SIcOHVi4cCG//fYbW7duvWZ7iYmJ9OnTh4yMDBQKBU8//TSenp588MEH5USpefPmcerUKSkqsaw0/PVEqbfen8Pb3/6Kk9WM1WLBxdUNq9XK6089St8Ro+k99N5y6XtliKKNS5cukZeby7tPPcJL8xbiFxDAc/fdw/w1m1Gp3cjJTOf5+4cw/uW36DLAPkO8dvkidCU
lPDxlBip3D36a9z6nYw5LBSYMBoOU/nJ1hJHFYqFWrVqoVCri4+M5dOgQ77zzDiqVioEDBzJlyhTGjx9PZmYmo0eP5vTp0zg7O9OqVSuWLVtGv3792LZtGwMGDGD37t3XiIdlBSjKBKQynn32WcxmM19++SUDBgzglVdeoUePHlgsFoYMGcL48eNp3LgxDz/8MCdOnEAmk1FYWIi7uzsXL16U2szJyaFZs2YcPHiQsLAwli9fzuOPP05qaqokSg0bNozPPvtMKtyQnJxcIY88nU7HO++8Q2lpKa+//jr+/v63XOdGiKLIzJkzaXHiBI9mZ5ePd3vlFbhSjvzw4cMsX76cZ555ppwv4W3l+HF46qnyyx57DKZOvTPbv4vRaDRSal5aWhqJiYkUFhYybtw4nJ2dSU9PlwZL7u7udOnShW7dulXY37EiiKJIerGBlPwSjFf6GQpZxVL6bKKIyWqPYlArnGjm60ZtV+Xfmt5iNBpJSUmRRKqylEhBEKhfv74URVW/fv1y+2mz2cjOzpZEqtTUVDIzM8ulhvn6+pYzUa9Xr161PJT+bj7//HOSkpKYMGFCjVRmO336NPPnz0cQBGlCurKUTWSaTCY++OCDCg384+Pj+fLLLxk0aFCN9IWTkpL4/PPPqVOnDq+99toNhd+zZ8/y8ccf4+PjwyuvvFKuompaWhqffvoppaWl9OzZkyFDhvDKK6/g5+fHG2+8cUd+Ix9//DGnT59m1qxZ171n2Gw24uLi2LVrl+S1GxAQQO/evenYseMNK7RezR9//MGKFSsQBIFHn5yAwacOeout2tGXoihitNkQgBBPNWG+Ny6Y8FcsFgvR0dFs27aNzMxMwP77j4qKolOnTpw6dYq9e/ditVqlip3JyckUFxej1WqltPx69eoxbtw4aWKsoKCAefPmkZOTQ0REBE899dQ/+vfvwEF1+MdFSv1bCXZXoTWYsYlijYan3whRFLGKIkHuLuUEKQcO/gt8+61dVFmxYgUvvfQSmzZtKvf+9YooXC/CsoxNmzYREhIiFTTo27cvK1eu5JlnnqnUfjVu3Fia8ezUqdMNI7XCw8MJCQlh/fr1DBo0iJ9//lky4b2ajh07Mm/ePGbMmEH37t0ZOHDgLffBZrOxeM47JMUcRRRFtPm5NGgaRu+h1/cGEQQZvr6+HNm9A59atXH3rYVVhLBWbTmwZSP97nsAJycnnORy+gy/Dye5HJkgoFIpsZrN+Ph4I8oV9B5wD0d3bMFqtdKlSxfuuecePD09pbSiMlauXMnnn39OcXExNptNiuYs45VXXiEgIIDIyEgUCgU2m43MzEzkcrk0c9+/f/9KD27KBpJl/lCXr/I1KikpISUlhf79+2OxWHj88cfp1asXgwcPvmbwcfjwYSIiIqRr5X//+981QmdVCzeo1WrGjh3LvHnzWL58OS+88EKVo14EQSA0NJSDWi3DOnfGa80a+xtKJVwlPt3R9L0yWreGnj3h6tTaH3+EUaPgdpqt36UUFxdz7NgxYmJipIGgTCajefPm5ObmolarOXfuHDabDYDQ0FC6d+9OVFTULU2Vq4IgCAR7uODn4szFIj3pxXoMFvu2BcDpSjEFASQ/TasoShYGLgongj1cCHZ3QSn/+/snSqWSiIgIIiIiEEWRrKwsyYvqzJkzXLhwgQ0bNuDu7k6LFi1o2bIlzZs3x9XVlaCgIIKCgujSpQtgF9vT0tLKRVTFxMRI0eEymYzg4OByQpW/v/8/wnOm7Jw0adKk0n5DN6Jp06Y88sgjrFixgi+//JKXX3650tXlBEGgQ4cObNq0iZMnT9KyZcsa2beKYrFYpEqE999//03vyY0bN2b06NH8/PPPLFu2jMmTJyMIAikpKXzxxRcYjUbuuecehg0bhiAIdOvWjZ07d5KYmEhERMRtPY6ioiLOnDlD/fr1rxGkdDodBw4cYM+ePVJ6cIsWLejTp0+FPOnK2L17Nz/99BMKhYIJEyYQHh6Ozmw
lOa+IHJ0Ji8Xuw1uZcZIoilhEEYtNRCWXEerrRh03VaV+U3K53F6pvGNHkpOT2b59OydPnpTSfBs2bMjo0aM5f/480dHRUrS3TqcjJydHygqx2WzMmjWLoUOH0r9/f3x8fHjhhReYN28eCQkJLFiwgIkTJ1ZIvCvDbLVRbLJgsNiwiSLClUkAd6UclVPVIlUdOPg7cIhSdwmBbipOF5Ritokob2NJ5TKsoohMgLru14bEVtRDx4GDfzplgkB+fr7U0b1eEYVbsXTpUk6fPi2lBuj1ei5cuFBpUaoyhv3jx49n+fLl6HQ6WrZsKaWpXU2nTp2Ii4tjx44drFmzhtdff53Y2Fjkcnm56m4Gg0HquPyy5Cu0+Xl8sXYLzkoVX773ernCCdfD1dWN2H27yM/O5O0nHgIRzCYjBTnZDH7oUdzd3VC5qKldO+CqY3XBYjLh7u6B3mKld/8BPHrPCXbs2MHGjRuZN28esbGx5TpUFy9eZPLkyURHR9OoUSMSEhKkyKrKnsPKdtSio6N59NFHJXHq8OHD1zUyTkpKYu/evezevZuXX36Zffv2VXrgX53CDWFhYfTu3Ztdu3axdetWBg0aVKltX03Tpk05cuQIsZ060atnT3vKXJcucJWgd0eNzq9m6lR75b2y69hshgUL4P337+x+/E3o9XpiY2OJiYnh5MmT2Gw2BEGgadOmtGjRAqPRSHR0NCkpKbi5uaFSqejcuTPdunUjICDg1huoAVwUToT6utHIW83lUiMavRmt0Uyp2YqtLFpIAJkAnkoFXioFPioF/mrlXVuJs8wrKigoiP79+2MwGDh58qQURVVWpU8QBBo2bEh4eDgtW7akbl2776dKpSI0NFS6X4uiiEajKRdNdfHiRclfCMDV1ZWQkJByQtXVETR3A1arldWrVyMIAg888ECNDoQ7d+5MdnY2W7duZcGCBbz00kuVNpEvE6WOHj16x0WpPXv2cPnyZVq1aiVNRtyMXr16kZqaytGjR9mwYQP16tVj8eLFWCwWRo8eTd++faXP9uvXj927d7NlyxbCw8NvqwBxvdS9snTNP/74Q/KE7dmzJ7169arUfUYURbZs2cLatWtRKv/P3nmHRXHnf/w1W2lLBwFRsGEXC2ADrLGlGXMaE9N7PZNc+l2KyV3KL7lLMWfaGU00RaNGjdHELmJjEZWmiCKChd4WtrC7M78/FkaJqKjY4ryexydhmJ35zuwyO9/3vD/vj54nn3ySqKgowOWYHBDiyxGTldyKWiwNArdW5ere25xAJUkSogQOSUKUJNQqgVAvPd0DDbhfQEWKIAhyme+RI0dYs2YNKSkp5OXlkZeXR1BQkOxi37t3LwBt2rRBq9VSVVVFTk4OISEh/Pzzz+zevZt7772XkJAQnnvuOT755BP27dvHRx99xFNPPXXG+8/aegdHTVZKzDbMDddT8aS6JwFQqQT0ahUB7lraGtzxd9MqApXCFY0iSl0haNUqIn08yKmovehuKUmSsIsSwZ46/C5htysFhctNVVUVZrNZtk4vXbqUgIAAuTPV6ZoonIni4mLWrVtHYWGh/BpRFAkPD2fPnj1ER0dfjEPh9ttv54UXXuDIkSNMnz692XUaHTZTpkxh3LhxBAcHU1tbS+fOnUlPT8disaDVavn+++9RCa4bGVN1Nf5Bwej0blSUFpO0cjkJ484cMltRWkL6ji38uHUPnt7e2KxWTLW1PDoukb17duPh6YUoiZSWleLp4YG7uzseXgaKjxYCrv2WHi2ga8+oU8baWAIJrgwwrVZLaGgokiS1OPevW7duOBwONmzYwIgRI1i7dm2Tzllnor6+nnfeeYcjR44wbdo0vLy8GDFiBO+++y5vvPEGAMeOHUMURfR6PWq1mjFjxnDdddexadMmsrOzmzzBHjRoEOnp6eTk5NC1a1fmz5/f6oLOpEmTyM7OZvny5fTq1avZbJKW0DhxzsnJYcSjj7q63DVSUgJLlxJgMqF2Oi+tUwq
gfXuYPNmVL9XI6tWu7oANzUb+bNTX15Oenk5qaioZGRmyWBkZGUlMTAx+fn7s2bOHX375Rc7V9PLyYuTIkbz55puXrSxEo1LR1uBOW4Mrv8Uuitidknyvo1O3Tij65cDNzY1+/frRr18/V9nikSNyWHpeXh4HDx5k6dKl+Pj40KtXL3r16kX37t3lYGpBEPD398ff31+e7DudTo4cOdIkRD0rK4usrBNNbIKDg+UA9Q4dOtC2bduL4nprKRs3bqSoqIiEhITzvt6ciVtuuYWSkhJ27drFV199xRNPPHFOLtCQkBAiIiLYtWsXNputxU6UC53Em0wmVqxYgUajkbtyt2Sfd955J0eOHGH27NnodDr8/f255557GHLyNRhXg6dBgwaxdetWDhw4QJcuXS5ovGeisete//79ycrKYt26dfJnMiAggOHDhxMfH9/ih3mNSJIkRxV4enry17/+9ZTsr0b3ZaiXnqI6G4U1FqptDhwNpb5N1sXlwGwsG25rcCfc4I5B37p/H+Hh4dx3331MnDiRDRs2kJSURGlpKWvXrsXT05N+/frJLilJkggICECv13P06FGKioqw2Wz885//ZOLEiYwcOZJnnnmG//73v+zfv59///vfPP30003zHIEqq50DlXWUWepxipLLeaoS0KpUCA3nSWpwnYqShNUhUlhj5ajJikGnoYOvJ2Fel7ccWkHhdCii1BVEB18Pis02qqz2i2q5tDlF9GoVPQO9lQuTwjVFdXU1kydPxmKxoFKpCAoKYsWKFQiCcMYmCnD6TKlvvvmGMWPGNBGxVCoVU6dOZfbs2aftTnmheHt7M3HiRH7++WcmT57c7DobN27kP//5j+y2ef/99/Hx8WHQoEFMmDCBXr16ERoaytChQykqLUMCbrn3Id584gHuHxNPQHAI/YcmNrvtk1m9eAExCcPx8nYJSG5u7ri5uTNq4q3s3LiasbfdCYC5zoy5zoygEugRO4jVi3/koQnDGXzdBGK6dWb63befMtaT6d27N1OnTqVnz54EBAS0OOtDp9OxYMECHn/8cZxOJ7GxsWcUC00mE3379pUn9gkJCWzdulUez3fffcezzz5Lr169EAQBT09PvvjiC5xOJw899BB2u10uQxw/frxs3QfXZPJ///sfEydORK/Xc9111+Hl5XVeXapOh1ar5b777uO9997j66+/5pVXXjkvQSIwMBA/Pz/279+PJEknvi9EER54AI4fJ8Bm4w67Hes5tn5vFR56CH79FUymE8v27v1TiVIOh4Ps7GyMRiN79uzB1uBaDA0NJS4ujh49enDo0CE2bdrE8ePHAZdQEh8fT2RkJHPnziU2NvaKyinRqlRoL39VXqsjCALt2rWjXbt2jB8/nrq6OrKzs+Wyti1btrBlyxZUKhWdO3eWXVShoaFN7sXUajURERFERETImXdms7mJSHXo0CF27Nghfz9pNBoiIiKauKn8/f0vyT1ebW0tK1aswM3NrUkpdWsiCAL33Xcf5eXlZGZmsmjRIqacY8fNgQMHsnDhQnbv3n3JgsGXL1+OxWJh/Pjx55Tbptfr6dmzJ4sWLUKlUvHhhx+eIkg1MmbMGLZt28Zvv/120UQpk8nE3r175YdBRUVFAHTp0oWRI0fSt2/f8yoVF0WRH374gaSkJLy9vXnmmWfkh4bNoVGpCDe409bLDYvDVbZmqndgaXALqQQBjUrAS6fB0PDvYrsu/fz8mDRpEhMmTGDLli2sW7eO8vJydu3ahVqtpn379lRUVFBbWwu4BFKr1Up2djaBgYEsWLCAXbt2cc899/DUU0/xxRdfkJmZyfvvv88zzzyDn58fTlEir6qOvCozDlFCoxJOO09sLI1WCQIaTrjGamwO9pRUU1x34Y4xBYWLgRJ0foVRZbWTcqwShySdd9eaM2F3ijiR6BXkTXvvS9NCVkFB4cqnrt7B5iMVqHDd+LU+ElarlTqzGYvZjNPpsuBrdDrc3N1p5zTRr1ePc8p
SuFoxmUzyE9ClS5fy8ssvy1b/1uSXX35hxYoVjBkzhltvvfW8tjFnzhy2b9/Oq6++Snh4uGthQYEcdF5fX8/x48dxJiTQ8aef4FJ3D1q+HBrz3vR618/nmDlzpSGKIvv378doNJKWlobZbAZcImFsbCwxMTE4HA42bdqE0WiUXWrt2rVj2LBhxMXFodfrSUtL44svvuCOO+644G6MCheGJEkcPnxYdlEdPnxYLgX29/eXw9K7du3aomugJEmUlpY2EakKCwublGV7e3s3EakiIyPPueytJfzwww9s3LiRW2+9lTFjxrT69k+mqqqKd955h6qqqnP+XNfU1PDCCy/Qo0cP/nqWpgjPRAISAAEAAElEQVS7d+/ms88+Y8KECecttBUWFvKvf/0Lb29vuRlHS5AkiRUrVrBixQpMJhNOp5NevXrx4osvyt1O/8jnn3/Orl27ml6nW4ny8nJmzZrFwoUL5byzmJgYRo0adcYO42fD6XQyd+5cUlJSCAgI4JlnniEoKKgVR355EEWRtLQ0Vq9ezeHDhwHXe2owGKitrUUURWpqaqivr6empobq6mo6d+5MUFAQkyZNIj4+nq+//pq0tDQCAgJ4YvrTFDg0VFjsqAUBrUo477mhQxSxixLuGhV92/jg797850lB4XKgOKWuMHzdtPQK8ia9tEZ2NLWWMGV3ijgliQ6+HrRrJktKQUHh2sVDq8ZLq6bG5uDiZAsLsoMKf3+sVitmsxk7KiqOH2Xt0h+Yr9XQq1cvBgwYQO/evS/KBOpKYObMmSxYsACn04m3tzfffffdRdnPhAkTyMjIYM2aNfTp0+e8nqJ37dqV7du3k5OTc2KyEx4OkZGQny9/P/mlp8N778HLL8OldODedBOEhMD+/TBy5FUrSEmSxKFDh+TOeTU1NQD4+PgwatQoYmNjCQ0NxWg0MnfuXAoLXaWvWq2WoUOHkpiYeEr3t8bA4XMNhlZofQRBkLtr3nDDDZhMJrKyssjMzCQrK4ukpCQ5fy4qKkoWqU7XQVMQBIKDgwkODpZdP3a7ncLCwib5VHv27GHPnj3ya0JDQ5sIVWFhYefdDAHg6NGjbNq0ieDgYEaOHHne22kpvr6+PPHEE7z//vv8+OOPBAUF0aNHjxa91tvbm+7du5OdnU1NTQ3e3t4XbZySJLFw4UIkSeKWW245J0FqwYIFbNiwAU9PT15++WX27NnDypUr+e6777j33nubnROMGzeOXbt28dtvv/Hggw+2yvgPHjzIunXr2LVrF3v27EGlUnHHHXdw0003XfC5s9vtfPXVV+zZs4eQkBCefvpp/Pz8LnjcVwIqlYqYmBgGDBjAgQMHWL16Nenp6bIg5XQ60Wg0uLm5YbfbUalU5OTkcPz4cWpra9m1axd33XUXer2enXsy+D0rj6DwCNy0GtQX+N2qUalQCxIWh8jOoir6tfEl0EMRphSuDBSn1BXKUZOFzFITDlFCf46dJv6IJEnYnCKCcO5tUBUUFK4dCqrNZJSaLvia01LEhmtToKOWg2k7SEtLw9RQiqXVaunZsycDBgygT58+f1qB6mJz/Phx+Wn9a6+9ds7nsby8nFdeeYXo6Ggef/zxE784cAAefBBHVRVHjx7F4O2Nv58fPPyw65/CWZEkiaNHj2I0GjEajbKI5OnpSf/+/YmNjaVLly4cO3aMpKQkduzYgdVqBSAsLIzExEQGDhx42gyXH3/8kQ0bNvD666+fsSRG4fIiiiKHDh2Sw9IbBUdwlfs2ZlFFRUWdcxlmTU1NEzdVfn6+/BkCV5lYY9lfYz7VH8umT4ckSXz00Ufs27ePJ5544qJ3fzuZ3bt38/nnn6PX63nppZcIDQ1t0eu2b9/OnDlzmDJlCqNGjTrj9i/EKdXoUoyMjOSll15q0T230+nkm2++YceOHfj6+jJ9+nTCwsIQRZGZM2eSnZ3N7bffLpd0/pEPP/yQnJwc3nrrrfN2HDkcDlJTU1m3bh0FBQWAK6g7IyO
Dfv368dprr53Xdk/GZrMxa9Ys9u3bR7t27Zg+ffop2Ul/NoqKili7di3btm3D4XDgdDqpqqrC6XSiVqspKyvDarVSW1srNzX4y5QplOj9cGj02C1mgoMC0elax0neOC/UqVXEhvnio79yyrsVrl0UUeoKptxST1ZpDaZ653lZNhvbLNsbhK2uAV6EG86tDaqCgsK1g90psqmgHLsoor8EZVg2hxOtWsWw9gFo1SpEUeTAgQPs3LmTtLQ02S2i0WhkgSo6OloRqM6RdevWsXDhQuLj47nrrrvO+fWvvPIKFouFf//7301dFWlpOB57jKN5eXgZDAQ0NAzglVfk8r5Lzq+/wm+/Qc+eLnHsopSiXhglJSWyENWYA6XX6+nbty+xsbF0794dSZJITU0lKSmJvLw8wPV3MGDAABITE+nUqdNZv8tnzZrFnj17+OSTT66Jstg/C1VVVWRlZZGRkcHevXtlEUmr1dKtWzfZRXU+DjhRFCkqKmoiVDUGMTfi5+cnC1QdOnSgffv2zZaN7dmzh1mzZtG9e3emT59+ye8tV69ezeLFiwkMDOSll15qkbBhs9l47rnnCA0N5ZVXXjntehciStntdl5//XXKy8t58cUX6XhSt9IzvebLL78kPT2doKAgnnnmmSbvb11dHf/617+oqqriueeea3abe/fu5aOPPiIxMZFp06ad05hrampISkpi06ZN1NTUIAgC/fr1Y9SoURw7dozvvvuOiRMnXlA3V3Blo82cOZO8vDw6derEk08+ec7B6FczJpOJjRs3snHjRmpra7Hb7RQVFeF0OpEkSRanPD09ufH+R+k+ZASIDmprTKhULnekXt869z+SJGF1ivi6aRkU5nfFdjxVuHZQyveuYALcdQwO9ye3oo7CGgtWp4hKcNkvVTTfFaSx64JDdAlSKkGgjaeOHoHeeGiVUDsFBYXTo1Wr6OznSXaZCacoXdSbFKfoulZ19vdEq3YJByqViqioKKKiorjtttuaCFSNZSgnC1R9+vSRu1gpnJ6RI0eyZ88ekpOTiY6OPmdHQ9euXdm6dStHjx5t2lmrf3/EGTOQ7r67yaSWd98Ff384zRP9i0ZGBrz+uuv/t20DqxWefvrSjuE0VFZWkpqaitFolHNGNBoN/fr1Iy4ujl69eqHT6SgqKmLJkiVs27ZNzpIKDg4mISGBIUOG4OXl1eJ9lpeX4+npqQhSVxm+vr4MHTqUoUOH4nA4OHjwoByWnpGRQUZGBj/88AOhoaGyi6pz584t6r6nUqkICwsjLCyMoUOHAi6h5vDhw7JIlZeXx86dO+VuayqVivDw8CZuKn9/f3766SdUKhVTpky5LA87r7vuOoqKitiyZQufffYZzz777FnPQaP4m5KSQnFxMW3atGn1ca1Zs4by8nIGDhzYIkHKarXKXdfCw8OZPn36KeVxnp6ePPLII/zf//0fX3zxBX//+99PWadbt25ERESwdetWbrjhhhY53goKCli3bh2pqak4HA48PDwYM2YMw4cPl0WxFStWAK6uexdCTU0NH3/8MUeOHKFHjx48+uij19y1yWAwcOONNzJu3Di2bdvGmjVr0Gq1WK1WCgsL8fDwQK1W4x3SltAefTHVVKOSRNzc9FitVoqLSwgODnLFIFwgQkPn0yqrnUPVZjr7ebbCESoonD+KU+oqoc7u4KjJypEaCzaniNjwrp18G9D4RgoCaFUCoV5utDW446vXKO4oBQWFFiFKEsZjVZRZ6i9aF9DGJ3SBHjriQn3Pug9RFDl48KAsUFVXVwOuSX2PHj1kgepaeuJ6rlRUVDBjxgx0Oh2vvfbaOZVLbNu2jblz5zJ58mRGjx7d5HdOp5P/jRvHpNxcgk7uLqXTwaxZ0LdvKx1BC/j11xOiFLhC13/6CS4gjPdCMJlMpKWlYTQayc3NBVwT/O7duxMXF0d0dDTu7u44HA527dpFUlIS+/fvl9fr27cvw4YNo2vXruf1d/j0008TGBjIP/7
xj1Y9LoXLR3l5ueyi2rdvH/X19YBLbOnRo4csUl1IN09JkqiqqmoiUh0+fFgO1AeX26+kpIQhQ4Zwzz330KFDBzw9L/2k1uFw8PHHH7N//34GDhzIfffdd9a/lczMTGbOnMn111/PTTfd1Ow6u3bt4vPPPz9np1RVVRWvvvoqAG+++eZZc5JMJhOffPIJBQUFLXIObdmyhW+//ZaoqCieeeaZU/LAGssGx44dy6TTuFVFUWT37t2sX79evi6FhIQwcuRIBg0a1EQoqq2t5fnnnycsLEw+rvOhoqKCDz/8kJKSEvr168eDDz7YIhH1z44kSaSnp7NmzRpyc3Opq6sjPz+fGx5/jpAOnagqK0WtUqHXu6HX63A6nahUKgKDgvBwb537nfoGw0NCuwDcFfOCwmVEuSJcJXhqNUT5e9HJ15Nqmx1TvYMamwOLw4lTklA3KN7eelcLVB+9Fp36yitbUFBQuLJRCQI9gwzsOFaJ1SG2ujDVKEi5aVT0DDS0aNsqlYouXbrQpUuXJg6qXbt2kZ6eTnp6Omq1WhaooqOjFYHqD/j7+zN16lTmzp3Ld999xyOPPNLi97Vr164A5OTknCJKqdVqdnfuTLfAQIY1THAAqK+HZ56B2bOhBW6BViEmBrRaaJw8O50wcya8//6l2T9gsVjYvXs3RqORvXv3IooigiDQpUsX4uLi6NevnywIlpWVsWrVKrZu3Spnqfn7+5OQkMDQoUNbnO1zunFYLBYl5PxPRkBAAImJiSQmJmK328nNzZUdVLt27WLXrl0AhIeH07t3b3r27EmnTp3OKcxcEAT8/Pzw8/OT3TFOp5Njx46Rl5dHdnY2c+fOxWw2U15ezsyZMwGXo+/kEPXw8PCLLjxoNBoeffRR3n33XXbs2EFISAgTJkw442u6d++OwWBgx44d3HjjjWe8Dp7rd9/PP/9MfX09N99881kFqcrKSj788EOKi4vp2bMnjzzyyFmdQ0OHDuXQoUNs3ryZJUuW8Je//KXJ7/v27UubNm3YtGkT48aNa/I9aDabSU5OZsOGDVRUVADQs2dPRo0aRY8ePZo91j179iCKIgMGDGjpKTiF4uJiPvzwQyorKxk0aBD33HPPBYXr/5kQBIHo6Giio6PJz89nzZo1hHWKom2nLpjr6hAQGjKo6rDb69FqtQiCQGlpKYEBAXh6ttw5ezq0KgGrU+RorVVxSylcVhRR6ipDrRLwd9cpbTwVFBQuGl46Df3a+LCzqNolILWSMNUoSOnUKvq18cFLd+5fQY0T/EaB6uRSk8bSFrVaTffu3RkwYAB9+/ZVBKoGBg0axJ49e9i1axcpKSly166z4e/vT2BgILm5uYiieMqEQqvVsisujmE9e8LSpSd+YTLBk0/CnDlwEcpkTqFNG7j9dvj22xPLNmyAtDS4wNKTM1FfX096ejqpqalkZGTgcDgAiIiIIC4ujgEDBsgT1EaXQlJSEllZWYDrM92nTx8SExPp2bNnq0zYlM57f360Wi09evSgR48eTJkyhZKSErnMLycnhyNHjrBq1So8PDxkF1XPnj3Pq3OaWq2mXbt2tGvXjsOHD9O3b18mTpxIREREE0fVjh072LFjB+ASjNq3b99EqAoICGh1962npydPPvkk7777LsuWLaNNmzZnFFHUajUxMTFs2LBBzjZqDfLy8ti+fTsBAQFcd911Z1z3ZKEmJiaG++67r8UC3tSpUyksLGTNmjVERkYSExMj/06lUjF27Fi+/fZbNm3axPjx4ykqKmL9+vVs27aN+vp6dDodw4cPZ8SIEYSEhJxxX40lnOcrSh05coSPPvoIk8nEiBEjuO2225TKjdMQGRnJQw89RMrhIgpNVpz1NlSeHtisNuz2eqxWG3a7Ha1Wi93hoLS0DEmS8PK6sJB4QRAQgCM1Fjr6elySJjcKCs2hiFIKCgoKCqfg765jQIgPu4qrsTpEtCoBzQVMlh2iiF2UcNeo6NvGp1WEdUEQ6NSpE50
6dWLy5MmyQJWWliZPzubPny8LVNHR0ZelxORKQRAEpk2bxoEDB/jhhx+IiopqcRvurl27smXLFgoLC4mIiGjyO51Oh93hgJdfhooKSEo68cuSEpcwNXs2XMQW7DL33QfLlkFDiScAH30Ec+e2aui5w+EgOzub1NRUdu/ejc1mAyA0NJS4uDhiYmIIDg6W16+srCQ5OZnk5GSqqqoA8PHxIT4+nvj4ePwbQ+JbiUYnRGtvV+HKJTg4mJEjRzJy5EhsNhs5OTlyqV9qaiqpqamASyzt3bs3vXr1IiIi4pxE0IKCArZu3UpoaChjxoyRHaqAHNTcKFAdOnSIw4cPy0H94MrUaRSoOnbsSGRkZKs0rmjTpg2PPvooH330EXPmzCEgIIDIyMjTrj9o0CA2bNhASkpKq4hSkiSxYMECAG699dYzdkksKCjgk08+wWQykZCQwB133HFO74FGo+GRRx7hX//6F99++y1t27Zt0n1w4MCBLFu2jAULFrBv3z727dsHuATq4cOHEx8f36IHNXV1dezdu5ewsLDzyt7Ky8tj5syZmM1mJkyYwE033aQIUmdBlCSqRTWe7h74eHhQW2uipqYGm82GxWLBbrfLHfscdjtOpwNRFPH2Pn9XLbjyRC0OJ1VWu2J6ULhsKKKUgoKCgkKz+LvrGNzWn71lJorNNhxOJ1qVCvU53Fi6OoCKAIR46ukRaLgouQV/FKgOHTokO6gaBarGPJ9GB9W1KFAZDAbuuusuZs2axdy5c3n66adbNFFoFKVycnJOEaW0Wq0rb0athrffhscfh/T0EyscOuQq5Zs1Cy52sK3BAI88Av/3fyeWZWfD77/DBXaOEkWR/fv3k5qaSlpaGnV1dQAEBgYycuRIYmNjCQsLk8+nKIpkZ2eTlJREenq6HAbfvXt3hg0bRp8+fVBfpC6XilPq2kav19OnTx/69OnD1KlTKSoqkq+D+/fv5/Dhw6xYsQIvLy969uxJ79696dGjxxmviY3CiyRJTJky5ZTPriAIBAUFERQURFxcHODqKldYWNik219jyXXja0JCQpp0+wsLCzsvt2DXrl2ZNm0a8+bN47///S8vv/zyaUXZiIgIgoODMRqNTJ48+YLLDHfs2EF+fj5dunQ5YyB4bm4un376KVarlXHjxjFx4sTzEmr8/f156KGH+Oijj/jss8945ZVXcHNzw2azsX37do4fP05qairl5eUMGzaMUaNGER0dfU7n9UJK9/bt28esWbOw2WxMmjSJsWPHnvM2LjVjxoyhqKgIlUqFwWDgk08+oV+/fpSXlzNq1Ch5PbPZTF5eHiUlJWcV/e+9917WrFlDUFCQvGz69Oncd999za5fW+/AIUpoBAGVSsDb2weDtzfmujqqq6s5nHeQfz56N299uxgEV2mf3W7H4XTi7+dHY9JwWXERbz35IB//tKJFx64CRAlq6h1NRClBEKisrGw2o+5Mvzsbr7/+Ov/85z/Jy8trcj8xfPhwnn76aSZOnHjO21S4+lFEKQUFBQWF0+KhVdM/xIejJisHK+uoczipl0AtCKgFAZXQNHdDkiREySVGOSUJQQBPrZpOvp60NbhdkielgiDQsWNHOnbsyF/+8hdZoEpLSyMrK4usrCzmz59Pt27dGDBgAP369bumBKro6GiGDBnC1q1b2bhxIyNGjDjra6KiogBXrtSYMWOa/E6r1cqBy7i5uZxJDzzgEqMa2bMHZsxwiVYXm0mTYMECaOhyB8Cnn8LIkecsikmSxKFDhzAajaSmplJTUwO4XE6jRo0iNjaWyMjIJp/rmpoatmzZwubNm2VxyMvLi6FDhxIfH9/EQXWxUJxSCo0IgkBoaCihoaFcd911WK1W9u7dK4tUjSV3jdfNxrD0du3aNflc79y5kwMHDtCnTx/ZHXU2tFqtfC1uxGQyNRGpDh06xJYtW9iyZQvgEtQiIiKalP21dOIbHx9PcXExq1ev5r///S/PP/98s04sQRAYOHAgv/z
yC9nZ2efckfRkbDYbS5YsQRCEM5anZWRk8MUXX2C321tFqOnWrRu33HILS5Ys4dNPPyUyMpItW7ZgNpvx9PSkXbt2RERE8Oyzz56XyHe+pXt79uzhyy+/xOl0Mm3aNBITE89535eDhQsXyp+zn3/+mXvvvZc9e/YQEBDA7t275fU++OADNm3a1OJr6/PPP8/TLewCa6p3IEoSqpM6HwsIeHp64enpid1Sh0pQ4enpidliwSmK1JnNFBYUYLNaGxxzAoFtQlosSMGJeziTzdHi15wvoigyd+5chg8fzpw5c3jjjTcu+j4Vrg4UUUpBQUFB4YwIgkC4tzthBjdKzfUcqbFQYa3HIUqIouR6NicAkqsLqEoQ0KggyE1PuLc7QR66y5ZT8EeB6vDhw7KDKjs7m+zsbL777ju6du0qC1ReXhceHnqlc9ttt5GTk8PixYvp3r37WbNF/Pz8CA4O5sCBA6fkSjW2tJbx9nYFjN9/v6t8r5HVq10uqvDw1j6cpmg0MH06PPvsiWXFxfD9967yvrMgSRJHjx7FaDRiNBplYcnDw4OEhARiY2Pp0qVLk3MgSRI5OTkkJSWxa9cuxAZ3YJcuXRg2bBj9+vW7pN2mGscceHJHRAUFwM3NjX79+tGvXz/5s56RkUFmZiYHDx7k4MGDLFu2DG9vb3r16kXv3r3p3LkzixcvRq1WnxKufa4YDAbZxQWuv52ioqImIlVubq7ciRJc15+TRaqIiAh0uubLjG655RaKi4vZs2cPs2fP5rHHHmtWlImLi+OXX35hx44dpxWlWvIQZdWqVVRXV5OQkEC7du2aXSclJYU5c+YgSRJ33XUX8fHxZ93u2ZAkiQ4dOlBbW8vXX39NZGQkPXv25MYbbyQxMZFNmzaxYsUKjEZji/MDGzGbzezdu1cWM1vKjh07mDt3LgD333+/7Ji7GjhZ+Kyurj7tez979mzeeeedC9qXxWJh0KBBvPrqq/zlL39h27Zt3H777SxYs5GSo6U8ddNoJtx2J6mbNyKKTp547V8MiB+GXu96sNehY0eqq6p4/4XpFBcexmG34xsYxF3PvETckKGUHDvKwxNGsDz9IACjOgRx/3OvsGX1Kqoryrnrr39j3OQ7APj8X6+TnrKV+no73j7eLPhmjtzcBFwi3K+//kpdXR2vv/4606ZNO+V4cnNzefrppykpKcFms/Hwww/z5JNPNnvsa9asoU2bNnzwwQdMnDiR1157TQm+VwAUUUpBQUFBoYWoBIE2nnraeOoRJYnaegemegc2p4gkgSCAXq3CoNPgpdNccYGZgiAQGRlJZGQkkyZNaiJQ7d27l7179/L999/LAlXfvn3lTml/Ntzc3Lj33nv5z3/+w5w5c3jxxRfPemPYtWtXNm/eTEFBQZO8Fp1OJzuIZEJCXMLUAw9Abe2J5ZfKkZaQAAMGQMPTfsAVuH7zzXCaJ9wlJSWyEHX8+HHA5doYOHAgsbGxdO/e/RRhqa6ujm3btpGUlERxcTHgEq8GDx5MQkLCOU3oWpPy8nL0er0S8q9wRgRBIDw8nPDwcMaPH4/ZbCY7O5uMjAyysrLYunUrW7dupbCwkMrKSkaNGoXT6USSpFZzvZ7s5BoyZAjgch8VFBQ0yadKS0sjLS0NcAV6t23btkk+VZs2bRAEAZVKxQMPPMD7779Peno6ixcvZvLkyafsNzg4mI4dO7Jnzx6sVut5ZVuVlZWxZs0a3NzcuPnmm5tdZ+PGjfz444+oVCoeeuihM5b3tQSHw0Fqaipr166lsLAQd3d32rRpg16v55577qFXr14AjBgxgtWrV/Pbb78RFxd3Tu/Xnj17cDqd5+SSSkpK4vvvv0etVvPII49ckPvscnH33XezYcMGAFauXHnK77du3UplZSU33HCDvOy1114jLCyMRx99tNltvv/++7JQBzBz5kwSEhL46aefGDVqFBEREXLZqZ9/IBwtpc5UQ/vOXXj07zPI3mXk1QfvYvbarVhtVkC
i1mTC6XRy19MvoNbqsFitrF+ygOXfzqZbr+bPu1anZ9ay1RQczOXxm6/julumoNZomProUzz69xnYHE5Sfl/O9OnT+e233+TXCYLArl27yMvLIyYmhqFDhzb5/nc6ndx+++2y+9xsNjNo0CD5e/OPzJ49m/vvv59+/foREBDA2rVrT3FfK1ybKKKUgoKCgsI5oxIEvPVavPWnD3S9kvmjQFVQUNCsQBUVFSU7qP5sAlVUVBSjRo1i7dq1rFq1iuuvv/6s62/evJmcnJwmN6VyptQf6dTJlSP1zjtQVeVyTrUwWP2CEQRXjtVdd0FDlhNmM3z5Jbz0krxaZWUlqampGI1GDjeU+2k0Gvr160dsbCy9e/c+xZEhSRJ5eXkkJSWRmpoqd9vr0KEDiYmJxMTEnNbFcamoqKjA399fCRZWOCc8PDyIiYkhJiYGSZIoKChg27ZtfPLJJ5jNZo4fP86MGTPw8/OTw9K7deuGvpWz4vR6vdxltZHKysombqrDhw9TWFhIUkNjBXd3dyIjI+V8qnvuuYeZM2eydu1aQkJCSEhIOGU/cXFx5OXlkZaWJgtigJz/djYWL16Mw+Fg4sSJp3w/SJLEqlWrWLZsGTqdjscee6zFZY/NUVNTQ1JSEps2baKmpgZBEOjfvz+jRo3Cw8ODd999lzlz5vD3v/8df39/vLy8SEhIYN26dWRkZJyTSHSupXu///47S5YsQa/X8/jjj9OtW7fzOsbLzbcNnVu/+eYbXnzxxVOEqdmzZ3P33Xc3eTjx5ptvnnGbpyvfi4qK4q233mLw4ME89dRT6HQ69u/PwWq1oVZriB46jIKCw+i9/fHy9WPzutX4BQYhiqL80CTp12UYN6zBUV9PfX09Xt4+LpeXo/6U/Y2eeCsA7Tt1Qa3WUFFaQlBoGDuTN/HzN//DXGtCQMJSU93kdQ8++CAAHTt2JDExkaSkpCbf/40NFaZOnSovM5lMZGdnnyJKlZeXs3r1ar766ivA5aabPXu2IkopAIoopaCgoKBwjSMIAhEREURERHDLLbc0Eagauxf9UaA6n7bqVyITJ04kKyuLFStWyN24TkejpT8nJ6dJHsppRSmAHj1g3rymy0QRyspcZX6t0HnrtHTrBhMmwK+/nli2ZAm111/PzooKjEYjBw4cQJIkVCoVPXv2JDY2lr59++Lu7n7K5iwWCzt27GDTpk0cO3YMcE2ehwwZQmJi4mlLdy41drudmpoa2rdvf7mHonAV03hdXLduHb1792bSpEn4+vrKLqqkpCSSkpLQaDR06dJFFqmCg4Mvihjq5+eHn5+f7DQSRZGjR482EaoaHyg0otVqyc3N5f3338dmszF8+PAmgkJMTAwLFy5kx44dRMfEUdPg/q3QGug2fBzOwHD2lddi0KlPcQDv37+ftLQ0goODT8nlkySJRYsWsXbtWjw8PHjqqaea5GqdCwUFBaxbt04WwD08PBgzZgzDhw9v0sjg7rvv5quvvuKLL77g+eefR6PRMHr0aDZs2MBvv/1G79695ffF5hSptTW4nJEQENA1uJyd9Vays7MJCQk5q9NTkiSWLVvGqlWrLvg4ryTuueceHn30UcrLy+VzXFtby8KFCzEajWd9vd1up7q6mpqaGvn9q6qqorq6msrKSqqrq6mqqmLjxo3odDqWL19Obm4uHfoPJLLfQCQkKioqUGlcn7fGWAS1WgMI+Pr6kbc3gy2rfuHDBcsIDg1j29rVzP63K7OxsqrylDHp9Ce+a1VqNU6ng+KjR5j5+kvMWrYav7btqc7P5fG/3HDKa0/mj3/bkiTh7+/fJHfrdMybNw+Hw0F0dDTgclmVl5c3Oc8K1y6KKKWgoKCgoNDAHwWqI0eOyAJVTk4OOTk5/PDDD3Tp0oUBAwbQv3//q1qg0mq13HfffU2esp+unbmPjw9t2rThwIEDcltqcJXvORyOlpX0mM0uB9POndCmDfzrX9C3bysf1Uk88QSsXYtosWC2WKirqyN
r2jQWDB4MuDKfYmNj6d+//2mdcIcPHyYpKYmUlBQ50D08PJxhw4YRFxfXKi3tWxMl5FyhtcjLy2PHjh2Eh4dz3XXXoVKpGDhwIKIocujQITIzM8nIyJDFoIULFxIUFCSHpXft2vW015MLRaVS0a5dO9q1ayeHaZvNZg4fPtxEqPLz8yM7O5tnnnmGmJgYunbtKpf8hUdEEjN6AqK3P5sOlyE1XL/sGndCuvZC1Os5WOnqsqkSBNw0Ktp5uxPqqWPBggUATJkypYnQJYoi8+bNY+vWrXh7e/P000/Ttm3bczo2URTZvXs369evJzc3F4CQkBBGjhzJoEGDmnWmxcTEcOjQIdauXcuPP/7InXfeib+/P4MGDWLr1q1k5uah9guizGLH4nDilCRONoQJgquBid1cR4eBiUSFnFkkaOzGuGHDBry9vZk+fTrhFzsv8CJRVVWF2WwmLCwMgKVLlxIQENDkGrpgwQKio6MJCQkhPz+fqqoqWWhq/P/Gnxs7s2ZlZVFUVERNTQ0WiwWLxYLZbMZms1FaWkpeXh7R0dFkZWXh7e1NeNdaVCoVotNJtnEbo2+ZzKF92dRWVZIwaizm2mq5dPXw3ky8DAYC24TisDtY8cM3qFUq9G56HHZni9x+daYa1Bot/sFtkCSJpd/OPmWdxjDy/Px8Nm/ezEcffdTk9127dsXb25s5c+bIXQUPHDiAv7//Kd9Bs2fPZtGiRYwbN05edttttzF//nymT5/esjdL4U+LIkopKCgoKCg0gyAI8qTn5ptvbiJQ7d+/n/379/Pjjz9e9QJVREQEN9xwA8uXL2fp0qXN5q800rVrV5KSkjh8+LD8RLxx0mm3289etpaSciLnqbgYnn4a/vc/6Ny5NQ6lCfX19WQUFlLXvj0R69bJZXzdnU7u79WLqDvvxO805YQ2mw2j0SgfK7iOs9EV9ceOe1cSjaKU8uRZ4UJoFB3ANXE8OXNOpVLRqVMnOnXqxM0330xVVRVZWVlkZmaSnZ3Nhg0b2LBhA1qtlq5du8ouqosdvO/h4UH37t3p3r27fAzl5eV89913/PLLLxw+fBitVkt+QQEHKutoZ9fhFhGFw2GnqrISH28v9Ho9Ur0Tq6kGN7UP7p4ecldZi91JTnktmcesuLXvQq+AADnDCVzXwP/973/s3r2bwMBAnn76aYKCglo8frPZTHJyMhs2bJD/jnv27MmoUaPo0aPHWa85t956KwUFBWzevJkOHTowdOhQBo0cQ51/GHvNEp4qCyrBJbDpVCoEXN9zkiQh4eqaa3aIhPfuj9rLix3Hqujk50GQR1MRTBRFvvnmG7Zv346/vz/PPPPMJekqerGoqqri1ltvpa6uDkmSMBgMvPDCC3z//fey2DRr1iy6dOnCiy++2OS1qamp8ufO6XSiUqkQBAGn04nT6WTXrl1kZWXJeWedOnWiS5cuGI1G7r77bnr37g24ygAnjh9DkcYbT4M3lUXHeWnarTidDv7+8Rd4+/pirj2R3Rg3bCTrlv7EvaMG4e3rT/+hiZQVHScgIIBj+YeQkKi316PTnv47uWO3Hoy4cSIPjEnAy9eXWybecso6TqeTfv36UVdXxyeffNKkdA9c5e4rVqzg6aef5sMPP8TpdBIYGMj333/fZL2UlBRKSkoYPXp0k+XTpk3jH//4hyJKKSBILS2cVlBQUFBQUJA7VjUKVI0B14Ig0LlzZ1mg8vHxucwjbTmiKPLee++Rn5/Ps88+26T7zsmkpqby1VdfMXHiRMaPHw/A/Pnz2bx5M//5z3/wPFuQ+c6d8MgjTZcFBblCyM/SAbAlOBwO9u7di9FoZPfu3dhsNrQOB89s3EigSoWHpydajQaiolzd+P7A0aNHSUpKYvv27XJHwZCQEBITExk8ePBVERyenJzMvHnzePDBB5sNmlVQaAnbt29nzpw59O/fn0f++Dd7BpxOJwcPHpRdVI2lruD6Wzq5o9+l6kgpSRLffPMN27Zto3vfAfQ
cOZ5ap4TDbsdaV0tVZRUqlYCHh6fcSdZqs+Lj7Y1/QAA6rRbXL1zHV1JejkqjIdDbQN8wf4I89FitVj777DP27dtHWFgY06dPb9LR7UwUFRWxfv16tm3bRn19PTqdjiFDhjBixIizdkb9IzU1NfzrX//CUl/P1Cf+Ro2gw1RXh6W2lqDAAPRneHAgSiJHCgvRaDQEh4RiFyXUgkA7bzei/L3QqlU4HA7+97//sWvXLoKDg3nmmWeuWFemJElYrdYzupoa/+t0Os+4LYPBgI+PD97e3qhUKiRJor6+HqvVSm1tLTU1NTgcjlOEQ71eT0hIyCn/goODT/n8S5LEwq27eWD8cLlz3vnQeFx6va7h83NmMdMuikhAQrg/njrFr6JweVA+eQoKCgoKCufAyR2rbrrppiYCVW5uLrm5uSxYsIBOnTrJAlVLJyeXC5VKxf33389bb73F3Llzef3115stS4uKigJcuVKNolTjjXV9ff3ZRan+/WH4cNi48cSy0lJ48kmYPRvOQ8gTRZHc3FyMRiNpaWly6URAQAAjR44kJiaGtikpCG+/feJF+/e79hsUhN1uZ+fOnSQlJXHwoGsioFariY2NZdiwYXTu3PmKdUU1R3l5OaCU7ymcPzabjSVLlqDRaLj11lvP6bVqtZqoqCiioqKYNGkSFRUVZGZmkpmZyd69e1m7di1r165Fr9fTvXt3udTvdK7F1kAQBO68807qBA2GLn2ornfi4+WB2sMDfHzQabXUmEx4eXnicDqpq6vDYbdjMpmw2eoRVAJ6nQ6dXo/NasVmteLj40s9KnYWVdPRS8vi2Z+Tn59Phw4deOqpp856LZQkiezsbNatW0dWVhbgumYNHz6c+Pj48xbAvb29uffhR9lyqIjCWiveXhq89DpqK+sx1VSjDzy9c8tiNiNJ4OHhiUalQi1IOESJ/GoL5ZZ6evp7MO9/X7J3717Cw8OZPn36ZXMH19fXnyIsNSc4NZZcnw53d3eCg4Px8fHB19dX/qfX67Hb7VgsFmprayktLaWoqIh9+/YhiuIp2/Hz82tWfPLx8Wnx94cgCIQaXK60C+lw6e3jjdlch81Wj8lkwmA483vkECWCPHSKIKVwWVE+fQoKCgoKCufJHwWq48ePywLVgQMHOHDgAAsXLqRjx44MGDCAAQMGXLECVZs2bbj11lv58ccfWbBgAffcc88p63h7exMaGsrBgwdxOBxoNBq5ZO+0YecnIwiuHKnHHoP09BPL8/NdWVOzZrUo/FySJPLz8zEajaSmplJdXS2Pb9SoUcTGxjYtsQsLg0WLXGIUgI8PxWYzST/9xLZt22QhKzAwkMTERIYMGXLVdltUyvcULpTffvuN6upqxo8ff8Eld/7+/iQmJpKYmIjD4SA3N5eMjAwyMzPZvXu3HJDctm1b2UXVsWNHObOutSi3Oek0dDRVJhOminI0khNvb5cIbjAYMJstqNVqAgICMZvrKC4uwd3DHZ1Wi81mw2azYTZbMJvrEAQBtUbjuga6uZFabsPm7kP37t157LHHztiN0GazsX37dtatWye7bLt06cKoUaOIjo5uUiZ5PpjqHRRpDASEaaguL0OqtxEcHIybuxt1ZjO+DjsaTfM5X2azGQAPT5cgJggCWrWAWpKosTn4PfMgBcWldOzYkaeeeuqiOEedTic1NTVNAsGbE5wax3o6tFqtLDD9UXBq/Nnb2xuz2UxRURHHjx+nqKiIvLw8OQfqj2g0mmaFpzZt2rRatmBcj64s2Z2LXZTQqc9PlBIQCAgI4HhREZVVVbi7e5zWlShKEgLQzvvU5h4KCpcSpXxPQUFBQUHhInDs2DFZoGps4Qw0cVBdTHfA+SBJEh9//DF79+7lscceo28zIeQ//PADGzdu5IUXXqBTp0788ssvrFixgldffbXlQbc1NfDgg5CX13R5QgJ88AE0MyFtLJtMTU3FaDRSVlYGuHJkBgwYQGxsLF26dDn9pK6iAvE//6E0L4/
VYWEkm0yAyyUWHR1NYmIi3bt3v6pcUc3xwQcfkJeXx3//+9+r/lgULj3l5eW89tpreHp68tZbb51RYLlQSkpKyMrKIiMjg5ycHBwOB+Byr/To0UN2UV2oG6fGZmfHsUrsTgmVJFJcdBxRlAgKCmoQViSOHDmCIAi0bdsWs9lMaWkZvr4++Pj4AiBJIsePH6fObMbdzQ1BEKi327GYLbgbDKgEgfJMIyFe7nTs2JEOHToQEhIi/w2Wl5ezceNGkpOTMZvNaDQaYmNjGTVqVKt17rQ6nOw4Wkmt3YmbWqC8ooK62jp8fHxwc9NTXFyCweCFv/+pgvXJpXuuwO8T1w6n6KSkuBiV3g2p3sb1vTvj43luIoYkSZhMpmZL504WoEwN1+XToVKpmohMpxOc3N3d5XNvt9spLi6mqKioyb/i4uJmnVSenp6EhoaeIj4FBARcsGjYErJKa8ivtqBXq+Ruj+dDZWUFNTUm3N3dGjK/Tu2cZ3WKGHQa4tv5X9C+FBQuFMUppaCgoKCgcBEICwsjLCyMG2+8sYmD6uDBgxw8ePAUB9WVIFAJgsA999zDjBkzmD9/Pp06dTrFMRQVFcXGjRvJycmhU6dOslOqcULZIry9YeZMuO8+KCk5sXzzZnj7bfjHP1yuKlwT19TUVFJSUmRxT6/XM3DgQGJjY+nevftZs2nKyspITk4m2WrFZDCAyYSfnx8JCQkMHTr0inWvnQ8VFRX4+fkpgpTCebF48WIcDge33HLLRRWkAIKDgwkODmbEiBHU19eTk5MjZ1E1Xi/B1YyhUaCKjIw8J2FAlCSyykzUOyXc1CoEQU1QUBAlJSWUlZUREtIGnU6Pp6cnNTUmbDZbs9uxWm3Y7Q68GgSLerudoqIi9Ho9GiTcvbyhWz+2/+zK2APXdcpgMFBRUUFZWRleXl4EBgZy4403kpiY2Kqlb5IkkV1WS63diV7tCtsO8PfHXl9PdXU1On0QOr2O2tpafHx8T3GiWcyWhtI9D5oIUk4HxcXF2O0OPDQaPAOCOFhjo5+HmxySbrFYmi2dO/nn6urqZsveGhEEAYPBQLt27c4oOBkMhmavbZIkUVtbS1FREQcPHmwiPpWXl5/SjU4QBAIDA5t1Pnl5eV3Ym3GBdPH3otRcT53d2fCZPb9rua+vL2azGYvFSl1dHZ6eTY/LLkqoVQI9gwyKIKVw2VGcUgoKCgoKCpeQkwWqk0OAO3ToIDuoLnfp1Y4dO/j666+Jjo7msccea3JTbDKZeO655+jWrRtP/nU6G7dsY8369dxx++10iIjAQ6dG29JJY14ePPAA/OHpeN3tt7MtOpqUlBS5+51Go6F3797ExsbSu3fvs3b6E0WRjIwMkpKSyMrKkjM6evbsybBhw+gliqhycmDwYGjf/txO0BWKKIo88cQTdOnShWefffZyD0fhKmP//v38+9//JjIykpdeeumyCZuSJFFcXCwLVLm5uXIQtaenpyxQ9ezZ86zZTQcr68gpr0WrVqE+6Xhqa02Ul1egVqsJDQ3B6XRy/HgRXgYv3N3cmjilJCSOHzuG3e6gTZtgBEGgpKQEUZTw9vHGz9cXUQKbU8RPcFBfmMv69etJTk6mqKgISZLw8vKibdu2dO/enU6dOtGhQwc6dOhA+/btWyXw/ajJSnpJNWpBQHPS9dfhsHP8eBEg4ePrS2VFJT4+3vj6Nn0IUlpaitlsJjQsVO7YVm+vp7ioCLvdjt7NDS8vL0RJQhRUlKYbObo3nerq6rOWbnt4eJzR1dRYSteSkk1RFOV8pz/+a66k73RB40FBQXLn2CuR4jobu4pcZek69fm7syxWCyXFJajUKsLCwlCrXOfYKUrUiyIdfT3oHnh1lqor/LlQRCkFBQUFBYXLRFFREWlpaezcuZMjR47IyyMjI2UH1eUQqCRJ4ssvvyQtLY17772XwYMHy8vLLfUs+n0dGi8f2nXqgsVqxWy24OXliVarRSUIeGrV+LlpCfFyw99Ne+b
J7e7d8PjjOK1WzHV11JnN2KxWfuvbl92dO9O9e3diY2Pp27cv7u5nLxmpqqpyuaKSk6msrARcWVPx8fHEx8e7zuemTfC3v9HwS/jf/6Bjxws9bZed8vJyXnnlFYYMGdJsJpiCwukQRZG3336bwsJCXnzxRTpeQX8PVquVffv2ySJVVVUV4HK7dOjQQRap2rdv3+RaY3eKbCooxy6K6JsRPBrLm3Q6LW1CQig6XoTT6cTf34+ysnJZlKox1VBZUYmHhwdeBi9KS0qRJAlfP198vL1pdBbZHA4sFgu7ln5PyZECBEGgV69edOnSBafTyaFDhzh06FCTEjW1Wk27du1kkapjx44EBgaekyDoFCWSCsuxOJy4NXOcFouZkpJStFoNkiThFEVCQkKQRBGn04nD4aC4uARBcOVrOZ1O7HY7dXV1SJKEVqttcM25xqT39MRWV8vupd/h7eV1RsHJx8fnrA8QmsNqtTYrPJWUlDTbKc/X17dZ8cnX1/eqdY3mVtSSW1mHCuGChKmy8jLqauvw9PQgMDBIFqSCPHT0D/FFo7o6z4/CnwtFlFJQUFBQULgCKC4ulh1UzQlU/fv3v+DQ4XOhtraWGTNmUF9fzz9efRWz1oOCGgu19Q7MFgtWixUfby9Eh4OKikoCAgJwc3NHlCSckoQEqAQw6DRE+HjQ1uB2SomA1Wpl165dHPv+ewYsWIDQcEuid3PDw9MTzQcf4HHDDWcda2Mnq6SkJNLT0+UykW7dujFs2DCio6ObPoV/4w1YseLEz3Fx8N//yiWDVyu5ubl88MEH3HDDDdx4442XezgKVxHJycnMmzePuLg4Hnjggcs9nNPSmC3X2NHv4MGD8t+7t7e3LFB1796dcodAeknNGbJ5JEpKS7GYLXh4uKPT6aiqqsZg8MJkqsXX1wcvg4FjR482iFB+VFVWIgEB/v54ebkcJvX1NmpMJsx1dei9DBzLTCPSU8vw4cNPeaggSRIVFRXk5eXJIlVBQUGT8mcvLy8iIyPlbKrIyMgzhoofM1nZXVyNTi0giRJOpwOn0+kSnJxOnA4ntbWmBieRgCg60en1siPK4bBjtVrR6nTodXpEUcRqtYLgcjkZvAxoNGrUatc/lUqDUxCIbuND+AUEZEuSRFVVVbPiU6PweDJqtZrg4GBZcGrMfWrNoPErCUmS2F9RS16VGSSXY+p8BDZRdHLs2DGcTpHA4GBUWh1BHjr6tfFBewFil4JCa6KIUgoKCgoKClcYxcXFsoOqsLBQXh4RESE7qC6FQJWRkcGc73+k39ib8Q+PQBBAK6iwWS2Ulpbi6+uDVqultLSMwMDAJqU0kiQhSmBvmDAGuOvoGWRAL0ikp6djNBrJyMiQJ2Njamu5zmh0iVGNApJW6xKL+vdvdnwmk4mtW7eSlJQkB597enoyZMgQEhISaNOmTfMHtmABvP9+02UffwxDh17A2br8bN++nTlz5nDPPfcwZMiQyz0chasEi8XCq6++itVq5a233roi8u1aitlsJjs7m6ysLDIzM+WuaSqViqG3P4BHQDCeOk1DqdapE3pREikuKqK+3o6Xlye1tXVoNGocDie+vj44nU5Mplp0Oi31djsCri6dHh4emM1makwmbFZXDpVWq8HTxw+DuxvDI4Na7EBxOBwcOXJEFqkOHTpEyUlZe5Ik4e/vT3BwMP7+/nh7e6PT6aipqaGqqgqPqL64+Qdhqa2B087qJCxWK2KDy0ij0RAQEIBGo8FkMmGrryekTRsEQaCsvBxJbHSC+TS7NavDib+7loFhZ8+vs9vtlJSUNBs03lyGl4eHR7NB44GBgZckaPxKQpIkCmos7K+opd4poVOpUJ+Hs6nWXEetxYogQfe2QfRq46c4pBSuKBRRSkFBQUFB4QqmpKREdlBdaoGqoNpMyqFj2EURd60W74bQc1F0Ulh4BDc3Pd7e3pSUlBIQEHDagFiHKGKpr6feYmHf5jUcydoDQGhoKLGxscTGxrq6A33
5pevfyXh5ucrrOncGXDfpubm5JCUlkZaWJpdydO7cmcTERPr373/2rBCrFSZNahqy3qED/Phjs53/rhZWrlzJsmXLeOaZZ+jWrdvlHo7CVcLixYtZvXo1N954Ize0wJl4pSJJEgUFBWRmZrI39wAhg0cjOkWc9no0GjVu7u54uLvj5uaGIJyUu+R0UHT8OE6niCCA3eFArVZj8PKitrYOh9OBRq1GEFQEBAbgdDgwmUw4HK5rj7u7GwZvb9zd3HBKruvdwDA//N3PXrZmtVqb7UhXVFREfn4+hYWFsnPoZDeVSqXCYDDQJrwdk559FZUgICChUTc6mjSo1eomDicEgaLjRVgsZlQqV+C7l5cnhYVHUKtV+Pv7U1paJotgf2xycTIOUUQEEtsF4KF1XTNNJlOzrqfTBY0HBAScNmj8ai25u1jU1jvILjNRZqlHkkCjEtAIwhnP08kPhiSgpqyE9LW/Et05kqlTp166wSsotABFlFJQUFBQULhKKC0tlQWqgoICeXm7du1kgcrV+vnCyausI6ei1pUjVVyM0+kgNDRMFnyOHz+G3W5v6GRVir+/HwbDyd2kJKw2G3V1dZjNZkSniM7DE5VKQF9dSlxUJG3btm16Uy1J8M47sGRJ08H070/dhx+yfft2kpKSKCoqAsDNzY3BgweTmJjY0Mb8HFixwlXGdzIvvwy33npu27mCmDdvHsnJyfzzn/8kKCjocg9H4SqgpKSEN954A29vb958883zyv9pjjlz5nD//ffz888/M3HiRHlfd999NwcPHkSv1zNr1iwSExPPuq17772XNWvWEBQUhNVqJTY2li+++OKMJW2lZhspRytx1luxWqxYrBZEp8u1KQjw43//Q1ryJkqPHeWLX9fRvnNXioqKqbfXU1R4mG/e/yd1NTW4eXhw7wuvEtE5Cjc3N2z19UiihKAS8PL0wuBtQKs5IYKLosidw2Lo2KEjb772Dz788ENmzJhBTk4ODz/8MJ988oksPO3cuROtVouHhweLFy/m+uuvZ8+ePYwePRqA7OxsMjMz0ev1PPzww8yfP5+HHnqI2tpaKisrqaqqIrRzN4xpaRg3rOWTpWuY9caLPPjiq3To2p37Rw3hzS+/oXOP3vL46u31HD9+nOcm38DT735I37jBlJWVoXfTU2+zUXb8GG88NI3l6XmnObMSDoeDerudJd/MZkC3ztSVHKeoqIi6urpT1tbpdISEhLBy5Up69uzJI488wnfffYfD4eCTTz5h+fLlbNiwgQ8//PCsn4NrHUmSKDHXU1hjpsxixym6pvACNIiSLqOchEuMEnB91g06De283XG3W/jnm66S/Oeff55OnTpdxqNRUGjKhbd7UFBQUFBQULgkBAUFMW7cOMaNG0dZWZksUB0+fJjCwkKWLl3aKgJVQY2FnIpaBAT0GjWBgQEUFRdTVl5GSEgIAgJubm7U19vlp/euZ1wStvp6V2B5nVl2ManVKgzeBjw8PBE0GgRfPwSfZlp7CwK89BJUVMDGjUhAvc1GZVIS/3zhBewN+4qMjCQxMZGYmJjzb1s/YQL88APk5JxY9sUXMG4cnKWj15VKeXk5giBcVeVXCpeXRYsW4XQ6ufXWW1tNkMrPz+err75i0KBBTZa/9NJLDBo0iN9++w2j0cgtt9zCoUOHWtQF7fnnn+fpp5/GZrMxcuRIPv30U1544YXTrl9jc4DgEo68PL1ovDZZzBYsFgt9hw7nur9M452nHnSJ6iHheBm8qKmuZv6H7zJozPUMum48u5I3Mvf/3uLVz7/BarWh0ajxMHji7uaGKElYLVbqnHVyjtPu7Vtw8/QifXcas2fPJjw8nK+++qrBWeUgJSUFcDmdCgsLiYiIoFevXixbtoxHHnmkSVj4sGHDWLlypdxo4s0332xyjHa7nc2ZOXz58X+IiOqKceMa/vq2S9wpLirG4XRQVlqKX0U5ep0evV6HTqslMCAABFfpY2VVJXa7HVF0olar8fP
3BwREScRht2O327HbHdgdrv932O002hl++XY2Hvc9iL3sOD4+PnTt2vUU15Ofn6u8Lzc3lx49ejBgwAB++eUXOTPqpptu4qabbmrBp0pBEATaeOpp46mntt5BuaUek81Blc2OxeFEahCidGo1vm5aDDoNvm7ak5qNeHDLLbfw448/Mm/ePP7xj3+0SudHBYXWQPkkKigoKCgoXIUEBgYyduxYxo4dS1lZmZxB1VjysXTpUsLDw2WB6rT5Sn/AVO9gX5nJFayqcZW46PVueHt7U1NdQ011NT4+vujd3KDGlUUiik5MtbWYTLWySKVSqfAyeOHp4dEQQusSoCRJwuYU2VtWi69ei6fuD7ciKhXWV1+l4uBBSEnBXl9PSufOCCoVCQkJJCQkEBERceEnUKWCZ56BRx89sayiAr75Bh5//MK3fxmoqKjAx8dHmWgotIi9e/eyZ88eOnXqRExMTKtsUxRFHnzwQWbOnMnfGjtcNrBw4UIOHDgAQGxsLGFhYWzatEl2BrUEvV5PfHw8hw8fBlzCzKuvvsr69eupr68nKiqKL774gnqnht8XfseyOV+g1WoRRZG/vfMh3fsN4PEbRpIw/gYWffEJlWUlJK1cRtsOrvLgPdu3cCAzHVNVFZtX/MzN9z9GRUkxGak7+HLGK4yadBvp25Ox1NVx+5PP0nvgEOptVub831sczTtIVXkZfkHBxA0fRVZWFuXl5cyYMYOMjAzsdjtr167lyJEj+Pn5UVtbS0lJCfv27UMURWpqanj22Wd5//33ufvuu6moqGD48OH06tWLzz//nNGjR3PgwAEEQcBoNDJjxgzyDhcgiiJ9Bw5h2+pfWfbNV7z80eeEd+yMta6OT19/2VXCp9Fw+1+fI6pXNDq9HkEQcDiclJeVodFoXM0lPDworShHkkQKC1yl4vcPj2HSg4+zK3kjpqoqbrn/EUZNnMzPX39OVWkJy3+Yh6+XJ99++y09e/bk1Vdf5aOPPmryXpxJJJ87dy5Lly5l6dKlALz++ut89913+Pn5MXbsWObPn09+fj4Av//+O2+99RYWiwW1Ws17773HiBEj2LhxI08++SSJiYls2bIFh8PBN998I3+mf/31V9544w3q6+sRBIEvvviCgQMHYjQaefHFF6mpqcHpdPLKK68wefLkFn8WLydeOg1ef/jubCx+OlNJ37Bhw0hJSSEvL49Vq1YpDTEUrhiUuxYFBQUFBYWrnMDAQMaMGcOYMWMoLy+XHVT5+fkcOXKEZcuWtUigEiWJrFITdlHC7Q9deXx9fbFYLFRXV+Pu7o5GrabeXo+13IrD7nJMubm54enl6RKi3N0RmgkWFgQBvVqF1SmSVWYiNvREy+7CwkI2bdpESkoKtqAgOsTG4h8SQtRtt/F/gwbh7n7+nZ6aJSYGEhMhKenEsvnzXSV8LRTxrhQau3q1b9/+cg9F4SpAFEUWLlyIIAjcdtttrZbh85///IehQ4cyYMCAJsvLy8ux2+2EhITIyyIjI+Uy5Ndee42wsDAePVkkbobq6mo2btzIO++8A8D777+Pp6en7EB66623+Mc//sHjM95l9jtvMHfdVgKCQ1yun/oTodrV5eV8sWI9tw/pS+rGdYy9ZQrhnbvwzfv/xNvPn799+Dl52Rl8839v4hMQSGVJMZa6Wvan72L8Hfei0WpZMOtDesYOJtO4HXNtLS/N/IqXp03i+Y++oOTwQT588Wk8PDwwGo0cOHAASZLw8vLi3nvvpbi4mBUrVtCzZ086depEQUEB3377LcePH+fuu+9m/PjxLF26lGHDhrFx40b+9a9/YbVa+cc//oHVauWnn35i9OjRHCst596X3ySsXXtWLfwBlUpFdVU1PqZadG7uPPKPNwnv1IXcjHS+fmcGr345D5PJhCiKOBx2HE4ngiCgUqmwWm3YrDYkQK/XyQ42Xz8/PvtlLccOH+KJm8cw+b6HefjFV9nwyxL+78u5TB2dAMDbb7/d7Hvx3//+t0W
fnV9//ZXFixeza9cuvLy8uP/+++Xf5eXl8cYbb/D777/j7e3NgQMHSEhIkAWrffv2MXv2bGbNmsXnn3/O3//+d37//Xf279/PfffdR1JSEt26dcNut2M2m6mqquLhhx9m5cqVhIaGUlZWRv/+/RkyZAht27Zt0XivNFryN6xSqbj77rt56623WLVqFQMGDDj30ncFhYuAIkopKCgoKCj8iQgICGgiUDU6qA4dOiQLVGFhYbJAFRoaKr/2qMlKhaUenerU1tMCAn6+fhw9epT8/Hz0ej0Oux1RFFFrNK7Q3TZtmgQInw5BENCpVJRZ6imoquVIdjpJSUnyBEOj0TCoISuqY8eOrrGIInz1FWRkwOjRcOONrnK/C+Wvf4XkZNf2AerrXR3//lAqc6VjMpmw2+34+/tf7qEoXAUkJSVx7NgxhgwZ0jrOQyAzM5PFixeTdLLI20L+WJr2R95//32+/vpr9u/fz/XXX8+IESMAWLp0KdXV1SxevBiA+vp6IiMjUQHRgxN499knGDRqDHHDRtOu44kMnfFT7nCJMWo1/QYOIsO4HR8/f9RqNVo3N3x8fIi/bhwrv/0fDruDwMBAdHo9/zd/EQICdaZqPnv9RUJC2tA3dhA/ffYxn7/xMu07dyE4LIxO7doyLzAQh8PB5MmTWb9+PevXr+edd95BrVYjSRLr1q0jODiY6667juXLlzNkyBCMRiMmk4nk5GQsFgu7du3C3d2dyMhINBoNY8aMITU1lcjISIYNG8bWHTuIHT4Swelg2A03s3bJQjw8PTAYvBCdDr7599s47HZUajXFRwrQatSIkhoBAZ1Oh5+fHwYvL9zd3dFqtWgEEZWgIiTkxPfC+Mm3o9VoiegUhVqtoaK0hKBQl5Bx8iX4dO9FS1m3bh2TJ0+WA9YfeOABNmzYAMBvv/3GgQMHmmSQqVQqWdTs3LkzAwcOBGDw4MF88MEHAKxZs4Zx48bJjR+0Wi0+Pj6sXLmSvLw8xo8f32QMOTk5V60o1VJCQ0OZMGECv/zyC/PmzeP555+/5roaKlx5KKKUgoKCgoLCn5SAgACuu+46rrvuOioqKmSBKi8vj2PHjvHLL7/IAlX//gMoEF2ZMie3nBZFJ2azmbq6Oqw2GxISdrsdnU6Hr68fVpsVSRTR6XQtEqTk7TrtWGz1/L59LzuX/gBAmzZtSExMZPDgwXj+Mddp0SJX5hPA1q1w5EjrlNlFRsJf/gILF55YtnIlTJ0KPXpc+PYvERUVFYDrPVdQOBN1dXUsX74cvV7PLbfc0mrb3bx5M/n5+XTp0gWAoqIiHn74YY4fP85jjz2GRqOhqKhIdkvl5+e32NnXmClVUFBAQkICn3/+OY899hiSJDFz5kzGjBnTZP2DlXW88t/ZFO7NZM/2Lbxy/1Tu+9srjLyx+eOVY6IFgerycvz9/PDzD0Cj0VBRUkzbiEi0Oj0GL5dgohZAdDrx8jLQuXtP5q7dyoPjEqksK+OuwX0IDgmlrqYalUrF6NGjKSsrA2Ds2LGuTni4BJLIyEhGjBiBRqNh8ODBfPXVV3Ts2JGtW7cSGRnJ0qVL6du3L/n5+cydO5dbb70VNzc3tmzZQmFhIfVWG09OGIaAgMNhx2Yx4+XlhcHLQHVlBY/9400GXzee4uIiHh49BLvNjsHXB7VaTUREBB07dGxyHtSqU7uP6vRu8v+r1GqczhNdANUnqVKney/OF+EP277uuuv4/vvvT1nv6NGjDSXiDWNSq5t0KmwOSZLo2bMnW7dubZWxXm2MGzeO1NRU8vLy2LRpkyzyKihcLhRZVEFBQUFB4RrA39+f0aNH8+KLL/Luu+8yZcoUOnXqJItTH335PwpLyrGa67DV26irq6WkpJgjR45QXl6B1WrDTa8npE0b/P39EQQV7u7uqAQBp9OJ2IJmvhISdXV1FBcXcezYcepqavAMbMPAYSN59tlnmTFjBqNHjz5VkAJocFHJfP11UyHpQnjooVPDzT/6CK6
iBsXl5eWAIkopnJ1ffvmFuro6rr/+ery9vc/+ghby2GOPcfz4cfLz88nPz2fQoEF8+eWXPPbYYwBMnjyZzz//HACj0cjRo0cZNmzYOe2jffv2zJw5kzfffBOLxcLEiRP58MMPMZvNgCu8OysrC3cVFBXk06V3NFMefoLE8Teyb0+avJ3fF/8IuET33VuT6T80Af8Gd1BQaBg71q8ma2cKRUcKadM2nJDw04tnpcePcSA7E7vdzpK0HAKCQ/h6wSK+//57TCYTpaWl8rrz5s0DICUlBavVKruCGvHw8ODQoUOsXbtWXrZ7927q6+vln4cMGcL+/ftdeUxrN/C/9Sl89staFm7PQKXWkJGyjVpTNUgSDlSUl1ew6kfXfgOCAmgbHo5Kpbqg7DlJkvDwMuC0nOi4d7r3oqWMHDmSxYsXU1vr6vr69ddfy78bO3Ysa9euJT09XV7WWCZ4JsaOHcvvv//Ovn37AFcGWXV1NUOGDDnref4zo9FouPvuuxEEgZ9//ll+qKGgcLlQRCkFBQUFBYVrDD8/P0aNGsULL7wgC1SdowfgdIoUHz/GgdxcCgoKqK6udnVk8vMlPLwtbdqEYDB4ExQUhKASqK01IQEOp1MOWW0Oh8NOZVUlR48coaysTO5iZfDyxMfXl6Fjr6dr165nzsS44QZXOPnJvP8+rFvXGicEHnig6bK0NNi06cK3fYloFKWU8j2FM3H8+HE2bdpEYGAgo0aNuqT7fu+999i6dStdunTh3nvvZf78+XJu0WuvvSYLVmfjpptuolu3bsyaNYsXX3yR2NhYBg4cSJ8+fRg0aBC7d+/GQyPw8cvP8ODYBB6eMJz9GelMfuAxeRu5mRmM6RJK6fFjmOvqePfZJ6itreOJN99Hq9Pxyasv8rc7JuEfFMSL/z6RifTBi0+zdc1vTcaTl5PNa4/cg8Nu5/Gbr2PkxL8wsH8/l2PJYJCFKJVKRWZmJtHR0dx33328//77LFu2jAkTJshiiEaj4ddff+Xtt9/m2LFjTJo0iZdeeqnJ9dXPz48nn3wSh8PBU/ffzdMTryNjZwqi6ETv7s6GFcuoqTXj4WXgq3/+g38+dg8BAYEAeLh7NJv1d66IwE13P8jfn36Svn37snv37tO+Fy3lhhtu4Oabb6Zv377ExsbKXQjBVZ73/fff88gjjxAdHU337t356KOPzrrNzp07M2fOHO68806io6MZOHAgOTk5+Pn5yec5OjqaHj168NJLLyE2lnFfA3Ts2JHhw4djs9n47rvvzvgdrqBwsREk5ROooKCgoKBwzlitVqZOnUp2djbu7u4EBwfz2Wef0bmzq4tTSUkJd999NwcPHkSv1zNr1qwmeRhnY9iwYRw9epTc3NwzijUbN27EarUybtw4AI4dO8Ztt93G5s2bAVi2bBkvvfQSer2eefPmcdddd7F582YMBgNOp5O9e/eSkpKCI6wzngHB1FVX8s2/32bgyLH0jB2ISlCh1Wrw8PDEw9MDnVYLuASpF+78CyMn3Ub3AXEEBgXRJvhEOPioDkH8sG0PkkqN1WJ1LRTAw90dL4MB94aOfL8s+I7BgwcxJXHgWc/JG7fdRtXGjXzUrh1zy8pYWlXF0u7dSX38cd5fsYIFCxa0+PyeQn29K+D8+PETy9q3hwULoAUt6y83P/74Ixs2bOCNN95okhOmoNCIJEl88sknZGdn8+ijj9KvX7/LPaSLhiRJbDlSQY3NgZumaUnaHfH9efPLb+jco7e8rKKiHJOpFi8vrwtyG9qcIlq1wPD2AWguQU6P3Snye+4xLFYrdTXVSK4KRDw8PPDy8mrS+bQ1qXeKqFUwvH0gWnXrHafJZMJgMCBJEn/729+wWCx89tlnrbZ9haZYrVbeeOMNKisreeCBB4iLi7vcQ1K4RlEypRQUFBQUFM6Thx9+mPHjxyMIAp9++ikPPvggGzduBOCll15i0KBB/PbbbxiNRm655RYOHTokOwPORG5uLrm5ufj5+bF
p0yaGDx/e7HoOh4ONGzdSVVUli1JhYWGyIAXw+eef89prr3H77bcDkJaWRm5uLkajkbS0NOrq6lBpNMT3Goi7XkdARARvffY1TqcrS8psNmO12aiurqa6urqJQKVSqxBFEUmScNjtrjE5HdSaagEoL6vAw2BArVFj8PLCy8sLtbrprce6JQvx9fVFTIhDdbbg8u7dobwcqqpOLLPbifnf/1jw1VdnPa9nRKeDp56CV145saygABYvduVLXeEoTimFs5GRkUF2djZdu3alb9++l3s4FxVBEGjv7U5GqQlRks54bbHV2zDV1qJSq/Dz8z3vfUqShChJhBs8LrogVVlZydatW9myZQt+XfsQ3rs/Go0WL4MXnp6ezWZDtRaSJOGUJMK93FtVkAK4++67yc/Px2q10rNnzxa75xTODzc3N+68805mzpzJggUL6NGjB15eXpd7WArXIEr5noKCgoKCwnng5ubGhAkTZBfToEGD5O5xAAsXLpTbm8fGxhIWFsamFpaDff3119x55508+OCDzJ49W16en5+Pr68vL774Iv379+fTTz/l888/57vvvqNv3768+eab8joAf/3rX9m8eTOvvPIKAwYMYOHChajVat555x02b95MVVUV27dvZ/26dfzj7slsWr4YnVbHs1Mnsm3dagwGbzK3b+G9vz7EW4/ezRsP3oFx03qqq6s5fuw4kighIeFwODCbzZSUlnD06FGqq6td58hdT3BwEC/cdhNLZn/O9Mk3Mi1hAPNn/huAX3+cR27mbj7756v07duPlStXAvDBBx8QFxdH//79GTduHIcPHz5xcnr2dDmaTmJjURF9Y2Lg2DEAvvjiC6Kioujfvz9vvfVWE6eZ0Whk5MiRxMTE0K9fP3766acT53bKFF53OBiwdy+dMzNZWV0Ny5cDsG3bNuLj44mOjqZPnz4sW7YMcAmI119/PbGxsfTp04dPP/20Re9xa1NRUYGnpyd6vf6y7F/hysbhcPDTTz8hCAJTpkxpUfv4q51QLzf0ahV2sWlRyPfJaSe5pCQqystBcpXFqS5AzHFIEmqVQFuD29lXPg+cTie7d+/m008/5eWXX2b58uWYTCZCPLR4eXoSHBKCt8H7ogpSAM4Gka+twb3Vt/3zzz+za9cu9u7dy6JFiwgMDGz1fSg0pVevXsTFxVFbWyt/HyooXGoUp5SCgoKCgkIr8PHHH3PzzTcDLteK3W6Xu0wBREZGyu2rX3vtNcLCwmTR6mScTifffPMN69evJzAwkBkzZlBdXY2Pjw8A1dXV9OzZk/feew+Aqqoqqqqq5HyNk4WxF198kfXr19OtWzcCAgJY15C/NGjQIOLj45k0aRIzZsxg/C1/YdvRCuqqKk8ZT0ziCEbeNAlBECg6UsCTt4wjYcx47A4HoiQhOSWcTpHaujpUKjV6vU5+0hoUFIy7uwcAdTXVfLpkFdUV5dw1PJaxk+/g+ql3sXbpIm669yFeuP9ODDoN33//PTk5OWzbtg21Ws28efN4/PHH+fXXX10DEgR48UVISoJt204M1OGAJ58k87nneOONN9i1axchISG8/vrr8ipVVVU8/PDDrFy5ktDQUMrKyujfvz9DhgyRz22fu+5ixg8/8Ft1NdMLC5nQrx8VFRVMnDiRRYsWkZCQgCiKVFVV4XQ6uf3225k/fz7dunXDbDYzaNAgBg4cSGxsbMs+OK1EeXk5QUFBl3SfClcPGzdupKSkhMTERMLDwy/3cC4JWrWKSF8PcsprcUpSk05xjZhMJurr7ejd9Hg112ChhYiShEOUCPd2w6Br3elVSUkJycnJbNu2jZqaGgAiIiKIj48nNjYWNzc3UouqKKmrRy1IF1VwlCQJuygR5KHDV69MI/8sTJkyhaysLLZv305cXBw9e/a83ENSuMZQriYKCgoKCgoXyNtvv82BAwdk0edsvPnmm6f93cqVK4mMjKRbt24AjB49mu+//17uYKXVarnzzjtP+/ry8nIcDgc
zZszg2LFjVFRUYLFYiIuLIzY2li+//JKpU6dy9OhRrFYrt99+O9VWV+mdj/+pWSpFhQW88/SjlBYdR61WY6qq4kj+IfzbhCAAGo0KDw93VCoVICGoVGiaKVEcefOt8j5C20VQVHiYoJBTs4+WLl2K0WhkwIABgEukOwWVyuWW2ru36fKCAtY/8wzjRo+WBcGHHnpIPt9bt24lLy+P8ePHN3lZTk4OHTt2xM3NjUnPPgtt2zL4P//h4MGD8MwzbNu2ja5du5KQkNCwexX+/v5kZ2eTlZXF1JPK+0wmE9nZ2ZdUlLJYLFgsFqXznkKzmEwmVqxYgbu7OzfddNPlHs4lpYOPByV1NiqtdtzUqiaCjdPpoKqqCkGAAH9/LiR7qd4p4qFV083fcPaVW4DdbictLY3k5GT2798PuHKiRowYwdChQ2nXrl2T9XsEGKiyVlLvFNFrLp5Tql4U0akFegQargm33bWCwWDgtttu4+uvv+a7777j9ddfV1y3CpcURZRSUFBQUFC4AD744AOWLFnC2rVr8fBwuYICAgLQaDQUFRXJ4kh+fj7t25++rXgjs2fPZv/+/URGRgIuwSE/P18WpTw8PBoEoBPYbDbWrl2L0WgkIyMDu91OSUkJ/fr1IzU1lXvvvZfJkyefdp9qleDqyCRxyrzsn089xJ3Tn6Nf/HCsVitP3jCS6spKAkJCUWs0+AcE0q17dxx2BzU11dTV1VFe5so3MplMeDa0PNeddIOrUqubiE0CyC4GSZJ4+eWXefjhh898ojQa6NULPD3h5A5Px4+DyeRyTmk0TSZOkiTRs2dPtm7desrm8vPz0ev1rvWnTkV9/fU4vb1h8GBodGn9AUmS8Pf3P6cOUxcDJU9K4UwsW7YMi8XClClTMBhaRzS5WlCrBHoGGdhxtBKbKKJXnRCmKiorEUUJHx9vtFrdee/D5hRRCQLdA73Qay4sGeXIkSMkJyezY8cOzGYzAFFRUcTHx9O/f//TZhJ66jRE+XuSVWrCIYoXJdPKIYpIEnTx88Krld1gCpefuLg4duzYQVZWFsuWLWPKlCmXe0gK1xBKppSCgoKCgsJ58p///IcffviBNWvWyDlOjUyePFkOaTUajRw9epRhw4adcXvFxcWsW7eOAwcOkJ+fT35+PsePH+fYsWPs2bOnybq1tbUkJSWRlpbG+vXr+emnnygoKKBLly7odDo++OADHn30UQwGQ7MTma5du+Lh4cEPP/yAh1aNWiVQUV4GSIiSiLmujuPHj1FdVYmblw8Wi5WU9aupM9UQGBRI27Zt0Wo06HQ6BAS0Wi0BAa7l3t6uiW9VZRVHjh5FFMXTttp29/TCVmfCvWEyN3HiRD7//HMqKioAl2Ng165dzZ8wjQZmzgQ/P3nRCIOB3w8coOTll0GSmmRyDRkyhEOHDrF27Vp52e7du+V27E04Scwa0r07uTk5bE5KAkAURSoqKujatSve3t7MmTNHXvfAgQPy2C8VjaKU4pRS+CONIkebNm1O2zDhz46PXkufYG/UgoCtoTGDxWLBXGdGo1HLpdHng80pAhJRAZ6EeJ5flpTVamXz5s28/fbbvPXWW2zYsAGNRsPYsWN58803+dvf/sbAgQPP2iSjvbc74d7u2EUJx2mut+eLQxSxixJtDW5E+LR+lpTC5UcQBKZNm4Zer2f9+vUcOnTocg9J4RpCEaUUFBQUFBTOgyNHjvC3v/2NqqoqRowYQd++fRk4cKD8+/fee4+tW7fSpUsX7r33XubPny9PKl577bVmuwp98803jBkzponApVKpmDp1KrNnz8Zms+F0Opk5cybPP/883333HZ6entTW1rJu3Tr8/f257777UKvVuLufeeKg0WhYtmwZc+bMoU/v3jwyLpH1vyzhyNGj1NvqqaszY7c7uPdvL/P5my/zr8fvpfRIIcFh4Q3H0Xzphlqtwc/P5djx8fEGXCJOSUkJlZUVOJ2OJuuPve1Ofpz1Cf36uYLOp02bxr3
33suIESOIjo6mb9++rF+//vQHEhwM06eD2lWy0tvdnX+EhDB05kz6t2+P1WqVJ51+fn78+uuvvP3220RHR9OjRw9eeuml0wpmAKxejd+dd/Kznx8v3XUXffr0oX///mzZsgWNRsOKFStYsmQJffr0oWfPnjzwwANYLJYznvvWplEEU5xSCicjSRILFixAkiSmTJmCWn1xA7CvZEK83IgO9kYjCFidTiobOnj6+/sjCOc+HRIlCYvDiQB0CzDQwcfjnF4vSRIHDx7km2++4fnnn2f+/PkUFBTQq1cvHn30Ud59910mTZpEmzZtWrxNQRDoFWQg3OCGQ5Sod7oEuAtBklzbaRSkegd7K2V7f2ICAgK4+eabkSSJb7/9FofDcfYXKSi0AoJ0oVcrBQUFBQUFhYuG3W4nIyODlJQUMjIy5JvEiIgIYmJiiI2Nxe8kp1BLqa6uJj09nfT0dPbu3Ut434FE9B+I3WLGw90dd3d33NzcL3gCIkkitbW11NTU4HA4EQTw9PTE28cHjVqD1SkS5e9JF/8LbEOdng6PPQY2GyanE0PDBPzjgQP5LS+PVatWnd92p0yBvLwTP7/9NowZc2FjbWUWLVrEmjVr+Pvf/96iElGFa4O0tDS++OILevbsyV//+tfLPZwrgiqrnfVZB7ChRq1WEeDjc07XOKkh0NwhSXhq1fQINBDs2fLsndraWrZv305ycjLHjx8HXELA0KFDGTJkyHldy/+IKEkcrKwjr8qMQ5TQq1WozuM6LkoSNqeIRiUQ6eNOF3+v89qOwtWFKIq899575Ofnc/PNNzNhwoTLPSSFawClIFhBQUFBQeEKw+l0snfvXlJSUti9ezc2mw2AkJAQYmNjiY2NPacn6OCaTB05ckQWok7u0hcREUGP9mGovLzQ+fq2ah6JIKgwGLzxMhioq6ujprqa2to6auvq8PTyxsPTgxCvVmih3qcPvPMOPPccLxUUsKW2FrskEVZczBebNp3/dv84Sfz0Uxg+HHTnn0HT2ijlewp/xG63s2jRIlQq1Rnz5K41rFXlrJ/7Ge37xdE7YRRWp+hq2KBSoRZoVqCSJAlRAockIkqgUQm0N7jTNcALnfrs10pJkti3bx/Jycns2rULp9OJWq0mJiaG+Ph4unXr1qruI5Ug0MXfi0APPVmlNdTYXA8yNCoBtSCccV+SJOFs6LAH4KPX0DPIgJ/blXO9U7i4qFQq7r77bv75z3/y66+/0q9fP0JDT21KoqDQmiiilIKCgoKCwhWAKIocOHCAlJQU0tLSqKurA1xCw4gRI4iNjaVt27bnNHmx2+3k5OTIQlRlZSXg6uDXu3dv+vTpQ58+ffD19UWSJHYcq6LcUs8FZvU2i4CAl6cXXp6emM0WamqqcUoSh7LSyV25j3Hjxl345CwxEf7+d/771lsnlk2ZAh07nv82b7sNdu488fOxY7BgAdx11/lvs5WpqKhAr9fLQfsKCmvXrqW8vJxRo0YpE8oGJEni+++/x15vI7ptEMM6BHO81kZBjRmL3VWiJuDq9/BHVALo1SraebsTZnDDU3v2KVRlZSXbtm0jOTlZFo5DQ0OJj49n4MCBFz103s9Ny+C2/hTVWSmssVBldWBvKFVWCa5rcuPxSriEN9fvBPzdtbTzdifU0w21SnFHXWu0bduWcePGsXLlSubNm8fzzz+vlG0qXFQUUUpBQUFBQeEyIUkShw8fJiUlhZ07d1LVkHPi7e3NyJEjiY2NpUOHDud0M2gymcjIyCA9PZ3s7GzZZeXt7U18fDx9+vShe/fu6P7g9BEEgQgfdyqs9Rete1PDnvDw8EDn5obVbqe6rorMffvYt28f7du3Z9y4cfTr1++UDoMt5uabXe6mhQuhfXu40LKlESOgXz84OWx99my48Ub4Q7j95aK8vLwhG0eZNChAVVUVq1atwtPTkxtuuOFyD+eKISUlhZycHMLCwhg9ejRqtZoOvh5E+rhTa3disjkw1TuwOpyIkku
40alVGHQaDDoNXjrNWQUap9NJRkYGycnJZGZmIkkSOp2OIUOGEB8fT8eOHS/p36laJdDW4E6YlxvVNgeVVjs1NjtVNjs2h0ugEgC9Ro2vXou3XoufmxYfvUa5nlzjXH/99aSlpXHw4EGSkpLO2qhFQeFCUEQpBQUFBQWFS8yxY8dISUnBaDRSVlYGgIeHB/Hx8cTGxhIVFdViUUaSJI4dOya7oQ4dOiSH24aHh9OnTx+io6OJiIg46yQjxFNPGw89RXU21IJ00SYlUkN5SHtfL268cyoFiUP4/fffSUtL48svvyQ4OJixY8cyaNAgNJrzuFVJTHT9O5mcHCgrg4EDXV37WoogwDPPwN13n1hWWwtffQXPP3/uY2tl7HY7JpOJiIiIyz0UhSuEpUuXYrPZuPXWWxX3XANms5mffvoJgGnTpjUJfRcEQRaezpeSkhKSk5PZtm0bNTU1gKssuvGafrbGExcbQRDwddPi63aig58kSUi4RClFgFL4IxqNhrvuuov3339fbubRGplnCgrNoYhSCgoKCgoKl4DS0lKMRiNGo5Fjx44BoNfriYuLIzY2lh49erRYgHE4HOTm5rJnzx7S09Pl0hCNRkOPHj3ksrxz7cYmCAI9Ag3yU3S9WtXqkxVJkrA6Rdw1KroFeLkcWhERPPzwwxQXF7N69Wq2bdvGvHnzWL58OaNHjyYxMRE3twvInVqyxBVQDtClC8yadWpW1Jno0QPGjYPffjuxbNEiV2nfZQ4Wb+y8p+RJKQDk5+ezbds2wsLCSEhIuNzDuWL4+eefMZlMDB06lM6dO7fKNu12O2lpaSQnJ7N//34A3N3dGT58OPHx8bRr165V9nOxEAThND1UFRRcdO7cmWHDhrFp0ya+//57Hn/8cUXAVLgoKKKUgoKCgoLCRaKqqorU1FSMRqMcLK7RaOjbty+xsbH07t0bvb5lnZtqa2vJzMxkz549ZGdnY7VaATAYDAwZMkQuy7sg8QZw16rpFeTN7uJqbM7WFaYkScImimhVAr2DvXHTNG1R36ZNG+666y5uvPFG1q5dS1JSEosXL2bVqlUMHz6ckSNHnl8Oy9KlJ/4/NxemT4fPP4dzcZE88QSsXw/19a6fnU74+GP497/PfTytiBJyrtCIJEksWLAAgNtuu+38S2D/ZOTn57N582Y8PT2ZNGnSBW/vyJEjJCcns2PHDsxmMwBRUVHEx8fTv39/tFrtWbagoHD1MGnSJPkB2M6dO4mJibncQ1L4E6KIUgoKCgoKCq1IbW0taWlpGI1GcnNzkSQJlUpFjx49iI2NpV+/fi0q5ZAkiaKiItLT09mzZw95eXlyWV5YWBjR0dH06dOHyMjIVp98tvHU0yvIm8zSmlYTphodUlqVQJ9gb4I8Ti/G+fr68pe//IXx48ezadMm1q1bx8qVK1mzZg3x8fFcd9115ybCdO4M2dknfs7OhhdfhA8/bHkpX2go3HEHzJ17YtmmTZCWBv37t3wsrUyjKHWurjiFPx9Go5G8vDyio6Pp1q3b5R7OFYEoisyfPx9Jkrj11lvx8vI6r+1YrVaMRiPJycnyAwZvb2/Gjh3L0KFDz7kbqoLC1YKbmxt33HEHs2bN4scff6R79+54enpe7mEp/MlQRCkFBQUFBYULxGq1snv3boxGI9nZ2YgNHY46d+5MbGwsAwYMaJHDx+l0kpubK+dDlZaWAqBWq+nWrZtclhcYGHhRjwegrcENrUogs7QGi8MlJp1v+LlDdHW28tCq6RVkOKMgdTKenp5MmDCB0aNHs2XLFlavXs2GDRvYtGkTcXFxjB07lrCwsLNv6LHHICUFiopOLNu2Dd58E2bMcOVGtYT77oNly6ChiyEA//kPfPstXCZXilK+pwBgs9lYsmQJGo2Gv/zlL5d7OFcMGzZsoLCwkE6dOjFkyJBzeq0kSRw6dIjNmzeTmppKfX09giDQq1cvuWnEydlUCgp/VqKjo4mJiSE1NZVFixZxzz33XO4
hKfzJUEQpBQUFBQWF88But5ORkYHRaCQjIwO73Q5A+/btiY2NJTY2tkWhoGazmczMTNLT08nMzMRisQAuQWbQoEH06dOHnj17XnBZ3vkQ7KlniN6fvWW1FNVZsTucaFQCGkE4q3NKkiQckoRDlFAJEG5wo1uAAb3m3MUbnU7HiBEjSExMxGg08ttvv7F9+3a2b99OdHQ048aNo2PHjqffQFAQfPopPPAAVFefWL5yJQQGtrxDn6cnPPIIvPvuiWX79rmypiZMOOfjag0Up5QCwOrVq6msrGTs2LEEBwdf7uFcEVRVVbF8+XJUKhXTpk1rsduztraW7du3k5yczPHjxwGX6Dt06FCGDBmihD0rXJPcdtttZGdns3XrVuLi4ujevfvlHpLCnwhBaqwFUFBQUFBQUDgjTqeTvXv3YjQa2b17t5zrFBISIgtRLSnjKC4ult1QBw4ckJ1VISEhcllex44dr5hMGEmSKDXXU1BjocxSj1N03TqoBAG1AMhxuRJOCcSGWwuNSiDIXUc7H3cC3XWtmk2Vnp7Ob7/9Rl5eHgBdunRh/Pjx9OjR4/T7yciARx8Fm63p8mefdZXmtQSnE6ZOhUOHTiwLDnaFqV8G4fCDDz7g0KFDfPrpp0oA7TVKRUUFr732Gu7u7rz11luXRcC+Evnyyy/ZuXMnY8aM4dZbbz3jupIksW/fPpKTk9m1axdOpxO1Wk3fvn2Jj4+ne/fuyt+XwjXP1q1b+eabbwgMDOT1119Hp9Nd7iEp/ElQnFIKCgoKCgpnQBRFDhw4gNFoZOfOndTV1QGuJ+fDhg0jLi6Otm3bnnHCIooiBw8elMNCi4uLAVCpVHTp0kUuy7tSHQ6CIBDsqSfYU09tvYPiOhvVNjtVVgd2UcTVWNwlTblpVPjqtXjrNYR46vG8gDbrZxpPo3h34MABfvvtNzIzM8nNzSU8PJxx48YxYMCAU0W93r3hvfdcIlSDEAi4SvACA2HMmLPvXK12BaU//fSJZSUl8P33cP/9rXJ850J5eTl+fn7KhPkaZsmSJdjtdm6//XZFkGogKyuLnTt34ufnxw033HDa9SorK9m2bRvJycmy6zA0NJT4+HgGDhx4fo0VFBT+pAwePJiUlBT27t3L8uXLlVJhhVZDcUopKCgoKCj8AUmSOHz4MCkpKezcuZOqqirAFWw7YMAA4uLi6NChwxmFAIvFQlZWFunp6WRkZMhdmjw8POjVq5dcludxLh3grjAkSaLeKeJsuJNQCwI69dlL+y4GhYWF/P7776SmpiJJEoGBgYwdO5bBgwef2g1r+XJXntTJaDTwyScQF3f2nUkSPP44GI0nlnl4wM8/wyXMdhJFkSeeeIKoqCieeeaZS7ZfhSuHgwcP8n//93+0b9+eV155RREncZVWv/HGG5SVlfH4448THR3d5PdOp5OMjAySk5PJzMxEkiR0Oh0xMTHEx8fTsWNH5TwqKJyGsrIy3njjDRwOBy+//DIRERGXe0gKfwIUUUpBQUFBQaGBY8eOkZKSgtFopKysDHCJSP379yc2NpaoqKgzltSVlpbKZXn79++Xy/KCg4Pp06cP0dHRdOrUSQnHvYiUlpayevVqtm7disPhwNvbm9GjR5OYmNi06+HXX8OsWU1f7OEBX30FXbuefUf798O0aS6BqpEHH3SVB14iysvLeeWVVxgyZIgSPHsNIkkS77zzDocPH+a5556jS5cul3tIVwTLly/n119/pU+fPjzxxBPy8pKSErZs2cLWrVupqakBICIigvj4eGJjY1vUFVVBQQHWrFnDokWLCA8P55VXXlHuaRQuGKV8T0FBQUHhmqa0tBSj0YjRaOTYsWMA6PV64uLiiI2NpUePHmg0zX9diqLIoUOH5LK8xlBcQRDo3LmzLEQp7cIvHUFBQUybNo0bbriBdevWsWnTJpYsWcLKlSsZMWIEI0eOxNvb29VJr6wMFi488WKzGZ56CubMgbZtz7yjqCi44Qb45ZcTywo
KLs5BnQal8961zbZt2zh8+DAxMTGKINVAcXExv//+O1qtlqlTp2K320lLSyM5OZn9+/cD4O7uzvDhw4mPj6ddu3aXecQKClcfo0aNwmg0cvjwYVavXs348eMv95AUrnIUp5SCgoKCwjVHVVUVqampGI1G8vPzAdBoNPTq1YvY2Fh69+6NXq9v9rVWq5Xs7Gz27NlDZmYmtbW1ALi5uclleb169cLT0/NSHY7CGTCbzSQlJbF27VpMJhMajYahQ4cyZswYAv394eWXYd26pi9q1w5mz4azdbQrL4eHHnKJUVotfPxxy8r/Wont27czZ84c7rnnnnNud69wdWO1Wnn11VexWCzMmDFDESZxOcc++ugj9u3bR0JCAhqNhh07dsil01FRUcTHx9O/f/9TS3oVFBTOiSNHjvCvf/0LlUrFa6+9pjx8U7ggFKeUgoKCgsI1QV1dHTt37sRoNJKbm4skSahUKnr06EFsbCx9+/Y9bb5TeXm5XJaXk5OD0+kEIDAwkIEDB9KnTx86d+58WkeVwuXDw8ODcePGMWrUKLZu3crq1avZtGkTmzdvJiYmhrGPPEJ4VRXs3HniRYWFriDzr76C04iTgCs/au5cyMqCiAgIC7vIR9OUxmBmRZC49li1ahU1NTVcf/31yvvfQHJyMhs2bKCurg5RFFGpVBgMBsaOHcvQoUOVSbOCQisSHh7O2LFjWbVqFfPnz+fZZ59VstgUzhvl7llBQUFB4U+L1Wpl9+7dGI1GsrOz5Yynzp07Exsby4ABA5rtriRJEocOHZKFqKNHjwKusryOHTvKZXkhISHKTdhVglarZdiwYSQkJJCamspvv/1GSkoKKSkp9OvShWlHj2IoKjrxguxsWLkSbrnlzBv29obBgy/u4E+DIkpdm5SVlbF27Vp8fX0ZO3bs5R7OZaXxWr127Vo+++wzrFYrffv2pU+fPsTHx9OnTx8l70ZB4SJx/fXXs3PnTvbv309ycjIJCQmXe0gKVymKKKWgoKCg8KfCbreTkZGB0WgkIyMDu90OQPv27YmNjSUmJgb/ZsqybDYbe/fulYUok8kEuPKl+vfvL5flKS3Cr25UKpWcF5aVlcWqVavYlZtLrr8/j+XlESYIuLu7IwA0fAZazMaN8Omn4OkJr74KnTtfhCM4QUVFBYIg4Ovre1H3o3BlsWjRIhwOB5MmTTptmfGfndraWrZv305ycjLHjx/nwIEDAIwfP55XX30VPz+/yzxCBYU/P1qtlrvuuot///vfLFq0iN69eyvfRwrnhSJKKSgoKChc9TidTvbu3YvRaGT37t1YrVYAQkJCiI2NJTY2ttnSjcrKSlmE2rdvHw6HAwB/f3+GDx9OdHQ0UVFRSlnenxBBEOjVqxe9evXiwIED/P7778x2OLh1xw7CKitxi4rCd8wYTt9r8Q+YzfDGG9CQMcYTT8DPP7s6+l0kysvL8fHxUT6f1xA5OTns2rWLjh07EncJ88uuBCRJYt++fSQnJ7N7924cDgdqtZqIiAiqqqro06cPM2bMUB4cKChcQqKiokhISGDz5s38+OOPPHoJO9Aq/HlQ7mIUFBQUFK5KJEkiNzcXo9HIzp07qaurA1ylTMOGDSM2Npbw8PAm5XWSJFFQUCB3yyssLARcAkVkZCR9+vShT58+tG3bVinLu4bo3LkznTt35ujEiaxetYrFmzZRrdfj+8knjBkzhqFDh7YsGLnhMwi4QtC//RYu0g26JElUVFQQERFxUbavcOUhiiILG7pF3nbbbdfMNaqyspJt27aRnJwsl6yGhISQkJBAbGwsM2fOxNfXl1tvvVURpBQULgOTJk0iPT2dXbt2kZaWRv/+/S/3kBSuMhRRSkFBQUHhqkGSJA4fPozRaCQ1NZWqqioAvL29GTFiBHFxcXTo0KHJZK2+vp59+/bJjqjq6moAdDod0dHRREdH07t3b7y9vS/HISlcQbRt25b7HnyQsokTWbNmDVu2bGHbxx/j//DDBLV
pg9+bb+I2YkTzL/bwgCFDYMuWE8vmzYNJkyA4uNXHajKZcDgczZaiKvw5SU5O5siRIwwaNIjIyMjLPZyLitPpJDMzk82bN5OZmYkkSWi1WoYMGUJ8fDwdO3ZEEATWr19PYWEhHTt2ZOjQoZd72AoK1yQeHh7ccccdfPbZZ/zwww9069bttI1jFBSaQxGlFBQUFBSueI4dO4bRaMRoNFJaWgq4boKGDh1KXFwcUVFRqFQnCq2qqqrIyMggPT2dvXv3yrlSvr6+JCYmEh0dTdeuXZW24ArNEhgYyO23384N11+PNT4eS3U19ZWVFN1+O3lPP82Axx7Dx8fn1BdOnw7btkFDoD42G8ya5Srra2WUkPNrC7PZzLJly9Dr9dxytvD9q5iSkhK2bNnC1q1bqampASAiIoL4+HhiY2Nxd3eX162qqmLZsmWoVCqmTZt2zTjHFBSuRPr27Uv//v1JS0tj8eLF3HXXXZd7SApXEYoopaCgoKBwRVJWVobRaCQlJYVjx44BLndTbGwscXFx9OjRQ87SkSSJwsJCuSzv8OHD8nYiIiLkbnl/LOdTUDgTBjc3DD4+iFotJpOJmpoa2n38MR+np9NxwgTGjBlD8MkuqI4dYeJEWLLkxLJff4Xbb4euXVt1bIoodW3x66+/Ultby8033/ynCxK22+2kpaWRnJzM/v37AXB3d2f48OHEx8fTrl27Zl/3008/YbVaue666wgPD7+UQ1ZQUGiGqVOnyrlvcXFxdG3l7z2FPy+KKKWgoKCgcMVQVVXFzp07SUlJIT8/HwCNRkN0dDRxcXH07t1b7jZlt9vJzMxkz549ZGRkUFlZCbi6wTRmQymdYBQuCJ0Obr8d1ddf4+PtjcFgoK6ujju2b+crQSA5OZkBAwbw/+zdd1wU59bA8d9soVdRsaASKzbAghUMdjSxJUaNGmMSU673vbkxN4mptpiYXk2PibFGjcZEo9gbVlCwYC/YUFCkwy5b5v1jZSOxoYKLcL73k/fV3Zlnzi64O3PmPOeJior6+8L5uecgOtrW+BxAVeHTT+Gbb6AEE6KXLl0CJClVEaSkpLB27Vr8/Pzo3r27o8MpMWfOnCEmJobt27eTd/nfS8OGDQkPD6dly5Y3rGTdv38/cXFx+Pr60qdPn7sVshDiBry9vRk4cCAzZsxg5syZjB8/XirSRbFIUkoIIYRD5ebmsmvXLnbs2MGRI0dQVRVFUWjcuDFt2rQhNDTU3psgKyuLuLg4+7Q8o9EI2HpKRUREEBwcTFBQEE5OTo58SaI8ee45OH0aVq1Coyh4enjgAbyanMy09u2Ji4sjLi6Opk2bEhUVRYMGDVBGjrRN2ysUFwebNkGnTiUWVmGllPSUKv/mz5+P1Wpl4MCB9/wFnsFgIDY2lpiYGPuNB09PT3r27EnHjh2vuUrqP5lMJubMmQPYGr4X3qgQQjhehw4d2L59O4cOHWLJkiU89NBDjg5J3AMkKSWEEOKuMxgMJCQkEBsby/79+7Fe7sFTr1492rRpQ6tWrfD09ERVVc6ePcv69evZs2cPSUlJqKoKQK1atewVUXXq1JFpeaJ0aDQwcSKkp9uSS4AC+GZn87/Tpznw3/+yfMMGEhMTSUxMpG7duvTq0oXm/v4oKSl/j/P557ZG6LqSOfUqrJSSpFT5lpiYyL59+2jQoAEtWrRwdDi3RVVVTpw4QUxMDHFxcRiNRhRFoVmzZoSHhxMcHIxWqy32eNHR0Vy4cIHg4GBCQ0NLL3AhxC1TFIXhw4czadIkVq1aRevWraldu7ajwxJlnCSlhBBC3BUmk4m9e/cSGxvL3r177c3Ha9euTVhYGK1bt6ZSpUqYzWYOHz5sXy2vsCJEp9PRtGlTeyLK19fXkS9HVCROTvDRR/DMM3C55w2AcuAATaZPp8mnn3L89Gmio6PZvXs3Xx0/TicfH/odP467uzsKwMmTtl5TgwaVSEhpaWl4eHhIlUg5ZrFYmD9/PoqiMHjw4Hsu8Z6
Tk8O2bduIiYnh3LlzgG26ac+ePenQocNtfYanpKQQHR2NXq9nyJAh99x7IkRFULVqVfr06cOiRYuYMWMGr7/+epHFaIT4J0lKCSGEKDUWi4UDBw4QGxtLQkICBoMBAH9/f9q0aUNYWBj+/v5kZ2ezb98+9uzZQ2Jion1anqenJx06dCA4OJgmTZrIBbhwHA8P+OILePJJuNx4H4Bt22DSJOpOnMjo0aM5d+4cK1asYPO2bdQzmwk4exYvLy88PDzQfP899O5tG+sOpaWlFWuqk7h3rV+/nvPnz9+w2XdZo6qqvdFxQkICZrMZrVZLq1atCA8Pp3HjxredSFJVlTlz5mA2m+nfv7/0UxOiDOvevTuxsbGcPn2a1atX06NHD0eHJMowSUoJIYQoUaqqcuTIEWJjY9m5cye5ubmA7Q75/fffT1hYGDVr1iQlJYWEhAR2797N8ePH7dPyatasaa+Guu++++ROuCg7KleGqVNtiamMjL8fX77c9tx//0v16tUZOXIkffr0YWdgINZPPiH90iUyMzLwzMrC9ZtvcH755TsKIy8vD4PBIFP3yrGcnByWLl2Ki4sL/fr1c3Q4N5Wens7WrVuJiYmxV7dWq1aNiIgI2rZti6en5x0fIy4ujoMHD1K9evVy1fBdiPJIo9EwYsQIpkyZwp9//kloaGjR1WqFuIIkpYQQQtwxVVU5efIksbGxxMXFkXH5gt3Ly4vOnTsTFhZGnTp1OHr0KFu3bmX37t1cvHgRAK1WS1BQECEhIQQHB8vdb1G21a5t6w/17LNwufIPgJkzbYmpYcMAWxK2x6uvUnD2LHnR0WRnZZGZkUH6J5+wz8uL8EGDbnsKqqy8V/4tWbKEvLw8Hn74Yby8vBwdzjVZLBb27dtHTEwMe/fuRVVV9Ho9HTp0IDw8nLp165bYTYW8vDzmz58PwNChQ9GVUG82IUTpqV27Nt27d2fFihXMmjWLMWPGyI1GcU3yiS6EEOK2JScnExsbS2xsLBcuXADAzc2Njh07EhYWRkBAAPv372ft2rXs27fPPn3P3d2ddu3aERISQpMmTXBxcXHkyxDi1jRtCh98AGPGgMXy9+Offgp+fhAVZX/I6eWXcdqxAy8vL3Kys8nKykL37be8sXs37dq1o2fPnrc8DU9W3ivfzp49y4YNG6hatSpdunRxdDhXSU1NZfPmzWzZsoWsrCwA6tSpQ3h4OGFhYbi6upb4Mf/44w+ysrJo3749DRs2LPHxhRCl48EHH2TXrl0cOnSILVu20LFjR0eHJMogSUoJIYS4JRcvXrQnos6ePQuAk5MTYWFhtGnTBj8/PxITE/nrr784duyYfWW96tWr26fl1a1bV5peintbhw4wbhyMH1/08QkTwNcX2ra1/b12bRg4EM28eXh5eeHp6YlPbi6HFMV+Yd+iRQuioqKoU6dOsQ4tlVLll6qqzJ8/H1VVeeSRR8pMRZDJZCI+Pp6YmBgOHToEgKurK5GRkaXe8+rkyZNs2LABNzc3Hn744VI7jhCi5Dk5OTF8+HA+/fRTfvvtN5o1a4a3t7ejwxJlTNn4phNCCFGmZWRksHPnTnbs2EFSUhJgWw0vJCSE1q1b4+bmxoEDB1iwYAGpqamArZ9Aw4YN7YmoKlWqOPAVCFEKHngA0tJsDdALmc0wdiwsWwZubrbHnn4a/voLcnJQFAUPDw+ej4ggoWFDoqOj2bVrF7t27SIoKIhevXrRqFGjG05xKKyUkqRU+bNnzx4OHjxI48aNad68uaPD4cyZM8TExLB9+3by8vIAaNiwIeHh4bRs2RK9Xl+qx7darcyePRtVVXnooYdKpDeVEOLuCgoKomPHjmzevJlff/2VZ5991tEhiTJGklJCCCGuKTc3l127drFjxw6OHDmCqqooikLjxo0JDg5Gr9dz+PBh5s6da79YcXNzo02bNgQHB9O0aVPcCi/KhSivHnsMLl6EOXP+fiwnB3btgvBw2999fOD//g/ee8++iaZ
lS1o2bEiLFi04dOgQy5cv5+DBgxw8eJA6deoQFRVFixYtrpmckul75ZPZbGbBggVoNBoGDRrksN4rBoOB2NhYYmJi7DchPD096dmzJx07dryrqz5u2LCBkydPUrduXcIL/z0JIe45AwcOZO/evezatYuEhARCQ0MdHZIoQyQpJYQQpcCqquSbLFguryin0yi46rRlvsGjwWBg9+7d7Nixg/3799un3tWrV48GDRqg0Wg4duwYCxYssD/n7+9Px44dCQ4Opn79+jItT1QsigIvvGCrmFqxwvaYmxs0alR0u4EDwdMTdu+Grl3hcl8cRVEICgoiKCiIpKQkVqxYQXx8PN999x3+/v707NmTtm3bFpnGlZaWhrOzsyR9y5k1a9Zw4cIFIiMjqVGjxl09tqqqnDhxgpiYGOLi4jAajSiKQrNmzQgPDyc4OBitVntXY8rMzGTx4sVoNBqGDRtW5r8/hRDX5+bmxqOPPsp3333H3LlzadSoUan0nxP3JkUtXINbCCHEbVNVlXSDidQ8IxkGM9kFZixWFRXbR6yiKOgUBW9nHT4ueqq5u+DpXDbuC5hMJvbt28eOHTvYu3cvJpMJgJo1a1KrVi00Gg0nTpzg3LlzgG1aXr169QgODiYkJOSu3jUXosyyWm1T9E6dgu7d7Umn25GSksKKFSvYtm0bFosFHx8funfvTkREBM7Ozrz00kt4enoy/p/9rESZoaoqeSYL2QVmDBYrVqut0lSvVfDU6/Bw0qHV/J1kycrK4q233kKj0TB58mTc3d3vSpw5OTls27aNmJgY+2d8pUqV6NixIx07drztFSJLwo8//khsbCzdunXjkUcecVgcQoiSoaoq33zzDbt376ZTp04Mu7xarRCSlCqGwhMLg8WKVVVRsJ1UeOiLnlAIISoeq6qSnG3gZFY+WUYTVhUUQKMoaBSFwk8IFRWrir1ySqtRqOSip463G1XdnO76HWCLxcKBAweIjY0lISHBviqen58fVatWBeDUqVPk5uYC4OLiQrNmzQgODqZZs2Z37YJJiHtacjKsWgXVq9sSVVf+O9+7F5Yuhfr1bVVU1/gMSE9PZ/Xq1WzatAmj0YibmxsREREsXbqUFi1a8J///OcuvhhxM6qqkmE0cyYrn9Q8IyarisWqcuVPVgU0iu07wMdZT4CXK/5uzsyeNZPNmzczZMgQOnfuXOpxHjx4kJiYGBISEjCbzWi1WkJDQwkPDycoKMjhFa8HDhzgs88+w8fHh4kTJ8oKrUKUExkZGYwfPx6DwcBLL71EgwYNHB2SKAPKxm36MqjAYiU5x8DFPCOZRjMmi4oV1XY2odguOrUaBU8nHX4uTtTwdMHDSd5OISqSnAIziReyScsvAECn0eCk4ToJpr8fU1UVi6pyIa+AtPwCqrm70LiyBy660p0aoaoqR44cITY2ll27dpGTkwPYVlCqUaMGVquV1NRUe7+aypUr065dO/u0vLKyCpQQ94TsbBg5Ei6vlMfGjTBpEmg0kJICo0aBxWJ77sIFGD36qiF8fX155JFH6N27N+vWrWPt2rX8/vvvxMfH4+HhwaVLl6SvVBlxMa+AI5dyyDCasaoqGkVBq4BeqynynaCql29QWG3fARfzCtBYzZzMNlC9Rk06depUajFmZGSwZcsWYmJi7J/z1apVIyIigrZt25aZJuImk4k5l3u0DRkyRBJSQpQjPj4+PPzww8yePZuZM2fy1ltvlfqCCaLsk0qpf8gzWTiRkUtyjoECy+WKBkVBo9gqHwqplyserKptco5Wo1DZVc993u74uTk5KHohxN1yOiufg2nZFFhUnDSa266aNFutmKxWXHVamlf1ooqbc4nGqaoqJ0+eJDY2lri4ODIyMmwXRVYr3t7emM1m8vPzURQFRVGoW7cuISEhBAcHU61aNenhIcTt2rULnnmm6GNDh8KYMbB+Pbz88t+P63SwYAHUqnXDIY1GI7Nnz+aLL76gRo0a1KlTh7Zt29KzZ0+
qV69e8q9B3JTJYuXIpRxOZRmwqCp6jYL28udpcVhVK2mZWVgsViq7OdO+bs0SndptsVjYt28fMTEx7N27F1VV0ev1hIWFER4eTt26dcvc5/zSpUtZsmQJzZo14//+7//KXHxCiDujqioff/wxR44coVevXvTv39/RIQkHk6TUZaqqcibbwKG0HIwWK1pFQa8p3kmFqqqYVRWzVUWrKNTycqFhJQ/0Wmn2K0R5dDwjl0NpOagqOP/jLvjtUFUVg8WKXqMQXNWLah53flc4OTnZnohKTU3FYrGQl5eHh4cHJpMJvV6Poig4OzvTtGlTQkJCaNasGR4eHnd8bCEEkJ8PDzwAWVlFH//vf6FXL+jTBy73bwOgSxf44IObDrtp0yZmzJhBu3btOH36tL0PUGhoKFFRUdx3330l+SrEDeSZLOw8n0GW0YxOUdAV87yxyBh5uVy4cBFXD3c8vX1x0io0q+JF9Tv8HkhNTWXz5s1s2bKFrMu/g3Xq1CE8PJywsLAy22A4NTWViRMnoigKEyZMoHLlyo4OSQhRClJSUpg0aRJWq5U33niDgIAAR4ckHEjmYgAmq5U9KVmk5BlBBZdbvMhUFAW9oqBTbImppMx8LuYX0MLfGy9nKUcUojw5lZnHobQcFBScdSWTeFYUBRetBqPVyp7ULHQahcq3UTF18eJFYmNjiY2N5ezZsxiNRjIzM3FxccFqteLl5YWiKFSvXp3g4GCCg4Np2LChTMsTojS4usI779hW5iucpgfw+efg5wdDhsDMmX8/vnYtxMdDixY3HDYtLQ2NRkPnzp2577772LNnD8uXLychIYGEhAQaNmxIVFQUTZo0kQqTUpRnshCbnE6OyYyzVlukmr64VFUlPT0dRQFfb290V3wPqCrU8Ly1xJTJZCI+Pp6YmBgOHToE2KZnR0ZGEh4eTq2bVOI5mqqqzJkzB7PZTP/+/SUhJUQ55u/vz4MPPsjixYuZMWMGr776qsN72QnHqfBXIiaLlZ3nM0nLL0Cv0aDT3v4JXOGqKlpVJafAQuy5DFpX88HbRRJTQpQHWUYTB9NsfZicSrgSUlEUnDUaDBYrey9k06GmvlhJr4yMDHbu3ElsbCzHjx8nJyeH9PR0dDoder0ePz8/dDodgYGB9ml5NWrUkItVIe6G9u1h/HgYN67o4xMn2hJW3t6Qmfn3459+CtOn2/pOXcelyz2q/Pz8UBTF/u/6yJEjREdHk5iYyOHDh6lVqxZRUVG0bNlSTvRLmMliZee5DHJMlttOSAFkZWViNlvw8vJEr7OdKzprbImpvReycNZp8HO9eUuIM2fOEBMTw/bt28nLywOgQYMGRERE0LJly3umX0tcXBwHDhygWrVqdO/e3dHhCCFKWY8ePYiLi+PkyZOsXbuWbt26OTok4SAVevqeVVWJO5fBhbwCnLQatCV4kVY4HcdNr6VNdR/cpQm6EPc0q6qyPTmdS/mmW66mvBWFnx0Bni4EV/W65nFyc3PZtWsXsbGxHDhwgPT0dPuFqqenJ5UrV8bNzY0mTZoQHBxM8+bN8fLyKpV4hRDFMHOmrULqSq6u0L8/zJ1b9PHJkyEqyv5Xo9VIhjUDk2pCRWXenHmcOnCKz9797JrJptOnTxMdHc3OnTtRVZWqVavSo0cP2rdvL1WRJSTxQhZJmfk4azW3nZCyWMycTU5GoyjUqFGzyM+y8HvAw0lLh5qVrtkOwmAwEBsbS0xMDElJSYDt879Dhw507NgRf3//24rLUfLz8xk3bhxZWVn873//o2HDho4OSQhxFyQlJfHee++h0+lkym4FVqGTUsfSczmYlnNHTYpvxHZSYaGymzNh1X1u+8RFCOF4JzPzSLyQjb6UPi+uZLJasaoqYdV97NP4DAYDu3fvZseOHcTHx3Px4kXS0tJQVRU/Pz8qV66Mv7+/fVpeo0aN7pm740KUe6pqq4K6vKKYnbc36PVw8eLfm1avxrl5X3GCM5y3nCfbmo1FtWBbVgW
yMrPACjX8alBVW5VAfSC19bXRKUUTTqmpqaxcuZKtW7diNpvx9vamW7dudOrUSVYzuwMX8ozsPJeJAnfUO/Ri2kVyc3Lx86uEh8fVq95ZVRWjxUqgtytNq9huKqiqyokTJ4iJiSEuLg6j0YiiKDRt2pTw8HCCg4PRakt3FdfS8uuvv7Ju3TratWvHE0884ehwhBB30YIFC1i9ejWNGzfmv//9r1TzV0AVNimVZTSx7Ww6lsuNikuLxapislppUsWTQG+3UjuOEKL0qKpKzOlLZJvMuNylE/58swV/Nz3alJNs376drVu3kpKSwqVLl+zVD5UrV6ZRo0YEBwcTEhJCQECAfJELUVZZrfDWW7BiRdHHnZ0hNxerk54jneqwv2c9LjWpidXdFQ0atIoWHToUFNuiLGfP4OLmgpevFxYsKCh4aDxo5NSIps5NcVKKTvfKzMxk9erVbNiwAaPRiJubG5GRkXTp0gVPz6uTIeL6VFVly9lLZBjMd1Qxaywwcv7ceZyc9JdXTbz2OCaLFRQI9XVh3y5bVVRycjIAlSpVomPHjnTs2BFfX9/bfUllwsmTJ5kyZQqurq5MmjRJfi+FqGCMRiOTJk3i4sWLjBw5kvbt2zs6JHGXVciklKqqxF6etlea03AKGS0WdBoNnWpVwll3b97BEqIiu5BnJO5cBjql9KukVFQMBgN5+QbyDQYWfvw2J48eRqvVUqVKFWrUqEGrVq3s0/J8fHxKNR4hRAkymWyr7+3YUeThTHcrW/4vnLOh1QFwzjOjrR2Ioi1a/WQ2mzl79iweHh74+fkBYFEtGFUjVqz4anxp79aemrqaVx06Ly+PDRs2sGbNGrKzs9Hr9XTs2JEePXrYxxI3dim/gO3J6Xf4XaBy/vx5jMYC/Kv54+J8vao1lXyDgXyTmeNxWzm2bQNarZbQ0FDCw8MJCgoqF73CrFYr7733HidPnmTYsGF06tTJ0SEJIRzgwIEDfPbZZ7i5uTFx4kRpO1HBVMikVJbRxJYz6WgU0N2FL/TC3gBNKntyn49USwlxr9mbmsWprHxcSy2prGI0GsnOziYrK5sCUwEWswUvv8rEr/gTw7mTdOzYkZCQEIKCgnByunnjWyFEGZWXB888AwcPAnC8XQCbnwjB4OmES64JXYHVtp2vL1SrVmRXg8FASkoKPj4+eHt7F3nOqlrJU/PQKTqaOzWnlUura950M5lMbN68mZUrV9pX8gsLCyMqKooaNWqUzmsuJ3anZnImy3BHNzRzc3O4eDENN3c3qlSuctXzFouZnJxccnKyMZst6F3dwGKmam4q7du2KXdVROvXr2fu3LnUrVuXV155Rap9hajApk+fztatWwkLC2PUqFGODkfcRRWy4+WZrHwsqor+Lt1hUhQFBTidlUcdb1fpLSXEPSbDYOKtkYPJuHgBjUbB1d2D/5vwLg2aBgNw5sQx3n/pP2ReSsPDy4tXPvySwIZBNxlVpaCggIyMTP6vXzf8/Kvzf+98jKIo6HQ68vJyGP1oH6J37Se8Xk05UReivHBzgy++gCee4HA9HZufaoFFr8E9NQtFxdZjSlEgPR0qV4YrmpNbLBaAa/YN0iga3HHHiJEEYwJG1UgH1w5XfXbo9XoiIyOJiIggLi6O6Ohotm/fzvbt2wkODiYqKop69eqV6ltwL7KqKqm5BWgV5bY/j1XVSnpGBooCvj5/T7lTUTHk55Odk0N+fj6otnNHDw933D08QKunRUhDPC/3GCwvsrKy+P3331EUhaFDh8r3nBAV3COPPMK+ffuIjY2lTZs2BAcHOzokcZfc+3W/t0hVVc7lGtHcwUnF7dBrNOSaLGQaTXftmEKIO1dgsZJntvDG1B/4MXoD3y9bzyOj/sUHLz1v3+bTN17igUcfY8a67Qx+9j988PJ/rj+eqYDU1BSOHTvG0aPHiFm1HBc3d5KTjmPKy6NWrVrUq1ePGtWro6Bg0UtDYiHKnUqVOPnNW2x+uhVWnQa3dAO
KTm/rO2U2/72d0VhkN/Pl5663ip6iKLgoLugVPQcKDrDTsPO6IWi1Wtq2bcu4ceP497//Tb169dizZw8ffPABH330Efv27aMsFdM///zzBAYGoigKCQkJxX7uZlRV5b777qNr165FHk9KSioyPTrXZMFiVW+6UnPMymXsj4+75nOZmVlsWLKYrLQL6HQ6zGYTi2ZM45XHBpGaeoH8vHycnJzw86tEQK0A/Pwq4+zkjApkG83XHLNQZGQk9913H6Ghofb/Vvyzf9llrVu3Zv369Tcc726YP38+BoOBrl27UqtWLUeHI4RwMHd3d4YMGQLAnDlzMBgMDo5I3C13nJSaMGECVapUKfIl+M4779zRmL179+bQoUN3Gto15ZksmCw3P6m4HTc6EdEoYFUh6/JJxfTp0+nfv/81tx05ciSKohAfH29/LDs7Gw8PD0JDQ+8oxri4OAYPHgxARkYG77333m2P9eeffzJmzJg7ikeIsi7XZMGqqnh5+9gfy8nOovAjJP3iBQ7vTaB7/0cA6NSrD6nJZzmbdNy+fUGBkZSU8xw9eoQjh49w4cIFDAYjer2OnetX02fo43QfMJD4jWvw9PRCe0UfGaPFisladi4MhRB3LteayxaPI1iq+OKaabS1uVYUW1WUxWJbrU+nt1VVXeFmSalCTooTWkXL3oK9nDWfveG2iqIQHBzMK6+8wksvvUSzZs04cuQIX375JZMnTyY2Nhar1XonL7dEDBw4kJiYGOrUqXNLz93MmjVr8PHxYc+ePZw4ceK622UZzVhUlZu1ktq8chkHrnEuaLaYycrKZMuKpZw/dZKUlPOcPZtMfl4+Kiqenp5Ur1Gd6tWq4+HhiUaxnaIX3kDNKrhxUgrg008/JSEhwf5fz549b7qPoxw4cIDY2Fh8fHzo06ePo8MRQpQRhX1T09PT+f333x0djrhLSmT63rBhw/jss89KYigAli1bds3HzWbzTU/EbiarwHx56l7JJ6U2r1xG/SbNaNKi9VXPFZ5UZBfjpAJs/yB/+uknvvzySwDmzZtH48aNMZnurNKqdevWzJs3D/g7KfXqq6/e8jhms5m+ffvSt2/fO4pH/E1V1Rv+Z7Vab7rNP7cFbrrfrW5bGOvtxHMvxq7z8qVyiw5YjAa+nvAa+3faGhS//MnXnD9/jhMH9+NdyY/UixdsF5KAb5Wq7N+7G8XJhe/fHUdQyzCat+0IgLOzMx6envh4e1NgyGffjq289vFUUs6e4Y2nhvL4mLFFmteqgKUMVSsIIe6Mqqpsz99OjjUHN70XSi09nDple1KnA60WatQALy/4xw20wqTUtabv/ZMzzuSquWzN20ofzz44Kzef+tWgQQMaNGjAmTNnWLFiBbGxsfz4448sXryYHj160KFDB/R6/a2/6BJwowbYd9Ice9q0aTz99NMcOnSIn376ibfffvua2xnMFhRs53O7Nm/kp4+nYDIaMJlMDHzqOXoPHs72davYunoFO2M2sOK3X+k34ikeGPIYABcvXmT17ws4cXA/P7w3CVd3D4b8+794enqA1crXE14l6fBB9E5OvDX1R2rUDgRg1aL5LPrlR7Ba8Pf15ssvvyQkJOSWXuOWLVsYPXo0ZrOZsLAw++8RwMGDB3niiSfIysqiUaNG5OTkMHToUEaOHEl2djYvvvgiu3fvxmAw0K5dO6ZOnXrHfQ1NJhNz5swBYPDgwbi4SEWwEMKmcDrv4cOH2bBhA23atJEp5RVAqfaUGjlyJM7Ozhw9epTTp0/TrFkzfv31V8xmM7Vq1SIxMZFql5t4TpgwgczMTD799FMCAwNZvHgxoaGhREZGEhwcTGxsLK6urqxatYpXX32V5cuXA9C5c2c+/vhjnJycrns8JycnJkyYwP79+7mYlcOhQ4eoVbceT7/yFt++M45zp0/RsHkIr3/2LRqNhrycHL6Z/BbHDiRSYDTSpEUr/jPxPfROTrw4pB8Nm4dyIGEnaakptAq/nzHvfHTNE5H
2Xbsz+fnnyMvJpsBopFnbDox9+z3yPfQYjUYsFgvZ2dlA0Qtlo9FIVFQUM2fOZOzYsTg5OfH9998zcOBAZsyYQXJyMlarld9++43vvvsOgOrVq/POO+/g7+/Pb7/9xh9//IGvry9HjhxBr9fzySefUKtWLbZt28b777/PggULeO6558jOzqZRo0ZotVpmzZplX5Y3PT0dRVF46qmn6NSpE6qq0rFjR0aOHMm2bdsIDQ2lTp06xMTE8Pbbb7Nr1y6mTp1Ks2bNSExMxGKx8L///Y8GDRqgqirLli3j999/x8XFhbZt2zJ37lx+//13hyUd7jS5Utx4irtt4XbC8ZTLU3sL//OpUQvf5hZMBSZGvTYBUIiJXsLcqZ/wvw+/xGwxo6q2Xi9FLh9V2/959o23MRjycXd3x8fXFyf93yfzy3+dRZvIrnh4eePh5Y1v5SrEblxL28hud/U1CyHunjPmM5wwncBZcbZVw7i7Q61acP68bfpetWq2hNQ1mM1mtFptsdoPKIqCG26kW9NJNCbS0qVlsWMMCAjgqaeeol+/fqxcuZLNmzczZ84cli5dSteuXbn//vtxdXUt9niONGrUqOveRLt06RLR0dF88803nDp1igceeICJEydec1U76xXf0w2aBfP5gqVotVqyMtJ59oEuhHXqQtvO3WnfrSf1mzTj4Sefu2JvFYvZTPueD7Bz/Wr6jniSLn0GoNfpif5tLof3JvDdX+uoXqsOP7w/iV+//ZIX3/2YfXHbWbtkER/OXYyPhzvKyf0MHTqUxMTEa77WMWPGMGHCBPvfFy5cSK1atRg8eDA///wz3bp1Y+XKlUyfPt2+zWOPPcbo0aN54oknOHDgAC1atGDo0KEA/O9//yMiIoIffvgBVVV5+umn+fzzz3n55Zdv7YfwDytWrCA1NZVmzZrRokWLOxpLCFH++Pr6MmDAAObOncuMGTN466237rgwRZRtxf7pjhs3jkmTJl3zudmzZxeZm/7aa6/Zp4glJCSwbt06nJ2d6dSpEwsXLuTRRx/l4YcfZtasWbz00kuoqsovv/zCn3/+ec3xDx8+zMaNG9Hr9XzzzTfExsayc+dOtFotffv25dNPP2Xs2LE3PB7Ypq7NW72BVIue14c/xMevjuGDmb/h7OLCv/p2Y8f61bTr0oNv3xlH87B2/O+9T1FVlY9fHcOin79n8LP/h8Vq4dih/Yz54AvMZjNvjHiEtcuWUL9ZMMHtw6lVryE9HrF9mV9Kz+LZcVNwcXPDarEwddwrzPjua2ZdOsehQ4dISkripZdeuur17tq1i5MnT+Lm5sbIkSPx8/Pj1KlTxMTEkJKSwsSJE7l06RJ//fUXDz30EO7u7uzatYvBgwfTq1cvDh06RGxsLA8//DCRkZFs376d//73v3Tq1Ink5GQuXrzI1KlTqVOnDnFxcURGRgLw3Xff8fvvv9OoUSO6dOlCZmYm48eP56GHHrKv9rJ37177Hclt27Zx5swZfv31V5KTkzl58iShoaF0796d/fv38+GHH9K7d297rA8//DBubm7ExdnK2gsTi6Xln8mFf/5XeNJZ+Ofiblt4QVCc/wrHBW7pGCW57d2I51rHKGvxFOcY17rQSzeY2Hb2ErpKvvZpv48+PZoZH0/B092dZiEtyUpPo7p/NbQ6Haqqkn4hlSbBIdSocfWy7FdaPn82ly6kMjTcdrGYl5vD8nmziySlFCiV6cZCCMc4VHAIK1b0yhUVRx4eUL/+Tfe1WCy3VKWiUTRoVA2HCg4R7ByMTrm1k/rKlSszdOhQHnzwQdauXcu6dev4/fffWb58OZGRkXTt2rXML9v9448/Xve52bNn06tXL3x8fPDx8cHf358VK1bQq1evq7a98vshK/0SH419gTMnjl1OTF3ixOEDVKl+vdULFapXr4GPbz5Ozs64u3ug1/3982/SojXVa9W5/OcwFs+wxbx5VTTHDiQy5uHeaDQK7notly5dIj8//5pJwU8//fS
q1hB79uxBp9PRrZvte6VHjx7UrVvX9jqyskhISGDEiBEANG7cmPDwcPu+ixcvZuvWrXzyyScA5OfnF6tK70ZSU1NZvnw5er2eRx99tFgJViFExXP//fezY8cOjh07xvLly2WabzlX7LOT6yWk4MbT9wYMGIDb5Z4Ibdq04dixYwA88cQTjBo1ipdeeon169fj5+dH8+bNrznG8OHD7eXiq1evtldEATz99NN89dVX9qTU9Y4Hti9iHx9fLqTn0qBZMHonJ9w8PACo37S5vQfM5lXL2R8fx2/TvgHAaDCgufwlrKDQoUdvXN3dUYDARkGkX0jBxdUVrVaLk5MeNzdXQMGoVZj9+Ucc3L0LVJXM9EvUrluXAV062aukwsPDr7roPnjwIIGBgTRs2JD58+fj7u7OgAEDqFGjBvv27aN379789ddftG/fniFDhqDRaOjatSvDhg2jX79+rF27ltzcXJ566ikURSEgIIDFixfz6KOPsnv3bg4ePMjw4cNJSUlhyZIl9h5WeXl5TJs2jQ8++AC9Xo9Go+H8+fM0adKEPn368P333zNlyhSqV6+OoigsXryYtWvXMmbMGHbs2EFiYiIffPABGo2GgwcPMnr0aF577TWmT5+Ooii8++67KIrCuXPnaNOmDRMmTCjVpIQQJcFdryUvOwtjfj7VLl9wxKxchpevL14+viiKQoOmwaxavICogY+ycfkSqlSvQc3Aujcc9/De3WRcSmP+9n323+2crEyGdAghI+2ifTtnraZUphsLIe6+TEsmZ8xniiakislisdimFN/i3WJnxZlcay4nTSep53R7UyC8vLzo378/PXv2ZOPGjaxevZro6GhWr15Nhw4d6NGjB1WqVLmtsR1p2rRpnD9/nsDAQMDWv3PatGnXTErpNQoqtgrrz958mTaR3Zjwzc8oisKzD3ah4B9N6f9Jo9Hg7ubOtT7N9c5/T1/TaLVYCqfXqSo9Hh7MsDGvUdnNibY1fK+x96270TnSlc+pqsrChQtp2LBhiRxXVVXmzp2L2WymX79+VK5cuUTGFUKUP4qi8NhjjzF58mSWL19Oq1atqFHjeol/ca8r9Tq4K+eJa7Va+zz29u3bY7Va2bFjB9OnT+eJJ5647hgelxNH1/LPL9brHa/wOa1GARU0Gi1OV54EaLRYzLalllVVZfzXP1Or7tUnbxqNhipV/fGv6g+Aq6sb7u7uVK1SFRcXFzw8PKhSpSoAs778GENuDt8tWY2TswtfTnoTZ41tWiPY+gs89thjVx1jxYoVhIaG8sILLzB37lx27drFrFmz2LlzJwsXLqRfv36cOnWKgoICHnzwQcDWH0qj0dCzZ09SU1M5evSo/a5Yfn4+a9asIfJyRZS7uzsREREkJSWh1Wpp3749YDsZUxSFsLAw+0mvj48PgYGB9v4FYWFh9tVoYmNj8fT0JCgoiPPnz+Pl5UWDBg3sx1QUhcDAQCpVqoS7u7t9ZZXCn1n16tWv81MVouxw0mqw5uUy8V9PYDIaURQNPn5+vDNttv13ecw7H/P+y/9hzlef4e7pycsffGHf/6OxL9ChWxQdukcVGXf5/Nl0fnBAkWkiHl7etAqPZNXvC4iIegAAHxe9JFmFKCdOm09jUk24K+63vG9xm5z/k1bRoqoqSaak205KFXJ1daVnz5506dKFrVu3smLFCjZu3MimTZto3bo1UVFRBAQE3NEx7padO3dy4cIFkpOT7Z/DGRkZ1KpViwsXLly1vaeTDsV2Ckl2Zgb+NQNQFIU927dw/MDf0+ncPTzJvdya4VrcPDzJzc4qVoztu/Vkyph/0X3wY9Rr2gir1cquXbto3frq3qXXExQUhNlsZt26dXTu3JnVq1fbb9h6eXkREhLCrFmzePzxxzl06BAxMTEMGzYMgP79+/P+++/z3XffodPpSE9PJy0tjfrFqOq7lp07d7J//378/f3p0aPHbY0hhKg4qlevTu/evfnzzz+ZMWMGr7zyyjWnV4t
7n0N/qk888QRffvklf/31l33++s1069aNGTNmUFBQgNls5scff7ylLzYPve1kTuX6PXw6du/Fr999Yb9TlZ2ZUWQlrev554lIdmYmlapUxcnZhUsXUohZvgQn7a295Z9//jkfffSRffpcoc6dOxMdHU1ycjIA3377LV27dr2lsmovLy/y8/MpKCgAwNPTk5YtW/Lzzz8DcPToUWJiYu6ogWhhrIX9A8B2Z1KIe0lQ/bp8sjCaH6M38sPy9Xw4ayH1m/xd2VmrXn2mLlrOjHXb+ebP1dQNamJ/7qX3P7sqIQXw37c/4F9vXl2BOvHb6Twy6l/416zFvF2H8XKWOfSi/Hr++ecJDAxEURQSEhKKPBcYGEijRo3sK/sWLtJxM5GRkdx3332EhobSqFEjxowZU6zV40aNGsW6detuul3hDaHmzZvz73//u1gxFUqzpAE3rlS5nltpcv5PWrRcsFy4bv/C9evXEx0dfd39J0yYwAsvvGD/u16vp1OnTrz99tuMGjWKmjVrEhsby9tvv82XX37JkSNHSqxXYuFKv88++ywBAQGcOXOGnj17FkmM3Oi5UaNGXbM9xLRp0+zV5oV8fHzo3r07M2fOvGp7TycdWkXBoqqMeuUtfvzgbZ7pHcnyBXMICm1l367bgEGs/+sPnn2gM3/9evU4Dzw6gjlffcYzvSPZvm7VDV97cJv2PD12HO+MfpKHIjvQtGlTfv311+tuP2bMmCKrYc+ePRsnJyfmzZvHmDFjaN68OXPmzCnSKH3GjBl88803NGvWjLFjxxa5+fjpp5/i6upKaGgowcHBdO3alaSkpBvGfD0Gg4H58+cDthkW0h9GCFEcPXv2pEaNGpw4caJIuyBRvpRKT6nOnTvz6aef3nTMxx57jNq1a/Pwww/j61u8kuRnnnmGY8eO0bKlrQdLZGRkkROlm/F01qHRKNzoXGn0W2/zw/tv88wDnVEUDVqdlmdeHX/T6TjdBgzig5f/w+aVy+j72JM89MQzTBz9JE/2CMevqj+hHSJuOSnVtWvXaz7erFkzPvzwQ6KibBe7tWrV4ocffrilsStVqsSIESMIDg7Gw8ODuLg4Zs+ezXPPPcfUqVNRFIUff/yR2rVr39K4/9S8eXPefPNNOnbsiKenJ1FRUXh7e9/RmELcTdU8nDmTnY/FqtqqLe8Cs1VFp1Hwd7/5illC3KsGDhzIK6+8UqSPzZXmzZtHaGjoLY9b2FsnKyuL0NBQ2rdvz6BBg264z416D10pPj6eI0eOcOjQoVuKyWw2k2pORXOb9wMtFls19+1czOsUHflqPrlqLh7K1dXn69evJyMjw35OUVwajYawsDBat27N/v37Wb58Ofv27WPfvn3Uq1ePqKgomjdvfsMk3M1WVi7OSr+Fi75cy/V+rl9//fU1H1+0aJH9zxkZGfY/67UaPJ10ZBhMtI6IZMa6HdfcPyikBT+tjLluPO279qB916I3U6MGPnrd5yMe7E/4g/2JCKiEu9P136cbXax16NDhqqRvodq1a7N161YUReHEiRO0b9+eVq1sSTYPDw+mTp163XFvxR9//EFmZiZt27alUaNGJTKmEKL80+l0PPbYY3zwwQcsXryYkJAQ/Pz8HB2WKGGKWsGW/VJVlQ2n0sgzWXDR3VmzxlthtlqxqtAhwBcvZ8csp+xI2dnZ9mqvzz//nOjo6FJvdC5ESVFVlZjTl8g2mXG5wyavxZVvtlDdw5mW1XzuyvGEcKQrV9290WPFUXizqrDh8+DBg2ndujUvv/zyDZe4v3K/663me/ToUfr27cvZs2dp1KgRL774Ig899BDPP/88O3bYkhSPPPII48ePt8dSuIKwi6sLI38fydLPlrJ9/nYUjYKTixNvLHkDZzdnNv26iZXfr8RisuDi4cLjHzxOneZ12DB7AzHzYnDxcuF04mm8/Lx4ZuozzH97PsmHk/Gr6ceYWWNw8XDBbDKzYPICEjcmYjFZqFavGk999hRuPm589a+
vqOtel3PHzxV5Tfv37ycqKgqLxULNmjV56KGHGDduXJH3dMKECWRkZFzVP/Ra7+eYMWNYt24dM2fO5NixY2g0GipVqsT3339vTz4GBgYyePBg1q1bR4MGDWjQoAEHDhwgLy+PY8eOUa1aNX777TcqVarE9OnTWbx4MYsXL2b9+vX83//9H506dWLz5s2YzWZ++eUX+3S27777jo8//hgPDw8GDBjAuHHjSnR121NZ+exNzcJZq0Fzl6ZV55st+Ls70bp6yfST+qeVK1faV9OzWCy88cYb9gWCSsqpU6d49913cXV1ZeLEiWW+Ob4QouyZP38+a9asoWnTpvznP/+R1hblTIWblKkoCgFervZmlXeLyari7azD8wZ3ucqzV199ldDQUJo2bcrSpUv56quvHB2SEMWmKAq1vV1BBYu19D83TFYrGgVqed0bS64LUVpGjBhB8+bNeeqpp4r0+bnelKx/OnfuHLt377b3Xyxc4n7Hjh3s3r0bq9XK559/fs19ExISWLJkCQcOHCAlJYWFCxfSpEkTfvzxRxo1amRfteztt9/GaDSyZ88etm/fzuLFi4tMNSxcQXj56uVsmrOJnX/uZHz0eN7b/B6v/PYKemc9h7YdYstvWxi3fBzvbnqXQW8NYuqovytUjscfp/eLvXnxjxfxv8+fjwZ/xFOfPsVHsR+hc9Kxce5GAJZ+vhRnd2cmr5vMlJgp1GpaiwWTF9jHSdydeNVrCg0N5bnnnmPYsGEkJCRclZC6kWu9n0uXLmX06NF8//33vP/++wwYMICGDRvSr18/1q1bZ28ZkJaWxvbt25k9ezYA27dvZ/r06ezfv5+qVatet/rp4MGDPP744+zevZv//Oc/vPHGGwDs27ePCRMmsHHjRnbt2lWkp2hJqe7ujLNWg+kufA8AWFQVRYGAUvwu6NGjB7t372b37t3s27evxBNSVquV2bNno6oqAwYMkISUEOK29OvXj0qVKpGYmGi/CSTKjwqZIanh4cKx9FzMVhW9tvSzrNbLya9aXq4VNqsrSShxr6vl5UpyjoFL+SZcFE2p/VtWVRWzVSXA04XKrsVf+l2I8mbjxo3Url0bk8nEm2++yeOPP86yZcuAm0+1GzNmDG+++SaHDh3i//7v/2jcuDFwa0vc32g13yutXr2ajz/+2La6mrs7I0aMYNWqVQwePBj4ewVhi9XC7hW76fxEZ9y8beN6+Nqm0u38ayen9p1iXNe/E0K56bkU5NsSOPVb18fNzw1nZ2fqtqyLxWzBu6ptGnzdlnU5f+w8AHF/xZGflU/sn7EAmAvMVK799wpnPfv1LNZrKq4bvZ/nzp1j+vTppKamkp+fz6VLl5g1axZLly4lPz+fIUOGFPkcjYqKsk/JaN++PXv37r3mMevXr0/btm3t23300UcArF27lqioKKpVqwbYVme+0crRt0Ov1VDby5Uj6blYVbVUq6VUVaXAYsXbWUdVt3t3GvemTZtISkoiMDCQiIgIR4cjhLhHOTs7M3z4cL744gvmzZtHkyZNruq5LO5dFTIp5abXUs3dmTPZBnSqWuqJogKL1XZMj3v3pEKIik6jKDSt7Mm2s+kUWK04l8I0PlVVMVz+vAjy86ywSWwhAHs/Q71ezwsvvHBLy9IX9pTas2cPERER9OjRg169et3SEvc3Ws33Rv7577ZwBWGdcv2FVlRVJeLRCIaMH3LtMS/fQHNzc0Oj1eDk8nfCWqPV2FcPRoXHP3ic4K7BRfa3qBYUFNxc3G7rNV3P9d7PgoICHnroIdatW0dYWBhZWVl4e3vTpUsX4uLiyMvLY9q0aWRmZtp7Zxb3/S7udqX1+VnX153UPCOZRjMu2tK7QWG63FewaRWvuzZVsKRlZWXx+++/oygKw4cPl+80IcQdadq0KW3btmX79u0sWLCAJ5980tEhiRJS4abvFWrk54GLToOxGCvy3AmT1YqiQOPKHuhkCUsh7mleznqC/GwXmAWWkv3sUFUVo9W
KXqPQrIonzjr5vBAVV25ubpEm03PnzqVFixa3PE5wcDBvv/02r7/+Oqqq2pe4L0xkpKenc/To0TuKtVu3bkybNg1VVcnNzWXmzJnXXBVYp+jo0LsD635eR15mHgC5GblYLVZa9W7F5vmbuXj6ImCb8nR8l23VX1VVMVvMaDQae4Lrelo/0JrlXy/HmGcEwJhn5MyBM1iwJaVcNdeeBubl5UVmZuYtv/brvZ8Gg4GCggJ7YvHLL78E4IEHHmDKlCm4u7vj5OTEypUreeONN9izZw95eXm3fPwr3a2Vfm2JIk90GoWCUjqHNFtVLKrKfT5u+Lrcu31If/vtN/Lz8+nSpQu1atVydDhCiHJg0KBBeHh4sH37dvbt2+focEQJqbBXPS46LUF+HijYEkelwXp5Gk5NTxf87+HSayHE32p7u9GokgcqKgazpUR60xVWSOkUheCqXlSRzwtRQTz77LMEBARw5swZevbsSf369QFISUmhc+fOBAcH07x5czZs2MCMGTPs+xW3pxTAv/71L3Jzc1m0aFGJLnFf6K233kKv19O8eXPatm1L3759r7vS36Dhg2jZpyXje4zn1Y6v8sEjH2AymgjqEMSjEx/lk2Gf8GrHV3ml7StsXbgVAKPRCKqt4upmlSZ9xvShbsu6jOs6jrEdxjKu2ziS9iZhVs3oFB165doJjgEDBpCQkEBoaOh1p7xNmzaNgIAA+3+ffPLJdd9PLy8vJk+eTJs2bWjVqhVOTn9Xdrm4uODi4sJ//vMfHnvsMSpVqsTJkyfZunUr33//PadOnSrO236VK1f6bdmyJQaDodRW+vV1caJxZU8UFIyWkvkeKGS2qpisVqp7OFPf173Exr3bDh48yPbt2/Hx8bnp6olCCFFcHh4e9u/Y2bNnYzAYHByRKAkVbvW9K6mqyv6L2ZzMzEenUUq0ksmqqhgtVnxc9IRV98FJW2Hzf0KUS6ez8jlwMRuTVcVJo0Grub1pCWarFZNVxVWnobkkpIQo1w4WHGRT3ibcFDc0SvHOC5KTkzGZTNSsWROd7va6LuRYc2jk1IhObp1ua//SZLVaiY+PJzo62p6QatKkCVFRUTRs2PCWpnzd7ZV+T2bmcSAtB6tVxfkOp/KpqorpcoVUdQ9ngqt6o7vN7xVHM5vNTJo0iZSUFJ555hlatWrl6JCEEOWIqqpMnTqVffv20aVLF3sPR3HvqtBJKbAljxIvZHMqKx+toqDXKHc8573wLpePs45W1X1w0d2dJeSFEHdXdoGZxAvZXLrcjFin0aBVbt7LRFVtFx4mq4pGgWruLjSu7CGfFUKUc/nWfBZkL8CiWnDRuNx8+/x8UlNTcXNzo0qVKrd1TLNqpkAtIMo9ipr6mrc1xt2gqioHDx4kOjqagwcPAhAYGEivXr0ICQkp1rnZv//9bzZv3ozJZKJGjRp899131K1bt1TjPp9r4MDFHPJMFtsNTuXWzyMLb2RqNQqB3q40rORxz/aRAvjrr7/4888/Zel2IUSpSUtLY+LEiRQUFPDKK6+U+me9KF0VPikFthOhI5dyOZGZh/ny3a7bORlQL59UoEBVNyeaV/GWvjBClHNWVSU528DJrHyyjCasKijYGqNrr/gcUS9va7n8kavVKFRy0VPH242qbk5y0i5EBbE5bzOJBYl4KDefjle4cp2/v3+RBt+3IteaSyVtJQZ4DLhnPmdOnDjBihUriI+PB6BatWr07NmTNm3a3Ha1WGkymC0cTMvhfI4Ri6oW6ybnlTcnFMDLWUeTyp5UusdXXb1w4QITJkwAYMKECbedTBVCiJtZu3Yt8+bNo0aNGrzxxhtl8vtBFI8kpa6Qbigg8UI2WUZbw069RoOmGFUPVtVWGWVVwVmroZGfBwGeLvfMyZ8Q4s6pqkq6wURqnpEMg4nsAjMW698rbSko6DQK3s46vF30VHN3xsv53m1gK4S4PRctF1mSswRUblgtZTKZSE5OxsnJierVq9/WscyqGaNqpKN
rRxo7N77dkB3m3LlzrFy5km3btmG1WvH19aV79+6Eh4fj7Fy2pjqrqkqG0czZ7HySsw2YrH+fXttm4Sn27Qqf0SoKlVz11PJypaqb821PAy8rVFXlyy+/JDExkb59+/LAAw84OiQhRDlmtVr54IMPOHHiBH369OHBBx90dEjiNklS6h/MVpXk7HxOZeWTXWCm8JxCqyhcea5gVW3JqEIuOg0BXq7U8nTFVS9TcISo6KyqSp7JgkW13QXXahTcdFpJVgshiM2PJcGYgKviila59jnDpUuXyM7Oxs/P76ar7l2LqqrkqrnU0NWgl3uvYvewKovS09NZtWoVmzZtoqCgAHd3d7p06ULnzp1xdy97zcCNFiuX8gvILjCTZTSRa7JiVVU0gJNWg7eLHk8nHT6X/395sXPnTr7//nv8/f0ZN26cVC0IIUpdcnIykydPBmwLj9zuTRzhWJKUug5VVblkMJGWX0Cm0USm0YzFaru7pQCKAp56W8WDj4ueqm7O92xDSiGEEELcPSbVxNKcpVy0XMRdcb8qWW21Wjlz5gwajYaaNWvecjJbVVXy1DycFWce9HgQX61vSYbvMLm5uaxbt461a9eSm5uLs7MzERERdOvWDV/f8vEa71UGg4Hx48eTkZHBmDFjCAoKcnRIQogKYsmSJSxdupR69erx8ssvyw3ge5AkpYrpylVRNJen4dzrZdZCCCGEcIw0SxorcleQa829KjGVlZVFeno63t7e+Pj43NK4qqqSr+ajUTREuEZQ36l+CUfueEajkZiYGFatWkV6ejparZa2bdvSs2dPqlWr5ujwKqT58+ezZs0a2rZty5NPPunocIQQFYjZbGby5MmcO3eORx99lMjISEeHJG6RJKWEEEIIIRwgxZzCmrw15Fpzi0zlO3v2LGazmYCAALTa4rcEKKyQ0ipa2ru0J8i5fFermM1mduzYQXR0NCkpKSiKQmhoKFFRUQQGBjo6vArj9OnTvPPOO7i4uDBp0iS8vLwcHZIQooI5duwYH374IU5OTkycOFGqZ+8xkpQSQgghhHCQi5aLbMzbSJolDR06rEYrF1Iv4O7uTuXKlYs9jkk1YVANuGvcae/SnrpOFWd5bFVVSUhIIDo6mqSkJACCgoKIiooiKChIpnKUIqvVyvvvv09SUpJUKAghHGru3LmsX7+e4OBgRo8eLZ/99xBJSgkhhBBCOJBJNRFviCexIJHMnExM+SYqe1fGxfn6q/PB5dYCmChQC9Cgoba+Nu1d2+OhufXG6OWBqqocOnSI6OhoDhw4AECdOnWIiooiNDQUjebebfZeVm3cuJHZs2cTGBjI2LFj5T0WQjiMwWBgwoQJpKenM2rUKMLCwhwdkigmSUoJIYQQQpQB+8/tZ+7WuVQKrYRHJVtiSYMGraJFufw/FRWLasGCBQCdoqOmriaNnBpRW1db7gxfdvLkSaKjo4mPj0dVVfz9/enZsydt27aVVeFKSFZWFuPHjyc/P5/XX3+d2rVrOzokIUQFt2fPHr766is8PT2ZOHFimVyhVVxNklJCCCGEEGXArFmz2LRpE48/8ziVmlXiouUiqZZUsq3ZqJf/p6DgqrhSVVsVP50ftXW18dH6ODr0MislJYWVK1eydetWLBYLPj4+dOvWjYiICFxcblyJJm7sp59+Yvv27XTp0oXBgwc7OhwhhADgxx9/JDY2lvbt2zNy5EhHhyOKQZJSQgghhBAOlpuby9ixY3Fzc+Pdd98tUs1jUk2YVTMqKlpFixNOUhF1izIyMli9ejUbN27EaDTi5uZG586d6dy5M56eno4O755z6NAhPvnkE7y9vZk0aZIk+IQQZUZ2djbjx48nNzeX//73vzRp0sTRIYmbkInfQgghhBAOFhMTg8lkIjIy8qrpZXpFj6vGFTeNG86KsySkboOPjw8DBw5kypQp9OvXD41Gw19//cVrr73Gr7/+SlpamqNDvGeYzWZmz54NwKBBgyQhJYQoUzw9PRk0aBB
gq0A2Go0OjkjcjCSlhBBCCCEcyGq1sm7dOnQ6HREREY4Op1xzd3end+/eTJkyhSFDhuDp6cm6det48803+fnnn0lOTnZ0iGXeypUrSUlJoUmTJrRq1crR4QghxFXatm1L48aNSUtL488//3R0OOImJCklhBBCCOFACQkJpKen06ZNG5lKdpc4OTnRuXNnJk+ezBNPPIG/vz/btm1j4sSJfP311xw/ftzRIZZJFy9eZNmyZeh0Oh599FGp2hNClEmKojB8+HCcnJxYs2YNSUlJjg5J3IAkpYQQQgghHGjNmjUAdOnSxcGRVDxarZZ27doxfvx4Ro8eTd26ddm9ezfvv/8+H3/8MYmJiUj7VRtVVZk7dy4mk4levXpRtWpVR4ckhBDXVblyZfr164eqqsyYMQOLxeLokMR1yJq4QgghhBAOcurUKY4ePUqDBg2oVauWo8OpsBRFISQkhODgYI4ePcry5ctJTEzk8OHD1KpVi6ioKFq2bIlGU3Hv58bHx7Nv3z6qVq1KVFSUo8MRQoib6tKlC7GxsSQlJbFy5Up69erl6JDENVTcb1YhhBBCCAdbu3YtIFVSZYWiKDRo0IDnn3+eN998k7CwMM6cOcMPP/zAuHHj2LhxIyaTydFh3nUGg4F58+YBMHTo0Kua8QshRFmk0Wh47LHH0Gg0LF26lJSUFEeHJK5BklJCCCGEEA6QnZ1NbGwslSpVIjQ01NHhiH+oVasWo0aNYtKkSXTq1In09HRmz57N66+/zooVKzAYDI4O8a5ZsmQJGRkZhIWF0bhxY0eHI4QQxRYQEEBUVBRms5mZM2fKlOwySJJSQgghhBAOsHHjRsxmM5GRkRV6WlhZV7VqVYYNG8a7775Lz549KSgoYNGiRbz66qssXryYrKwsR4dYqk6fPs2aNWtwdXW1L7MuhBD3kt69e+Pv78+RI0fYtGmTo8MR/yBnQEIIIYQQd5nZbGb9+vXo9XrCw8MdHY4oBm9vbx566CGmTJlC//790el0LF++nNdff525c+dy8eJFR4dY4lRVZfbs2aiqSv/+/fHy8nJ0SEIIccv0ej2PPfYYAAsXLiQjI8OxAYkiJCklhBBCCHGX7dq1i6ysLNq3b4+7u7ujwxG3wM3NjV69ejFlyhSGDh2Kt7c369ev56233mLatGmcPXvW0SGWmE2bNnHixAnq1KlDp06dHB2OEELctgYNGtCpUycMBgNz5syRaXxliHQpFEIIIYS4y9asWQNA586dHRyJuF16vZ7777+fiIgI4uLiiI6OZseOHezYsYPmzZsTFRVF/fr1HR3mbcvOzub3339HURSGDRsmU0yFEPe8hx56iD179rB7927i4+Np2bKlo0MSSFJKCCGEEOKuOn78OElJSTRu3JgaNWo4OhxxhzQaDW3atCEsLIzExESWL1/O3r172bt3L/Xq1aNXr140a9YMRVEcHeot+e2338jLy6Nz587UqVPH0eEIIcQdc3V1ZejQoXz99dfMnTuXRo0aSbVyGSBJKSGEEEKIu2jt2rUAdOnSxcGRiJKkKArNmjWjWbNmHD16lBUrVrBnzx6mTp1KzZo16dmzJ2FhYfdExdHhw4fZtm0bXl5e9OvXz9HhCCFEiQkJCaFVq1bs3LmThQsXMmLECEeHVOFJUkoIIYQQ4i7JyMhg586dVKlShebNmzs6HFFK6tevT/369Tl79iwrVqwgNjaWn376iT/++IMePXrQsWNH9Hq9o8O8JrPZzOzZswEYNGgQrq6uDo5ICCFK1pAhQzhw4ACbN2+mTZs2BAUFOTqkCq3s36oRQgghhCgnNmzYgNVqpXPnzvfcdC5x62rWrMmTTz7J22+/TWRkJFlZWcydO5fXXnuN5cuXk5eX5+gQr7Jq1SrOnz9P48aNad26taPDEUKIEufl5cXAgQMBmDlzJgUFBQ6OqGKTpJQQQgghxF1gMpnYuHEjzs7OdOjQwdHhiLuocuXKPProo0yZMoVevXphNptZvHg
xr732GosWLSIzM9PRIQJw8eJF/vrrL3Q6HUOHDpXEqRCi3OrQoQNBQUFcvHiRJUuWODqcCk2SUkIIIYQQd0FsbCw5OTl06NBBpkRVUJ6envTv35/33nuPhx56CCcnJ1asWMHrr7/O7NmzSU1NdVhsqqoyd+5cTCYTUVFRVK1a1WGxCCFEaVMUheHDh6PX61m1ahWnTp1ydEgVliSlhBBCCCFKmaqq0uBc2Lm4uNCzZ0/effddhg8fjq+vLxs3bmTcuHH88MMPnD59+q7HlJCQwL59+6hatSpRUVF3/fhCCHG3ValShb59+6KqKjNmzMBisTg6pApJGp0LIYQQQpSyo0ePcvr0aZo1ayYVKMJOr9cTERFBx44d2bVrF9HR0cTFxREXF0fTpk2JioqiQYMGpT6NzmAwMG/ePACGDh1aZpuwCyFESevWrRuxsbGcOnWK1atX07NnT0eHVOFIUkoIIYQQopStWbMGgK5duzo4ElEWaTQaWrduTatWrdi/fz/R0dEkJiaSmJhI3bp1iYqKIjg4uNSSU0uWLCE9PZ2wsDAaN25cKscQQoiySKPRMGLECN59912WLFlCixYt5ObRXSbT94QQQgghSlFaWhoJCQlUr15dLvjFDSmKQtOmTfnf//7H2LFjCQkJ4fjx43z99ddMnDiRbdu2lfj0kjNnzrB27VpcXFx45JFHSnRsIYS4F9SqVYvu3btjMpmYOXMmqqo6OqQKRZJSQgghhBClaP369aiqSufOnWU1M1FsdevWZfTo0UyYMIH27duTkpLCzz//zJtvvsm6detKZAlzVVWZPXs2VquV/v374+3tXQKRCyHEvadPnz5UrVqVw4cPs3nzZkeHU6FIUkoIIYQQopQYjUZiYmJwc3OjXbt2jg5H3IOqV6/OyJEjmTx5Ml26dCE7O5tff/2V1157jb/++ou8vLzbHjsmJobjx49Tp04d7r///hKMWggh7i16vZ7hw4cD8Ntvv5GZmengiCoOSUoJIYQQQpSS7du3k5eXR3h4OM7Ozo4OR9zD/Pz8GDx4MO+99x4PPvggVquVP//8k1dffZXffvuNjIyMWxovOzubRYsWoSgKw4YNQ6ORywIhRMXWqFEjwsPDyc/PZ+7cuY4Op8KQbx8hhBBCiFKgqipr165FURQiIyMdHY4oJzw8POjTpw9TpkzhkUcewdXVlVWrVvH6668zY8YMUlJSijXOwoULycvL4/7776dOnTqlHLUQQtwbHn74Yby8vIiPjyc+Pt7R4VQIkpQSQgghhCgFBw8e5Ny5c4SGhuLn5+focEQ54+LiQrdu3XjnnXcYMWIElStXZvPmzYwfP57vvvuOkydPXnffw4cPs3XrVry8vOjXr99djFoIIco2Nzc3hg4dCsDcuXPvaIq0KB6dowMQQgghhCiP1q5dC0CXLl0cHIkoz3Q6HR07dqR9+/YkJCQQHR3Nrl272LVrF40bNyYqKopGjRrZm+ybzWbmzJkDwKBBg3Bzc3Nk+EIIUea0aNGCFi1aEB8fz6JFi+y9pkTpkKSUEEIIIUQJS01NZe/evQQEBNCgQQNHhyMqAI1GQ8uWLWnRogUHDx4kOjqaAwcOcODAAQIDA4mKiiI0NJTVq1dz7tw5goKCaN26taPDFkKIMmnIkCEcPHiQTZs20aZNGxo2bOjokMotRVVV1dFBCCGEEEKUJ/PmzWPt2rWMGDGCjh07OjocUUElJSWxYsUK4uPjUVUVT09PTpw4QdWqVZkwYQL+/v6ODlEIIcqsTZs2MWvWLKpWrcq4cePQ6/WODqlckp5SQgghhBAlyGAwsGXLFjw8PGjTpo2jwxEVWGBgIM8++ywTJkygQ4cObN++nf3799sr+YxGo6NDFEKIMis8PJyGDRuSmprKX3/95ehwyi1JSgkhhBBClKAtW7ZgMBiIiIiQu6qiTKhWrRrBwcHUq1ePJk2aUK1aNRYsWMBrr73GkiVLyM3NdXSIQghR5iiKwvDhw9HpdKx
YsYLTp087OqRySZJSQgghhBAlRFVV1q1bh0ajITIy0tHhCAGA0Whk3rx5ODs7M2nSJD744AP69OkDwNKlS3n11VeZP38+6enpDo5UCCHKFn9/f/r06YPVamXmzJlYrVZHh1TuSFJKCCGEEKKE7Nu3j9TUVFq2bImPj4+jwxECgCVLlpCenk7r1q1p2rQp7u7uPPjgg0yZMoXBgwfj7u7OmjVreP311/nll184d+6co0MWQogyo3v37gQEBHDy5EnWrFnj6HDKHUlKCSGEEEKUkMKT1a5duzo4EiFszpw5w5o1a3BxceGRRx4p8pyzszNdunRh8uTJjBw5En9/f7Zs2cLEiRP59ttvSUpKckzQQghRhmi1WkaMGIGiKPzxxx9cuHDB0SGVK5KUEkIIIYQoAefOnePAgQMEBgZy3333OTocIVBVlTlz5mC1WunXr991q/d0Oh3t27dn/PjxjB49msDAQOLj45kyZQqffPIJ+/fvRxbsFkJUZHXq1KF79+6YTCZmzZoln4klSOfoAIQQQgghyoO1a9cC0KVLFxRFcXA0QsDmzZs5duwYtWvXLlaPM0VRCAkJITg4mCNHjhAdHU1iYiKHDh2idu3aREVF0aJFCzQaua8thKh4+vTpw65duzh48CBbt26lQ4cOjg6pXJCklBBCCCHEHcrLy2Pbtm14eXnRqlUrR4cjBNnZ2SxatAhFURg2bNgtJZIURaFhw4Y0bNiQU6dOsWLFCnbu3Mn3339P1apV6dGjB+3bt0enk0sJIUTF4eTkxPDhw/nss89YsGABzZo1w8vLy9Fh3fPkNocQQgghxB2KiYmhoKCA+++/Xy7URZmwaNEicnNz6dSpE4GBgbc9Tu3atXn66aeZNGkSERERXLp0iVmzZvH666+zcuVKDAZDyQUthBBlXOPGjenQoQN5eXnMmzfP0eGUC5KUEkIIIYS4A1arlfXr16PT6ejUqZOjwxGCI0eOsGXLFjw9Penfv3+JjFm1alWGDx/OO++8Q48ePTAYDCxcuJDXXnuNP/74g+zs7BI5jhBClHUDBw7E09OTuLg4du/e7ehw7nmSlBJCCCGEuAO7d+8mLS2N1q1bSxm/cDiz2czs2bMBeOSRR3BzcyvR8X18fHj44Yd577336NevH1qtlmXLlvHaa6/x66+/kpaWVqLHE0KIssbd3Z0hQ4YAMGfOHPLz8x0c0b1NklJCCCGEEHfgygbnQjjamjVrOHfuHEFBQbRp06bUjuPm5kbv3r159913efTRR/Hy8mLdunW8+eab/PTTTyQnJ5fasa9FVVVyCsycyzFwKjOPk5l5nM7K50KeEYPZIitlCSFKVKtWrQgJCSEjI4Pff//d0eHc06TpgRBCCCHEbTp9+jSHDx+mXr161KlTx9HhiAouLS2NJUuWoNPpGDp06F1ZBdLJyYnIyEgiIiKIi4sjOjqa7du3s337doKDg+nVqxd169YtlWNbVZULeQWcycon3WDCbFWx/CP5pCigVRSctRr83Z0J8HTF01kugYQQd0ZRFB599FEOHTrEhg0baNOmDfXr13d0WPckRZXbBkIIIYQQt2XGjBls3ryZZ555RlbdEw731VdfsWfPHh544AH69u3rkBhUVWXv3r1ER0dz7NgxABo0aEBUVBRNmzYtkUSZqqqcysonKSOPXLMFVbUlnjSKglbBfgxVVVGxJa8sqopVBa1GoZKLngaVPPB10d9xLEKIim3Dhg3MmTMHf39/3nrrLfR6+Vy5VXKbQAghhBDiNmRnZ7N9+3Z8fX1p0aKFo8MRFdzu3bvZs2cPlStXplevXg6LQ1EUgoODCQ4O5siRI0RHR7Nv3z6OHDlCQEAAUVFRtGrVCo3m9rqI5BSYSbyYTVpeAQB6rQbtdRJdiqKgABpFQYctSWW5XF2VbkjnPh836vm4o9WUfkWZEKJ86tSpEzt27ODo0aMsW7aMfv36OTqke45USgkhhBBC3IZly5bxxx9/MGDAAKKiohw
djqjAjEYj48ePJz09neeff56mTZs6OqQizpw5w4oVK4iNjUVVVSpXrkzPnj1p3779LVUVpOQa2ZOaRYHFgpNGe9vJJFVVMV2e6ufroqdlNW9cdNrbGksIIc6fP8/bb7+N1WrlzTffpGbNmkWet6oquSYL2UYzBVYrqmqbWuyi1eDppMNNr70r063LKklKCSGEEELcIovFwuuvv05ubi7vv/8+7u7ujg5JVGALFy5k5cqVtGrVimeeecbR4VzXxYsXWblyJZs3b8ZsNuPl5UW3bt3o1KkTrq6uN9z3XI6BPalZWKwqzlpNiVzAWVUVo8WKl7OOsOo+kpgSQty2whtVgYGBjB07FhTF1vMuO59L+SYslxPhV35yqdimHuu1CpXdnAjwdKWSi77CJagkKSWEEEIIcYtiY2P58ccfiYiIYPjw4Y4OR1RgZ8+eZfLkyej1eiZNmoSPj4+jQ7qprKws1q5dy7p16zAYDLi4uBAZGUnXrl3x8vK6avtL+QXEncvArKo4a0omIVWoMDHl46KnbQ1fdDKVTwhxG8xmM++++y5nzybT//GncAu4j1zT3z3vbH3vKPL5pV7R786iqmgU8HLSUb+SB1XdnCpMckqSUkIIIYQQt+j999/n+PHjjBs37qoyfSHuFlVV+fDDDzl27BiDBg2ia9eujg7pluTn57Nx40ZWr15NVlYWOp2Ojh070qNHDypXrgyAyWpl65l0sgvMuJRQhdQ/FSam6vm6EeTnWeLjCyEqhsMnklgRf4DKderh6eWJs0533Z53/6ReTk4VWK1oFIWani4E+XngpL29/nv3Eml0LoQQQghxC5KSkjh+/DhBQUGSkBIOtWXLFo4dO0atWrXo3Lmzo8O5Za6urvTs2ZMuXbqwdetWVqxYwYYNG9i4cSOtW7cmKiqKbBdvsgvMJTZl71o0l6sYkjLzqermTCVXp1I5jhCi/ErLK+C04k6Nhk3Iycwkx2LCrWrVYu+vXF491FWjxWS1cjorn/T8AlpW98HTqXynbcp/2k0IIYQQogStXbsWgC5dujg4ElGR5eTksHDhQhRFYdiwYbe9ml1ZoNfr6dSpE2+//TajRo2iZs2axMbG8vGXX7P39HlUiwlNKU9j0WsULFYrhy/lIBNJhBC34mKekV0pGRjNVrzdXNBgJT/fQG5u7m2Np9docNZqyDFZiE1OJ9toLuGIy5bynXITQgghhChBmZmZxMXFUblyZZo3b+7ocEQFtmjRInJzc+nUqRP33Xefo8MpERqNhrCwMFq3bk1iYiLbjp7GosKF8yk4uzjj7eV1uSF6ySeoFEVBr9GQYTCTaTTj41L8VQGFEBVXltFEQkoWBRbVPsW4kp8fqSmpXEpPx8XVFa3m1hdR0CgKLloN+WYrO89n0K6mb7ldjOHevaUihBBCCHGXbdy4EYvFQufOne/pyhRxbzt69CibN2/G09OTAQMGODqcEqcoCkFNmhAY2hoPdzdc3VwxGoykpl4g+dw5cnNzUCn5aiatomBRVc5k55f42EKI8seqquy7kI3RYi3S887VxRV3D3esFivply7d9vjK5cRUrsnCgYvlt4pTzqaEEEIIIYrBbDazYcMGnJ2d6dChg6PDERWUxWJh9uzZADzyyCO4ubk5OKLScSnfRIHFiquTE1WrVKVGjeq4e7hjNpm4eDGN5LNnyc7ORlWtJXZMRVHQKAopucZye/EnhCg5JzLySDeYcLpGz7tKvr5otRpyc/PIz8+77WPYqjgVzucaOJdjvNOQyyRJSgkhhBBCFENsbCzZ2dm0b9++3CYCxO3p0aMHwcHBhIaGEhERQXx8vP25559/nsDAQBRFISEhodhjqqrK1KlTad68OUFBQbRs2ZIePXrw8ccfk5ycTKNGjWjTpk0pvJri++yzzzh//rz9799++y0ffvhhiYydXWBGVW1TWKIXzCGqYU2Sjx2hRs2aeHp5YrFYuXTpEmfPniUzMxOr1cqAlo04f+bUHR1Xq4D
JopJnspTI64Divy8TJkzghRdeuOZzkZGR+Pn5kZmZaX9s4MCBTJ8+/abjLl68mG3bthU33NsSGRnJ4sWLS/UYQpQlBrOFYxm59oUS/kmj0eJbqRIAly5dwnoHCXSdRoOqwuFLOVjLYcJcklJCCCGEEDehqipr1qwBpMG5uNr8+fPZs2cPCQkJvPjii4wcOdL+3MCBA4mJiaFOnTq3NOZbb73FnDlzWL58OQcPHmTXrl28/PLLLF++HK1Wy9ChQ0ttNbpCFsuNEzP/TEo999xzvPzyyyVy7KwrGvsunz+blh07sXz+bHRaHZV8K1EzoCbe3t6oKmRkZHD27BlU1XrTmG+mcApfdoH5jscCW4VlSb0vXl5evPfee7e8X2knpUrifRLiXpOcY8BkUXHSXP9z2N3NDVdXF8xmCxkZGXd0PCethjyzhdTc8lctJUkpIYQQQoibOHbsGKdPn6Zp06b4+/s7OhxRxvj4+Nj/nJmZWSRZ1KlTJwICAm5pvJycHD766CN++umnIvseOXKEhg0b0rNnT6pVq8bMmTNp27YtLVu2pFOnTuzevRuA6dOn061bNx599FGaN29O69atOX78uH2cG+3XuXNnHn74YZo3b86OHTv45JNPCAsLIzQ0lLCwMLZu3QrApEmTSE5OZvDgwYSGhpKQkFCk0qdhw4bExcXZjzl9+nR7/6vz588zaNAg2rRpQ/PmzXnzzTevfg9MZjTA6WNHOXf6FGM//orNK5eTm50NgFajZX/sNt56YjATnx7G/G++QLWqpJw/z+JZ03ntiUftY6mqyvBOrTm2fx8AqxbN59/9e/Lsg114YVAf++PRv83lf0MH8O6/n+L+Nq3YsWMHkydPpnHjxoSGhhIaGsrJkycBGDZsGK1btyY4OJgHHnjAnpxLSkrCx8eHsWPH0rJlS6ZOnVrkfdm7dy/h4eG0bNmSJk2aMHny5GL/XowdO5Zp06aRnJx81XMmk4lXX32VNm3aEBoayqBBg0hPT2fZsmX8+eeffPjhh4SGhvLjjz8ydOhQ5syZA8DXX3+Nk5OTfZWwLl26sHHjRvvvSXBwsP01nj179rq/J1dauHAhISEhHDt2jCNHjtCxY0dCQkKu+7MW4l5jVVVOZ+WjwE1uDtianms0CtnZ2RiNhts+pkZRUFU4XQ573klSSgghhBDiJtauXQtA165dHRyJKKtGjBhBrVq1eOutt5g5c2ax9hk1ahR//vnnVY/v378fZ2dngoKC7I/t3r2b3bt3U7lyZXr37s3mzZuZO3cuGzduZNeuXbzzzjsMHTrUvn1sbCzvvvsue/fupVu3brz//vsAN91v+/bt9v3at2/PY489RmxsLAkJCXz55Zc88cQTAIwbN44aNWowb948EhISCA0NLfIaRo4cWWRq2c8//8yTTz4JwOOPP86///1vduzYQXx8PHFxcSxYsKDI/lZVBUVh2fzZdB/wCJX9q9GiQwTrlvwOQPrFC3zwyvNM+OZnfloZQ8NmzcnJykSr09K0TXsOJOzk0oUUABK2bcbD25t6TZqxL247a5cs4tN5f/Ld0rU8+dLrvPPCc/bjHkzYxeP/e41lm3cQFBTERx99xK5du0hISGDLli32pPRnn31GXFwce/bsISIiggkTJtjHyMzMpGnTpuzateuq6XiBgYGsWbOGXbt2sXPnThYuXFjsKqZq1arx7LPPMn78+Kue+/DDD3F3d2fHjh0kJCTYE0C9e/emb9++vPzyyyQkJDBq1Ci6devG6tWrAVi1ahWtW7dmw4YN5OXlsXv3btq3b8++ffvslXl79uyhQ4cOjBo1yn68f/6eFPrkk0/47LPPWLt2LfXq1WPq1Kk8+OCD7N69m7179/Liiy8W67UKUZZlG83kmSzoi7HgiU6rs924UCEt7dIdLdKgUxR7v73yROfoAIQQQgghyrL09HTi4+Px9/enSZMmjg5HlFEzZswA4JdffmHs2LEsW7bspvv8+OOP133uyrvvGRk
Z9OjRg4KCAho1asQ777zDH3/8we7du2nbtq19u0uXLpGfb7uL3r59e+677z77n7/88kuAm+7XoUMHGjVqZH8uPj6ed955h7S0NHQ6HYcOHSI/Px9XV9cbvrYRI0bQokULPv74Y86ePcvhw4fp1asXubm5rFmzhpSUFPu2OTk5HDp0qOjrR8FiNrFq0Tw+/dWWuIt6ZCizvvyYB4eO4ED8TuoGNSGwgS3W3oOH89XE1/H398erUmU69XqQVYsWMPjZ/2PFb3OJGmirnNq8KppjBxL5vwE97cfKzkjHaLC9/qatwgioWx+wTZdr0KABw4cPp0ePHjzwwAP2yrU5c+Ywc+ZMDAYDBoOBypUr28fT6/UMHz78mu9Lfn4+o0ePJiEhAY1Gw+nTp0lISKBdu3Y3fD8LvfzyyzRq1IiDBw8WeXzx4sVkZmaycOFCAAoKCggMDLzmGN26dWPixIlYLBb279/PO++8w+rVq9FqtbRp0wa9Xs+6deuIioqiZs2aAIwePZpJkybZp+r98/cEYPLkyfj7+7Nq1SpcXFwAW6Xgyy+/TE5ODvfffz/dunUr1usUoqT06NGD8+fPo9Fo8PT05IsvvqBFixYALFu2jDfffBOr1YrZbObll1/m8ccfv+F4o0aNYuuOHeSbrJw+dphqAbVxvvz7/tn8pbh5eFy1j6enJ7m5uRiNBWRlZuLt7XPNsXOyMlkyezqP/uu/13z+6L7dzPt+Kq1/W4Cfq9MtvAs248ePZ/LkyRw/frzIlPLIyEheeOEF+vfvf8tjlgRJSgkhhBBC3MC6deuwWq106dKl1Hv4iHvf448/znPPPUdaWhp+fn63NUaTJk0wGAwcOnSIRo0asXbtWvr374+7u7u9ak9VVR5//HHefffda45RmBQA0Gq1mM3mYu3nccUFVUFBAQ899BDr1q0jLCyMrKwsvL29MRqNN01KBQQE0Lp1a/744w8SExMZPnw4Op0Og8E2fWXbtm1FYvwnvVZh+9pV5GRlMfbxQfbY01LPc+LQgau2//vfpoKbmzu9HhnGB688T9/hI9m2dhWj37o8TU5V6fHwYEa9fO1pZC5ubqiAXqOg1WrZtm0bW7ZsYf369bRr1465c+eiKApffPEFW7dupWrVqvz555+MGzfOPoabmxua61RQvP7661SuXJn4+Hh0Oh0PPfSQ/T0pDi8vL8aOHctrr72GVqu1P66qKl9++SU9evS46Ri1a9fG2dmZ2bNn06pVK7p27co777yDVqu9bjXoPz/7PK5x4d22bVtWrlzJ8ePH7Qn8hx9+mA4dOrBq1SqmTp3KZ599VqyErRAlZf78+fYp1r///jsjR45k9+7dtmm9w4ezfv16goODSUpKIigoiIceeghPT8/rjvfjjz+y/2I2JzLyeCoyjLem/kD9Js2LbGMxm9Hqrky1KPj5+XHu3DkyMzNxc3NDr786qZSTlcncb764blIqKDiUsZ9/T3aB+ZaTUlarlenTpxMZGcnPP/9cpLrT0WT6nhBCCCHEdRQUFBATE4Orq2uRKSpCFMrIyCjS42fx4sX4+flR6fKqS7fDw8ODF198kVGjRrFz505WrVqFs7Nzkeqmvn37MmvWLE6dsq02Z7Vai/Rwup5b2c9gMFBQUEDt2rUB7NVWhby8vIqsBvdPTzzxBD/99BMzZsywT93z8PCgc+fORRp2Jycnc+bMmSL7ejvrWblgDv8eN5k5MbuYE7OLuZvjGfjUv1g+fzZNWrbm+MH9nDp2BLA1QzcVFNj3b9yiFQDfvjuBVh074eXjC0D7bj1Zs/g3Us6esb/+Q3sS7PupgEYBDycd2dnZpKSkEBERwVtvvUV4eDjx8fGkp6fj6emJn58fBQUFfPfdd9d9D/4pPT2dgIAAe9XZqlWrir1voX/9618kJCSwc+dO+2P9+/fn008/JS/PtvR8Xl4eiYmJwLV/Tt26dWPcuHF069YNX19f9Ho9CxYssFcyde7cmejoaPvv9rfffkv
Xrl2LJML+qXv37vz000/06dOHXbt2AbY+aP7+/owYMYIPPvig1FcBFOKfbtTzT1EUewPyrKws/Pz8cHZ2vumY11qd8/2X/o8PX3meFwb14ameEQB0va8KOVm2f3t6vRPP9+vOheRkLly4wBfjxvJEtw483SuS5/p0pcBo4LM3XiI/N4dnekfyr75XVxXu3r6F//TpSr7JYu9fN378eFq1akX9+vVvmPBdtWoV/v7+fPTRR/z8889YrWVnCqBUSgkhhBBCXMf27dvJzc2lW7duxTpRFRVPZmYmjzzyCPn5+Wg0GqpUqcLSpUvtFz7PPvssf/31F+fPn6dnz554enpy9OhRwDYNpG/fvvTt2/eqcd955x0+//xzHnzwQfLz8wkICODw4cNMmTIFgIiICD744AMGDBiA2WymoKCABx54gNatW98w3lvZz8vLi8mTJ9OmTRsqV67MkCFDijz//PPP8/TTT+Pm5lakf1Shfv368a9//YsGDRrQuHFj++OzZ8/mxRdfpFmzZiiKgru7O999912Rpu55aans3hrD2I+KJsK69nuYl4c/zNNjx/Hy+58z/tnH0emdCLu/C16+RROBUQMf5fv3JjJl+q/2x4LbtOfpV8cz/rnHsZgtmE0FtO3cnUbBobYNVFtDYU8nHSlpqQwcOJDc3FwURaFBgwY8/vjjuLm5MWvWLBo1aoSfnx/dunWzNwG/mTfffJPHHnuMX375hXr16t3Wap7Ozs5MmjSJESNG2B8bO3YsRqORtm3b2n/3xo4dS9OmTXnssccYOXIkixcv5t///re9r9Q333xjT0J169aNH374gZCQEACaNWvGhx9+SFRUFAC1atXihx9+uGlsERER/PrrrwwcOJCZM2eyceNGZs2ahZOTE1arlW+//faWX68Qd2rEiBGsW7cOwJ64URSFefPm8dBDD+Hu7k56ejqLFi3CyclWgVTYN++55567ajyrqnKtuunDe/fw+YJrT+ErPKZOr+PMiaPs2rKJn1bGoNFoyMnKQqd34oV3PuKZ3p35ftn6G74e6+W2VJmZmQQHBzNx4kSio6P573//S+/eva+5z7Rp03jyySdp0aIFfn5+rF69uliVlXeDoqrq7XfaEkIIIYQop1RVZdKkSZw7d47JkycX6RkjxN2wZcsWfvnlFwICAnjjjTeuOyWsPMoymthyJh2NArq7+LoNZgs+Lno6BNx+pZsQomz65ZdfmDdvHsuWLcNsNtOtWzcmTZpEp06diI2NpW/fvuzdu/em3/exyelcyCvgycgwJn3/C/WbNOf9l/6PmnXuY/h//mffrut9Vfhj91E8vLwBGNCyEV8tXoGXTyVG9+tG01ZtCG0fTrvO3fGu5Mf5M6d4pndn/txz7JrHTdi2makT32Dppq245aTRuHFj8vLyUBSFzMxM/Pz87FO1r5SWlka9evU4efIk3t7eTJ06lU2bNjFv3jzA8T2lKs43mxBCCCHELTh06BDJyckEBwdLQkrcdbm5ufz2228oisKwYcMqVEIKwNNJh4+LHpP17t0/t6q2dbECvG7cL0sIcW96/PHHWbduHWlpaSQkJJCcnEynTp0ACAsLIyAggPj4+JuO46TVXHMNPVd39yJ/12i19sUBAAqMBjQaDR5eXvwYvYmufR/m9LEjPN3rfs4mHS/269BrbHVazs7O9spI7T+OdaWZM2diNpsJCQkhMDCQ999/nyVLlpCWllbsY5amivXtJoQQJUhVVfJMFlJzjZzNzudsdj7ncgxkGExY7uJJtBCidBQ2lL6d6TVC3KmFCxeSm5tLREQEdevWdXQ4d52iKNTyckXBliy6G0xWFWethuruMlVXiPLgRj3/atWqxblz5zhwwLZwwtGjRzl27NhVq0pei6dT8bog1axzHwcTbL3fNkUvxXC551tG2kUM+Xm07tSZp15+E/+A2pw8chg3D08KjIYi/fGuVDjJrbjHLzRt2jR+++03kpKSSEpK4vTp0/Tp04dZs2b
d0jilRXpKCSHELbCqKhfyCkjONpBuKKDAqmK1qkXulmgU0CoKHk46qnk4U8PDBRfd9RuDCiHKngsXLrBnzx5q1KhRrBNUIUrSsWPH2Lx5M56engwYMMDR4ThMNXdnjjnpyC4w46LVlOrql1ZVxaqq1PJyRa+V+/ZClAc36vnn7+/P999/z6BBg9BoNFitVqZOnWpf2OFGPaU8nXUU5+PoX2+9zVcTX+enj6fQrnN3e9+71HNn+eS1FzGbzFitFpq1akObyK7o9Hq6DxjE073ux9XdnW/+XF1kPKuqoii2pFRx1+zcsWMHqamp9v5xhYYNG8abb77Jf/977ZX+7ibpKSWEEMVgVVVOZ+WTlJlHrsmCerkRqvZyAgpsd3VVVcWq2ra3XJ4GoNcoVPNwpr6vB256SU4JcS9YsGABq1ev5rHHHiM8PNzR4YgKxGKxMHnyZJKTk3niiSdo166do0NyqLS8AmLPZaBAqSWLVFXFYLHi5ayjfc1K6DSll/wSQtz7jBYrG05exKrapvLdLQazBTe9lvtr+5Vqkv5uk0opIYS4iewCM4kXsknLL7CfFGuv80WgFCaqUNBjO9E1WVVOZxlIzS2gkZ8HAZ4u5eqLRIjyxmAwEBMTg7u7O23atHF0OKKCWbNmDcnJyTRs2JC2bds6OhyH83Nzora3Kycy8tBYVbSlkDAqsFrRaRSaVvaUhJQQ4qactRqqeThzOsuAqqp35bxevaLnXXm7jpDaVCGEuIHkbAPbzl4iLb8AJ40GF532ugmpa1EUBSetBhethgKLlb0XsohPycQsPaeEKLO2bduGwWAgIiLCvjS0EHfDpUuXWLJkCVqtlqFDh5a7C4/b1bCSB1XcnCiwWku0Z6OqqhgtVgCC/Dyo5Cr/3oUQxRPg6YpGActdOqU3WVV0GoUaHi5354B3kSSlhBDiOs5k57PnQhYmq4qLVnNHd2cVRcFFp0WnKJzLMRJ/PkMSU0KUQaqqsnbtWjQaDZGRkY4OR1Qw8+bNo6CggB49elC9enVHh1Nm6DQKLfy97Ykpk8XKnXYgUVUVo9WKAgT5eVLH261kghVCVAi+LnoquThhst7559HNFLYFqeHhUi5bgUhSSgghriE110jihWysVhVnTck1V9VpNDhpNKTmFbAnNbPUv8SEELcmMTGRlJQUWrRoga+vr6PDERXInj17SEhIwM/Pj969ezs6nDJHr9XQspoPgd6uqIDBYr2tVflUVcVstWKwWHHWagj29+I+H0lICSFujaIoNK3iiV6jUGC1luqxCixW3PVaGvl5lOpxHEWSUkII8Q9Gs5XEi9lYLi8NXdLTJ7QaBb1G4XyukTPZxV07QwhxN6xduxaArl27OjgSUZEYjUZ+/fVXAIYOHSrTRq9Dp1FoWsWLVtW98dBrMVqsGMwWzFb1pjd5VFWlwGJLRllUleoeznQIqFQup8IIIe4ODycdDSp5oKpgLqXEVIHFikZRaFzZ8642Vb+bpNG5EEJcQVVVDqZlk2eylOry0zqNBrPZwqG0HPxcncplKa4Q95rz58+TmJhInTp1qFu3rqPDERXIsmXLSEtLo0WLFjRr1szR4ZR5Vdyc6VhLz7kcI6ey8skymjBdvh7UQJHvbuvl5sBgS2oFeLgQ4OVKJRe99OwSQtyxQG9XsgvMnM7KB6zoNCWXOCqwWLGi0tDXg6pu5fdmhSSlhBDiCukGE8k5RnQapdRPVp21GgwWK0fTcwiu6l2qxxJC3Ny6desA6NKli1ysirsmOTmZlStX4uzszODBgx0dzj1Dp9FQy8uVAE8XMoxmMo0msowmMg1mCqxWVBU0ioKbXou3sw4vZx2VXJ1w1clNICFEyVEUhWZVPFFROZtlwKpa0d/hdUThIgyKAg183ann61auz0skKSWEEFc4nZ2PVVVxKsG7HNejKApaReF8jpGGlSy4yImyEA6Tl5fH1q1b8fLyonXr1o4OR1QQqqoyZ84crFYrffv2lT5mt0FRFHx
d9Pi66B0dihCigtIoCsFVvHDX6ziWnovBYsVJc+uLJKmXG5qbLrcQaeTnQYCnS7lOSIH0lBJCCDuD2UJKjhGtUvpVUoX0GgWTVeWs9JYSwqG2bNmC0WikU6dO6HRyz07cHVu3buXIkSMEBATQpUsXR4cjhBDiNimKQn1fd9rV9MXXRY9JtZJvthRrdT5VVTFd0fPO392JDgGVqOXlWu4TUiCVUkIIYXcxrwCTVcXlLjYRVBQFBTifa6Ser/tdO64Q4m9Wq5W1a9ei1Wq5//77HR2OqCByc3NZuHAhAMOGDUNzFyp0hRBClC5vZz3tavqSmmvkdHY+afkmDBYrhamlwnN/FVsy6sqedzU9XAjwdMXPtWL1vJNvPyHEHXn++ecJDAxEURQSEhKKPNejRw+Cg4MJDQ0lIiKC+Pj4Yo+rqir33XdfsVbAWrx4Mdu2bbP/PS4urkhfju+++46goCBCQ0M5e/YsERER1xwnq8CMgu3L4rUnhnD62NGbHntoeEuO7t971ePnz5yib3C9m+4P8Of07zl9Nhmz9eZLW48cOZLPPvsMgAkTJvDCCy/YxvjzT8aMGVOs4wkhitq7dy9paWm0bt0aLy8vR4cjKohFixaRk5NDRESENNYXQohyRKMoVPNwIay6L+EBlQip6kWgjxu+LnpcdRqctBpcdRr8XJ2o5+tOqL8XnWr5EervTWU3pwqVkAKplBJC3KGBAwfyyiuvEB4eftVz8+fPx8fHB4Dff/+dkSNHsnv37mKNu2bNGnx8fNizZw8nTpzgvvvuu+Z2ZrOZxYsXExoaSrt27QBo3bo18+bNs2/z2Wef8fPPP9O+fXsANm3adM2xMg0m+5+n/PxrseIsCYun/0Czth3ILmh02z0x+vbtS9++fUs4MiEqhjVr1gDI9Clx1xw7doyYmBg8PDwYMGCAo8MRQghRSjycdHg4SdrlRqRSSghxRzp16kRAQMA1nytMSAFkZmbeUtZ/2rRpPP300wwdOpSffvrJ/vj69etp2rQpTz31FKGhocyePZs///yTDz/8kNDQUH788UfWr19PaGgoYEuaHTt2jJEjRzJw4ECSkpKKxLV161bCw8MJCQlhWLdwtq+OBopWQC348RtG9+vOM70jGd2vO4m7Yov9Ogp1va8Ks7/6lNH9ejAsohXRC+YAMOOLj0hLPc97/32W8DatSUhIwGQy8eqrr9KmTRtCQ0MZNGgQ6enpNxx/+vTp9O/f3/738ePHU79+fcLCwnjzzTcJDAy0P7dixQrCw8Np1aoVbdq0sa84tn79epo1a8bo0aMJCQmhadOmxMXF2ff766+/CAsLIyQkhNDQULZv3w5AbGwsXbp0oXXr1rRo0YIFCxbc8vsjhKOcOXOGQ4cOUbdu3SL/ToQoLRaLhdmzZwO27yh3d5m6LYQQouKSlJ0QolSNGDHCnvRYtmyZ/fFRo0Zdt7rn0qVLREdH880333Dq1CkeeOABJk6caO+3ceDAAb7++mumTZsG2JZxDw0NtU9lW79+vX2s3377jcDAQObNm0doaChJSUlFjtO/f39+++03wsPDWXEshZzMzKvi6T7gER4Z9S8A9sfH8cFL/2H6mq1XbZd2KQ2NouDl5X3N90Lv5MzXf6zk1LEjjO7Xne4DBjHi+ZeInj+HsZ99x4DIDgR4ufLuu+/i7u7Ojh07AHj77bd58803+eqrr673Nhfx119/sXDhQuLj4/Hw8ODJJ5+0P3f8+HEmTJjAihUr8PLy4ujRo0RERNjfl4MHDzJt2jS+/vprvv32W9544w1WrFjB4cOHeeKJJ9i4cSNBQUGYTCby8vLIyMjgmWeeYdmyZVSvXp2LFy/SsmVLOnToQM2aNYsVrxCOVPj5VJypwkKUhLVr13L27FkaNGhgr/AVQgghKipJSgkhStWMGTMA+OWXXxg7dqw9MfXjjz9ed5/Zs2fTq1cvfHx88PHxwd/fnxUrVtCrVy8A6tatWyLNiLdu3UqjRo2IiIh
AVVU0Gg0ePlcvx300cS+zv/qUrIx0tFotp48fxWjIx9nF1b6NqlrJz8/HYraQnZ1NXvbVya1u/R8GoHa9Bmi1Oi5dSKVK9Rr25wsLyRYvXkxmZqa9AW5BQcEtVXCsWbOGRx55BE9PTwCeeuop+4V3dHQ0R48epVOnTvbtNRoNp06dAqB+/fq0bdsWgPbt2/PRRx8BsGrVKqKioggKCgJAr9fj7e3NsmXLOH78uP1nU+jQoUOSlBJlXk5ODtu3b8fHx4cWLVo4OhxRAaSnp7NkyRI0Gg3Dhg2rcH1DhBBCiH+SpJQQ4q54/PHHee6550hLS8PPz++G206bNo3z58/bEzHZ2dlMmzbNnvjw8PAo8fgURUGn0WC0WIs8biooYPy/RvLxnMUEhbQgNzubvsF1MRUUFElKKYqGmjVqkpOTTWZWFrnZuVhVK5cupeHlbauccnJ2sW+v0WqxWMxFjqW7XAmmqipffvklPXr0KLHXVkhVVbp3786cOXOu2u7s2bO4uPwdo1arxWw2X7XdlVRVpWnTpmzZsqVEYhXiboqJicFkMhEZGYlWq3V0OKICmDdvHkajkaioKKpXr+7ocIQQQgiHk55SQohSkZGRQXJysv3vixcvxs/Pj0qVKt1wv507d3LhwgWSk5NJSkoiKSmJY8eOsWLFCi5cuHDNfby8vMi8xrS7m+nQoQNHjhyxNz731GvITL9UZJsCoxGzyUTVGraqn8W//HDd8RRFwdPTi5o1a+Lj64MCZGfnkHz2LABms+ma+7l5eJCfk42H3nZR3L9/fz799FPy8vIAyMvLIzExsdivq0uXLixcuJCcnBxUVS3Sk6tnz56sXr2aPXv22B8rnCZ4Iz179mTFihUcPHgQAJPJRGZmJh06dODEiROsXr3avm1CQgIFBQXFjlcIR7BYLKxfvx6dTnfdFTmFKEl79uwhPj4ePz8/HnjgAUeHI4QQQpQJkpQSQtyRZ599loCAAM6cOUPPnj2pX78+YGts3r9/f5o3b05ISAhTp05l6dKl9qqdUaNG8eeff1413rRp0xgyZIi9fxTYGqZ3796dmTNnXjOGxx57jPnz59OiRYsbTgv8J19fX37//XdeffVVgoODGREVyf6dRZuYu3t68sSLr/Hv/j15rk9XdHqnm46roODu7o6iaPDz80OrtRWlnj93notpFzH9IznVd8Qovnjjf3S83Oh87NixhIWF0bZtW4KDg2nXrh0JCQnFfl0PPvgg/fr1IzQ0lLCwMPs0SLBNz5szZw7PPvssISEhNG7cmM8+++ymY9avX5+ff/6Z4cOHExISQtu2bTl06BC+vr789ddfvPvuu4SEhNCkSRNeffVVrFbrTccUwpHi4+NJT0+nbdu2pVJ9KcSVCgoK+PVX26quQ4YMwcnp5t8lQgghREWgqKqqOjoIIYQoC1JzjcSdz0Cv0aAt0T4fKrm5uWRmZmIymUEBdzc3vL290eudyDdbqObuTKvqPiV2xOzsbDw9PVFVlf/973/k5+fzzTfflNj4QtzrPvjgA44dO8Zbb7113RVEhSgpixcvZvny5bRo0YLnnnvO0eEIIYQQZYb0lBJCiMsquznhptOSZ7Kg1ZVkfxkFd3cP3N3dyc3LIzMjk9zcPHLz8nBz98DN05Oani43H+YWjBgxgqSkJAwGA02bNuXbb78t0fGFuJedPHmSY8eO0bBhQ0lIiVJ37tw5Vq5cibOzM4MHD3Z0OEIIIUSZIkkpIYS4TKMo1PJy5WCarRdTya+KpODu5o67mxt5eXlkZmZitqqknjnNojV/8EDv3tSpU6dEjvT777+XyDhClEdr164FbP3XhChNqqoyZ84cLBYL/fv3x9f36hVehRBCiIpMklJCCHGFmp4unMjIw2ix4lKi1VJXUnBzc8fZxZW8ggIyjuxlT0ICuxMSaNasGQ888AB169YtpWMLUbFlZWURGxuLn58fISEhjg5HlHPbt2/n8OHD1KxZk65duzo6HCGEEKL
MkaSUEEJcwUWnpZGfB3svZGG2qug0JV0tZaOqKgVWlaqebjw45GEOBDdm6dKl7Nu3j3379tG4cWMeeOABGjRoUCrHF6Ki2rhxIxaLhc6dOxdZUEGIkpabm8uCBQsAGDZsGFptad3oEEIIIe5dkpQSQoh/CPB0ISXXSEquEY2iQVPC0/hUVcVosaLXKDSt7IlWo6FZs2Y0bdqUgwcPsnTpUg4cOMCBAwdo2LAhDz74IA0bNiyF6YRCVCxms5kNGzbg5OREx44dHR2OKOf+v707j46yPNg/fj2zZLITCIsEEGRRtoQIipGdgCwZrFposdLi8lLq1talLUoFBVS0tS4/20qtFltfWktRqSUBpCyCoghiFGRVDAJhSwjZM5mZ5/79geQVAU1IMhOS7+ccjsnMs1yTw8HJNffy+uuvq6SkRIMGDVKXLl3CHQcAgAaJUgoAvsayLPVuFafyQFBFvoA8zrorpk6MkLLlsCz1ahWneI/7lPv26NFD3bt31+7du7VkyRLt3LlTTz75pLp06aJx48apR48elFPAOfrggw9UVFSkoUOHKjo6Otxx0Ijt2bNH69atU2xsrL773e+GOw4AAA0WpRQAnEGky6l+FyTog0PHVeQLyO1w1Hoq38kRUo4vR0i1i4s643GWZeniiy/WPffco08//VSZmZnatm2bnnnmGXXq1Eler1fJycmUU0ANGGO0cuVKSdLw4cPDnAaNmW3bWrBggSRp/PjxiomJCXMiAAAaLkopADiLaLdTl7dN0NajRTpSVqlAQPI4HTUug4wxChojv20U5XKoZ8s4XRAbWa1zu3btqp///Of6/PPPlZmZqS1btugPf/iDOnToIK/Xq9TUVMopoBo+//xz7d27Vz179lTbtm3DHQeN2KpVq7R//35169ZNV155ZbjjAADQoFFKAcA3ODlian9xhXbml6giaMuSFFGNKX3GGAVso4Axclgndvbrnhh7Trv6XXTRRbrzzju1d+9eZWVlKTs7W/PmzVO7du2UkZGhvn37smgz8A1OjpJiBzTUp4KCAr3xxhtyOBy64YYb+NAAAIBvQSkFAN/Csix1iI9Sy+gI5RZXaF9RucoCQclIRpLDsnSyDjKSbGNkvvze5bDULjZSHeKi1CLKXetfUDp27KjbbrtN+/fvV1ZWljZv3qw///nPatu2rTIyMnTZZZdRTgFfU1BQoM2bN6t169bq1atXuOOgEVu4cKF8Pp9Gjx6tpKSkcMcBAKDBs4wx5tsPAwCcZBujvLJKFfr8KvQFVOjzK2if+KfUsqRot0sJkW7FR7jUMjpCUecwMqq6Dh48qKysLG3cuFHGGLVu3VoZGRnq378/248DX1q8eLGWLl2q66+/nvWkUG+2bt2qZ599VomJiXrwwQfl8XjCHQkAgAaPUgoA6oD5cnRUXe3SV1OHDx/W0qVLtWHDBtm2rZYtW2rs2LFKS0uTy8WgWDRdfr9f06ZNUzAY1OOPP67IyOqt5wbURGVlpWbNmqW8vDzdcccdSklJCXckAADOC5RSANCI5OXlaenSpVq/fr1s21aLFi00evRoDRw4UG63O9zxgJB755139Le//U0jRozQ97///XDHQSN1cjRenz59dPvtt4c7DgAA5w1KKQBohPLz87V8+XK98847CgQCSkhI0KhRozRkyBDKKTQZxhg9/PDDOnDggObMmaNWrVqFOxIaoYMHD2rOnDlyOp2aNWuWWrRoEe5IAACcN1gNFwAaocTERN1www165JFHlJ6ertLSUi1cuFDTp0/XihUr5PP5wh0RqHe7d+/W/v37lZycTCGFemGM0d///ncFg0FdffXVFFIAANQQpRQANGIJCQmaOHGiHn30UV111VWqqKjQokWLNH36dC1btkwVFRXhjgjUm5UrV0qS0tPTw5wEjdWGDRu0a9cuJSUlacSIEeGOAwDAeYdSCgCagPj4eE2YMEGPPvqoxowZI7/fr9dff13333+/MjMzVVZWFu6IQJ3
Ky8vTRx99pKSkJHXv3j3ccdAIlZWVadGiRZKkSZMmseMpAADngFIKAJqQuLg4XXfddZo7d668Xq9s29Ybb7yh+++/X2+88YZKS0vDHRGoE2vWrJExRunp6bLCtCsmGrfXX39dxcXFGjhwoLp27RruOAAAnJdY6BwAmrCysjKtXr1a//3vf1VWViaPx6Phw4dr5MiRiouLC3c84Jz4fD5NmzZNlmXp8ccfV0RERLgjoZHZs2ePfvOb3yg6OlqzZ89WbGxsuCMBAHBecoU7AAAgfKKjo+X1ejVixAitWbNGK1as0LJly7Rq1SoNHTpUo0aNUnx8fLhjAjXy3nvvqby8XKNHj6aQQp2zbVsLFiyQMUbjx4+nkAIAoBYopQAAioyM1JgxYzR8+HCtXbtWb775plasWKE1a9Zo8ODBGj16tBISEsIdE/hWxhitWrVKlmVp2LBh4Y6DRmj16tXav3+/unbtqgEDBoQ7DgAA5zVKKQBAFY/Ho6uuukrDhg3TunXrtHz5cq1atUpr167VwIEDNXr0aCUmJoY7JnBW27dv16FDh9S3b1+1aNEi3HHQgBljVBYIqtgXUIk/qKBtZCQ5LSna7VRchEuxES45vrImWUFBgf7973/L4XBo0qRJrFcGAEAtUUoBAE7jdruVnp6uwYMHa/369Vq6dKneeustrVu3TgMGDNDYsWPVsmXLcMcETrNy5UpJUnp6epiToKEq8wd1oLhc+4sr5AvaCtpGJ6slI8n68r8Oy5LLYalNjEft4yLVPNKthQsXyufzadSoUUpKSgrfiwAAoJGglAIAnJXb7dbQoUM1cOBAvffee1q6dKnefvttrV+/XldccYXGjh2rNm3ahDsmIEk6fPiwtm7dqg4dOrAbGk5THghqV36JDpX6FLCNHJKcDktup+O0EU/GGNlGCtpG+4rKdaC4XI7KCn164JBatGihcePGhedFAADQyLD7HgCg2oLBoN5//31lZWXpyJEjsixLl19+uTIyMtS2bdtwx0MT98orr2j16tW68cYbWesHVYwxyi2p0I78ElUEbLm+HAFV3al3xhgFbaNjRUUKVPrUPiZCg3p2lcvB1D0AAGqLUgoAUGO2bWvTpk3KysrSwYMHZVmW+vbtq4yMDLVv3z7c8dAElZeXa9q0aYqIiNDcuXPldrvDHQkNgG2MtucV64uichlJHsfpo6Kq4/jx4yosLFRMfLxi4uLVIsqtvm2ayeNy1n1oAACaEEopAMA5M8Zo8+bNyszM1IEDByRJqamp8nq9uvDCC8OcDk3JypUrtXDhQmVkZOiaa64Jdxw0AMYYfXL0RCHldFhyOxzndB1/wK+DubmSZSkpKUmWw6nKoK2ESJcua9tcHue5XRcAAFBKAQDqgDFGH330kZYsWaJ9+/ZJkpKTk+X1enXRRReFOR0aO9u2NWPGDB07dkxz585VQkJCuCOhAfi0oFS78ktqVUhJRocPH1ZFhU/NmycoPr6ZpBMjsHxBWy2jInR5UsIpO/QBAIDqY6FzAECtWZal1NRU9enTR1u3btWSJUu0ZcsWbdmyRT179pTX62XhadSbrVu3Ki8vT/3796eQgiTpeIVfnxWUymHVppCSSktLVVHhkzvCrbj4+KrHHZalCIdD+eWVyiksU+eEmLqIDQBAk0MpBQCoM5ZlKTk5Wb1799b27du1ZMkSbdu2Tdu2bdMll1yicePGqVu3bue0pgtwNitXrpQkpaenhzkJGoKgbfRJXrECJ0vwJwAAKp9JREFUtlFkLabW2batgoICSVJiixaydOq/W06HpYCRPj1WqlbRHsVF8LYaAICa4v+eAIA6Z1mWevbsqR49emjXrl1asmSJdu7cqZ07d6pr167yer3q0aMH5RRqLTc3Vzt27NBFF13EVFFIkg6WVOh4hV8e57ktan7S8eMFCgZtxcbGyOOJPOMxEQ6HKoK2PisoVWqbZud8LwAAmipKKQBAvbEsS5dccokuueQS7d69W5mZmdq
+fbueeeYZXXTRRfJ6verduzflFM7ZqlWrJEkjRowIcxI0BMYYfVFULkm1WuepstKn4pISOZwONW/e/KzHWZYlp2XpcKlP5YGgotiNDwCAGmGhcwBASO3Zs0eZmZnaunWrJOnCCy+U1+tVnz59KKdQI6WlpZo2bZqio6P16KOPyuXis7amrqDCr/cOFMhpSa5aLG5+8OBBVVb6lZiYqNjY2G8+2hhVBG11T4xVl+asLQUAQE1QSgEAwmLv3r3KzMzURx99JElq3769MjIy1LdvX8opVMvy5cv12muv6ZprrlFGRka446AB+LSgVDvzSxRZi6l7xcVFOnasQJ5Ijy5o00bSt1+nIhBUiyi30tq1OKd7AgDQVJ376o8AANRCx44ddfvtt2vGjBnq27evDhw4oOeff16zZs3Sxo0bZdt2uCOiAbNtW6tXr5bL5dLgwYPDHafJmj9/vizL0uLFi6see/TRR3XJJZfI4XCc8vi32bJli9LT09WnTx/17t1bl19+ubZu3aqZM2cqNTVVqampio2N1UUXXVT1/c6dOzVs2LCqx7yD0/TTq0do09rVkqR7rr9Goy9OUkHe0ar75H6Ro5GdW2vG1MmnZQgGA/rtr36ueyaM1YybJmpqxnBNzRimZf/6+xkzP3TbzVq26B9yWJaKKwOy6/Cz3jVr1mjZsmX/lzs3l7/rAIBGh3HuAICwat++vX7yk58oNzdXWVlZ2rRpk1544QX95z//UUZGhvr37y9HLbZ0R+OUnZ2tgoICDRgwQHFxceGO0yTl5OToz3/+s9LS0k55fOTIkbr++ut1yy231Oh6P/jBDzRnzhxdd911kqR9+/bJ4/Fo9uzZmj17tiRp2LBhuuuuu3Tttdeecu5TTz2la665Rmv25qsiaMvzlV33OnfvqRWv/0vf//HtkqRlC/+ui5P7nDHDsYICyUjX3vRj/eiOu6ud3WlZCthGxZUBNfO4a/Kyz2rNmjU6fvy4xowZI0lKSkrSunXr6uTaAAA0FLzLBwA0CElJSZoyZYpmzZqltLQ0HT16VPPnz9eMGTP09ttvKxAIhDsiGpCTC5ynp6eHOUnTZNu2pkyZomeffVYej+eU5/r376/OnTvX+Jr79+9Xu3btqr7v0KGDWrduXe3zg0aqtG05vjbbbtR3J+rNV/9ZlXtN5mKlf2f8aeeXV5SrrLRMlsNSVOSZd9v74rPd+un4DN0yapBmTJ2sspISSZLDkn73y5/q/z3zTNWxv/jFL/TQQw9JkiorK/XLX/5SvXv3Vp8+faqKpi1btmjQoEHq27evevbsqYcffljSidJ13rx5WrBggVJTUzV79mzl5OQoISGh6vrLly9X3759lZKSoqFDh2rbtm2STpRZvXv31u23364+ffqoV69e2rRpU7V/jgAAhBIjpQAADUqbNm108803a9y4cVq6dKneffddvfzyy8rMzNSYMWM0cOBAFrRu4vbt26fdu3erW7du6tChQ7jjNElPPvmkBg4cqH79+tXovJkzZyopKUm33nrrac/NmDFDw4cPV1pamtLS0jRhwgRdeuml1bru3XffrQcfekgllSfK61nPzVdSx4skSa2S2qlFq9ba/uEHKi46rouTUxXbrNlp13A6HIqIcCvC49HCP/+xqsiSpDsfmquU/lfqsXtu17gbblTGxB9qz45tuv2aq5R+zXerjjvbUq1z587Vrl279MEHH8jj8ejo0RPTCTt16qSVK1fK4/GovLxcAwYM0MiRI5WWlqZbb71Vx48f19NPPy3pxMi0k44cOaIbbrhBa9asUXJyshYsWKAJEybok08+kSTt2LFDL774ov74xz9q3rx5+vWvf63ly5dX62cJAEAo8a4eANAgtWrVSpMnT5bX69WyZcu0fv16/f3vf1dWVpZGjx6twYMHy+2um2kyOL8wSiq8tm7dqldffVVr166t8bknp+Gdyb333qsf/vCHWrVqldauXavBgwfrxRdf1MSJE7/1uk899ZT
Gfec7WpmTJ0lyf23K75jv36ClCxeouPC4vD+YrLzDB0+7RkSER23btpXL6dTEqXdo/C2nFmelxcX6dNtWjZ7wA0knpgX2vuyKU4452+LqS5Ys0eOPP141qqxVq1aSpPLyct1+++3Kzs6Ww+HQvn37lJ2dfdqUyK/bsGGDkpOTlZycLEmaNGmS7rjjDh04cECS1LVrV11xxYlsV155pZ544olvvB4AAOHC9D0AQIOWmJioSZMm6eGHH9bw4cNVWlqqf/7zn5o+fbpWrFghn88X7ogIoeLiYr3//vtq0aKFUlNTwx2nSVq3bp1ycnLUrVs3derUSe+9956mTp2q5557rtbXbtOmjX7wgx/oueee0wMPPKAFCxZU+1ynZclpWTrTYKVBo8Zq49rV+mz7J+o7cMg3XKVmO/adLKFsSU6XS7KDVc9VVFR86/nTp09Xy5Yt9eGHH+qjjz7SsGHDqnXet4n8yvRDp9PJ9GcAQINFKQUAOC80b95c119/vR555BGNHDlS5eXlWrRokaZPn67ly5fXyS9yaPjWrl2rQCCgYcOGsQB+mNx22206ePCgcnJylJOTo7S0ND3//PO67bbbanXd119/XX6/X5IUCAT08ccfq0uXLtU+37IsNfO4FDxDKxXhidTtM+bozocePee/NzFxceraM1krXjsxrS9n1w5t2bhBkmTbRu06XaSPN38gScrPz1dWVlbVud/5znf0zDPPVJXoJ6fvFRQUqH379nK5XNq5c6dWrFhRdU58fLwKCwvPmCUtLU1btmzR1q1bJUmvvPKK2rVrd8qaXAAAnA94NwcAOK80a9ZM3/ve9zR37lyNHj1afr9fr732mqZPn66srCyVl5eHOyLqSSAQ0FtvvSW3261BgwaFOw7O4uGHH1b79u317rvvasqUKWrfvn1VCTNz5kzNmzfvjOe99tpr6t27t1JSUtSnTx95PB7NmjWrWve8++67lZqaqsmjh+qnV4/QfxcvOu2YwWPGqf/QEdW63j+f/4OmZgyr+vPPP/1eknTfk3/Qkn+8rP8ZPVh/+d2JdaYkKWiMJvzoJuXn5alHjx6aPHnyKVPwpk2bposvvlh9+/ZVamqqbrzxRknSAw88oPnz5yslJUX33XffKVNSr7vuOmVnZ1ctdP5VrVq10oIFCzR58mSlpKToueee07/+9a+zTh8EAKChsszZVmQEAOA8UFpaqv/+979atWqVKioqFBUVpfT0dI0YMUIxMTHhjoc69P777+vFF1/UkCFDNGnSpHDHQQN0pNSnTYeOy+1wyBmigsYYo4qgrc4J0erRMi4k9wQAoLGglAIANAplZWVatWqVVq5cqbKyMnk8Hg0fPlwjR45UXBy/KDYGc+fOVU5Ojh588EElJSWFOw4aINsYrfsiX6X+oCJdzpDcM2Dbso00oH1zxXvYfAEAgJqglAIANCoVFRVavXq1VqxYodLSUkVERGjo0KEaNWqU4uPjwx0P5+jzzz/XY489ph49euiuu+4Kdxw0YJ8VlGpHfokinY56n852cpRUq+gI9U9qXq/3AgCgMaKUAgA0Sj6fT2+99ZbefPNNFRcXy+12a8iQIRo1apQSEhLCHQ819MILL2jjxo264447lJKSEu44aMB8AVvv7M9XRcCu99FSftuWMVK/ts3UKtpTr/cCAKAxopQCADRqlZWVWrdunZYvX67CwkK5XC4NGjRIo0ePVosWLcIdD9Vw/Phx3X///UpMTNScOXNYzBnf6mBJhbIPF8ppWXLV0y6NtjHyBW11bBal3q0YhQkAwLmglAIANAl+v1/vvPOOli1bpoKCAjmdTg0YMEBjxoxRy5Ytwx0P3+Df//63srKy9P3vf18jRlRv9zQ0bcYYfXSkSAeKKxThrPtFz09O24t1O3Vl+xaKcLKhNQAA54JSCgDQpAQCAb377rtaunSp8vPz5XA4lJaWprFjx6p169bhjoev8fv9uv/++1VZWanf/OY3ioyMDHcknCf8QVs
fHDqu/HJ/nRZTJwupSJdDl7dNYHFzAABqgVIKANAkBYNBbdiwQUuXLtWRI0dkWZYuv/xyZWRkqG3btuGOhy+tX79ef/3rXzV8+HBdf/314Y6D80xl0NaHhwqVV14pl2XJ5bBqNf0zaBtV2kFFuZzqe0GCEiIppAAAqA1KKQBAk2bbtjZu3KisrCwdOnRIlmWpX79+ysjIULt27cIdr0kzxuiRRx7R/v37NXv2bEay4ZwEbFu78kv0RVGFgsbI43TIUcNiyhijyi8XNU+MjlDvlnGKiXDVU2IAAJoOSikAAHSinNq8ebMyMzOVm5srSbr00kvl9XrVoUOHMKdrmnbv3q0nnnhCycnJuvPOO8MdB+e5vLJKbcsrUkllUEaSy2HJZZ195JQxRraR/OZEGRXhdKhbixhdGB9V41ILAACcGaUUAABfYYxRdna2MjMztW/fPklSSkqKvF6vOnXqFN5wTcyf/vQnbd68WT//+c/Vs2fPcMdBIxC0jY6U+bSvqFzHKvwK2v/3NvirNdPJRx2WFON26cL4KLWNi5SHBc0BAKhTlFIAAJyBMUZbtmxRZmamcnJyJEm9evWS1+tVly5dwhuuCcjPz9evf/1rXXDBBXrwwQdrtQ4QcCallQEVVgZU4guoqNIvv21kjOR0WIqLcFX9aeZx8fcPAIB6wmR4AADOwLIspaSkKDk5Wdu2bVNmZqY++eQTffLJJ+revbu8Xq+6devGL6v1ZM2aNTLGKD09nZ8x6kVMhOvEulCx4U4CAEDTxUgpAACqwRijnTt3KjMzU7t27ZIkdevWTV6vV927d6c4qUM+n0/33XefJOmxxx6Tx+MJcyIAAADUB0opAABqaPfu3crMzNT27dslSZ07d5bX61WvXr0op+rA2rVrtWDBAo0aNUrjx48PdxwAAADUE0opAADO0Z49e5SZmamtW7dKkjp27Civ16uUlBTKqXNkjNGsWbN06NAhPfLII0pMTAx3JAAAANQTSikAAGpp7969yszM1EcffSRJat++vbxery699FLKqRravn27nn76aV166aW69dZbwx0HAAAA9YhSCgCAOrJv3z5lZWVp8+bNkqSkpCRlZGSoX79+cjjYSr46/vCHP+jjjz/Wvffeq4svvjjccQAAAFCPKKUAAKhjubm5ysrK0qZNm2SMUZs2bZSRkaH+/ftTTn2DI0eOaObMmWrXrp0eeOABRpkBAAA0cpRSAADUk0OHDmnp0qXasGGDjDFq2bKlMjIydMUVV8jlcoU7XoOzcOFCrVy5UpMnT9bAgQPDHQcAAAD1jFIKAIB6duTIES1btkzvvvuubNtWYmKixowZowEDBlBOfamiokLTpk2Ty+XSY489JrfbHe5IAAAAqGeUUgAAhEh+fr6WLVumd955R8FgUAkJCRozZowGDRrU5EuY1atX65VXXtHYsWN17bXXhjsOAAAAQoBSCgCAECsoKNDy5cu1bt06BQIBxcfHa/To0Ro8eLA8Hk+444WcMUYzZ85UXl6e5s6dq4SEhHBHAgAAQAhQSgEAECaFhYV688039dZbb8nv9ysuLk5XXXWVhg4dqsjIyHDHC5ktW7bo97//vS6//HJNmTIl3HEAAAAQIpRSAACEWXFxsVasWKE1a9bI5/MpJiZGI0eO1PDhwxUVFRXuePXumWee0bZt2zRt2jR17tw53HEAAAAQIpRSAAA0ECUlJVq5cqVWrVqliooKRUVFacSIEUpPT1dMTEy449WLgwcP6qGHHlKnTp103333ybKscEcCAABAiFBKAQDQwJSVlWnVqlVauXKlysrKFBkZqeHDh2vkyJGKjY0Nd7w6tWDBAq1du1a33HKLrrjiinDHAQAAQAhRSgEA0EBVVFRo9erVWrFihUpLS+XxeDR06FBdddVVio+PD3e8WisrK9O0adMUGRmpuXPnyuVyhTsSAAAAQohSCgCABs7n8+mtt97Sm2++qeLiYrndbg0ZMkSjRo06r3eqe/PNN/X
qq6/q6quv1rhx48IdBwAAACFGKQUAwHmisrJS69at0/Lly1VYWCiXy6VBgwZpzJgxat68ebjj1Yht23rggQdUWFiouXPnNoqRXwAAAKgZSikAAM4zfr9f77zzjpYtW6aCggI5nU4NGDBAY8aMUcuWLcMdr1o+/PBDzZs3T2lpabr55pvDHQcAAABhQCkFAMB5KhAI6N1339XSpUuVn58vh8OhtLQ0jR07Vq1btw53vG/0u9/9Trt27dL06dPVsWPHcMcBAABAGFBKAQBwngsGg9qwYYOysrJ09OhRWZal/v37a+zYsWrbtm24451m//79mjNnjrp06aJf/epX4Y4DAACAMKGUAgCgkbBtWxs3blRWVpYOHToky7LUr18/eb1eJSUl1e+9jVFJZUDFlQEVVwZVGbRlGyOHZSnS6VCsx6X4CJdi3E69/PLLeueddzR16lT169evXnMBAACg4aKUAgCgkbFtW5s3b1ZmZqZyc3MlSZdeeqm8Xq86dOhQp/cq8weVW1KhfUXlqgicKKLOxmlZ8jiM1i/9j8oPH9DsB2fI4XDUaR4AAACcPyilAABopIwxys7O1pIlS7R//35JUkpKirxerzp16lSra/uDtnYdK9H+4goFbCNLktvhkMOSLMs6Y5agkUrKy+Sr8Ckywq3e7Vqpc0KMnI7TjwcAAEDjRykFAEAjZ4zRli1btGTJEu3du1eS1KtXL3m9XnXp0qXG18sr8+mTo8Uq8QfltCy5HdYZi6jTcsjowIEDsm2j1m3byshSgselXq3ilRDprnEOAAAAnN8opQAAaCKMMdq2bZuWLFmiPXv2SJK6d+8ur9eriy++uFrX2FtYpu35JQraRh6nQ45qlFEnlZaVKu9onmLjYpXYIlG2MfIFbbkdllJax+uC2Mhzel0AAAA4P1FKAQDQxBhjtHPnTi1ZskS7d++WJHXr1k1er1fdu3c/66invYVl2pZXIskowuGo1uiorzp06KB8vkolJbWV2x1RlcVn23JalvpQTAEAADQplFIAADRhu3btUmZmpnbs2CFJ6ty5s7xer3r16nVK6XSk1KfNhwtljORx1nxx8spKnw4ePKTIqEi1ad3mlOdOFlMuy9IVSc3VjKl8AAAATQKlFAAA0GeffabMzEx98sknkqSOHTvK6/UqJSVFftto/f5jKvUHFems+QgpScrLz1NpSalat26lqKjo0543xqgiaCsh0q20pOYsfg4AANAEUEoBAIAqOTk5yszM1McffyxJ6tChg9KumShfVHyN15A6KRgM6sCB/XI6XWrXLknSma8RNEaVQVuXJMaqa/OY2rwMAAAAnAcopQAAwGn27dt3YlrfnhxdNv5Hcjqdio2OUkx0tM5WKp1NYeFxHT9eqOYtmis+Lv4bj/UFbbkcloZemKiIc5gmCAAAgPMHpRQAADirjZ8f0N6SShUXHJOM5Ha71KxZM8XExKg65ZSR0YH9+2WMUbv27eWwvrloOrkjX+9WcerY7PRpfgAAAGg8+AgSAACcUdA2KrIiFBMdraS2SYqJjZE/EFBeXr4O5OaqpKRERt/82VZZaamCQVsxMbHfWkhJksOyZEnaV1QuPjcDAABo3CilAADAGRVV+lUZtOVyOOR2u9UysaXaJSUpNjZGwUBA+fn5yj1wQCUlxWcpp4yKioolSXHxcdW+r8thqcQfVHnArqNXAgAAgIaIUgoAgCbiZz/7mTp16iTLspSdnX3GY+bPny/LsrR48WIV+wKyzalvFlwutxITWyopqZ1i42IVDAaVn39MuQcOqLi46JTRTT6fT5WVlYqKipTb5a56/IZBffXpti1nzemwLNm2UXFloOqx48eP67HHHjvn1w4AAICGh1IKAIAmYsKECXr77bfVsWPHMz6fk5OjP//5z0pLS5MkFX1ZClln2HHP5XIpsUWi2rVrp7i4OAWDQR07VqADBw6oqLhIxtgqKj45SuqbFzf/OodlyUiUUgAAAI2cK9wBAABAaAw
ZMuSsz9m2rSlTpujZZ5/VvffeK0nyVWP63LGjR/WHWdO1b8+nsm1bfQYM1nW33KYv9uzR/CceVl7uAblcLl174xRdfcONVeetXPyqnph2l0qLizTuBzdq4k/ulCTt/Dhbv581XWWlJYqNjtK8Z/+fBg4cqFtvvVXFxcVKTU2Vy+XSpk2b9PDDD2vBggXyeDySpH//+99nLdwAAADQ8FBKAQAAPfnkkxo4cKD69etX9ZhtjCxJ8598TC3bXKCrJ9102nlz775Nlw0epoeemy9Jyj96WA53hJ6bdb/ad+qi2fNeUmV5uW77zkh16dFLPS+9TJJUkHdUz73xXxUVHNOtV49Q78v66+LkVD10202659En1XvgUOVvz9b48eP16aefat68eUpNTa2adlhQUKAnnnhCBw8eVFRUlMrKyuRwMAAcAADgfEIpBQBAE7d161a9+uqrWrt27SmPn5xGd/M9953xvPLSEm3dtEGP/3Vh1WOJrdpIknZ8uEk/n/WoYmJiFRMTq0Gjvdr89ltVpdTY798gy7LUrEWiBo326oN31ioqJlaW5dDlQ9NVHgjqsisHqE2bNsrOzlb79u1PuXd8fLy6deumH/7whxo1apS8Xu9pxwAAAKBh4yNFAACauHXr1iknJ0fdunVTp06d9N5772nq1Kla/PJfanVdy3J85evT16U65Vid/nyE03HW85xOp9577z3dddddOnLkiNLS0rRu3bpa5QUAAEBoUUoBANDE3XbbbTp48KBycnKUk5OjtLQ0Pf/88/rxT26VpFN21PuqqJhYpfS/Uv964Y9Vjx3Pz5Mk9Rs4RJn/eLnqsbeXZ6rfoGFVxy1/9RVJUtHxAr29PEt9Bw5Wh85dZYytjWtXy5K0Y/NGHTp0SKmpqYqPj1d5ebkqKyslScXFxTp8+LAGDx6sGTNmaNCgQfrwww/r+kcDAACAesT0PQAAmoif/OQnyszM1KFDhzR69GjFxcXp008/PevxcR6XHJY0/6nH1KpN2zOuKXXfk3/U7x+6T7eMGiSXy6UBV43VTXdP0x0PPqpnZvxKU8YMkTFGN9xxt3pc+n/rVTVr0VK3Xj1CpcVFunby/6hXv/6SpIeee0nPPnS/yh99UIlxMVq0aJFiY2MlSZMnT1ZKSopiY2O1ePFiTZgwQaWlpbIsS926ddONN954Wj4AAAA0XJY528efAACgSQvaRmu+yFNl0JbH6QzZfSsCQcV7XBrYvsW3TvsDAADA+YvpewAA4IycDkvt46Jkm7NP4atrxhgZSR3ioyikAAAAGjlKKQAAcFbt4yLlclgK2KEppSptI4/TobaxkSG5HwAAAMKHUgoAAJxVTIRLSbGRChgju55HSwW/vEfHZlGKcPIWBQAAoLHjHR8AAPhGlyTGKsbtlC9o19s0PmOMKoO2EiLd6pwQUy/3AAAAQMNCKQUAAL5RhNOhni3j5HRYqqyHaXzGGPlsW26HpV5f3gcAAACNH6UUAAD4Vq1jPOqeGCtJdTpi6mQh5bQsJbeOV0Kku06uCwAAgIaPUgoAAFRLp2bR6tkyVpYlVQTtWq8xZRujiqAtl2UppXU8i5sDAAA0MZYJ1R7PAACgUTha5tO2o8Uq8QfltCy5HZYsq/pT7owx8ttGQWPUzONSr1bxas4IKQAAgCaHUgoAANSYP2hr17ES7S+uUMA2ckhyORxyWDpjQWWMUdBIAduWkeR2WOqUEK0uCTGsIQUAANBEUUoBAIBzVuYPKre4QvuKy1URODGlz5L01TcXJ793Wpai3A5dGB+ttrEeRbqc4QkNAACABoFSCgAA1JptjEoqAyquDKjIF1Bl0JatE0WUx+lQXIRLcR6XYt3OGk31AwAAQONFKQUAAAAAAICQY/c9AAAAAAAAhBylFAAAAAAAAEKOUgoAAAAAAAAhRykFAAAAAACAkKOUAgAAAAAAQMhRSgEAAAAAACDkKKUAAAAAAAAQcpRSAAAAAAAACDlKKQAAAAAAAIQcpRQ
AAAAAAABCjlIKAAAAAAAAIUcpBQAAAAAAgJCjlAIAAAAAAEDIUUoBAAAAAAAg5CilAAAAAAAAEHKUUgAAAAAAAAg5SikAAAAAAACEHKUUAAAAAAAAQo5SCgAAAAAAACFHKQUAAAAAAICQo5QCAAAAAABAyFFKAQAAAAAAIOQopQAAAAAAABBylFIAAAAAAAAIOUopAAAAAAAAhBylFAAAAAAAAEKOUgoAAAAAAAAhRykFAAAAAACAkKOUAgAAAAAAQMhRSgEAAAAAACDkKKUAAAAAAAAQcpRSAAAAAAAACDlKKQAAAAAAAIQcpRQAAAAAAABCjlIKAAAAAAAAIUcpBQAAAAAAgJCjlAIAAAAAAEDIucIdAAAAND2+QFBlAVu2bSRLclmWYiKccjn4vAwAAKCpoJQCAAD1zjZGR0p9Olzq03GfXxUBW7YxMl8+b0lyWpZi3C41j3KrbWykEjwuWZYVztgAAACoR5Yxxnz7YQAAADUXsG3tLSzXvqJylfmDMjJyWJYcliXnVwonY04UV8EviyqHZSnB41LHZtFqG+uhnAIAAGiEKKUAAEC9yC+v1CdHi1VcGZAlKcLpkKMa5ZL5spzy20aWJbWJ9qhHyzhFu531HxoAAAAhQykFAADqlDFGuwtKted4mYK2kaeaZdSZBGxbftso0uVQ71bxahPjqeO0AAAACBdKKQAAUGeMMdqWV6y9heVyWJbcDqvWU++MMfIFbTkdlpJbxyspNrKO0gIAACCc2OIGAADUmV3HSrW3sFxOh6UIp6NO1oKyLEsep0NBY7TlSJGOlvnqICkAAADCjVIKAADUibyySn1+vOzLEVJ1+xbDsix5HA4FbKOtR4vlC9p1en0AAACEHqUUAACoNX/Q1id5RQoaI7ejfnbKOzliqswf1I78YrECAQAAwPmNUgoAANRaTmGZSiqD8tTRlL2zcViWXA5LucU+Havw19t9AAAAUP8opQAAQK0EbKN9ReVyWDrnXfZqwmVZss2JewIAAOD8RSkFAABq5XBphSoCdp2vI3U2lmXJZVk6XOpTuT8YknsCAACg7lFKAQCAWjlc4pNRaEZJneRyWArYRkfYiQ8AAOC8RSkFAEAjMGrUKKWkpCg1NVWDBw/Whx9+WPXcsmXLdNlllyklJUVpaWn66KOPqnXNm266Se3atVNqaqqSk5M1ZMgQ7dix45RjjDEq8PlPK6SmZgxTWUmJJOmGQX316bYtZ7zHQ7fdrGWL/iFJmv/kY/rv4kXVynZy3aoiX+Abj3vppZdOy1wXnn76aR06dKjOrwsAANCUUEoBANAILFy4UB9//LGys7N1zz336KabbpIkFRQUaNKkSfrrX/+qjz/+WL/97W81adKkal/3l7/8pbKzs7VlyxZlZGRoxowZpzxfHrDlt42cXxsk9XzWGkXHxtboNdx8z30aee2Eah9vSTru++bFzr+tlAoGz236H6UUAABA7VFKAQDQCCQkJFR9XVhYWDWS6LPPPlNiYqJ69eolSRo8eLC++OILbd68uUbXN8aoqKhIzZs3lyTl5OQoISFBpf6AbGPkKyvViItaVR0/4qJWKikqPO06X3y2Wz8dn6FbRg3SjKmTq0ZTSdLjv7hTr/5lniTpr0//RnPunKJf/88k3XzVQN17w3UqOl4gSQr4/Xpmxq809aoBmvqdUbr7nns0bNiw0+71wgsvaNOmTbr77ruVmpqqrKwsvfTSSxo+fLjGjx+v5ORkvf/++xo2bJgWL15cdd6ECRP00ksvVV2jZ8+eVaPFNmzYoNmzZys3N1cTJ05UamqqsrOza/SzBAAAwAmucAcAAAB1Y/LkyVq9erUkKSsrS5LUrVs35efna/369RowYIDeeOMNFRcXKycnR3379tXMmTOVlJSkW2+99YzX/O1vf6uXXnpJR48eldPp1Nq1a0953jaSMSdGLVXHY/fcrnE33KiMiT/Unh3bdPs1Vyn9mu+e8djt2Zv13H/+q2bNW2jOT3+
sJX//q264/S4t+cfftP/zPfrTshNZnrrjxjOeP2XKFP3v//6v7rrrLl177bWSToyc2rBhgz788ENdcskl35r33nvv1Y4dO9S2bVv5/X75fD5dccUV+stf/qJ//vOfSk1NreYrBwAAwNcxUgoAgEbib3/7m/bt26eHH35Y06ZNkyQ1a9ZMixYt0v33369+/frpzTffVM+ePeVynfhcavbs2WctpKT/m7534MABzZo1SxMmVH963deVFhfr021bNXrCDyRJnbv3VO/Lrjjr8ZcPTVez5i0kST37XqbcvTmSpA/Xr9PI6ybI5XbL5Xbrh5Mn1yjHgAEDqlVISdKIESP0ox/9SM8884w+//xzxdZwSiIAAADOjlIKAIBG5sYbb9Tq1auVn58vSRo+fLjeeustffDBB/rd736n3Nxc9ezZs8bXnThxoj744AMdPXpULpdLwWBQTsuSZUk+37ntgmd9w459ER5P1ddOh1PB4KmLmp8coeV01OztzNeLpZOv5aSKioqqr1999VU99thj8vv9ysjI0CuvvFKjewEAAODsKKUAADjPHT9+XLm5uVXfL168WImJiWrR4sQoo4MHD1Y9N2fOHKWnp6tr1641vs/KlSvVsmVLJSYm6oILLpAxRvs+3SmnZenN1xZ+6/kxcXHq2jNZK177pyQpZ9cObdm4ocY5Lr1ykFb9+1VVVlbKYxn978svn/XY+Ph4FRaevrbVV3Xt2lUbNpzI8fnnn+vtt9+WJAUCAX322We67LLL9Itf/EITJkzQ+++/X+3rAgAA4JuxphQAAOe5wsJCfe9731N5ebkcDodatWqlJUuWVI1CmjlzptatW6dAIKArr7xSL774YtW51V1Tyhgjj8ejRYsWyeFwyOFw6Nlnn9X4a69RRFwzDRjtrVbW+578g37zy5/pXy88p3adOiul/5U1fr3jbrhRe3Zu161jhyixRXMNTrvilFLuq6ZOnap7771XTz31lB599NEzHvOrX/1KEydOVHJysnr16qUrrjgxpTAYDOqWW27RsWPH5HK51KpVK82fP1+S9LOf/Uw//vGPFR0drZdeeom1pQAAAM6BZYwx4Q4BAADOX9mHC3WguEJRLmfI7llaXCxnVLS6N4/SfbdNUb9+/arW0QIAAMD5gZFSAACgVpJiI5VbUqGgMXJ+wxpRdemXPxovf2WlXHZAgwcN0s9+9rOQ3BcAAAB1h5FSAACgVmxjtG5fvkorg4oMwWgpY4wqgrY6NYtSr1bx9X4/AAAA1A8WOgcAALXisCx1jI+WJAVD8FmX3zZyOSy1j4+q93sBAACg/lBKAQCAWruwWZSaR7lVGbRVn4OwbWMUNEYdm0Wpmcddb/cBAABA/aOUAgAAteawLPVqGSe3w1Jl0K6Xexhj5AvaauZxqWvz2Hq5BwAAAEKHUgoAANSJeI9bPVrGSZYlXx0XUyfXkYp0OZTcOl4uR2gWVAcAAED9oZQCAAB1pkN8lHokxsqSVBEI1slUPvsrhdSlbZoxbQ8AAKCRYPc9AABQ5w4Ul2t7Xol8QVsRDoec5zCyyRgjv31iDan4CJdSWserWSSFFAAAQGNBKQUAAOpFqT+gbUeLdbS8UsZIboclp2XJsr65oPpqGeV0WOoYH6VuLWLkcjDAGwAAoDGhlAIAAPXGGKNDpT59UVSugnK/gl++7XBYlpxf6aaMTkzTs798V+J2WLog1qMO8dFqzugoAACARolSCgAAhESRz68jZZUqrPCr0OeX3zY6+SbEkhTlcioh0qV4j1sXxHgU6XKGMy4AAADqGaUUAAAIOWOMKoMnpuhZluSyLLmdTM8DAABoSiilAAAAAAAAEHJ8JAkAAAAAAICQo5QCAAAAAABAyFFKAQAAAAAAIOQopQAAAAAAABBylFIAAAAAAAAIOUopAAAAAAAAhBylFAAAAAAAAEKOUgoAAAAAAAAhRykFAAAAAACAkKOUAgAAAAAAQMhRSgEAAAAAACDkKKUAAAAAAAA
QcpRSAAAAAAAACDlKKQAAAAAAAIQcpRQAAAAAAABCjlIKAAAAAAAAIUcpBQAAAAAAgJCjlAIAAAAAAEDIUUoBAAAAAAAg5CilAAAAAAAAEHKUUgAAAAAAAAg5SikAAAAAAACEHKUUAAAAAAAAQo5SCgAAAAAAACFHKQUAAAAAAICQo5QCAAAAAABAyFFKAQAAAAAAIOQopQAAAAAAABBylFIAAAAAAAAIOUopAAAAAAAAhBylFAAAAAAAAEKOUgoAAAAAAAAhRykFAAAAAACAkKOUAgAAAAAAQMhRSgEAAAAAACDk/j8qXR1qGCHC6QAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "=== ANSWER ===\n", + "The provided context does not mention transformers in natural language processing. The context discusses various topics such as machine learning, deep learning, reinforcement learning, and AI applications, but transformers are not mentioned.\n", + "Extracting text from data/AI_Information.pdf...\n", + "Created 42 text chunks\n", + "Building knowledge graph...\n", + "Creating embeddings for chunks...\n", + "Adding nodes to the graph...\n", + "Extracting concepts for chunk 1/42...\n", + "Extracting concepts for chunk 2/42...\n", + "Extracting concepts for chunk 3/42...\n", + "Extracting concepts for chunk 4/42...\n", + "Extracting concepts for chunk 5/42...\n", + "Extracting concepts for chunk 6/42...\n", + "Extracting concepts for chunk 7/42...\n", + "Extracting concepts for chunk 8/42...\n", + "Extracting concepts for chunk 9/42...\n", + "Extracting concepts for chunk 10/42...\n", + "Extracting concepts for chunk 11/42...\n", + "Extracting concepts for chunk 12/42...\n", + "Extracting concepts for chunk 13/42...\n", + "Extracting concepts for chunk 14/42...\n", + "Extracting concepts for chunk 15/42...\n", + "Extracting concepts for chunk 16/42...\n", + "Extracting concepts for chunk 17/42...\n", + "Extracting concepts for chunk 18/42...\n", + "Extracting concepts for chunk 19/42...\n", + "Extracting concepts for chunk 20/42...\n", + "Extracting concepts for chunk 21/42...\n", + "Extracting concepts for chunk 22/42...\n", + "Extracting concepts for chunk 23/42...\n", + "Extracting concepts for chunk 24/42...\n", + "Extracting concepts for chunk 25/42...\n", + "Extracting concepts for chunk 26/42...\n", + "Extracting concepts for chunk 27/42...\n", + "Extracting concepts for chunk 28/42...\n", + "Extracting concepts for chunk 29/42...\n", + "Extracting concepts for chunk 30/42...\n", + "Extracting concepts 
for chunk 31/42...\n", + "Extracting concepts for chunk 32/42...\n", + "Extracting concepts for chunk 33/42...\n", + "Extracting concepts for chunk 34/42...\n", + "Extracting concepts for chunk 35/42...\n", + "Extracting concepts for chunk 36/42...\n", + "Extracting concepts for chunk 37/42...\n", + "Extracting concepts for chunk 38/42...\n", + "Extracting concepts for chunk 39/42...\n", + "Extracting concepts for chunk 40/42...\n", + "Extracting concepts for chunk 41/42...\n", + "Extracting concepts for chunk 42/42...\n", + "Creating edges between nodes...\n", + "Knowledge graph built with 42 nodes and 107 edges\n", + "\n", + "\n", + "=== Evaluating Query 1/1 ===\n", + "Query: How do transformers handle sequential data compared to RNNs?\n", + "Traversing graph for query: How do transformers handle sequential data compared to RNNs?\n", + "Starting traversal from 5 nodes\n", + "Graph traversal found 9 relevant chunks\n", + "\n", + "Response: The provided context does not specifically discuss transformers. However, I can provide a general comparison between transformers and RNNs.\n", + "\n", + "Transformers are a type of neural network architecture that have gained popularity in recent years, particularly in natural language processing tasks. They are designed to handle sequential data, but they do so in a different way compared to RNNs.\n", + "\n", + "RNNs are designed to process sequential data by maintaining a hidden state that captures information from previous time steps. This allows RNNs to learn patterns and relationships in sequential data, such as text or time series.\n", + "\n", + "Transformers, on the other hand, use self-attention mechanisms to process sequential data. Instead of using a hidden state to capture information, transformers use attention weights to weigh the importance of different input elements at different positions in the sequence. 
This allows transformers to capture long-range dependencies in sequential data more effectively than RNNs.\n", + "\n", + "In terms of handling sequential data, transformers are generally more efficient and effective than RNNs, particularly for tasks such as language translation, text generation, and sentiment analysis. However, transformers do require more computational resources and training data than RNNs, which can be a limitation in certain applications.\n", + "\n", + "It's worth noting that the context does not provide a detailed comparison between transformers and RNNs, but I can provide a general overview of the differences between these two architectures.\n", + "\n", + "Comparison: Comparison of the AI-generated response with the reference answer:\n", + "\n", + "* Correctness: The AI response is mostly correct, but it lacks the specific detail about vanishing gradient problems in RNNs, which is mentioned in the reference answer.\n", + "* Completeness: The AI response provides a more comprehensive overview of the differences between transformers and RNNs, including their strengths and limitations.\n", + "* Relevance to the query: The AI response is highly relevant to the query, as it directly compares the handling of sequential data by transformers and RNNs.\n", + "\n", + "Brief analysis: The AI response provides a more detailed and comprehensive comparison between transformers and RNNs, but it could benefit from the specific detail about vanishing gradient problems in RNNs to make it more accurate. 
Overall, the AI response is a good match for the reference answer, but with some minor improvements.\n", + "\n", + "\n", + "=== EVALUATION SUMMARY ===\n", + "Graph nodes: 42\n", + "Graph edges: 107\n", + "\n", + "Query 1: How do transformers handle sequential data compared to RNNs?\n", + "Path length: 9\n", + "Chunks used: 9\n" + ] + } + ], + "source": [ + "# Path to the PDF document containing AI information\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Define an AI-related query for testing Graph RAG\n", + "query = \"What are the key applications of transformers in natural language processing?\"\n", + "\n", + "# Execute the Graph RAG pipeline to process the document and answer the query\n", + "results = graph_rag_pipeline(pdf_path, query)\n", + "\n", + "# Print the response generated from the Graph RAG system\n", + "print(\"\\n=== ANSWER ===\")\n", + "print(results[\"response\"])\n", + "\n", + "# Define a test query and reference answer for formal evaluation\n", + "test_queries = [\n", + " \"How do transformers handle sequential data compared to RNNs?\"\n", + "]\n", + "\n", + "# Reference answer for evaluation purposes\n", + "reference_answers = [\n", + " \"Transformers handle sequential data differently from RNNs by using self-attention mechanisms instead of recurrent connections. This allows transformers to process all tokens in parallel rather than sequentially, capturing long-range dependencies more efficiently and enabling better parallelization during training. 
Unlike RNNs, transformers don't suffer from vanishing gradient problems with long sequences.\"\n", + "]\n", + "\n", + "# Run formal evaluation of the Graph RAG system with the test query\n", + "evaluation = evaluate_graph_rag(pdf_path, test_queries, reference_answers)\n", + "\n", + "# Print evaluation summary statistics\n", + "print(\"\\n=== EVALUATION SUMMARY ===\")\n", + "print(f\"Graph nodes: {evaluation['graph_stats']['nodes']}\")\n", + "print(f\"Graph edges: {evaluation['graph_stats']['edges']}\")\n", + "for i, result in enumerate(evaluation['results']):\n", + " print(f\"\\nQuery {i+1}: {result['query']}\")\n", + " print(f\"Path length: {result['traversal_path_length']}\")\n", + " print(f\"Chunks used: {result['relevant_chunks_count']}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/18_hierarchy_rag.ipynb b/18_hierarchy_rag.ipynb new file mode 100644 index 0000000..6da3837 --- /dev/null +++ b/18_hierarchy_rag.ipynb @@ -0,0 +1,1108 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Hierarchical Indices for RAG\n", + "\n", + "In this notebook, I implement a hierarchical indexing approach for RAG systems. 
This technique improves retrieval by using a two-tier search method: first identifying relevant document sections through summaries, then retrieving specific details from those sections.\n", + "\n", + "Traditional RAG approaches treat all text chunks equally, which can lead to:\n", + "\n", + "- Lost context when chunks are too small\n", + "- Irrelevant results when the document collection is large\n", + "- Inefficient searches across the entire corpus\n", + "\n", + "Hierarchical retrieval solves these problems by:\n", + "\n", + "- Creating concise summaries for larger document sections\n", + "- First searching these summaries to identify relevant sections\n", + "- Then retrieving detailed information only from those sections\n", + "- Maintaining context while preserving specific details" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re\n", + "import pickle" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extract text content from a PDF file with page separation.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " \n", + " Returns:\n", + " List[Dict]: List of pages with text content and metadata\n", + " \"\"\"\n", + " print(f\"Extracting text from {pdf_path}...\") # Print the path of the PDF being processed\n", + " pdf = fitz.open(pdf_path) # Open the PDF file using PyMuPDF\n", + " pages = [] # Initialize an empty list to store the pages with text content\n", + " \n", + " # Iterate over each page in the PDF\n", + " for page_num in range(len(pdf)):\n", + " page = pdf[page_num] # Get the current page\n", + " text = page.get_text() # Extract text from the current page\n", + " \n", + " # Skip pages with very little text (less than 50 characters)\n", + " if len(text.strip()) > 50:\n", + " # Append the page text and metadata to the list\n", + " pages.append({\n", + " \"text\": text,\n", + " \"metadata\": {\n", + " \"source\": pdf_path, # Source file path\n", + " \"page\": page_num + 1 # Page number (1-based index)\n", + " }\n", + " })\n", + " \n", + " print(f\"Extracted {len(pages)} pages with content\") # Print the number of pages extracted\n", + " return pages # Return the list of pages with text content and metadata" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + 
"source": [ + "def chunk_text(text, metadata, chunk_size=1000, overlap=200):\n", + " \"\"\"\n", + " Split text into overlapping chunks while preserving metadata.\n", + " \n", + " Args:\n", + " text (str): Input text to chunk\n", + " metadata (Dict): Metadata to preserve\n", + " chunk_size (int): Size of each chunk in characters\n", + " overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: List of text chunks with metadata\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Iterate over the text with the specified chunk size and overlap\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " chunk_text = text[i:i + chunk_size] # Extract the chunk of text\n", + " \n", + " # Skip very small chunks (less than 50 characters)\n", + " if chunk_text and len(chunk_text.strip()) > 50:\n", + " # Create a copy of metadata and add chunk-specific info\n", + " chunk_metadata = metadata.copy()\n", + " chunk_metadata.update({\n", + " \"chunk_index\": len(chunks), # Index of the chunk\n", + " \"start_char\": i, # Start character index of the chunk\n", + " \"end_char\": i + len(chunk_text), # End character index of the chunk\n", + " \"is_summary\": False # Flag indicating this is not a summary\n", + " })\n", + " \n", + " # Append the chunk with its metadata to the list\n", + " chunks.append({\n", + " \"text\": chunk_text,\n", + " \"metadata\": chunk_metadata\n", + " })\n", + " \n", + " return chunks # Return the list of chunks with metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " self.vectors = [] # List to store vector embeddings\n", + " self.texts = 
[] # List to store text content\n", + " self.metadata = [] # List to store metadata\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " text (str): Text content\n", + " embedding (List[float]): Vector embedding\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Append the embedding as a numpy array\n", + " self.texts.append(text) # Append the text content\n", + " self.metadata.append(metadata or {}) # Append the metadata or an empty dict if None\n", + " \n", + " def similarity_search(self, query_embedding, k=5, filter_func=None):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of results to return\n", + " filter_func (callable, optional): Function to filter results\n", + " \n", + " Returns:\n", + " List[Dict]: Top k most similar items\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return an empty list if there are no vectors\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " # Skip if doesn't pass the filter\n", + " if filter_func and not filter_func(self.metadata[i]):\n", + " continue\n", + " \n", + " # Calculate cosine similarity\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity)) # Append index and similarity score\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " 
idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx], # Add the text content\n", + " \"metadata\": self.metadata[idx], # Add the metadata\n", + " \"similarity\": float(score) # Add the similarity score\n", + " })\n", + " \n", + " return results # Return the list of top k results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (List[str]): Input texts\n", + " model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vectors\n", + " \"\"\"\n", + " # Handle empty input\n", + " if not texts:\n", + " return []\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(texts), batch_size):\n", + " batch = texts[i:i + batch_size] # Get the current batch of texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = [item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " return all_embeddings # Return all embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summarization Function" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_page_summary(page_text):\n", + " \"\"\"\n", + " Generate a concise summary of a page.\n", + " \n", + " Args:\n", + " page_text 
(str): Text content of the page\n", + " \n", + " Returns:\n", + " str: Generated summary\n", + " \"\"\"\n", + " # Define the system prompt to instruct the summarization model\n", + " system_prompt = \"\"\"You are an expert summarization system.\n", + " Create a detailed summary of the provided text. \n", + " Focus on capturing the main topics, key information, and important facts.\n", + " Your summary should be comprehensive enough to understand what the page contains\n", + " but more concise than the original.\"\"\"\n", + "\n", + " # Truncate input text if it exceeds the maximum token limit\n", + " max_tokens = 6000\n", + " truncated_text = page_text[:max_tokens] if len(page_text) > max_tokens else page_text\n", + "\n", + " # Make a request to the OpenAI API to generate the summary\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": f\"Please summarize this text:\\n\\n{truncated_text}\"} # User message with the text to summarize\n", + " ],\n", + " temperature=0.3 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated summary content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hierarchical Document Processing" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document_hierarchically(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document into hierarchical indices.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each detailed chunk\n", + " chunk_overlap (int): Overlap between chunks\n", + " \n", + " Returns:\n", + " Tuple[SimpleVectorStore, 
SimpleVectorStore]: Summary and detailed vector stores\n", + " \"\"\"\n", + " # Extract pages from PDF\n", + " pages = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Create summaries for each page\n", + " print(\"Generating page summaries...\")\n", + " summaries = []\n", + " for i, page in enumerate(pages):\n", + " print(f\"Summarizing page {i+1}/{len(pages)}...\")\n", + " summary_text = generate_page_summary(page[\"text\"])\n", + " \n", + " # Create summary metadata\n", + " summary_metadata = page[\"metadata\"].copy()\n", + " summary_metadata.update({\"is_summary\": True})\n", + " \n", + " # Append the summary text and metadata to the summaries list\n", + " summaries.append({\n", + " \"text\": summary_text,\n", + " \"metadata\": summary_metadata\n", + " })\n", + " \n", + " # Create detailed chunks for each page\n", + " detailed_chunks = []\n", + " for page in pages:\n", + " # Chunk the text of the page\n", + " page_chunks = chunk_text(\n", + " page[\"text\"], \n", + " page[\"metadata\"], \n", + " chunk_size, \n", + " chunk_overlap\n", + " )\n", + " # Extend the detailed_chunks list with the chunks from the current page\n", + " detailed_chunks.extend(page_chunks)\n", + " \n", + " print(f\"Created {len(detailed_chunks)} detailed chunks\")\n", + " \n", + " # Create embeddings for summaries\n", + " print(\"Creating embeddings for summaries...\")\n", + " summary_texts = [summary[\"text\"] for summary in summaries]\n", + " summary_embeddings = create_embeddings(summary_texts)\n", + " \n", + " # Create embeddings for detailed chunks\n", + " print(\"Creating embeddings for detailed chunks...\")\n", + " chunk_texts = [chunk[\"text\"] for chunk in detailed_chunks]\n", + " chunk_embeddings = create_embeddings(chunk_texts)\n", + " \n", + " # Create vector stores\n", + " summary_store = SimpleVectorStore()\n", + " detailed_store = SimpleVectorStore()\n", + " \n", + " # Add summaries to summary store\n", + " for i, summary in enumerate(summaries):\n", + " 
summary_store.add_item(\n", + " text=summary[\"text\"],\n", + " embedding=summary_embeddings[i],\n", + " metadata=summary[\"metadata\"]\n", + " )\n", + " \n", + " # Add chunks to detailed store\n", + " for i, chunk in enumerate(detailed_chunks):\n", + " detailed_store.add_item(\n", + " text=chunk[\"text\"],\n", + " embedding=chunk_embeddings[i],\n", + " metadata=chunk[\"metadata\"]\n", + " )\n", + " \n", + " print(f\"Created vector stores with {len(summaries)} summaries and {len(detailed_chunks)} chunks\")\n", + " return summary_store, detailed_store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hierarchical Retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def retrieve_hierarchically(query, summary_store, detailed_store, k_summaries=3, k_chunks=5):\n", + " \"\"\"\n", + " Retrieve information using hierarchical indices.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " summary_store (SimpleVectorStore): Store of document summaries\n", + " detailed_store (SimpleVectorStore): Store of detailed chunks\n", + " k_summaries (int): Number of summaries to retrieve\n", + " k_chunks (int): Number of chunks to retrieve per summary\n", + " \n", + " Returns:\n", + " List[Dict]: Retrieved chunks with relevance scores\n", + " \"\"\"\n", + " print(f\"Performing hierarchical retrieval for query: {query}\")\n", + " \n", + " # Create query embedding\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # First, retrieve relevant summaries\n", + " summary_results = summary_store.similarity_search(\n", + " query_embedding, \n", + " k=k_summaries\n", + " )\n", + " \n", + " print(f\"Retrieved {len(summary_results)} relevant summaries\")\n", + " \n", + " # Collect pages from relevant summaries\n", + " relevant_pages = [result[\"metadata\"][\"page\"] for result in summary_results]\n", + " \n", + " # Create a filter function to only keep chunks from relevant 
pages\n", + " def page_filter(metadata):\n", + " return metadata[\"page\"] in relevant_pages\n", + " \n", + " # Then, retrieve detailed chunks from only those relevant pages\n", + " detailed_results = detailed_store.similarity_search(\n", + " query_embedding, \n", + " k=k_chunks * len(relevant_pages),\n", + " filter_func=page_filter\n", + " )\n", + " \n", + " print(f\"Retrieved {len(detailed_results)} detailed chunks from relevant pages\")\n", + " \n", + " # For each result, add which summary/page it came from\n", + " for result in detailed_results:\n", + " page = result[\"metadata\"][\"page\"]\n", + " matching_summaries = [s for s in summary_results if s[\"metadata\"][\"page\"] == page]\n", + " if matching_summaries:\n", + " result[\"summary\"] = matching_summaries[0][\"text\"]\n", + " \n", + " return detailed_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation with Context" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, retrieved_chunks):\n", + " \"\"\"\n", + " Generate a response based on the query and retrieved chunks.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " retrieved_chunks (List[Dict]): Retrieved chunks from hierarchical search\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Extract text from chunks and prepare context parts\n", + " context_parts = []\n", + " \n", + " for i, chunk in enumerate(retrieved_chunks):\n", + " page_num = chunk[\"metadata\"][\"page\"] # Get the page number from metadata\n", + " context_parts.append(f\"[Page {page_num}]: {chunk['text']}\") # Format the chunk text with page number\n", + " \n", + " # Combine all context parts into a single context string\n", + " context = \"\\n\\n\".join(context_parts)\n", + " \n", + " # Define the system message to guide the AI assistant\n", + " system_message = \"\"\"You are a helpful AI assistant 
answering questions based on the provided context.\n", + "Use the information from the context to answer the user's question accurately.\n", + "If the context doesn't contain relevant information, acknowledge that.\n", + "Include page numbers when referencing specific information.\"\"\"\n", + "\n", + " # Generate the response using the OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": f\"Context:\\n\\n{context}\\n\\nQuestion: {query}\"} # User message with context and query\n", + " ],\n", + " temperature=0.2 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated response content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete RAG Pipeline with Hierarchical Retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def hierarchical_rag(query, pdf_path, chunk_size=1000, chunk_overlap=200, \n", + " k_summaries=3, k_chunks=5, regenerate=False):\n", + " \"\"\"\n", + " Complete hierarchical RAG pipeline.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " pdf_path (str): Path to the PDF document\n", + " chunk_size (int): Size of each detailed chunk\n", + " chunk_overlap (int): Overlap between chunks\n", + " k_summaries (int): Number of summaries to retrieve\n", + " k_chunks (int): Number of chunks to retrieve per summary\n", + " regenerate (bool): Whether to regenerate vector stores\n", + " \n", + " Returns:\n", + " Dict: Results including response and retrieved chunks\n", + " \"\"\"\n", + " # Create store filenames for caching\n", + " summary_store_file = f\"{os.path.basename(pdf_path)}_summary_store.pkl\"\n", + " 
detailed_store_file = f\"{os.path.basename(pdf_path)}_detailed_store.pkl\"\n", + " \n", + " # Process document and create stores if needed\n", + " if regenerate or not os.path.exists(summary_store_file) or not os.path.exists(detailed_store_file):\n", + " print(\"Processing document and creating vector stores...\")\n", + " # Process the document to create hierarchical indices and vector stores\n", + " summary_store, detailed_store = process_document_hierarchically(\n", + " pdf_path, chunk_size, chunk_overlap\n", + " )\n", + " \n", + " # Save the summary store to a file for future use\n", + " with open(summary_store_file, 'wb') as f:\n", + " pickle.dump(summary_store, f)\n", + " \n", + " # Save the detailed store to a file for future use\n", + " with open(detailed_store_file, 'wb') as f:\n", + " pickle.dump(detailed_store, f)\n", + " else:\n", + " # Load existing summary store from file\n", + " print(\"Loading existing vector stores...\")\n", + " with open(summary_store_file, 'rb') as f:\n", + " summary_store = pickle.load(f)\n", + " \n", + " # Load existing detailed store from file\n", + " with open(detailed_store_file, 'rb') as f:\n", + " detailed_store = pickle.load(f)\n", + " \n", + " # Retrieve relevant chunks hierarchically using the query\n", + " retrieved_chunks = retrieve_hierarchically(\n", + " query, summary_store, detailed_store, k_summaries, k_chunks\n", + " )\n", + " \n", + " # Generate a response based on the retrieved chunks\n", + " response = generate_response(query, retrieved_chunks)\n", + " \n", + " # Return results including the query, response, retrieved chunks, and counts of summaries and detailed chunks\n", + " return {\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"retrieved_chunks\": retrieved_chunks,\n", + " \"summary_count\": len(summary_store.texts),\n", + " \"detailed_count\": len(detailed_store.texts)\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Standard (Non-Hierarchical) 
RAG for Comparison" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def standard_rag(query, pdf_path, chunk_size=1000, chunk_overlap=200, k=15):\n", + " \"\"\"\n", + " Standard RAG pipeline without hierarchical retrieval.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " pdf_path (str): Path to the PDF document\n", + " chunk_size (int): Size of each chunk\n", + " chunk_overlap (int): Overlap between chunks\n", + " k (int): Number of chunks to retrieve\n", + " \n", + " Returns:\n", + " Dict: Results including response and retrieved chunks\n", + " \"\"\"\n", + " # Extract pages from the PDF document\n", + " pages = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Create chunks directly from all pages\n", + " chunks = []\n", + " for page in pages:\n", + " # Chunk the text of the page\n", + " page_chunks = chunk_text(\n", + " page[\"text\"], \n", + " page[\"metadata\"], \n", + " chunk_size, \n", + " chunk_overlap\n", + " )\n", + " # Extend the chunks list with the chunks from the current page\n", + " chunks.extend(page_chunks)\n", + " \n", + " print(f\"Created {len(chunks)} chunks for standard RAG\")\n", + " \n", + " # Create a vector store to hold the chunks\n", + " store = SimpleVectorStore()\n", + " \n", + " # Create embeddings for the chunks\n", + " print(\"Creating embeddings for chunks...\")\n", + " texts = [chunk[\"text\"] for chunk in chunks]\n", + " embeddings = create_embeddings(texts)\n", + " \n", + " # Add chunks to the vector store\n", + " for i, chunk in enumerate(chunks):\n", + " store.add_item(\n", + " text=chunk[\"text\"],\n", + " embedding=embeddings[i],\n", + " metadata=chunk[\"metadata\"]\n", + " )\n", + " \n", + " # Create an embedding for the query\n", + " query_embedding = create_embeddings(query)\n", + " \n", + " # Retrieve the most relevant chunks based on the query embedding\n", + " retrieved_chunks = store.similarity_search(query_embedding, k=k)\n", + " 
print(f\"Retrieved {len(retrieved_chunks)} chunks with standard RAG\")\n", + " \n", + " # Generate a response based on the retrieved chunks\n", + " response = generate_response(query, retrieved_chunks)\n", + " \n", + " # Return the results including the query, response, and retrieved chunks\n", + " return {\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"retrieved_chunks\": retrieved_chunks\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_approaches(query, pdf_path, reference_answer=None):\n", + " \"\"\"\n", + " Compare hierarchical and standard RAG approaches.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " pdf_path (str): Path to the PDF document\n", + " reference_answer (str, optional): Reference answer for evaluation\n", + " \n", + " Returns:\n", + " Dict: Comparison results\n", + " \"\"\"\n", + " print(f\"\\n=== Comparing RAG approaches for query: {query} ===\")\n", + " \n", + " # Run hierarchical RAG\n", + " print(\"\\nRunning hierarchical RAG...\")\n", + " hierarchical_result = hierarchical_rag(query, pdf_path)\n", + " hier_response = hierarchical_result[\"response\"]\n", + " \n", + " # Run standard RAG\n", + " print(\"\\nRunning standard RAG...\")\n", + " standard_result = standard_rag(query, pdf_path)\n", + " std_response = standard_result[\"response\"]\n", + " \n", + " # Compare results from hierarchical and standard RAG\n", + " comparison = compare_responses(query, hier_response, std_response, reference_answer)\n", + " \n", + " # Return a dictionary with the comparison results\n", + " return {\n", + " \"query\": query, # The original query\n", + " \"hierarchical_response\": hier_response, # Response from hierarchical RAG\n", + " \"standard_response\": std_response, # Response from standard RAG\n", + " \"reference_answer\": 
reference_answer, # Reference answer for evaluation\n", + " \"comparison\": comparison, # Comparison analysis\n", + " \"hierarchical_chunks_count\": len(hierarchical_result[\"retrieved_chunks\"]), # Number of chunks retrieved by hierarchical RAG\n", + " \"standard_chunks_count\": len(standard_result[\"retrieved_chunks\"]) # Number of chunks retrieved by standard RAG\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_responses(query, hierarchical_response, standard_response, reference=None):\n", + " \"\"\"\n", + " Compare responses from hierarchical and standard RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " hierarchical_response (str): Response from hierarchical RAG\n", + " standard_response (str): Response from standard RAG\n", + " reference (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to evaluate the responses\n", + " system_prompt = \"\"\"You are an expert evaluator of information retrieval systems. \n", + "Compare the two responses to the same query, one generated using hierarchical retrieval\n", + "and the other using standard retrieval.\n", + "\n", + "Evaluate them based on:\n", + "1. Accuracy: Which response provides more factually correct information?\n", + "2. Comprehensiveness: Which response better covers all aspects of the query?\n", + "3. Coherence: Which response has better logical flow and organization?\n", + "4. 
Page References: Does either response make better use of page references?\n", + "\n", + "Be specific in your analysis of the strengths and weaknesses of each approach.\"\"\"\n", + "\n", + " # Create the user prompt with the query and both responses\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + "Response from Hierarchical RAG:\n", + "{hierarchical_response}\n", + "\n", + "Response from Standard RAG:\n", + "{standard_response}\"\"\"\n", + "\n", + " # If a reference answer is provided, include it in the user prompt\n", + " if reference:\n", + " user_prompt += f\"\"\"\n", + "\n", + "Reference Answer:\n", + "{reference}\"\"\"\n", + "\n", + " # Add the final instruction to the user prompt\n", + " user_prompt += \"\"\"\n", + "\n", + "Please provide a detailed comparison of these two responses, highlighting which approach performed better and why.\"\"\"\n", + "\n", + " # Make a request to the OpenAI API to generate the comparison analysis\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with the query and responses\n", + " ],\n", + " temperature=0 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated comparison analysis\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def run_evaluation(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Run a complete evaluation with multiple test queries.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers for queries\n", + " \n", + " Returns:\n", + " Dict: Evaluation 
results\n", + " \"\"\"\n", + " results = [] # Initialize an empty list to store results\n", + " \n", + " # Iterate over each query in the test queries\n", + " for i, query in enumerate(test_queries):\n", + " print(f\"Query: {query}\") # Print the current query\n", + " \n", + " # Get reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i] # Retrieve the reference answer for the current query\n", + " \n", + " # Compare hierarchical and standard RAG approaches\n", + " result = compare_approaches(query, pdf_path, reference)\n", + " results.append(result) # Append the result to the results list\n", + " \n", + " # Generate overall analysis of the evaluation results\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " return {\n", + " \"results\": results, # Return the individual results\n", + " \"overall_analysis\": overall_analysis # Return the overall analysis\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of the evaluation results.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from individual query evaluations\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to evaluate the results\n", + " system_prompt = \"\"\"You are an expert at evaluating information retrieval systems.\n", + "Based on multiple test queries, provide an overall analysis comparing hierarchical RAG \n", + "with standard RAG.\n", + "\n", + "Focus on:\n", + "1. When hierarchical retrieval performs better and why\n", + "2. When standard retrieval performs better and why\n", + "3. The overall strengths and weaknesses of each approach\n", + "4. 
Recommendations for when to use each approach\"\"\"\n", + "\n", + " # Create a summary of the evaluations\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " evaluations_summary += f\"Hierarchical chunks: {result['hierarchical_chunks_count']}, Standard chunks: {result['standard_chunks_count']}\\n\"\n", + " evaluations_summary += f\"Comparison summary: {result['comparison'][:200]}...\\n\\n\"\n", + "\n", + " # Define the user prompt with the evaluations summary\n", + " user_prompt = f\"\"\"Based on the following evaluations comparing hierarchical vs standard RAG across {len(results)} queries, \n", + "provide an overall analysis of these two approaches:\n", + "\n", + "{evaluations_summary}\n", + "\n", + "Please provide a comprehensive analysis of the relative strengths and weaknesses of hierarchical RAG \n", + "compared to standard RAG, with specific focus on retrieval quality and response generation.\"\"\"\n", + "\n", + " # Make a request to the OpenAI API to generate the overall analysis\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with the evaluations summary\n", + " ],\n", + " temperature=0 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated overall analysis\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of Hierarchical and Standard RAG Approaches" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing document and creating vector stores...\n", + "Extracting text from 
data/AI_Information.pdf...\n", + "Extracted 15 pages with content\n", + "Generating page summaries...\n", + "Summarizing page 1/15...\n", + "Summarizing page 2/15...\n", + "Summarizing page 3/15...\n", + "Summarizing page 4/15...\n", + "Summarizing page 5/15...\n", + "Summarizing page 6/15...\n", + "Summarizing page 7/15...\n", + "Summarizing page 8/15...\n", + "Summarizing page 9/15...\n", + "Summarizing page 10/15...\n", + "Summarizing page 11/15...\n", + "Summarizing page 12/15...\n", + "Summarizing page 13/15...\n", + "Summarizing page 14/15...\n", + "Summarizing page 15/15...\n", + "Created 47 detailed chunks\n", + "Creating embeddings for summaries...\n", + "Creating embeddings for detailed chunks...\n", + "Created vector stores with 15 summaries and 47 chunks\n", + "Performing hierarchical retrieval for query: What are the key applications of transformer models in natural language processing?\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\faree\\AppData\\Local\\Temp\\ipykernel_9608\\2918097221.py:62: DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.)\n", + " \"similarity\": float(score) # Add the similarity score\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Retrieved 3 relevant summaries\n", + "Retrieved 10 detailed chunks from relevant pages\n", + "\n", + "=== Response ===\n", + "I couldn't find any information about transformer models in the provided context. The context appears to focus on various applications of Artificial Intelligence (AI) and Machine Learning (ML), including computer vision, deep learning, reinforcement learning, and more. However, transformer models are not mentioned.\n", + "\n", + "If you're looking for information on transformer models, I'd be happy to try and help you find it. 
Alternatively, if you have any other questions based on the provided context, I'd be happy to try and assist you.\n", + "Query: How do transformers handle sequential data compared to RNNs?\n", + "\n", + "=== Comparing RAG approaches for query: How do transformers handle sequential data compared to RNNs? ===\n", + "\n", + "Running hierarchical RAG...\n", + "Loading existing vector stores...\n", + "Performing hierarchical retrieval for query: How do transformers handle sequential data compared to RNNs?\n", + "Retrieved 3 relevant summaries\n", + "Retrieved 10 detailed chunks from relevant pages\n", + "\n", + "Running standard RAG...\n", + "Extracting text from data/AI_Information.pdf...\n", + "Extracted 15 pages with content\n", + "Created 47 chunks for standard RAG\n", + "Creating embeddings for chunks...\n", + "Retrieved 15 chunks with standard RAG\n", + "\n", + "=== OVERALL ANALYSIS ===\n", + "Based on the provided evaluation, I will provide a comprehensive analysis of the relative strengths and weaknesses of hierarchical RAG compared to standard RAG.\n", + "\n", + "**Overview of Hierarchical RAG and Standard RAG**\n", + "\n", + "Hierarchical RAG (Retrieval Algorithm for Generating) is an extension of the standard RAG approach, which involves dividing the input text into smaller chunks or sub-sequences to facilitate more efficient and effective retrieval. The hierarchical approach further divides these chunks into smaller sub-chunks, allowing for more granular and detailed retrieval.\n", + "\n", + "Standard RAG, on the other hand, uses a single chunk size to retrieve relevant information from the input text.\n", + "\n", + "**Strengths of Hierarchical RAG**\n", + "\n", + "1. **Improved Retrieval Quality**: Hierarchical RAG's ability to divide the input text into smaller sub-chunks allows for more precise retrieval, as it can capture subtle nuances and relationships between words and phrases that may be missed by standard RAG.\n", + "2. 
**Enhanced Response Generation**: By considering multiple levels of granularity, hierarchical RAG can generate more accurate and informative responses, as it can take into account the context and relationships between different parts of the input text.\n", + "3. **Better Handling of Complex Input Text**: Hierarchical RAG is particularly well-suited for handling complex input text, such as long documents or texts with multiple layers of abstraction.\n", + "\n", + "**Weaknesses of Hierarchical RAG**\n", + "\n", + "1. **Increased Computational Complexity**: The hierarchical approach requires more computational resources and processing power, as it needs to handle multiple levels of granularity.\n", + "2. **Higher Risk of Overfitting**: The increased number of parameters and complexity of the hierarchical model can lead to overfitting, particularly if the training data is limited or biased.\n", + "\n", + "**Strengths of Standard RAG**\n", + "\n", + "1. **Simpler and Faster**: Standard RAG is a simpler and faster approach, as it only requires a single chunk size and less computational resources.\n", + "2. **Less Risk of Overfitting**: The standard model has fewer parameters and is less prone to overfitting, making it a more robust and reliable choice.\n", + "\n", + "**Weaknesses of Standard RAG**\n", + "\n", + "1. **Limited Retrieval Quality**: Standard RAG's single chunk size can lead to limited retrieval quality, as it may not capture the full range of nuances and relationships between words and phrases.\n", + "2. **Less Effective for Complex Input Text**: Standard RAG is less effective for handling complex input text, as it may struggle to capture the context and relationships between different parts of the text.\n", + "\n", + "**When to Use Each Approach**\n", + "\n", + "1. 
**Use Hierarchical RAG**:\n", + "\t* When dealing with complex input text, such as long documents or texts with multiple layers of abstraction.\n", + "\t* When high retrieval quality and response generation are critical, such as in applications requiring accurate and informative responses.\n", + "\t* When computational resources are not a concern, and the benefits of hierarchical retrieval outweigh the costs.\n", + "2. **Use Standard RAG**:\n", + "\t* When dealing with simple input text, such as short documents or texts with a clear and concise structure.\n", + "\t* When computational resources are limited, and speed is a priority.\n", + "\t* When the goal is to quickly retrieve relevant information, rather than generating accurate and informative responses.\n", + "\n", + "In conclusion, hierarchical RAG offers improved retrieval quality and response generation, but at the cost of increased computational complexity and risk of overfitting. Standard RAG, on the other hand, is simpler and faster, but may have limited retrieval quality and be less effective for complex input text. 
The choice of approach depends on the specific requirements and constraints of the application.\n" + ] + } + ], + "source": [ + "# Path to the PDF document containing AI information\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Example query about AI for testing the hierarchical RAG approach\n", + "query = \"What are the key applications of transformer models in natural language processing?\"\n", + "result = hierarchical_rag(query, pdf_path)\n", + "\n", + "print(\"\\n=== Response ===\")\n", + "print(result[\"response\"])\n", + "\n", + "# Test query for formal evaluation (using only one query as requested)\n", + "test_queries = [\n", + " \"How do transformers handle sequential data compared to RNNs?\"\n", + "]\n", + "\n", + "# Reference answer for the test query to enable comparison\n", + "reference_answers = [\n", + " \"Transformers handle sequential data differently from RNNs by using self-attention mechanisms instead of recurrent connections. This allows transformers to process all tokens in parallel rather than sequentially, capturing long-range dependencies more efficiently and enabling better parallelization during training. 
Unlike RNNs, transformers don't suffer from vanishing gradient problems with long sequences.\"\n", + "]\n", + "\n", + "# Run the evaluation comparing hierarchical and standard RAG approaches\n", + "evaluation_results = run_evaluation(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers\n", + ")\n", + "\n", + "# Print the overall analysis of the comparison\n", + "print(\"\\n=== OVERALL ANALYSIS ===\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/19_HyDE_rag.ipynb b/19_HyDE_rag.ipynb new file mode 100644 index 0000000..0940de4 --- /dev/null +++ b/19_HyDE_rag.ipynb @@ -0,0 +1,1070 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Hypothetical Document Embedding (HyDE) for RAG\n", + "\n", + "In this notebook, I implement HyDE (Hypothetical Document Embedding) - an innovative retrieval technique that transforms user queries into hypothetical answer documents before performing retrieval. This approach bridges the semantic gap between short queries and lengthy documents.\n", + "\n", + "Traditional RAG systems embed the user's short query directly, but this often fails to capture the semantic richness needed for optimal retrieval. 
HyDE solves this by:\n", + "\n", + "- Generating a hypothetical document that answers the query\n", + "- Embedding this expanded document instead of the original query\n", + "- Retrieving documents similar to this hypothetical document\n", + "- Creating more contextually relevant answers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz\n", + "from openai import OpenAI\n", + "import re\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extract text content from a PDF file with page separation.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " \n", + " Returns:\n", + " List[Dict]: List of pages with text content and metadata\n", + " \"\"\"\n", + " print(f\"Extracting text from {pdf_path}...\") # Print the path of the PDF being processed\n", + " pdf = fitz.open(pdf_path) # Open the PDF file using PyMuPDF\n", + " pages = [] # Initialize an empty list 
to store the pages with text content\n", + " \n", + " # Iterate over each page in the PDF\n", + " for page_num in range(len(pdf)):\n", + " page = pdf[page_num] # Get the current page\n", + " text = page.get_text() # Extract text from the current page\n", + " \n", + " # Skip pages with very little text (less than 50 characters)\n", + " if len(text.strip()) > 50:\n", + " # Append the page text and metadata to the list\n", + " pages.append({\n", + " \"text\": text,\n", + " \"metadata\": {\n", + " \"source\": pdf_path, # Source file path\n", + " \"page\": page_num + 1 # Page number (1-based index)\n", + " }\n", + " })\n", + " \n", + " print(f\"Extracted {len(pages)} pages with content\") # Print the number of pages extracted\n", + " return pages # Return the list of pages with text content and metadata" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, chunk_size=1000, overlap=200):\n", + " \"\"\"\n", + " Split text into overlapping chunks.\n", + " \n", + " Args:\n", + " text (str): Input text to chunk\n", + " chunk_size (int): Size of each chunk in characters\n", + " overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " List[Dict]: List of chunks with metadata\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " \n", + " # Iterate over the text in steps of (chunk_size - overlap)\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " chunk_text = text[i:i + chunk_size] # Extract the chunk of text\n", + " if chunk_text: # Ensure we don't add empty chunks\n", + " chunks.append({\n", + " \"text\": chunk_text, # Add the chunk text\n", + " \"metadata\": {\n", + " \"start_pos\": i, # Start position of the chunk in the original text\n", + " \"end_pos\": i + len(chunk_text) # End position of the chunk in the original text\n", + " }\n", + " })\n", + " \n", + " print(f\"Created {len(chunks)} text chunks\") # Print 
the number of chunks created\n", + " return chunks # Return the list of chunks with metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " self.vectors = [] # List to store vector embeddings\n", + " self.texts = [] # List to store text content\n", + " self.metadata = [] # List to store metadata\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " text (str): Text content\n", + " embedding (List[float]): Vector embedding\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " self.vectors.append(np.array(embedding)) # Append the embedding as a numpy array\n", + " self.texts.append(text) # Append the text content\n", + " self.metadata.append(metadata or {}) # Append the metadata or an empty dict if None\n", + " \n", + " def similarity_search(self, query_embedding, k=5, filter_func=None):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of results to return\n", + " filter_func (callable, optional): Function to filter results\n", + " \n", + " Returns:\n", + " List[Dict]: Top k most similar items\n", + " \"\"\"\n", + " if not self.vectors:\n", + " return [] # Return an empty list if there are no vectors\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " # Skip if doesn't pass the filter\n", 
+ " if filter_func and not filter_func(self.metadata[i]):\n", + " continue\n", + " \n", + " # Calculate cosine similarity\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity)) # Append index and similarity score\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx], # Add the text content\n", + " \"metadata\": self.metadata[idx], # Add the metadata\n", + " \"similarity\": float(score) # Add the similarity score\n", + " })\n", + " \n", + " return results # Return the list of top k results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"BAAI/bge-en-icl\"):\n", + " \"\"\"\n", + " Create embeddings for the given texts.\n", + " \n", + " Args:\n", + " texts (List[str]): Input texts\n", + " model (str): Embedding model name\n", + " \n", + " Returns:\n", + " List[List[float]]: Embedding vectors\n", + " \"\"\"\n", + " # Handle empty input\n", + " if not texts:\n", + " return []\n", + " \n", + " # Process in batches if needed (OpenAI API limits)\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Iterate over the input texts in batches\n", + " for i in range(0, len(texts), batch_size):\n", + " batch = texts[i:i + batch_size] # Get the current batch of texts\n", + " \n", + " # Create embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract embeddings from the response\n", + " batch_embeddings = 
[item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings) # Add the batch embeddings to the list\n", + " \n", + " return all_embeddings # Return all embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document for RAG.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " SimpleVectorStore: Vector store containing document chunks\n", + " \"\"\"\n", + " # Extract text from the PDF file\n", + " pages = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Process each page and create chunks\n", + " all_chunks = []\n", + " for page in pages:\n", + " # Pass the text content (string) to chunk_text, not the dictionary\n", + " page_chunks = chunk_text(page[\"text\"], chunk_size, chunk_overlap)\n", + " \n", + " # Update metadata for each chunk with the page's metadata\n", + " for chunk in page_chunks:\n", + " chunk[\"metadata\"].update(page[\"metadata\"])\n", + " \n", + " all_chunks.extend(page_chunks)\n", + " \n", + " # Create embeddings for the text chunks\n", + " print(\"Creating embeddings for chunks...\")\n", + " chunk_texts = [chunk[\"text\"] for chunk in all_chunks]\n", + " chunk_embeddings = create_embeddings(chunk_texts)\n", + " \n", + " # Create a vector store to hold the chunks and their embeddings\n", + " vector_store = SimpleVectorStore()\n", + " for i, chunk in enumerate(all_chunks):\n", + " vector_store.add_item(\n", + " text=chunk[\"text\"],\n", + " embedding=chunk_embeddings[i],\n", + " metadata=chunk[\"metadata\"]\n", + " )\n", + " \n", + " print(f\"Vector store created with 
{len(all_chunks)} chunks\")\n", + " return vector_store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Hypothetical Document Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_hypothetical_document(query, desired_length=1000):\n", + " \"\"\"\n", + " Generate a hypothetical document that answers the query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " desired_length (int): Target length of the hypothetical document\n", + " \n", + " Returns:\n", + " str: Generated hypothetical document\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to generate the document\n", + " system_prompt = f\"\"\"You are an expert document creator. \n", + " Given a question, generate a detailed document that would directly answer this question.\n", + " The document should be approximately {desired_length} characters long and provide an in-depth, \n", + " informative answer to the question. Write as if this document is from an authoritative source\n", + " on the subject. 
Include specific details, facts, and explanations.\n", + " Do not mention that this is a hypothetical document - just write the content directly.\"\"\"\n", + "\n", + " # Define the user prompt with the query\n", + " user_prompt = f\"Question: {query}\\n\\nGenerate a document that fully answers this question:\"\n", + " \n", + " # Make a request to the OpenAI API to generate the hypothetical document\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with the query\n", + " ],\n", + " temperature=0.1 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the generated document content\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete HyDE RAG Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def hyde_rag(query, vector_store, k=5, should_generate_response=True):\n", + " \"\"\"\n", + " Perform RAG using Hypothetical Document Embedding.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " k (int): Number of chunks to retrieve\n", + " generate_response (bool): Whether to generate a final response\n", + " \n", + " Returns:\n", + " Dict: Results including hypothetical document and retrieved chunks\n", + " \"\"\"\n", + " print(f\"\\n=== Processing query with HyDE: {query} ===\\n\")\n", + " \n", + " # Step 1: Generate a hypothetical document that answers the query\n", + " print(\"Generating hypothetical document...\")\n", + " hypothetical_doc = generate_hypothetical_document(query)\n", + " print(f\"Generated hypothetical document of 
{len(hypothetical_doc)} characters\")\n", + " \n", + " # Step 2: Create embedding for the hypothetical document\n", + " print(\"Creating embedding for hypothetical document...\")\n", + " hypothetical_embedding = create_embeddings([hypothetical_doc])[0]\n", + " \n", + " # Step 3: Retrieve similar chunks based on the hypothetical document\n", + " print(f\"Retrieving {k} most similar chunks...\")\n", + " retrieved_chunks = vector_store.similarity_search(hypothetical_embedding, k=k)\n", + " \n", + " # Prepare the results dictionary\n", + " results = {\n", + " \"query\": query,\n", + " \"hypothetical_document\": hypothetical_doc,\n", + " \"retrieved_chunks\": retrieved_chunks\n", + " }\n", + " \n", + " # Step 4: Generate a response if requested\n", + " if should_generate_response:\n", + " print(\"Generating final response...\")\n", + " response = generate_response(query, retrieved_chunks)\n", + " results[\"response\"] = response\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Standard (Direct) RAG Implementation for Comparison" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def standard_rag(query, vector_store, k=5, should_generate_response=True):\n", + " \"\"\"\n", + " Perform standard RAG using direct query embedding.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " k (int): Number of chunks to retrieve\n", + " generate_response (bool): Whether to generate a final response\n", + " \n", + " Returns:\n", + " Dict: Results including retrieved chunks\n", + " \"\"\"\n", + " print(f\"\\n=== Processing query with Standard RAG: {query} ===\\n\")\n", + " \n", + " # Step 1: Create embedding for the query\n", + " print(\"Creating embedding for query...\")\n", + " query_embedding = create_embeddings([query])[0]\n", + " \n", + " # Step 2: Retrieve similar chunks 
based on the query embedding\n", + " print(f\"Retrieving {k} most similar chunks...\")\n", + " retrieved_chunks = vector_store.similarity_search(query_embedding, k=k)\n", + " \n", + " # Prepare the results dictionary\n", + " results = {\n", + " \"query\": query,\n", + " \"retrieved_chunks\": retrieved_chunks\n", + " }\n", + " \n", + " # Step 3: Generate a response if requested\n", + " if should_generate_response:\n", + " print(\"Generating final response...\")\n", + " response = generate_response(query, retrieved_chunks)\n", + " results[\"response\"] = response\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, relevant_chunks):\n", + " \"\"\"\n", + " Generate a final response based on the query and relevant chunks.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " relevant_chunks (List[Dict]): Retrieved relevant chunks\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Concatenate the text from the chunks to create context\n", + " context = \"\\n\\n\".join([chunk[\"text\"] for chunk in relevant_chunks])\n", + " \n", + " # Generate response using OpenAI API\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant. 
Answer the question based on the provided context.\"},\n", + " {\"role\": \"user\", \"content\": f\"Context:\\n{context}\\n\\nQuestion: {query}\"}\n", + " ],\n", + " temperature=0.5,\n", + " max_tokens=500\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_approaches(query, vector_store, reference_answer=None):\n", + " \"\"\"\n", + " Compare HyDE and standard RAG approaches for a query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " reference_answer (str, optional): Reference answer for evaluation\n", + " \n", + " Returns:\n", + " Dict: Comparison results\n", + " \"\"\"\n", + " # Run HyDE RAG\n", + " hyde_result = hyde_rag(query, vector_store)\n", + " hyde_response = hyde_result[\"response\"]\n", + " \n", + " # Run standard RAG\n", + " standard_result = standard_rag(query, vector_store)\n", + " standard_response = standard_result[\"response\"]\n", + " \n", + " # Compare results\n", + " comparison = compare_responses(query, hyde_response, standard_response, reference_answer)\n", + " \n", + " return {\n", + " \"query\": query,\n", + " \"hyde_response\": hyde_response,\n", + " \"hyde_hypothetical_doc\": hyde_result[\"hypothetical_document\"],\n", + " \"standard_response\": standard_response,\n", + " \"reference_answer\": reference_answer,\n", + " \"comparison\": comparison\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "def compare_responses(query, hyde_response, standard_response, reference=None):\n", + " \"\"\"\n", + " Compare responses from HyDE and standard RAG.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " hyde_response (str): 
Response from HyDE RAG\n", + " standard_response (str): Response from standard RAG\n", + " reference (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " system_prompt = \"\"\"You are an expert evaluator of information retrieval systems.\n", + "Compare the two responses to the same query, one generated using HyDE (Hypothetical Document Embedding) \n", + "and the other using standard RAG with direct query embedding.\n", + "\n", + "Evaluate them based on:\n", + "1. Accuracy: Which response provides more factually correct information?\n", + "2. Relevance: Which response better addresses the query?\n", + "3. Completeness: Which response provides more thorough coverage of the topic?\n", + "4. Clarity: Which response is better organized and easier to understand?\n", + "\n", + "Be specific about the strengths and weaknesses of each approach.\"\"\"\n", + "\n", + " user_prompt = f\"\"\"Query: {query}\n", + "\n", + "Response from HyDE RAG:\n", + "{hyde_response}\n", + "\n", + "Response from Standard RAG:\n", + "{standard_response}\"\"\"\n", + "\n", + " if reference:\n", + " user_prompt += f\"\"\"\n", + "\n", + "Reference Answer:\n", + "{reference}\"\"\"\n", + "\n", + " user_prompt += \"\"\"\n", + "\n", + "Please provide a detailed comparison of these two responses, highlighting which approach performed better and why.\"\"\"\n", + "\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def run_evaluation(pdf_path, test_queries, reference_answers=None, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Run a 
complete evaluation with multiple test queries.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers for queries\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + " Dict: Evaluation results\n", + " \"\"\"\n", + " # Process document and create vector store\n", + " vector_store = process_document(pdf_path, chunk_size, chunk_overlap)\n", + " \n", + " results = []\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\n===== Evaluating Query {i+1}/{len(test_queries)} =====\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Get reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " \n", + " # Compare approaches\n", + " result = compare_approaches(query, vector_store, reference)\n", + " results.append(result)\n", + " \n", + " # Generate overall analysis\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of the evaluation results.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from individual query evaluations\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " system_prompt = \"\"\"You are an expert at evaluating information retrieval systems.\n", + "Based on multiple test queries, provide an overall analysis comparing HyDE RAG (using hypothetical document embedding)\n", + "with standard RAG (using direct query 
embedding).\n", + "\n", + "Focus on:\n", + "1. When HyDE performs better and why\n", + "2. When standard RAG performs better and why\n", + "3. The types of queries that benefit most from HyDE\n", + "4. The overall strengths and weaknesses of each approach\n", + "5. Recommendations for when to use each approach\"\"\"\n", + "\n", + " # Create summary of evaluations\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " evaluations_summary += f\"Comparison summary: {result['comparison'][:200]}...\\n\\n\"\n", + "\n", + " user_prompt = f\"\"\"Based on the following evaluations comparing HyDE vs standard RAG across {len(results)} queries, \n", + "provide an overall analysis of these two approaches:\n", + "\n", + "{evaluations_summary}\n", + "\n", + "Please provide a comprehensive analysis of the relative strengths and weaknesses of HyDE compared to standard RAG,\n", + "focusing on when and why one approach outperforms the other.\"\"\"\n", + "\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-3B-Instruct\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def visualize_results(query, hyde_result, standard_result):\n", + " \"\"\"\n", + " Visualize the results of HyDE and standard RAG approaches.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " hyde_result (Dict): Results from HyDE RAG\n", + " standard_result (Dict): Results from standard RAG\n", + " \"\"\"\n", + " # Create a figure with 3 subplots\n", + " fig, axs = 
plt.subplots(1, 3, figsize=(20, 6))\n", + " \n", + " # Plot the query in the first subplot\n", + " axs[0].text(0.5, 0.5, f\"Query:\\n\\n{query}\", \n", + " horizontalalignment='center', verticalalignment='center',\n", + " fontsize=12, wrap=True)\n", + " axs[0].axis('off') # Hide the axis for the query plot\n", + " \n", + " # Plot the hypothetical document in the second subplot\n", + " hypothetical_doc = hyde_result[\"hypothetical_document\"]\n", + " # Shorten the hypothetical document if it's too long\n", + " shortened_doc = hypothetical_doc[:500] + \"...\" if len(hypothetical_doc) > 500 else hypothetical_doc\n", + " axs[1].text(0.5, 0.5, f\"Hypothetical Document:\\n\\n{shortened_doc}\", \n", + " horizontalalignment='center', verticalalignment='center',\n", + " fontsize=10, wrap=True)\n", + " axs[1].axis('off') # Hide the axis for the hypothetical document plot\n", + " \n", + " # Plot comparison of retrieved chunks in the third subplot\n", + " # Shorten each chunk text for better visualization\n", + " hyde_chunks = [chunk[\"text\"][:100] + \"...\" for chunk in hyde_result[\"retrieved_chunks\"]]\n", + " std_chunks = [chunk[\"text\"][:100] + \"...\" for chunk in standard_result[\"retrieved_chunks\"]]\n", + " \n", + " # Prepare the comparison text\n", + " comparison_text = \"Retrieved by HyDE:\\n\\n\"\n", + " for i, chunk in enumerate(hyde_chunks):\n", + " comparison_text += f\"{i+1}. {chunk}\\n\\n\"\n", + " \n", + " comparison_text += \"\\nRetrieved by Standard RAG:\\n\\n\"\n", + " for i, chunk in enumerate(std_chunks):\n", + " comparison_text += f\"{i+1}. 
{chunk}\\n\\n\"\n", + " \n", + " # Plot the comparison text in the third subplot\n", + " axs[2].text(0.5, 0.5, comparison_text, \n", + " horizontalalignment='center', verticalalignment='center',\n", + " fontsize=8, wrap=True)\n", + " axs[2].axis('off') # Hide the axis for the comparison plot\n", + " \n", + " # Adjust layout to prevent overlap\n", + " plt.tight_layout()\n", + " # Display the plot\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of Hypothetical Document Embedding (HyDE) vs. Standard RAG" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Extracting text from data/AI_Information.pdf...\n", + "Extracted 15 pages with content\n", + "Created 4 text chunks\n", + "Created 4 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 4 text chunks\n", + "Created 3 text chunks\n", + "Creating embeddings for chunks...\n", + "Vector store created with 48 chunks\n", + "\n", + "=== Processing query with HyDE: What are the main ethical considerations in artificial intelligence development? ===\n", + "\n", + "Generating hypothetical document...\n", + "Generated hypothetical document of 3364 characters\n", + "Creating embedding for hypothetical document...\n", + "Retrieving 5 most similar chunks...\n", + "Generating final response...\n", + "\n", + "=== HyDE Response ===\n", + "The main ethical considerations in artificial intelligence development include:\n", + "\n", + "1. 
Bias and Fairness: Ensuring that AI systems are fair, non-discriminatory, and do not perpetuate existing biases present in the data they are trained on.\n", + "\n", + "2. Transparency and Explainability: Making AI decisions more understandable and assessable to build trust and accountability.\n", + "\n", + "3. Privacy and Data Protection: Ensuring responsible data handling, implementing privacy-preserving techniques, and complying with data protection regulations.\n", + "\n", + "4. Accountability and Responsibility: Establishing clear guidelines and frameworks for AI development and deployment to address potential harms and ensure ethical behavior.\n", + "\n", + "5. Job Displacement: Addressing the potential economic and social impacts of AI-driven automation.\n", + "\n", + "6. Autonomy and Control: Establishing guidelines and frameworks to ensure that AI systems are developed and deployed in a way that prioritizes human well-being and safety.\n", + "\n", + "7. Weaponization of AI: Addressing the risks associated with the potential use of AI in autonomous weapons systems.\n", + "\n", + "8. Respect for Human Rights: Prioritizing the respect and protection of human rights, particularly in the development and deployment of AI systems.\n", + "\n", + "These considerations are guided by principles of Ethical AI, which include respect for human rights, privacy, non-discrimination, and beneficence.\n", + "\n", + "=== Processing query with Standard RAG: What are the main ethical considerations in artificial intelligence development? ===\n", + "\n", + "Creating embedding for query...\n", + "Retrieving 5 most similar chunks...\n", + "Generating final response...\n", + "\n", + "=== Standard RAG Response ===\n", + "The main ethical considerations in artificial intelligence development include:\n", + "\n", + "1. 
Bias and Fairness: Ensuring that AI systems do not inherit and amplify biases present in the data they are trained on, leading to unfair or discriminatory outcomes.\n", + "2. Transparency and Explainability: Making it possible to understand how AI systems arrive at their decisions and ensuring that these decisions are transparent and accountable.\n", + "3. Respect for Human Rights, Privacy, Non-Discrimination, and Beneficence: Guiding the development and deployment of AI systems to ensure they are fair, transparent, accountable, and beneficial to society.\n", + "4. Job Displacement: Addressing the potential economic and social impacts of AI-driven automation, particularly in industries with repetitive or routine tasks.\n", + "5. Autonomy and Control: Establishing clear guidelines and ethical frameworks for AI development and deployment, particularly as AI systems become more autonomous.\n", + "6. Weaponization of AI: Addressing the risks associated with AI-powered autonomous weapons and establishing international discussions and regulations.\n", + "7. 
Cybersecurity: Protecting sensitive information and ensuring responsible data handling, as well as using AI to detect and respond to threats, analyze network traffic, and identify vulnerabilities.\n", + "\n", + "These considerations are essential to ensure that AI development aligns with societal values and promotes the well-being of individuals and society as a whole.\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAB8YAAAM+CAYAAABoiWs3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdeVzWVf7//ydCbqOIa2M2hQ6KAtfFxSIICYqiuBQpWZorLri12WRpZuqYmamj5lJoLphKlllqaTqhMrmhol7gBolKTZrpCJKmyHb9/vDn+yuxiGVhfB73283bjes657zO6xz878U5x85ms9kEAAAAAAAAAAAAAEAFVam8EwAAAAAAAAAAAAAA4PdEYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAAFChURgHAAAAAAAAAAAAAFRoFMYBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAAFChURgHAAAAAAAAAAAAAFRoFMYBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAAFChURgHAAAAAAAAAAAAAFRoFMYBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAH8qzs7OcnV1lcVikaurq6ZNm1amcXPmzNG5c+dK7dOlSxelpqbejTRv64svvlDbtm2LbWvbtq3WrVv3m+JPmjRJo0aNKvRdTEyMunXrVqax9evXl5eXl5o1a6aWLVvqnXfeUX5+vtHHzs5OJpNJFovF+Hfx4sXflPPvxaG8EwAAAAAAAAAAAACAO/XRRx/JYrHozJkzcnNzU7t27eTn51fqmDlz5qht27b661//WqStoKBAkrRp06bfJd8/oz59+mjOnDmSpFOnTqlv3746efKk5s6da/TZsWOHnJycyifBO8CJcQAAAAAAAAAAAAB/Wo0aNVLz5s317bffSpLOnTunp556Sn5+fjKZTBo/
frwkafLkyTp79qx69uwpi8Uiq9WqSZMm6YknnlBYWJg8PDz0ww8/yNnZWVartdRYq1at0qOPPmrkYLPZ1KRJEyUlJUmSVqxYIX9/f3l7eys4ONj4Pjc3VyNHjlTTpk3l5+en7du3l7q2rVu3qmXLlnJxcdFLL70km82mxMRENW/eXDabzegXGBioL7/88o737tFHH1VsbKzx+d///rf8/f2L7dukSRMtXbpU7733nrKysu54rvJGYRwAAAAAAAAAAADAn1ZKSoouXrxoXEk+YMAAPfPMM9q3b58OHTqkxMRErVmzRhMmTNADDzygjz76SFarVRaLRZK0Z88effDBBzp27JgaNWpUKHZJsSIiIpSQkGBcyx4fH6/atWvL09NTu3bt0ocffqivv/5aBw8e1JtvvqnevXtLkhYtWqTU1FQdPXpUO3fu1MGDB0td27Fjx7R7924lJyfrP//5jz788EP5+vqqbt26+uqrryRJhw4d0oULF9SpU6diY6xatarQVecTJkww2l544QXNnz/f+LxgwQI9++yzJebTvHlzVa9evdBV80FBQUbskJAQ4/suXbooMTGx1PX9kbhKHQAAAAAAAAAAAMCfTs+ePVWpUiWlpqZq9uzZql+/vn7++Wdt3bpVP/74o9HvypUrpb4Z3qVLF91///1Fvi8t1pNPPqknnnhCK1as0Msvv6yYmBgNHDhQkrR+/XolJSUVOnmdkZGha9euaevWrerfv78qV64sSRo0aJCWLFlSYm79+/fXfffdp/vuu099+/ZVXFycevfubRS0O3bsqAULFmjkyJGys7MrNsat16FLN94Yv/l2eYcOHTRq1CgdOnRIderU0b59+/Txxx+XmI+kQifVpZKvUr/XrqSnMA4AAAAAAAAAAADgT+fmG+NxcXF67LHH1K5dOzVu3FiSlJCQoKpVq5YpTo0aNYr9/mYBuKRYgwYN0sCBAzVixAh98cUXmj17tjFuwIABmjp16m3nLqmYfbv+EREReuWVV3To0CFt2LBBM2fOvKM4t3r++ec1b9483X///Ro0aJCqVKlSYt/U1FRdu3ZNzZs3/9XzlReuUgcAAAAAAAAAAADwpxUaGqoRI0Zo/PjxqlGjhkJCQjRt2jSj/ezZs/r+++8lSY6OjmV+H/t2sW6eCB89erRCQ0NVp04dSVJ4eLhWrlyp7777TpJUUFBgXCkeGhqqlStXKjc3Vzk5OVq2bFmpOdzse+3aNcXGxio0NFSS5ODgoOHDhys8PFzdu3cv9sR2WfXr109btmzRsmXLNHz48BL7paena/DgwRoxYoQcHR1/9XzlhcI4AAAAAAAAAAAAgD+1119/XTt37tSBAwe0atUqpaWlycPDQyaTSREREbp48aKkG6ejo6KiZLFYZLVabxu3tFiSNHDgQC1cuNC4Rl268eb29OnT1b17d3l6esrd3V2rV6+WJEVFRalp06Zyc3NT69atjXfOS9KiRQs98sgjMplMCgoKUq9evYy2wYMH68yZM6W+CV4W1atXV0REhB555BH97W9/K7J+Ly8vubq66sknn1SPHj2Mk/G3rvfWN8xvXlt/r70xbmf75SXwAAAAAAAAAAAAAIB72ieffKL33ntPW7du/U1x8vPz5ePjo3nz5ikoKOguZXfv4Y1xAAAAAAAAAAAAAPgT6dSpk7755ht99tlnvynOhg0b9Pzzz6tz584VuigucWIcAAAAAAAAAAAAAFDB8cY4AAAAAAAAAAAAAKBCozAOAAAAAAAAAAAAAKjQKIwDAAAAAAAAAAAAACo0CuMAAAAAAAAAAAAAgAqNwjgAAAAAAAAAAAAAoEKjMA4AAAAAAAAAAAAAqNAojAMAAAAAAAAAAAAAKjQK4wAAAAAAAAAAAACACo3COAAAAAAAAAAAAACgQqMwDgAAAAAAAAAAAACo0CiMAwAAAAAAAAAAAAAqNArjAAAAAAAAAAAAAIAKjcI4AAAAAAAAAAAAgN/s+eefl7Ozs+zs
7GS1Wu9KTDs7O126dOmuxPq9pKenKzo6+nef54svvlDbtm3L1LdHjx6KiYn5TfO1bdtW69at+00xfm9Wq1WrV68uU18K4wAAAAAAAAAAAAB+sx49emjnzp16+OGHyzuVP9QfVRhHURTGAQAAAAAAAAAAAPyhgoOD9eCDD97xuBMnTqhr165q2bKlzGaz5s+fX2y/xMREBQYGymw2y8/PT7t27ZJ0ozDt5OSk0aNHy2w2y93dXXFxcYXaXn/9dXl7e6tp06batWuXXnzxRVksFnl4eOjIkSPGHCtWrJC/v7+8vb0VHByspKQkSVJMTIxCQ0P19NNPy2QyydfXV6dOnZIkDR8+XKmpqbJYLAoPDy+S9+HDh9W6dWt5e3vLzc1NU6ZMMdomTZqknj176rHHHpObm5vatWunjIwMSVJubq5Gjhyppk2bys/PT9u3by9xD1NSUhQYGCh3d3d169ZNP/30k9F2+fJlRUVFyc/PT2azWUOHDlVOTo6kG6fCn3vuObVs2VIuLi566aWXZLPZisQ/f/68IiIiZDKZ5OHhoYULF0qSPvnkE3Xs2NHol5+fr4cffljHjh1TfHy8PDw8NGLECJnNZplMJiUnJysyMlImk0n+/v46c+aMMXbmzJny8/OTt7e3OnXqpG+//bbUPTp//rwmTJig7du3y2KxaPjw4SXuj0RhHAAAAAAAAAAAAEA5yc/P19NPP61//etf2r9/vxISErRo0SLt37+/UL+cnBxFRERo4sSJSk5O1qxZs/TEE0/oypUrkqSsrCy1aNFCycnJWrJkiXr37q3Lly8bbT4+Pjp48KDGjh2rsLAwhYeHy2q1asCAAfrnP/8pSdq1a5c+/PBDff311zp48KDefPNN9e7d28hh//79mjp1qg4fPqzQ0FC9/fbbkqTo6Gi5urrKarVqw4YNRdbo7OysrVu36uDBgzpw4IDWrl2rhIQEo33v3r2KiYnRsWPH1KBBA6PovGjRIqWmpuro0aPauXOnDh48WOI+9uvXT4MHD9bRo0f1xhtv6D//+Y/R9tJLLykoKEj79u1TUlKSCgoK9M477xjtx44d0+7du5WcnKz//Oc/+vDDD4vEf+655+Tq6qrDhw9r27ZtmjJlihISEtS9e3d98803Sk1NlSRt2LBBLi4ucnNzk3SjYD9kyBAlJyerW7duateuncaOHavDhw/L19dXc+bMkSTFxsYqNTVVe/bs0cGDB9WnTx+NHDmy1D1q0KCBJk+erJCQEFmt1tue2qcwDgAAAAAAAAAAAKBc3Cz89urVSxaLRYGBgbp8+bKOHTtWpF+lSpUUFhYmSWrdurXuv/9+4y1zBwcHRUZGSpJatWqlBx54QIcOHZIkVa1aVd26dZMk+fr6qkaNGgoJCZEk+fn56cSJE5Kk9evXKykpSf7+/rJYLHruueeUkZGha9euSZICAgLUuHFj4+eTJ0+WaY3Xrl3TkCFDZDKZ1KpVK3377beF3mDv1KmT6tatWyTu1q1b1b9/f1WuXFmVK1fWoEGDio3/008/yWq1Gus3mUxq3bq10b5u3TrNmDFDFotFXl5e2rFjh9LS0oz2/v3767777lP16tXVt29f47T9reLi4jRs2DBJUoMGDRQREaG4uDjZ29tr5MiRWrBggSRpwYIFevbZZ41xLi4u8vHxkXRj711cXNS8eXNJhfd+3bp1iouLk4+PjywWi6ZPn67vvvvutnt0JxzueAQAAAAAAAAAAAAA3AU2m0116tQpVCguKzs7uzK1V6lSxfjO3t5eVatWLfQ5Ly/PyGXAgAGaOnVqsfFKGnc748aNU7169XTo0CE5ODgoIiJC2dnZdxz3dustqa/NZtPatWvVrFmzOx5blj5RUVFyc3NT//79lZaWVug6+V+urbS9f/XVVzV06NBi5/u1e38rTowDAAAAAAAAAAAAKBeurq5ydHTUsmXLjO/S0tKMd7Zv7VdQUKCvvvpKkrR7926dO3dOFotFkpSXl6cV
K1ZIkvbt26ezZ88abWUVHh6ulStXGieVCwoKlJiYeNtxjo6OysrKKrE9MzNTDz74oBwcHJSammqs4XZCQ0O1cuVK5ebmKicnp9Ae/XJ+Ly8vffDBB5JkXL1+U7du3fT2228bxeTMzMxCJ8ZvznHt2jXFxsYqNDS02Fzef/99SdKFCxf06aefqkOHDpKk2rVr6/HHH1f37t01bNgw2dvbl2l9t+rWrZuio6MLva9+88R/aW6397eiMA4AAAAAAAAAAADgNxs2bJgefPBBff/99woLC5OLi4vRNmTIkGLf33ZwcNAXX3yhTz/9VGazWe7u7ho8eLBxfflNlStX1qeffqqJEyfKbDZr1KhR+uSTT1SjRg1JUq1atXTkyBF5enpq4MCBio2NVc2aNe8o/6CgIE2fPl3du3eXp6en3N3dtXr16tuOu5m3h4dHodPSN40fP17Lli2T2WzW2LFj1a5duzLlExUVpaZNm8rNzU2tW7cutdD/wQcfaNGiRfLw8ND48eMVHBxstM2ePVvVqlWTxWKR2WxW+/btlZ6ebrS3aNFCjzzyiEwmk4KCgtSrV68i8efOnavjx4/LZDIpJCREr732mvz9/QvleuHCBUVFRZVpbb/Up08fRUZGKiQkRJ6enrJYLNq2bdttx7Vv317Xr1+X2WzW8OHDS+1rZ7PZbL8qOwAAAAAAAAAAAAAoZ+np6bJYLLp06VJ5p/Kn07ZtW40aNcp4g/3Xmjlzpo4fP64lS5bcncR+B7wxDgAAAAAAAAAAAAD4Vdzd3WVnZ6fNmzeXdyql4sQ4AAAAAAAAAAAAAKBC441xAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAwB9i4sSJat68ufz9/Uvs06ZNG7m4uOiXF1/b2dndlXfEu3TpotTU1N8c5/eeIzo6WjNmzDA+Dx48WG5uburevbs2bNigF1988bemWaxLly5p2rRpv2qs1WrV6tWrC31X2u/tt+5T27ZttW7dujL15Sp1AAAAAAAAAAAAAH+IatWq6dSpU2rYsGGx7SdOnFCbNm1Uu3ZtLViwQG3btjXa7OzslJmZKScnp1LnyM/Pl729/V3Muvz9+OOPatKkiX766afffW3p6emyWCy/6o8QYmJitG7dukLF6rL+3n6Ntm3batSoUerWrdtt+3JiHAAAAAAAAAAAAMBds2XLFnl7e8tsNqtNmzY6duyYJCkwMFDZ2dnq2LGjnn/++WLHLl26VH379tWQIUO0ZMmSMs0XExOjkJAQPfHEEzKZTNq3b59mzZqlli1bymKxqGXLltqzZ4/R39nZWVarVZI0ZcoUtWjRQhaLRRaLRd9++60kaf/+/WrXrp18fX3l5eWlNWvWFDv34sWL5ebmJovFIpPJpL179xaZIyUlRQEBAXJ3d1dERIQ6duyomJgYSVJkZKSGDRum9u3bq1mzZoqIiFBOTo4kadKkSRo1apQuXbqkkJAQZWdny8fHR9OmTVNMTEyhYvCyZctksVjk6ekpX19fpaenKy8vT2FhYfL19ZW7u7t69+6tn3/+WZIUHx8vDw8PjRw5Up6ennJ3d1diYqIkafjw4bp8+bIsFot8fX2LXfeKFSvk7+8vb29vBQcHKykpSefPn9eECRO0fft2WSwWDR8+3Oj/7rvvys/PT40bN9ayZcuK/V2cOXNGPXr0kMlkktls1uuvvy5Jio2Nlb+/v7y8vOTp6anPP/+81P8PJXH4VaMAAAAAAAAAAAAA4BfOnz+v3r17Kz4+XiaTSatWrVKPHj109OhR7d69W3Z2dtqxY0exp4fz8/O1fPlybdu2TfXq1dM///lPZWVlqVatWredd+/evTp06JBcXV0lSS4uLvrHP/4hSUpISFBkZKRSUlIKjcnMzNTMmTP1ww8/qFq1arp69aoqVaqkS5cuaejQodq0aZMaNmyo//3vf/L29lZgYKAaNWpUKMZLL72klJQUNWzYULm5ubp+/XqR3Pr166eRI0dq4MCBOn78
uLy8vNS7d2+j3Wq1avv27apSpYqCg4O1du1aPf3000a7k5OTNm3aJIvFYhSRbxbWpRtF7smTJ2v37t1q2LChrl69Kkmyt7dXbGys6tatK5vNppEjR2revHkaO3aspBsF+yVLlujdd99VdHS0XnvtNW3ZskXR0dGF5vqlXbt26cMPP9TXX3+tKlWqaMeOHerdu7eOHj2qyZMnFzkxLklVqlTRvn37lJKSopYtW6pfv35ycChcqu7bt686duyoTz75RJJ04cIFSVJYWJiefvpp2dnZKT09Xa1atdK3336rKlWqFJtfSSiMAwAAAAAAAAAAALgr9u7dK5PJJJPJJEnq06ePnnnmGZ05c0YPPvhgqWM3bdokZ2dnNW/eXJIUGhqq2NhYjRgx4rbzBgYGGkVxSTp06JDefPNNXbx4UQ4ODkpNTdW1a9dUrVo1o4+jo6OaNm1qFGS7du2qBx98UNu2bdOpU6fUuXPnQnOkpqYWKYy3b99e/fr102OPPabOnTurWbNmhdp/+uknWa1W9e/fX5LUokULtW7dulCf7t27q3r16pIkPz8/nTx58rbrvdXGjRvVr18/43r6m7EKCgo0e/Zsbdy4UXl5ecrKylJgYKAxzsXFxXjrPSAgQDNnzizTfOvXr1dSUlKhd+IzMjJ07dq1Esf06dNHktS8eXM5ODjo3Llzhf4/XLlyRTt37tSWLVuM7+rXry9JOn36tPr06aPvv/9eDg4OysjI0OnTp43/J2VFYRwAAAAAAAAAAABAuVuyZIm++eYbOTs7S5KuXbum9PT0MhXGa9SoYfyck5OjiIgIbd++XS1bttRPP/2kWrVq6fr164UK4/b29kpISNDu3bsVHx+vVq1a6cMPP5TNZpO7u7t2795923nXrl2rAwcOKD4+Xl26dNGUKVPUq1evUsfY2dkV+ly1atVCOeXl5d123rKIjY3Vtm3b9J///EeOjo6aO3eutm3b9pvntdlsGjBggKZOnVrmXH7LGnv16qVp06apR48ekqQ6deooOzu7zONv4o1xAAAAAAAAAAAAAHdFq1atdPjwYR05ckSStHr1ajVq1KjISetf+vHHH7V161alpaUpPT1d6enp+uGHH3T27FklJSXdUQ7Z2dnKycnRQw89JEmaN29esf0uX76sH3/8UUFBQXr99dfVunVrHTp0SIGBgTp9+rTi4uKMvlar1Xj7+6a8vDydPHlSvr6+Gj16tHr06KF9+/YV6uPo6ChPT0+tXLlS0o1T5zt37ryj9dzOY489ppUrV+qHH36QJF29elVXr15VZmam6tWrJ0dHR12+fLnQ9eulcXR01LVr14qs96bw8HCtXLlS3333naQbJ9Nvvk/u6OiorKysO15DjRo1FBwcrH/961/GdzevUs/MzFTjxo0lSStXrlRmZuYdx5cojAMAAAAAAAAAAAC4S+rXr69Vq1apf//+MpvNeu+997RmzZoip6R/afny5erYsWOht8crVaqkXr16acmSJXeUg6Ojo6ZMmSI/Pz/5+PiocuXKxfbLyspSRESETCaTzGazcnNzNWDAANWuXVsbN27U1KlT5enpKTc3N40dO1YFBQWFxufn52vQoEHy8PCQxWLRgQMHjHfNb/XBBx/ovffek4eHh8aMGaOWLVsW+8b6rxUcHKyJEycqLCxMnp6eatOmjS5cuKD+/fvr6tWrcnV1VefOnRUUFFSmeHXq1DF+f76+vkXag4KCNH36dHXv3l2enp5yd3fX6tWrJd24Wv769esym80aPnz4Ha1jxYoVSkxMlLu7uywWi+bPny9Jeuedd9SjRw95eXnp0KFDxh883Ck7m81m+1UjAQAAAAAAAAAAAAClunLliv7yl7/Izs5Op0+fVkBAgPbv36+//e1v5Z3a/ym8MQ4AAAAAAAAAAAAAv5Pdu3fr5ZdflnTjlPns2bMpipcDTowDAAAAAAAAAAAAACo03hgHAAAAAAAAAAAAUKHUq1dP6enpv+scQ4YM0fbt
22/bLyYmRikpKWWKOWnSJI0aNeo3ZobicJU6AAAAAAAAAAAAANyhxYsXl6lfTEyMnJyc1Lx58985oz9WXl6eHBz+POVmTowDAAAAAAAAAAAAuCf06dNHvr6+MpvN6tq1q86dOydJSk9Pl5OTkyZOnCgfHx+5uLho06ZNxrgNGzaoRYsWMpvNeuWVV0qMn5CQIB8fH1ksFnl4eOi9996TJJ0/f14REREymUzy8PDQwoULjTHHjx9XWFiYzGazzGazoqOjJUlt27bVunXrJEmXL19WVFSU/Pz8ZDabNXToUOXk5Gjx4sVKTEzUiy++KIvFok2bNunw4cNq3bq1vL295ebmpilTppRpb0rKIy0tTaGhoTKbzbJYLEZOkmRnZ6epU6fKz89PjRs31rJly24b79y5c3rqqafk5+cnk8mk8ePHG2OcnZ01ZswY+fn5acCAAZo0aZJ69uypxx57TG5ubmrXrp0yMjJK3evyQmEcAAAAAAAAAAAAwD1hzpw5SkxMVHJysoKCgjRp0iSjLSsrS2azWQcOHND8+fP14osvSrpR1B44cKDWrl2r5ORkubi46OLFi8XGf+uttzR69GhZrVYdOXJEvXr1kiQ999xzcnV11eHDh7Vt2zZNmTJFCQkJysvL0+OPP67IyEglJycrOTlZPXr0KBL3pZdeUlBQkPbt26ekpCQVFBTonXfe0ZAhQ+Tr66vZs2fLarWqS5cucnZ21tatW3Xw4EEdOHBAa9euVUJCQqn7Uloeffr00ZNPPqnk5GStWbNGgwcP1rfffmuMrVKlivbt26cvv/xSzz//vPLy8kqNN2DAAD3zzDPat2+fDh06pMTERK1Zs8aId/HiRe3du1erVq2SJO3du1cxMTE6duyYGjRoYPxRQUl7XV7+PGfbAQAAAAAAAAAAAFRosbGxWrFihbKzs5Wdna169eoZbVWrVlVERIQkKSAgQCdPnpR042Sy2WyWm5ubJGnw4MF67rnnio0fEhKiN954QydOnFC7du3UunVrSVJcXJwOHDggSWrQoIEiIiIUFxenmjVrKjs7W08//bQR49acblq3bp327NmjWbNmSZKuXbsme3v7YnO4du2aRo4cKavVqkqVKum///2vrFarWrVqVeK+pKamFpvH5cuXdfDgQe3atUuS1LRpU7Vu3Vo7duzQww8/LOlG4VySmjdvLgcHB507d05ZWVnFxvv555+1detW/fjjj8b3V65cUWpqqvE5MjJSdnZ2xudOnTqpbt26km78Xg4fPlzqXpcXCuMAAAAAAAAAAAAAyt3OnTs1d+5c7dmzRw0aNNCGDRs0YcIEo71KlSpGQdbe3l75+fnFxrm1aPtLo0aN0uOPP664uDiNGzdOHh4eevfdd+8oRnFsNpvWrl2rZs2a3bbvuHHjVK9ePR06dEgODg6KiIhQdnb2Hc1Xml/mXrVqVeNne3t75eXllTjWZrNJuvHHBreOu1WNGjXKFL+se/1H4Sp1AAAAAAAAAAAAAOUuMzNTNWvWVN26dZWTk1Pone/SBAQEKDk5WSkpKZKkpUuXKicnp9i+qampaty4saKiojRu3DjjCvPQ0FC9//77kqQLFy7o008/VYcOHeTq6qrq1avrww8/NGL873//KxK3W7duevvtt42icGZmptLS0iRJjo6OysrKKrTOBx98UA4ODkpNTdVXX3112zWWlEfNmjXl7e1tvB2elpamnTt3Kjg4+FfFq1GjhkJCQjRt2jTj+7Nnz+r777+/bY6/VNJelxcK4wAAAAAAAAAAAADKXadOneTq6ipXV1cFBQXJYrGUaVz9+vW1dOlSde/eXZ6enjpx4oRxtfcvzZ8/X+7u7vLy8tL48eP1r3/9S5I0d+5cHT9+XCaTSSEhIXrttdfk7+8vBwcHrV+/XsuWLZPJZJKnp6fWrl1bJO7s2bNVrVo1WSwWmc1mtW/fXunp6ZKkoUOHaurUqbJYLNq0aZPGjx+vZcuWyWw2a+zYsWrXrt1t
11haHqtWrdJHH30kT09P9ejRQ4sXL9ZDDz30m+KlpaXJw8NDJpNJERERJb7ZXpqS9nrDhg0aMmTIHcf7rexsN8/DAwAAAAAAAAAAAABQAXFiHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAOXOYrHIYrHIzc1N9vb2xueePXsqPj5eFoulvFOsMDZs2KAXX3zxN8Wws7PTpUuX7k5CfwA7m81mK+8kAAAAAAAAAAAAAECS0tPTZbFYChVd4+PjNWrUKFmt1nLL616Rl5cnBweH8k5DdnZ2yszMlJOTU3mnUiacGAcAAAAAAAAAAABwz8vLy9PIkSPl6ekpd3d3JSYmGm1btmxR69at5ePjIz8/P23fvl2SdOLECT3yyCPy9PSUyWTS+PHjJUm5ubkaO3as/Pz8ZLFY9NRTTykzM7PYeTdu3KiWLVvK09NTFotFe/fuNeb09vaW2WxWmzZtdOzYMUk3ivgeHh4l5lpSvP3796tdu3by9fWVl5eX1qxZI+nGHwo4OTlpzJgx8vb21vz589W2bVuNHj1aQUFB+vvf/67hw4cb8RcvXiw3NzdZLBaZTCYj/q1iYmLUrVu335TvrZydnQv90YKvr6/i4+OL/0WWk/L/UwIAAAAAAAAAAAAAuI2UlBQtWbJE7777rqKjo/Xaa69py5YtOnXqlCZNmqQtW7bI0dFRaWlpCgoKUnp6uubPn69HH31Ur776qiQpIyNDkjRjxgz95S9/0b59+yRJb7zxhsaPH68FCxYUmvObb77RwIED9fXXX6t58+bKzc3V1atXdf78efXu3Vvx8fEymUxatWqVevTooaNHj5aaa0nxLl26pKFDh2rTpk1q2LCh/ve//8nb21uBgYGSpKysLLm7u+vtt9+WJK1bt04nT57U9u3blZubKzc3N+3Zs0cBAQF66aWXlJKSooYNGyo3N1fXr1//1XtbUr5/RpwYBwAAAAAAAAAAAHDPc3Fxkb+/vyQpICBAJ0+elCRt3rxZaWlpCg4OlsViUY8ePVSpUiV99913Cg4O1vvvv6/XXntN//73v41rv9etW6eVK1ca75h/+OGHOn36dJE5v/rqK3Xq1EnNmzeXJN13332qVauW9u7dK5PJJJPJJEnq06ePzp49qzNnzpSaa0nxdu/erVOnTqlz586yWCwKDQ2VJKWmphr9+vbtWyi3nj17ysHBQdWqVZPFYjHmaN++vfr166d33nlHp0+fVo0aNX713paU758RJ8YBAAAAAAAAAAAA3POqVq1q/Gxvb6+8vDxJks1mU4cOHRQbG1tkTNOmTRUYGKivvvpK8+fP15w5c7Rp0ybZbDbNmzdPHTt2/ENzLYnNZpO7u7t2795dpC09PV3Vq1dXpUqFzzyXNMfatWt14MABxcfHq0uXLpoyZYp69ep1V/P9JQcHB+Xn5xufs7Oz72j8H4ET4wAAAAAAAAAAAAD+tMLCwhQXF6fk5GTju5tXpJ84cUL333+/+vfvr+nTpyshIUGS1K1bN82ePdu4Fvzq1avGNei/jL1lyxalpKRIuvE2eVZWllq1aqXDhw/ryJEjkqTVq1erUaNGatSo0W1zLS5eYGCgTp8+rbi4OKOv1WpVTk7OHe1FXl6eTp48KV9fX40ePVo9evQw9uLXKCnfX3JxcTHeHt+3b59x0v1ewolxAAAAAAAAAAAAAH9aLi4uio2N1bBhw3T16lXl5OTIy8tLsbGx+uSTT7Ry5UpVrlxZBQUFio6OliSNGTNG169fl7+/v+zs7Izv3N3di8RetmyZ+vbtq9zcXNnb2ys6Olp+fn5atWqV+vfvr7y8PNWuXVtr1qwxYpWWa0nxNm7cqNGjR+ull15Sbm6uHnroIa1bt+6O9iI/P1+DBg1SRkaGHBwcVL9+fS1btuyOYpQ131tNmTJFAwYM0MKFCxUQEFBoH6Ojo3X27FlNnjxZktSlSxdNnjxZvr6+
SkxM1IQJE7Rp06ZfnWNZ2dlsNtvvPgsAAAAAAAAAAAAAAOWEq9QBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAAFChURgHAAAAAAAAAAAAAFRoFMYBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAABAucjOzla3bt3UrFkzeXp6qkOHDkpLS7ujGG3atJGLi4tsNluJfb744gu1bdu22LYjR47I2dn5jub8NTZs2KAXX3zxrsaMiYlRrVq1ZLFY5OnpKbPZrPXr1xvtXbp0UWpq6l2Za8KECVq1atWvGmuz2dS4cWO1b9++0Pfp6elycnK6C9ndHoVxAAAAAAAAAAAAAOVm6NChSk1NVVJSkh5//HENGTKkzGNPnDihEydOqEqVKvrPf/5z13PLy8u7a7HCw8M1e/bsuxbvppCQEFmtViUlJWnRokWF9m/Tpk1ydXW9K/NMnjxZffr0+VVjt27dKicnJyUnJ+v06dN3JZ87RWEcAAAAAAAAAAAAQLmoWrWqunTpIjs7O0lSq1atlJ6eXubxS5cuVd++fTVkyBAtWbLE+D43N1cjR45U06ZN5efnp+3btxcaN2nSJDVt2lQ+Pj5avXq18f3NE8xjxoyRt7e35s+fr3Pnzumpp56Sn5+fTCaTxo8fL0kqKCjQs88+qxYtWsjT01M+Pj7Kzs7WhQsX1LFjR5lMJpnNZg0cOFDSjdPd3bp1kyTFx8fLw8NDI0eOlKenp9zd3ZWYmGjksXDhQjVr1kze3t564403jP25nUuXLql27drGZ2dnZ1mtVknSrFmz1LJlS1ksFrVs2VJ79uwpdR2/FBkZqTlz5hj717NnTz322GNyc3NTu3btlJGRUWJeS5YsUVRUlHr37q2lS5eWaS13m0O5zAoAAAAAAAAAAAAAv/DOO+/o8ccfL1Pf/Px8LV++XNu2bVO9evX0z3/+U1lZWapVq5YWLVqk1NRUHT16VJIUFhZmjNu4caPWrFmjAwcOqGbNmurXr1+huFlZWXJ3d9fbb79tjB03bpzatGmjvLw8Pfroo1qzZo1cXFy0detWHT16VJUqVVJWVpYqV66slStXqnHjxvr3v/8tSSUWjFNSUrRkyRK9++67io6O1muvvaYtW7boyJEjmjRpkg4dOqS//vWvmjhxYqn7sH37dlksFl29elVnzpzRRx99VGy/fv366R//+IckKSEhQZGRkUpJSVFSUlKx67idvXv36sCBA6pbt6569eqlhQsX6tVXXy3SLyMjQ5s3b9Z7772n7777Tl27dtU///lPVar0x57h5sQ4AAAAAAAAAAAAgHI3depUpaWl6a233ipT/02bNsnZ2VnNmzdXvXr1FBoaqtjYWEk3ru7u37+/KleurMqVK2vQoEHGuK1bt+qpp56So6Oj7OzsNGzYsEJx77vvPvXt21eS9PPPP2vr1q164YUXZLFY5Ovrq7S0NKWmpqpJkybKy8vToEGDtHz5cuXm5qpSpUpq1aqVvvzyS7300ktav369/vKXvxSbv4uLi/z9/SVJAQEBOnnypCRp27Zt6tSpk/76179KkqKiokrdh5tXqX/zzTfau3evhgwZorNnzxbpd+jQIbVp00YeHh4aPny4UlNTde3atRLXcTudOnVS3bp1i+T/S6tWrVLnzp3l5OQks9ms+++/X1u2bLlt/LuNwjgAAAAAAAAAAACAcjVz5kx9+umn+vLLL1W9evUyjVmyZIm++eYbOTs7y9nZWTt27Ch0nfqtSruK/Jdt1atXNwrDNptN0o0T1larVVarVWlpaRo/frxq1aqlI0eOqHfv3kpJ
SZHZbFZaWpoCAgJktVrl7++vTz/9VC1btlR+fn6ReatWrWr8bG9vX+J75mW9Rl2SPDw89NBDD2nXrl2Fvs/JyVFERIRmzpypI0eO6Ouvv5YkXb9+vcR13E5Z81+yZIm2bdtm/J5Onz5d4u/p90RhHAAAAAAAAAAAAEC5mTVrlj788EN99dVXcnJyKtOYH3/8UVu3blVaWprS09OVnp6uH374QWfPnlVSUpJCQ0O1cuVK5ebmKicnR8uWLTPGhoaGas2aNbp8+bJsNpsWLVpU4jw1atRQSEiIpk2bZnx39uxZff/997pw4YJ+/vlndezYUVOnTpWzs7OOHTum06dPq0aNGnrqqac0b948ffPNN7py5UqZ9yMkJERbtmzR+fPnJemOisjff/+9Tpw4oWbNmhX6Pjs7Wzk5OXrooYckSfPmzTPaSlrH3XDgwAFduHBBZ8+eNX5PJ0+e1JYtW3ThwoW7MkdZURgHAAAAAAAAAAAAUC6+//57vfTSS7p06ZJCQkJksViM68UlacKECYqOji4ybvny5erYsWOhQnqlSpXUq1cvLVmyRFFRUWratKnc3NzUunVrWSwWo1+XLl3Uo0cPeXt7y9fX1ygWl2TVqlVKS0uTh4eHTCaTIiIidPHiRf33v/9Vhw4dZDab5eHhIQ8PD3Xu3Fnx8fHy8fGRxWJRYGCgZsyYoVq1apV5T0wmk8aPH69HHnlE3t7eys7OLnX8zTfGLRaLwsLCNHXqVHl6ehbq4+joqClTpsjPz08+Pj6F3hAvaR13w5IlS9SrV69CV7M7OTmpQ4cOWrFixV2Zo6zsbDfP/wMAAAAAAAAAAAAAyt3ly5dVs2ZNSdI777yjzZs368svvyznrP7cHMo7AQAAAAAAAAAAAADA/zN27Fjt2rVLubm5euCBB7Rw4cLyTulPjxPjAAAAAAAAAAAAAIAKjTfGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAL+7y5cvq0aNGho8eHCh7+Pj42WxWH7XuePj47V58+bfdY7yEh0drRkzZty2393c5wkTJmjVqlVG3D9ib0ePHq1Jkyb96vG8MQ4AAAAAAAAAAADgd/fRRx/Jx8dHn376qd555x3VqFHjD5s7Pj5ely5dUqdOnf6wOf8ow4cP/0Pny8vL0+TJk43Pf5a95cQ4AAAAAAAAAAAAgN/dkiVLNGbMGAUHB+ujjz4q05jFixfLzc1NFotFJpNJe/fu1SeffKKOHTsaffLz8/Xwww/r2LFjOnHihB555BF5enrKZDJp/Pjxslqtio6O1qpVq2SxWIyi7pYtW9S6dWv5+PjIz89P27dvl3Sj0Ovh4aERI0bIbDbLZDIpOTlZkZGRMplM8vf315kzZyRJCQkJ8vHxkcVikYeHh957771i17Fx40a1bNlSnp6eslgs2rt3rySpT58+8vX1ldlsVteuXXXu3DlJUnp6upycnDRx4kT5+PjIxcVFmzZtKjb2pEmTNGrUKElSTEyMQkND9fTTT8tkMsnX11enTp0y+ubl5WnkyJHy9PSUu7u7EhMTjbbS9sPd3V2DBw+WxWLRZ599psjISM2ZM6fEvb1VbGys/P395eXlJU9PT33++edGW9u2bTV69GgFBQXp73//e6Ei/w8//KCwsDC5ubkpNDRU33//fbHrLytOjAMAAAAAAAAAAAD4XR07dkz//e9/FRYWpry8PE2bNq3IlerFeemll5SSkqKGDRsqNzdX169fl6+vr0aPHq3U1FS5urpqw4YNcnFxkZubm1544QU9+uijevXVVyVJGRkZqlOnjoYPH65Lly5pzpw5kqRTp05p0qRJ2rJlixwdHZWWlqagoCClp6dLklJSUrR8+XK99957ev3119WuXTvt3LlTzZs31zPPPKM5c+ZoxowZeuuttzR69Gg9/fTTkqTMzMwia/jmm280cOBAff3112revLlyc3N19epVSdKcOXNUv359SdK0adM0adIkRUdHS5KysrJkNpv1
z3/+U5s3b9YLL7ygLl263HbP9u/fL6vVqsaNG2vs2LF6++23tXDhQmNdS5Ys0bvvvqvo6Gi99tpr2rJly2334/jx43r33Xe1ZMkSSTcK/ZJksViK7O0vhYWF6emnn5adnZ3S09PVqlUrffvtt6pSpYok6eTJk9q+fbtyc3Pl5uamPXv2KCAgQM8//7z8/Py0ZcsWnTlzRhaLRc2bN7/t+kvCiXEAAAAAAAAAAAAAv6slS5aof//+sre3V5cuXXT69GkdP378tuPat2+vfv366Z133tHp06dVo0YN2dvba+TIkVqwYIEkacGCBXr22WclScHBwXr//ff12muv6d///recnJyKjbt582alpaUpODhYFotFPXr0UKVKlfTdd99JklxcXOTj4yNJ8vX1lYuLi1GU9fPz04kTJyRJISEheuONNzR58mTt3LlTtWvXLjLXV199pU6dOhnj77vvPtWqVUvSjdPUvr6+8vDw0OLFi2W1Wo1xVatWVUREhCQpICBAJ0+evO1+3ezbuHHjYse5uLjI39+/SNvt9qNJkyZq06ZNmeb/pdOnT6tz587y8PBQt27dlJGRodOnTxvtPXv2lIODg6pVqyaLxWLktHXrVg0ZMkSS1KhRI4WHh/+q+W+iMA4AAAAAAAAAAADgd5Obm6sVK1Zo+fLlcnZ2louLi65evWqcPi7N2rVrNW3aNOXm5qpLly5avXq1JCkqKkpr1qxRYmKi0tLSjKLpE088oV27dsnV1VXz58/Xo48+Wmxcm82mDh06yGq1Gv/OnDmjpk2bSrpRlL7J3t6+yOe8vDxJ0qhRo7Rx40Y1bNhQ48aN08iRI8u8Lzt37tTcuXO1adMmHTlyRLNmzVJ2drbRXqVKFdnZ2Rlz5ufnlyluSbmW1na7/fgt78H36tVLQ4YM0ZEjR2S1WlWjRo1C6ywt31vd3Itfi8I4AAAAAAAAAAAAgN/Nhg0b1KRJE505c0bp6elKT09XQkKCVqxYodzc3BLH5eXl6eTJk8bV6T169NC+ffskSbVr19bjjz+u7t27a9iwYbK3t5cknThxQvfff7/69++v6dOnKyEhQZLk6OiorKwsI3ZYWJji4uKUnJxsfHcz9p1ITU1V48aNFRUVpXHjxhnz3SosLExbtmxRSkqKpBt/KJCVlaXMzEzVrFlTdevWVU5OjnHdeXn4Lfvxy739pczMTOME+8qVK4u9br44oaGhWrp0qaQb741v2LChTONKwhvjAAAAAAAAAAAAAH43S5YsUZ8+fQp916JFCzVq1Eiff/656tSpU+y4/Px8DRo0SBkZGXJwcFD9+vW1bNkyoz0qKkoxMTGKiooyvvvkk0+0cuVKVa5cWQUFBcZ73d27d9eKFStksVgUERGhCRMmKDY2VsOGDdPVq1eVk5MjLy8vxcbG3tHa5s+fr23btqly5cqyt7fXv/71ryJ9XFxctGzZMvXt21e5ubmyt7dXdHS0OnXqpJUrV8rV1VV169ZVaGiozpw5c0fz3y0uLi6/ej+K29tbvfPOO+rRo4ecnJzUrl07PfTQQ2XK6Z133lFkZKTc3NzUqFEjtWvX7let7SY7m81m+00RAAAAAAAAAAAAAOAPNnPmTB0/frxMV7IDnBgHAAAAAAAAAAAA8Kfi7u4uOzs7bd68ubxTwZ8EJ8YBAAAAAAAAAAAAABVapfJOAAAAAAAAAAAAAABuJz4+XhaLRZJ09uxZBQUFGW3r169XixYtZLFYdPjw4TLFS09Pl5OTU7Ftv4yPG9LT041324uzYcMGvfjii3ccd926dUpISDA+3/q7vlsojAMAAAAAAAAAAAD4U3nggQe0Y8cO43N0dLQmTJggq9Uqk8l01+Pfy/Ly8v6wuW5XGA8PD9fs2bPvOO4vC+O/BwrjAAAAAAAAAAAAAH6zPn36yNfXV2azWV27dtW5c+ck/b+T2a+//rq8vb3VtGlT7dq1Sy+++KIsFos8PDx05MgRSTdOCnt4eKh///7y8PCQ
j4+PrFZrkbluPe39/PPPa8eOHRo3bpwCAwP16KOPKjY21uj773//W/7+/iXmPXr0aJnNZrm7uysuLq5I/NLWduHCBXXs2FEmk0lms1kDBw40xsycOVN+fn7y9vZWp06d9O2330qSPv/8c5nNZmPt69evL5JTafsQHx8vd3d3DR48WBaLRZ999plOnDihrl27qmXLljKbzZo/f74k6dq1a+rZs6fc3Nzk6empjh07GnOsWLFC/v7+8vb2VnBwsJKSkiRJMTExCg0N1dNPPy2TySRfX1+dOnVKkjR8+HClpqbKYrEoPDy8SN4xMTHq1q1boTWMHDlSnp6ecnd3V2JiYpExmzZt0oYNGzRjxgxZLBYtXrxY0o2Cf0ljt2zZotatW8vHx0d+fn7avn17ib9fgw0AAAAAAAAAAAAAfqPz588bP7/11lu2YcOG2Ww2m+306dM2SbbPPvvMZrPZbIsXL7b95S9/sW3bts1ms9ls06dPt/Xo0cNms9ls27dvt0myxcXF2Ww2m+2jjz6yubq62goKCmzbt2+3eXp6GjFr1aplzNemTRsj/r///W9bQECA0RYeHm774IMPiuR7M6/FixfbbDabbc+ePbb69evbfvrppyLxS1rbrFmzbEOHDjXaLl68aLPZbLZVq1bZhgwZYsvLy7PZbDbbBx98YOvSpYvNZrPZzGazbffu3TabzWbLz8+3ZWZmFsntdvtgZ2dni4+Pt9lsNlteXp7Nx8fHdvz4cZvNZrP9/PPPNpPJZNu3b5/t008/tXXs2LFIfjt37rR17tzZlp2dbbPZbLavv/7a5ubmZrPZbLZly5bZHB0dbadOnbLZbDbbmDFjjDXe+jsozrJly2yPP/640dfe3t6WkJBgs9lstvfee69QLrcaMGCAbfbs2YXWX9LYkydP2lq1amXLysqy2Ww224kTJ2x//etfjbWUxOH2pXMAAAAAAAAAAAAAKF1sbKxWrFih7OxsZWdnq169ekZb1apVjZPEvr6+qlGjhkJCQiRJfn5+WrVqldHX2dlZ7du3lyQ99dRTGjp0qP773/+WOY8OHTpo1KhROnTokOrUqaN9+/bp448/Lravg4ODIiMjJUmtWrXSAw88oEOHDumhhx4q09patWql2bNn66WXXlJwcLA6deok6cbV4Pv375ePj48kKT8/34jVvn17vfDCC+rRo4c6duxY4lvape1DkyZN1KZNG0lSamqqjh49ql69ehljL1++rGPHjikoKEjHjx/XyJEj1aZNG3Xp0kXSjTfZk5KSCp2kz8jI0LVr1yRJAQEBaty4sfHzvHnzStvyErm4uBhzBAQEaObMmb957ObNm5WWlqbg4GCjb6VKlfTdd9+padOmJcajMA4AAAAAAAAAAADgN9m5c6fmzp2rPXv2qEGDBtqwYYMmTJhgtFepUsX42d7eXlWrVi30ubR3su3s7GRnZ3dH+Tz//POaN2+e7r//fg0aNKjQ/Lfzy7lKW1tAQICsVqvi4uL06aef6vXXX9ehQ4dks9n06quvaujQoUXiz5o1S0ePHtX27ds1YMAA9enTR6+88kqZ8rqZW40aNYzvbTab6tSpU+yV85J07Ngxbdu2TXFxcXrllVdktVpls9k0YMAATZ06tdgxd/L7Kc1viVPSWJvNpg4dOhS6Lr8seGMcAAAAAAAAAAAAwG+SmZmpmjVrqm7dusrJydHChQt/daz09HTjzehPPvlE999/vx588ME7itGvXz9t2bJFy5Yt0/Dhw0vsl5eXpxUrVkiS9u3bp7NnzxY5wV3a2k6fPq0aNWroqaee0rx58/TNN9/oypUr6tatm6Kjo5WRkSFJys3N1aFDhyRJKSkpcnd317PPPqsRI0YoISHhN+2Dq6urHB0dtWzZMuO7tLQ0ZWRk6Pvvv5ednZ3Cw8M1c+ZM2Ww2/fe//1V4eLhWrlyp7777TpJUUFBQ7Pvfv+To6KisrKzb9rtTdxI3LCxMcXFxSk5ONr7bt2/fbcdx
YhwAAAAAAAAAAADAb9KpUyetXLlSrq6uqlu3rkJDQ3XmzJlfFcvd3V0xMTF6/vnnVblyZX344Yd3fGK8evXqioiI0NmzZ/W3v/2txH61atXSkSNH5Onpqby8PMXGxqpmzZq6ePFimdYWHx+vWbNmGSeaZ8yYoVq1aqlPnz66ePGicV18Xl6eBg0aJC8vL40bN06pqamqXLmyqlevrvfee+837YODg4O++OILjRo1SrNnz1Z+fr7q1aun2NhYHT58WK+++qpsNpvy8vLUr18/mc1mSdL06dPVvXt35eXlKScnR127dpWvr2+p+2o2m+Xu7i4PDw81adJEGzZsKP0XUUb9+vVTZGSk1q1bp2eeeUYuLi4l9nVxcVFsbKyGDRumq1evKicnR15eXrc9QW5ns9lsdyVbAAAAAAAAAAAAAPgN4uPjNWrUqBKvBS+r/Px8+fj4aN68eQoKCro7yf2B7tY+4P/hKnUAAAAAAAAAAAAAFcaGDRv097//XQEBAX/Kojh+H5wYBwAAAAAAAAAAAABUaJwYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAANzz6tWrp/T09Nv2mzRpkrKzs+84fnp6uqKjowt95+zs/Kd75/vSpUuaNm3arx6fmJionj173sWM7g0UxgEAAAAAAAAAAABUGP/85z/vWmH8XpGXl1fmvrcrjN8ulq+vrz766KMyz/dnQWEcAAAAAAAAAAAAwD1nw4YNatGihcxms1555ZVCbaNHj1bLli1lsVgUHBys1NRUSdLw4cMlSUFBQbJYLDp//rxiY2Pl7+8vLy8veXp66vPPPy92vuHDhys1NVUWi0Xh4eHG959++qkCAgLUuHFjTZkyxfj+3Llzeuqpp+Tn5yeTyaTx48dLkj755BN17NjR6Jefn6+HH35Yx44dKzLnrFmzjHW0bNlSe/bsMdqcnZ01ZswY+fn5acCAAcrNzdXYsWPl5+cni8Wip556SpmZmcWu4/Lly7JYLPL19ZUktW3bVs8//7wCAgLUsWNH5eXlKSwsTL6+vnJ3d1fv3r31888/S5Li4+NlsVgk3fhjAScnJ02cOFE+Pj5ycXHRpk2bjLn279+vdu3aydfXV15eXlqzZk2xe3svsLPZbLbyTgIAAAAAAAAAAAAAbjp//rxatGihHTt2yM3NTYsWLdKwYcN0+vRpOTs768KFC6pfv74kafXq1YqJidHmzZslSXZ2dsrMzJSTk5Mk6eLFi6pTp47s7OyUnp6uVq1a6dtvv1WVKlUKzRkfH69Ro0YVujrd2dlZ4eHhmjt3rv73v//p73//u44dO6ZGjRopLCxM48aNU5s2bZSXl6dHH31UgwcPVkREhP7+979ry5YtcnV11Weffab58+dr69atRdZ56zoSEhIUGRmplJQUY+7Q0FC9//77srOz09SpU5Wfn6/XX39dkvTGG2/o3LlzWrBgQaGY6enpslgsunTpkvFd27ZtVbVqVX3++ee67777ZLPZlJGRobp168pms2nkyJF6+OGHNXbs2EL7kJ6ersaNG+uTTz7RE088oc2bN+uFF15QamqqLl26pJCQEG3atEkNGzbU//73P3l7e2vPnj1q1KjRr//l/04cyjsBAAAAAAAAAAAAALhVQkKCzGaz3NzcJEmDBw/Wc889Z7R/9dVXmjdvni5fvqyCggJlZGSUGOv06dPq06ePvv/+ezk4OCgjI0OnT59W8+bNy5RL7969Jd1447xJkyY6ffq0nJyctHXrVv34449GvytXrig1NVX29vYaOXKkFixYoLlz52rBggV69tlni4196NAhvfnmm7p48aIcHByUmpqqa9euqVq1apKkyMhI2dnZSZLWrVunrKwsrV27VpKUk5MjZ2fnMq1Bkvr27av77rtPkmSz2TR79mxt3LhReXl5ysrKUmBgYLHjqlatqoiICElSQECATp48KUnavXu3Tp06pc6dOxfqn5qaSmEcAAAAAAAAAAAAAO7UzeKwJH33
3Xd69tlntX//fv39739XcnKygoODSxzbq1cvTZs2TT169JAk1alT547eIK9atarxs729vfLy8nTzUu6EhIRC7TdFRUXJzc1N/fv3V1paWqGr2W/KyclRRESEtm/frpYtW+qnn35SrVq1dP36daMwXqNGDaO/zWbTvHnzCl3TfidujRUbG6tt27bpP//5jxwdHTV37lxt27at2HFVqlQx9t/e3l75+flGPu7u7tq9e/evyuePxhvjAAAAAAAAAAAAAO4pAQEBSk5ONq4VX7p0qXJyciRJWVlZuu+++9SwYUPZbDbNnz+/0NiaNWsqKyvL+JyZmanGjRtLklauXFnsu9yS5OjoWGhcaWrUqKGQkBBNmzbN+O7s2bP6/vvvJUm1a9fW448/ru7du2vYsGGyt7cvEiM7O1s5OTl66KGHJEnz5s0rdc5u3bpp9uzZunr1qiTp6tWrOnr0aLHruHbtmrFfxcnMzFS9evXk6Oioy5cvKyYm5rZr/qXAwECdPn1acXFxxndWq7XUecsThXEAAAAAAAAAAAAA95T69etr6dKl6t69uzw9PXXixAnVrVtXkmQymdSrVy+5u7urZcuWRmH5ppdeekkdOnSQxWLR+fPn9c4776hHjx7y8vLSoUOHivS/yWw2y93dXR4eHsWe8P6lVatWKS0tTR4eHjKZTIqIiNDFixeN9qioKF24cEFRUVHFjnd0dNSUKVPk5+cnHx8fVa5cudT5xowZo5YtW8rf319ms1mtWrUq9B76TXXq1FH//v1lNpvl6+tbbKz+/fvr6tWrcnV1VefOnRUUFHTb9f5S7dq1tXHjRk2dOlWenp5yc3PT2LFjVVBQIEnq0qWLEhMTJUmJiYnq0qWLMXbChAmKjo6+4zl/CzvbzXP+AAAAAAAAAAAAAIC7YubMmTp+/LiWLFlS3qlAvDEOAAAAAAAAAAAAAHeVu7u77OzstHnz5vJOBf8/TowDAAAAAAAAAAAAACo03hgHAAAAAAAAAAAAAFRoFMYBAAAAAAAAAAAAABUahXEAAAAAAAAAAAAAQIVGYRwAAAAAAAAAAAAAUKFRGAcAAAAAAAAAAAAAVGgUxgEAAAAAAAAAAAAAFRqFcQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAABUaBTGAQAAAAAAAAAAAAAVGoVxAAAAAAAAAAAAAECFRmEcAAAAAAAAAAAAAFChOZR3AgAAAAAAAAAAAADwf0HHjh117tw5VapUSTVr1tTcuXPl5eVV3mn9n2Bns9ls5Z0EAAAAAAAAAAAAAFR0ly5dkpOTkyTps88+06RJk5SUlFS+Sf0fwVXqAAAAAAAAAAAAAPAHuFkUl6SsrCzZ2dmVXzL/x3CVOgAAAAAAAAAAAAD8Qfr376/t27dLkjZt2lTO2fzfwVXqAAAAAAAAAAAAAPAHW758uT766COK438QCuMAAAAAAAAAAAAAUA6qVaum77//XnXr1i3vVCo83hgHAAAAAAAAAAAAgN/ZpUuXdPbsWePzunXrVLduXdWpU6ccs/q/gzfGAQAAAAAAAAAAAOB3lpWVpSeffFLXrl1TpUqVVL9+fX3xxReys7OTJA0ZMkTh4eEKDw8v50wrJq5SBwAAAAAAAAAAAABUaFylDgAAAAAAAAAAAACo0CiMAwAAAAAAAAAAAAAqNArjAAAAAAAAAAAAAIAKjcI4AAAAAAAAAAAAgHuWs7OzXF1dZbFY5ObmpgULFpTYt0uXLkpNTf0Ds/v91KtXT+np6UW+z83N1fPPPy93d3d5enrKzc1Ns2bN+sPyio6O1owZMyRJVqtVq1ev/sPm/i0cyjsBAAAAAAAAAAAAACjNRx99JIvFom+//VZms1lBQUEym81Ge0FBgSRp06ZN5ZXiH+add97R2bNnlZSUJAcHB2VnZ+vkyZN/yNx5eXkaPny48dlqtWrdunXq1avXHzL/b8GJcQAAAAAAAAAAAAB/Cg8//LBc
XV31zTffaNKkSXriiScUFhYmDw8P/fDDD3J2dpbVatWuXbtkMpkKjW3btq3Wr1+vvLw8hYWFydfXV+7u7urdu7d+/vlno9+yZctksVjk6ekpX19fpaen69lnn9XUqVONPqmpqfrb3/6mvLy8Ijn26dNHvr6+MpvN6tq1q86dOydJSk9Pl5OTkyZOnCgfHx+5uLgUKuRv2LBBLVq0kNls1iuvvFLiHnz//fdq0KCBHBxunIGuWrWq3N3djfYVK1bI399f3t7eCg4OVlJSktH29ttvy2QyydPTU61atdLVq1cVHx8vi8Vi9Dly5IicnZ0L5TxmzBh5e3tr/vz5mjRpkkaNGqXz589rwoQJ2r59uywWi4YPH66ZM2dq6NChRqxLly6pXr16ysjIKHE9fxQK4wAAAAAAAAAAAAD+FA4fPqyUlBR5enpKkvbs2aMPPvhAx44dU6NGjYx+jzzyiK5fv67ExERJ0qlTp5SamqquXbvK3t5esbGxSkxM1JEjR1SrVi3NmzdPkhQfH6/Jkyfryy+/VFJSkr7++ms1aNBAzz33nBYtWqT8/HxJ0rvvvquhQ4caxelbzZkzR4mJiUpOTlZQUJAmTZpktGVlZclsNuvAgQOaP3++XnzxRUnS+fPnNXDgQK1du1bJyclycXHRxYsXi92DqKgoff7552rRooWioqK0evVqI69du3bpww8/1Ndff62DBw/qzTffVO/evSVJy5cv19q1a7Vz504lJSXpyy+/VJUqVW6751lZWXJ3d9fBgwc1atQo4/sGDRpo8uTJCgkJkdVqVXR0tIYMGaJ169bp0qVLkm78kcHjjz+uOnXq3Hae3xtXqQMAAAAAAAAAAAC4p/Xs2VPVqlVT9erVtXTpUjVt2lTSjTfF77///mLHDBw4UMuWLZOvr6+WL1+uPn36yMHBQQUFBZo9e7Y2btyovLw8ZWVlKTAwUJK0ceNG9evXTw0bNpQkVa9eXZLk6uoqNzc3rV+/XmFhYfrwww91+PDhYueNjY3VihUrlJ2drezsbNWrV89oq1q1qiIiIiRJAQEBxhXoCQkJMpvNcnNzkyQNHjxYzz33XLHx3d3ddfLkSe3cuVO7d+/WxIkTtWLFCm3cuFHr169XUlKS/P39jf4ZGRm6du2avvjiCw0fPly1atWSJNWuXbsMOy/dd9996tu3b5n6Ojk5qUePHlq6dKlefPFFvffee/roo4/KNPb3RmEcAAAAAAAAAAAAwD3t5hvjv1SjRo0SxwwYMECenp6aOXOmPvjgA33xxReSbhSut23bpv/85z9ydHTU3LlztW3bttvm8MILL+jtt9/WhQsX1KFDh2IL8jt37tTcuXO1Z88eNWjQQBs2bNCECROM9ipVqsjOzk6SZG9vb5z0/qWbfUpSuXJltWvXTu3atdOQIUPUsGFDZWRkyGazacCAAYWufb8dBweHQnlkZ2cXaq9evboqVSr7ReTPP/+8wsPD1aJFC9WvX19eXl5lHvt74ip1AAAAAAAAAAAAABXOAw88oJYtW+rFF19UgwYNjHe4MzMzVa9ePTk6Oury5cuKiYkxxjz22GNauXKlfvjhB0nS1atXdfXqVUlSx44dde7cOU2ZMkXPPvtssXNmZmaqZs2aqlu3rnJycrRw4cIy5RoQEKDk5GSlpKRIkpYuXaqcnJxi+3799ddGfpJ04MAB1alTR05OTgoPD9fKlSv13XffSZIKCgqM6+TDw8MVHR2trKwsSTfe/87Pz1eTJk307bff6sKFC5JuvFFeVo6Ojka8m5o3b64mTZpo6NChJe5TeaAwDgAAAAAAAAAAAKBCGjhwoBYuXKiBAwca3/Xv319Xr16Vq6urOnfurKCgIKMtODhYEydOVFhYmDw9PdWmTRujYGxnZ6fBgwerQYMGCggIKHa+Tp06ydXVVa6urgoKCir2lHtx6tevr6VLl6p79+7y9PTUiRMnVLdu3WL7fvfdd+ratavc3NxksVj01ltvaf369apU
qZKCgoI0ffp0I467u7tWr14tSerXr5+eeOIJBQYGytPTU126dNH169f1wAMP6JVXXpGfn59atWp1R++Bt2/fXtevX5fZbNbw4cON76OiopSXl6cePXqUOdbvzc5ms9nKOwkAAAAAAAAAAAAAuNc9+uij6tmzp/r161feqdzTnn32Wd1///16/fXXyzsVAyfGAQAAAAAAAAAAAKAUiYmJcnFxUaVKldS7d+/yTueedfbsWTVv3lwHDx7UqFGjyjudQjgxDgAAAAAAAAAAAACo0DgxDgAAAAAAAAAAAOA3uXz5smrUqKHBgwcX+j4+Pr7M72z/WvHx8dq8efPvOsevNXjwYLm5ual79+5at26dEhISyjWfSZMmldtJbmdnZ1mt1nKZW6IwDgAAAAAAAAAAAOA3+uijj+Tj46NPP/1UV65c+UPnvlcL4z/++KNWr16tw4cP67PPPvtDCuN5eXm/a/w/2t1cD4VxAAAAAAAAAAAAAL/JkiVLNGbMGAUHB+ujjz4q05jFixfLzc1NFotFJpNJe/fu1SeffKKOHTsaffLz8/Xwww/r2LFjOnHihB555BF5enrKZDJp/Pjxslqtio6O1qpVq2SxWDR58mRJ0pYtW9S6dWv5+PjIz89P27dvl3SjiO7h4aERI0bIbDbLZDIpOTlZkZGRMplM8vf315kzZyRJCQkJ8vHxkcVikYeHh957771i17FixQqZzWaZzWZ17dpVZ86c0aVLlxQSEqLs7Gz5+Pho6tSp2rBhg2bMmCGLxaLFixcbY/39/eXt7a3g4GAlJSVJkmJiYhQaGqqnn35aJpNJvr6+OnXqVLHzt23bVs8//7wCAgKMvZs5c6b8/Pzk7e2tTp066dtvvy0yzmQyaffu3cbnRYsWqWfPnsXOMWPGDLm7u8tkMqlPnz7KysqSdOMEes+ePfXYY4/Jzc1N7dq1U0ZGRgm/8f9n1qxZatmypSwWi1q2bKk9e/YYbc7OzhozZoz8/Pw0YMAAXb58WT179lTz5s0VFBSkYcOGKTIy0uhflrVKksNtswIAAAAAAAAAAACAEhw7dkz//e9/FRYWpry8PE2bNq3IlerFeemll5SSkqKGDRsqNzdX169fl6+vr0aPHq3U1FS5urpqw4YNcnFxkZubm1544QU9+uijevXVVyVJGRkZqlOnjoYPH65Lly5pzpw5kqRTp05p0qRJ2rJlixwdHZWWlqagoCClp6dLklJSUrR8+XK99957ev3119WuXTvt3LlTzZs31zPPPKM5c+ZoxowZeuuttzR69Gg9/fTTkqTMzMwiazhy5IhefvllHThwQI0aNdKbb76pIUOG6Msvv9SmTZtksViM68O/+eYbWSwW4yrzXbt26cMPP9TXX3+tKlWqaMeOHerdu7eOHj0qSdq/f7+sVqsaN26ssWPH6u2339bChQuL3ctvvvlGX3/9te677z7FxsYqNTVVe/bskb29vVasWKGRI0dq48aNhcY8//zzmj9/vgIDAyVJCxYs0Pz584vE/vLLL7V06VLt2bNHTk5OGjp0qMaOHWv8ocDevXt14MAB1a1bV7169dLChQuN31FJ+vXrp3/84x+SbvwBQmRkpFJSUoz2ixcvau/evbKzs9PLL7+satWq6fjx47py5YoCAwPl4+MjSWVeq0RhHAAAAAAAAAAAAMBvsGTJEvXv31/29vbq0qWLhg0bpuPHj6tFixaljmvfvr369eunxx57TJ07d1azZs0kSSNHjtSCBQs0d+5cLViwQM8++6wkKTg4WC+//LKuXLmiNm3aKDQ0tNi4mzdvVlpamoKDg43vKlWqpO+++06S5OLiYhRWfX195eLioubNm0uS/Pz89Nlnn0mSQkJC9MYbb+jEiRNq166dWrduXWSu7du3q1OnTmrUqJGR++TJk5Wfn3/bfVu/fr2SkpLk7+9vfJeRkaFr165JkgICAtS4cWPj53nz5pUYq2/fvrrvvvskSevWrdP+/fuNNZaUS9++
fTVhwgT9+OOPOnHihOzs7BQUFFSkX1xcnHr27CknJydJ0ogRI/Tkk08a7Z06dVLdunWNPA8fPnzbtR86dEhvvvmmLl68KAcHB6WmpuratWuqVq2aJCkyMlJ2dnaSpK1bt2r27Nmys7NTzZo11bNnT6Wlpd3RWiUK4wAAAAAAAAAAAAB+pdzcXK1YscI4qSxJV69e1ZIlSzRz5sxSx65du1YHDhxQfHy8unTpoilTpqhXr16KioqSm5ub+vfvr7S0NIWHh0uSnnjiCQUGBuqrr77S/PnzNWfOHG3atKlIXJvNpg4dOhj53OrMmTOqWrWq8dne3r7I55vvWo8aNUqPP/644uLiNG7cOHl4eOjdd98tdU03i7llYbPZNGDAAE2dOrXY9pLyKk6NGjUKxX311Vc1dOjQUuevVq2aIiMjtXDhQh0/flzPPPNMmfL+5RrvJE9JysnJUUREhLZv366WLVvqp59+Uq1atXT9+nWjMH7rekqbv6xrlXhjHAAAAAAAAAAAAMCvtGHDBjVp0kRnzpxRenq60tPTlZCQoBUrVig3N7fEcXl5eTp58qRxdXqPHj20b98+SVLt2rX1+OOPq3v37ho2bJjs7e0lSSdOnND999+v/v37a/r06UpISJAkOTo6Gm9eS1JYWJji4uKUnJxsfHcz9p1ITU1V48aNFRUVpXHjxhnz3SokJESbN2/W2bNnJUnR0dFq3769kfOtfplneHi4Vq5caZxkLygoUGJi4h3n+UvdunVTdHS08dZ3bm6uDh06VGzfZ555RosWLdK2bdvUp0+fYvuEhobq448/1k8//SRJWrhwYaF34O9Udna2cnJy9NBDD0lSqSfhJaldu3Zavny5bDabrly5oo8//thou5O1cmIcAAAAAAAAAAAAwK+yZMmSIgXVFi1aqFGjRvr8889Vp06dYsfl5+dr0KBBysjIkIODg+rXr69ly5YZ7VFRUYqJiVFUVJTx3SeffKKVK1eqcuXKKigoUHR0tCSpe/fuWrFihSwWiyIiIjRhwgTFxsZq2LBhunr1qnJycuTl5VXsCfLSzJ8/X9u2bVPlypVlb2+vf/3rX0X6eHh4aMaMGerUqZMk6W9/+5vef//9YuP169dPkZGRWrdunZ555hkNGTJE06dPV/fu3ZWXl6ecnBx17dpVvr6+d5TnL/Xp00cXL15USEiIpBt/hDBo0CB5eXkV6fvggw/Ky8tLzZo1U/Xq1YuN17lzZx05ckQBAQGqVKmSzGbzbU/Ol8bR0VFTpkyRn5+f6tWrp169epXaf8KECRo8eLBatGihevXqydPT07jW/U7Wamez2Wy/OmsAAAAAAAAAAAAAuMtmzpyp48ePa8mSJeWdSoX2888/y9XVVTt27DDeM7/X5ObmKj8/X1WrVtXPP/+ssLAwPffcc+rZs+cdxeHEOAAAAAAAAAAAAIB7hru7u+zs7LR58+byTqVCi46O1ptvvqmRI0fes0VxScrMzFTnzp2Vn5+v7OxsPf7443rqqafuOA4nxgEAAAAAAAAAAAAAFVql8k4AAAAAAAAAAAAAAIDfE4VxAAAAAAAAAAAAALhFenq6oqOj72rMhIQEmUwmeXl5acuWLaX2/eKLL9S2bdvbxrRarVq9evVdyrDsJkyYoFWrVpXaZ8iQIdq+ffsflNHtURgHAAAAAAAAAAAAgFv8HoXx5cuXq3fv3jp06JDCwsLuSszfqzCel5dXavvkyZPVp0+fUvssXrxYISEhdzOt34TCOAAAAAAAAAAAAIBy0adPH/n6+spsNqtr1646d+6cpBuFaScnJ73++uvy9vZW06ZNtWvXLr344ouyWCzy8PDQkSNHjDgzZsyQu7u7TCaT+vTpo6ysLEnSpEmTNGrUKKPf/PnzFRkZKUmKiYlRaGionn76aZlMJvn6+urUqVOSpOHDhys1NVUWi0Xh4eEqKCjQs88+qxYtWsjT01M+Pj7Kzs4usp7z588rIiJCJpNJHh4eWrhwoSRp
2rRp+uijjzR//nxZLBZdunSp0Ljc3FyNHDlSTZs2lZ+fX6GT1ufOnVNISIh8fHzk7u6uZ599VgUFBTp//rwmTJig7du3y2KxaPjw4aXuaWlu7veYMWPk7e2t+fPna+vWrQoICJCXl5fc3d21ZMkSo39kZKTmzJkjSfr8889lNpuN38v69eslSW3bttW6deuM/sOGDVP79u3VrFkzRUREKCcnR5J0+fJl9ezZU82bN1dQUJCGDRtm/I7uJgrjAAAAAAAAAAAAAMrFnDlzlJiYqOTkZAUFBWnSpElGW1ZWlnx8fHTw4EGNHTtWYWFhCg8Pl9Vq1YABA/TPf/5TkvTll19q6dKl2rVrlw4fPqy//OUvGjt2bJnm379/v6ZOnarDhw8rNDRUb7/9tiQpOjparq6uslqt2rBhg5KSkrR161YdPXpUSUlJ2rZtmypXrlwk3nPPPSdXV1cdPnxY27Zt05QpU5SQkKCxY8cqPDxcL7/8sqxWq5ycnAqNW7RokVJTU3X06FHt3LlTBw8eNNqcnJz0+eef68CBA0pOTlZ6ero+/vhjNWjQQJMnT1ZISIisVqtxwr20PS1NVlaW3N3ddfDgQY0aNUre3t7auXOnDh06pB07dmjy5Mn6/vvvi4wbP368Fi5cKKvVquTkZLVp06bY+FarVZ9//rmOHz+uH3/8UWvXrpV04/R5tWrVdPz4cW3atEm7d+8uU753isI4AAAAAAAAAAAAgHIRGxsrX19feXh4aPHixbJarUZb1apV1a1bN0mSr6+vatSoYVzN7efnpxMnTkiS4uLi1LNnT6PYPGLECH311Vdlmj8gIECNGzc2fj558mSx/Zo0aaK8vDwNGjRIy5cvV25uripVKlpqjYuL07BhwyRJDRo0UEREhOLi4m6bx9atW9W/f39VrlxZlStX1qBBg4y2goICjRkzRp6envLy8lJiYmKhffql0va0NPfdd5/69u1rfL548aKefPJJeXh4qF27drp48WKhU/o3tW/fXi+88IKmT5+u5OTkIkX/m7p3767q1avL3t5efn5+xl5v3bpVAwcOlJ2dnWrWrKmePXuWKd87RWEcAAAAAAAAAAAAwB9u586dmjt3rjZt2qQjR45o1qxZha4nr1KlivGzvb29qlatWuhzSe9g29nZGT87ODgoPz/f+PzL68/LGrNWrVo6cuSIevfurZSUFJnNZqWlpd12jbfmciduHTdr1iydP39ee/fuVXJysnr37l3sNe7S7fe0NNWrVy9U7B8+fLhat26tw4cPy2q1qlmzZsXGmjVrlpYtW6bq1atrwIABmj59erHxf83v726iMA4AAAAAAAAAAADgD5eZmamaNWuqbt26ysnJMd7jvlOhoaH6+OOP9dNPP0mSFi5cqI4dO0qSXFxclJiYqPz8fF29etW4vvt2HB0djXfKJenChQv6+eef1bFjR02dOlXOzs46duxYsbm8//77xphPP/1UHTp0KNMaVq5cqdzcXOXk5GjZsmVGW2Zmpv7617+qatWqOnfunNasWVNinrfb0/bt22vfvn1l2oPMzEw9/PDDsrOz09dff62kpKRi+6WkpBhvn48YMUIJCQllin9Tu3bttHz5ctlsNl25ckUff/zxHY0vKwrjAAAAAAAAAAAAAP5wnTp1kqurq1xdXRUUFCSLxfKr4nTu3FkDBw5UQECATCaTfvrpJ7311luSpIiICD3wwANq0aKFHn30UXl5eZUpptlslru7uzw8PBQeHq7//ve/6tChg8xmszw8POTh4aHOnTsXGTd37lwdP35cJpNJISEheu211+Tv73/b+aKiotS0aVO5ubmpdevWhfbihRde0N69e+Xu7q5+/fopNDTUaGvfvr2uX78us9ms4cOHl7qn+fn5SkpK0oMPPlimPZg2bZrGjh0ri8WipUuXlriOcePGyd3dXV5eXlqxYkWZ3zS/acKECbp8+bJatGihTp06ydPTs8Tr2H8LO5vNZrvrUQEAAAAA
AAAAAAAA94z9+/dr4cKFWrx4cXmnUkhubq7y8/NVtWpV/fzzzwoLC9Nzzz13198apzAOAAAAAAAAAAAAACgX58+fV+fOnZWfn6/s7Gw9/vjjmjZt2l1/a5zCOAAAAAAAAAAAAACgQuONcQAAAAAAAAAAAAAoBzExMerWrdtdjRkfH6/NmzfftXgWi0WXL1++a/HKC4VxAAAAAAAAAAAAAChGXl5eeadwx+5WYfzm2q1Wq2rWrPmb45U3CuMAAAAAAAAAAAAA7gl79uxR69at5enpKbPZrPXr10uSEhMTFRgYKLPZLD8/P+3atUuSlJ6eLicnJ02cOFE+Pj5ycXHRpk2bbhvvxIkT6tq1q1q2bCmz2az58+cbY+zs7DRx4kS1bNlSr776qiIjIzVs2DC1b99ezZo1U0REhHJyciRJn3/+ucxmsywWizw8PIz4tzp37pxCQkLk4+Mjd3d3PfvssyooKDDaf/rpJ4WHh8vNzU3BwcFKT0+XJOXn5+vll1+Wh4eHPDw89NxzzxnzRkZGas6cOUaM0aNHa9KkSbJarYqOjtaqVatksVg0efLkIvlERkZq0KBBCgwMVLNmzTRgwABdu3atUFtwcLA8PDyM/bh06ZJWrVqlRx991Ihjs9nUpEkTJSUl3XaN9wIK4wAAAAAAAAAAAADKXUZGhrp166a33npLSUlJslqtCgoKUk5OjiIiIjRx4kQlJydr1qxZeuKJJ3TlyhVJUlZWlsxmsw4cOKD58+frxRdfLDVefn6+nn76af3rX//S/v37lZCQoEWLFmn//v1GLvb29tq/f79mzJgh6cap6c8//1zHjx/Xjz/+qLVr10qSxo8fr4ULF8pqtSo5OVlt2rQpsi4nJyd9/vnnOnDggJKTk5Wenq6PP/7YaN+1a5fefvttHTt2TI8++qiGDh0qSUZOBw4ckNVq1cmTJzV79uxS99BisWj48OHq06ePrFarJkyYUGy/vXv3asuWLTp+/LgyMjIKxT1w4IA2btyolJSUQmMiIiKUkJCgc+fOSbpxMr127dry9PS87RrvBRTGAQAAAAAAAAAAAJS7PXv2yNXVVUFBQZKkSpUqqU6dOkpNTVWlSpUUFhYmSWrdurXuv/9+Wa1WSVLVqlUVEREhSQoICNDJkydvG+/o0aPq1auXLBaLAgMDdfnyZR07dszIZdCgQYVy6969u6pXry57e3v5+fkZc7Rv314vvPCCpk+fruTkZDk5ORVZV0FBgcaMGSNPT095eXkpMTHRyF2SAgMD1aJFC0nS0KFDFR8fr/z8fMXFxSkyMlJVqlSRg4ODoqKi9NVXX/3GXb7hqaeeUs2aNWVvb6/BgwcrLi7OaHvyySeLvTq9WrVqeuKJJ7RixQpJN95HHzhwYJnWeC9wKO8EAAAAAAAAAAAAAOBO2NnZGT9XqVLF+Gxvb6/8/PxSx9psNtWpU6fUwm2NGjUKfa5atarxs729vfH+9qxZs3T06FFt375dAwYMUJ8+ffTKK68UGjtr1iydP39ee/fuVdWqVfWPf/xD2dnZZVrnrW5ds4ODQ6F1ZmdnF8n518YuLc6gQYM0cOBAjRgxQl988YVx0vxurfH3xIlxAAAAAAAAAAAAAOUuMDBQJ06c0I4dOyTdOIWckZEhV1dXFRQUGKeld+/erXPnzslisfzqeI6Ojlq2bJnRNy0tTRkZGXecc0pKivGm9ogRI5SQkFCkT2Zmpv7617+qatWqOnfunNasWVOofc+ePca15YsXL1ZISIjs7e0VGhqqDz74QDk5OcrLy9PixYvVsWNHSZKLi4v27dsnSbp48WKhd9UdHR2VlZVVat6ffPKJrly5ovz8fC1btkyhoaFlWq+/v7+kG2+ah4aGqk6dOmVa472AwjgAAAAAAAAAAACAcle7dm199tlnGjt2rMxms7y9vbVr1y5VrlxZn376qSZOnCiz2axRo0bpk08+ue0J6ZLiOTg46IsvvtCnn34qs9ksd3d3DR48WNeu
XbvjnMeNGyd3d3d5eXlpxYoVmjRpUpE+L7zwgvbu3St3d3f169evSBE6MDBQY8aMkbu7uzZs2KCFCxdKunGture3t7y9vWWxWOTs7KxRo0YZbRcuXFCLFi3Uv39/tWrVyojXvXt3Wa1WWSwWTZ48udi8W7ZsqbCwMLVo0UJOTk5G3LIYOHCgFi5caFyjfrs1nj17ttAfMURHRxd6+7xLly5KTEws8/y/lp3NZrP97rMAAAAAAAAAAAAAAMpdZGSkLBbLHRXDKwJOjAMAAAAAAAAAAAAAKjROjAMAAAAAAAAAAAAAKjROjAMAAAAAAAAAAAAAKjQK4wAAAAAAAAAAAACACo3COAAAAAAAAAAAAACgQqMwDgAAAAAAAAAAAACo0CiMAwAAAAAAAAAAAAAqNArjAAAAAAAAAAAAAIAKjcI4AAAAAAAAAAAAAKBCozAOAAAAAAAAAAAAAKjQKIwDAAAAAAAAAAAAACo0CuMAAAAAAAAAAAAAgAqNwjgAAAAAAAAAAAAAoEKjMA4AAAAAAAAAAAAAqNAojAMAAAAAAAAAAAAAKjQK4wAAAAAAAAAAAAD+VJYtWyY7OzutW7euvFP5Q8TExKhbt253Pe6RI0fk7Ox81+PeiyiMAwAAAAAAAAAAAPjTSE9P1/vvv69WrVqVdyqFFBQUqKCgoLzTKFFeXt49GeuPQmEcAAAAAAAAAAAAwJ9CQUGBhgwZonnz5qlKlSplHjdlyhS1aNFCFotFFotF3377bZE+W7duVUBAgLy8vOTu7q4lS5YYbVlZWRoyZIg8PDzk6empQYMGSZImTZqkJ554QmFhYfLw8NAPP/ygFStWyGw2y2w2q2vXrjpz5owkKSEhQT4+PrJYLPLw8NB7770nSVq8eLHc3NxksVhkMpm0d+/eYtfw008/KTw8XG5ubgoODlZ6erokKT8/Xy+//LI8PDzk4eGh5557Tjk5OZKkyMhIDRo0SMHBwfLw8DBybtq0qXx8fLR69epCc2zZskWtW7eWj4+P/Pz8tH37dklSfHy83N3dNXjwYFksFn322Wdlzvte4VDeCQAAAAAAAAAAAABAWcyaNUuPPPKIfHx8yjwmMzNTM2fO1A8//KBq1arp6tWrqlSp6Plhb29v7dy5U/b29srIyJCXl5fCwsL04IMPatSoUapWrZqSk5NVqVIlXbhwwRi3Z88eHTp0SPfff7+OHDmil19+WQcOHFCjRo305ptvasiQIfryyy/11ltvafTo0Xr66aeNvCTppZdeUkpKiho2bKjc3Fxdv3692HXs2rVLVqtVLVq00PTp0zV06FD9+9//1qJFi7R//34dOHBA9vb2Cg8P1+zZszVmzBhJ0oEDB7Rz507VrFlTGzdu1Jo1a3TgwAHVrFlT/fr1M+KfOnVKkyZN0pYtW+To6Ki0tDQFBQUZBfjjx4/r3XffNf5goFatWmXK+17BiXEAAAAAAAAAAAAA97wjR45o7dq1Gj9+/B2Nc3R0VNOmTdW3b18tXLhQGRkZqlq1apF+Fy9e1JNPPikPDw+1a9dOFy9e1JEjRyRJX3zxhUaPHm0U1OvXr2+M69Kli+6//35J0vbt29WpUyc1atRIkjRy5Eht27ZN+fn5CgkJ0RtvvKHJkydr586dql27tiSpffv26tevn9555x2dPn1aNWrUKHYdgYGBatGihSRp6NChio+PV35+vuLi4hQZGakqVarIwcFBUVFR+uqrr4xxTz75pGrWrCnpxqn4p556So6OjrKzs9OwYcOMfps3b1ZaWpqCg4NlsVjUo0cPVapUSd99950kqUmTJmrTpo3Rv6x53ysojAMAAAAAAAAAAAC45+3YsUPp6elq2rSpnJ2dlZCQoKFDhxpXkpfE3t5eCQkJGjVqlM6fP69WrVppx44dRfoNHz5crVu31uHDh2W1WtWsWTNlZ2ffNq/SCsJ2dnbGz6NGjdLGjRvVsGFDjRs3TiNHjpQkrV27VtOmTVNubq66dOlS5Hrz
O3XrnHeSn81mU4cOHWS1Wo1/Z86cUdOmTYuNc7fz/r1RGAcAAAAAAAAAAABwzxsxYoR++OEHpaenKz09Xa1atdKiRYs0YsSIUsddvnxZP/74o4KCgvT666+rdevWOnToUJF+mZmZevjhh2VnZ6evv/5aSUlJRlt4eLhmzpypgoICSSp0lfqtQkJCtHnzZp09e1aSFB0drfbt28ve3l6pqalq3LixoqKiNG7cOCUkJCgvL08nT56Ur6+vRo8erR49emjfvn3Fxt6zZ49SUlIk3XiXPCQkRPb29goNDdUHH3ygnJwc5eXlafHixerYsWOxMUJDQ7VmzRpdvnxZNptNixYtMtrCwsIUFxen5ORk47uScrmTvO8VvDEOAAAAAAAAAAAA4E9vwoQJeuCBBzR8+PBC32dlZalHjx76+eefZWdnp6ZNm2rAgAFFxk+bNk0jR47UG2+8IYvFIn9/f6Nt9uzZevHFF2UymXTfffepZcuWev/994vE8PDw0IwZM9SpUydJ0t/+9jej3/z587Vt2zZVrlxZ9vb2+te//qX8/HwNGjRIGRkZcnBwUP369bVs2bJi1xcYGKgxY8YoLS1NdevW1QcffCDpxrXqJ0+elLe3tySpbdu2GjVqVLExunTpon379snb21uOjo7q3Lmz0ebi4qLY2FgNGzZMV69eVU5Ojry8vBQbG1skTml5l/R7KG92NpvNVt5JAAAAAAAAAAAAAADwe+EqdQAAAAAAAAAAAABAhUZhHAAAAAAAAAAAAABQoVEYBwAAAAAAAAAAAFCunJ2d5erqKovFIjc3Ny1YsOC2Y2JiYtStW7fbtiUmJqpnz553lE9MTMz/x959R1VxPG4Dfy5F6Spgb6go0i8KIlZUIthrxF6wl6ixocYookaNBWsUY0GxJxp7J2IhFlSuoBSRgEbFmK8igo027x++7I8LF7goijHP5xzOYXdmZ2dn9+7O7OzOokyZMpDL5dLf2LFji5TGlyI4OBgnTpz46OuZMmUKfHx8Plr6Wh8tZSIiIiIiIiIiIiIiIiIiNe3ZswdyuRz37t2DnZ0dmjdvDjs7uw9O19HREXv27Cnycq1atcKBAwc+eP3/dsHBwXj+/Dk8PDxKOisfhG+MExEREREREREREREREdFno2bNmrCwsMCdO3fg4+ODiRMnSmFr1qzB4MGDpekXL16gc+fOsLKyQosWLZCQkJAnveDgYMjlcmn66NGjcHJygr29PeRyOa5cuVKk/Pn4+MDT0xOdOnWClZUVWrdujWfPngEA0tPTMWbMGNSrVw+NGzfG5MmT4erqCgB4/PgxWrVqhYYNG8La2hrjxo1DVlZWocsBQGBgIJydndGgQQO0aNECN2/eBPDuzXY3Nzf06dMHVlZWaNKkCSIjI9GtWzdYWlqibdu2SE1NldYxffp0NGrUCHK5HL169UJSUhIAYPDgwRg5ciTatGmDevXqoXv37khLS4NCocD69euxY8cOyOVy+Pr65imPnTt3wtnZGQ4ODrC3t8fhw4elMFdXV0yZMgXNmzdHnTp1MGrUKCksMTER7u7usLKygpubGx48eFCk/VBU7BgnIiIiIiIiIiIiIiIios9GREQEoqOjYW9vX2jckJAQLF68GJGRkejYsSNGjBhRYPw7d+5gyJAhCAwMxM2bNxEaGor69eurjHv27FmlodT9/PyksCtXriAgIACRkZGoUKEC/P39AQAbNmxAbGwsbt++jQsXLiA8PFxapmzZsjh8+DCuX7+O8PBwJCQkYO/evYUuFxISgl27duH8+fO4ceMGFixYgL59+0rhoaGhUhnUqVMHnTp1wvr16xEVFYVSpUph69atAIAlS5ZAX18fV69ehUKhgK2tLWbNmiWlo1AocPjwYURFReHvv//Gvn37IJfLMWrUKPTr1w8KhQKzZ8/OU07u7u64fPkywsLCcPDgQQwfPhxv376VwuPi4nD27FncunULJ0+exKVLlwAA48ePR6NGjRAZGYmt
W7ciKCiowH33oTiUOhERERERERERERERERGVOE9PT+jq6kJPTw+bN29G3bp1C12mSZMmsLS0BACMGDECs2bNQmZmZr7xT58+DQ8PD6kzXFtbG2XKlFEZt6Ch1D08PGBiYgIAcHFxQUREBAAgKCgI/fv3h7a2NgBg0KBB2LhxIwAgKysL3t7euHjxIoQQePLkCWxsbNC7d+8Clzt48CBu3rwJZ2dnaf3Pnj3D69evpfXXqFEDwLth49PT01GxYkUAgJOTE2JjYwEABw4cQHJyMvbt2wcASEtLg5mZmZRmt27doKenBwBo1KgR4uLi8i3HnOLj49GvXz88ePAAWlpaePbsGeLj46Uy9vT0hJaWFrS0tCCXyxEXFwcXFxcEBQVh6dKlAICqVauic+fOaq3vfbFjnIiIiIiIiIiIiIiIiIhKXPY3xnPS0tJS6uh+8+bNJ86Vajo6OtL/mpqayMjIUBlPJpNJ/y9fvhxPnjzBlStXoKOjg0mTJuW7PTmXE0Jg0KBB+OGHH9TKS355E0Jg9erVaNu27QdtU269e/fGokWL0LNnTwCAsbGx0na9T1l9DBxKnYiIiIiIiIiIiIiIiIg+S+bm5rh27RoyMzPx6tUr6W3nbJcuXUJ0dDQAYOPGjWjVqhU0NTXzTc/d3R0nT56UlklPT0dycnKx5bd169bYuXMn0tPTkZ6ejm3btklhSUlJqFSpEnR0dPD48WP88ssvai3XuXNnbN++Hffv3wfw7s3za9euFTlvXbt2hZ+fH169egUAePXqFW7fvl3ockZGRgWWUVJSEmrVqgUA2L59u/Td8sK4ublh8+bNAN59b/zQoUNqLfe++MY4EREREREREREREREREX2Wunfvjl9++QWWlpaoVq0aHBwcpI5d4N1Q6t7e3rh79y5MTEyUOpRVMTc3x5YtW9C/f3+kp6dDU1MT69evR6NGjfLEzf7GeDYLCwvs2bOnwPRHjhyJiIgIWFlZoVy5cnB0dMSjR48AABMmTEDPnj1hbW2NKlWqwM3NTa3lmjdvjh9//BHdunVDRkYG0tLS0KFDBzg6OhZafjl5e3vj7du3cHZ2lt7O9vb2hrW1dYHLdevWDYGBgZDL5ejevXue74yvXLkSPXv2RNmyZdG6dWtpWPfCrFy5EoMHD4aVlRWqVq2K1q1bF2l7ikomhBAfdQ1ERERERERERERERERERP8RKSkpMDQ0RHp6Ovr164eGDRvC29v7oy1H6mHHOBERERERERERERERERFRMXF2dsbbt2/x5s0bNGvWDKtXr4auru5HW47Uw45xIiIiIiIiIiIiIiIiIiL6ommUdAaIiIiIiIiIiIiIiIiIiIg+JnaMExERERERERERERERERHlYmZmBgsLC8jlcukvIiJCZVxTU1MkJCR8lHwEBAQgOjpamj506BC+/fbbj7KuL5lWSWeAiIiIiIiIiIiIiIiIiOhztGfPHsjl8hLNQ0BAAMqWLYv69esDADp37ozOnTuXaJ7+jfjGOBERERERERERERERERFRERw6dAiWlpaws7PDtGnTlMLMzMygUCikaUdHRwQHBwMAHj58iJ49e8LW1hZ2dnb4/vvvAQA7d+6Es7MzHBwcYG9vj8OHDwMANm7ciGvXruHbb7+FXC7HsWPHEBAQgK5du0rpL1myBNbW1rC1tUW/fv2QnJwMAPDx8YGnpyc6deoEKysrtG7dGs+ePft4hfKZY8c4EREREREREREREREREZEKnp6eSkOpv379Gk+ePMGQIUOwb98+hIeHw9zcHE+fPlUrvf79+6Nhw4aIiIhAeHg4xo8fDwBwd3fH5cuXERYWhoMHD2L48OF4+/Ythg0bBkdHR/j5+UGhUKB9+/ZK6R0/fhybN29GSEgIIiIioK+vj+nTp0vhV65cQUBAACIjI1GhQgX4+/sXX+H8y3AodSIiIiIiIiIiIiIiIiIiFVQNpX769GnY2dnBysoKADB06FB88803haaVmpqKixcv
4uTJk9K88uXLAwDi4+PRr18/PHjwAFpaWnj27Bni4+Ol4dPzc+bMGXh6eqJs2bIAgNGjR+Prr7+Wwj08PGBiYgIAcHFxyfcb6f8FfGOciIiIiIiIiIiIiIiIiOg9yWQypWktLS1kZmZK02/evCk0jd69e2PYsGG4desWFAoFDAwM1FqusLzo6OhI/2tqaiIjI6PIaX4p2DFORERERERERERERERERKQmFxcXhIeHIzo6GgCwefNmpKWlSeHm5ua4cuUKAODq1auIiYkBABgYGKBFixZYtmyZFPeff/4BACQlJaFWrVoAgO3btyMpKUmKY2RkJH03PDc3Nzfs3bsXL168AAD4+/ujbdu2xbWpXxR2jBMRERERERERERERERERqZD7G+Nnz55F+fLlsXnzZnTr1g329vaIjY2VhisHgPnz52Pt2rWwt7fH5s2bYW1tLYUFBgbi2rVrsLa2hlwux5o1awAAK1euRM+ePeHg4ICwsDDUqFFDWmbEiBH44YcfIJfLcezYMaX8tWvXDkOGDIGLiwtsbW3x4sULLFy48COXyr+TTAghSjoTREREREREREREREREREREHwvfGCciIiIiIiIiIiIiIiIioi8aO8aJiIiIiIiIiIiIiIiIiOiLxo5xIiIiIiIiIiIiIiIiIvqsmJmZwcLCQun73hERESWSl2HDhuHs2bPFmuaKFSvw+PFjadrHxwcTJ04s1nUUhZmZGRQKRaFh71MWcrkcKSkpH5jDD6dV0hkgIiIiIiIiIiIiIiIiIsptz549kMvlJZ0NbNy4sdjTXLFiBVxdXVGpUqViT/tjep+yyK/D/VPjG+NERERERERERERERERE9K8hk8nwww8/oFGjRqhVqxa2bNkihf3xxx+Qy+WwtbWFl5cX7O3tERwcDABYvnw5nJycIJfL4eTkhEuXLqm1nKurKw4cOAAAGDx4MEaOHIk2bdqgXr166N69O9LS0gAAKSkp8PT0RP369dG8eXOMHDkSgwcPzpN/X19fPHr0CJ6enpDL5VLHcWJiIjp16gQrKyu0bt0az549k5ZZunQpGjVqhAYNGsDDwwP37t3Lt2xmzZoFBwcH1KtXDzt27FAKe/78uTRtamqKhIQEaXrHjh1o2LAhzM3NsWTJEpXp5yyL5ORkDBs2DDY2NrC3t4eXl1e+ecper5mZGWbPng0XFxfUqlUL8+fPV7nMx8COcSIiIiIiIiIiIiIiIiL67GR3HGf/vX79WgorXbo0rl69iuPHj2P8+PHIyMhAWloaPD094efnh4iICAwYMADh4eHSMgMGDEBoaCgUCgVWr16NIUOGAEChy+WmUChw+PBhREVF4e+//8a+ffsAvOvw1tXVRVRUFI4dO4Y//vhD5fKzZ89GlSpVsGfPHigUCumt+CtXriAgIACRkZGoUKEC/P39AQA7d+5ETEwMLl26hBs3bqBfv34YM2ZMvvmTyWQICwvDiRMn8M033yh1fhfk77//xrVr13D58mWsXr063/xnmzhxIkqVKoXw8HDcvHkTixcvVms9z58/x6VLlxAaGoolS5bg4cOHai33oTiUOhERERERERERERERERF9dgoaSr1fv34AgPr160NLSwuPHz/Gs2fPoKWlhVatWgEAWrVqhTp16kjLhIWFYcGCBXj69Cm0tLQQExOD169fIzY2tsDlcuvWrRv09PQAAI0aNUJcXBwAICgoCH5+fpDJZDA0NISnpyfu3r2r9vZ6eHjAxMQEAODi4iJ9U/3AgQMIDQ1Fw4YNAQCZmZkFpjNs2DAAQO3atdGiRQucP38eZmZmha5/6NChkMlkMDU1Rffu3XHmzBk0adIk3/hHjhzBlStXoKHx7l3s8uXLF7oOAOjbty+Ad2+s165dG/Hx8ahatapay34IdowTERERERERERERERER0b+Kjo6O9L+mpiYyMjJUxpPJZADevRXevXt3nD17Fk5OTnjx4gXKlCmDt2/fFrhccaxbXfmlK4TAjBkz
MGLEiCKllzsfmpqaSp3qb968UWu54qZu+RU3DqVORERERERERERERERERP96FhYWSE9Px7lz5wAA586dk97YfvPmDdLS0lCjRg0AwOrVq9Varihat26NrVu3QgiB1NRU7N27N9+4RkZGSE5OVivdrl27Yv369dI3x9PT0xEWFpZv/OxvrickJODChQto3rw5AMDc3BxXrlwBAOzfvx8vX75UWi4gIAAA8OzZM/z2229o06ZNgfnq3Lkzli5diqysLADAP//8o9b2lBS+MU5EREREREREREREREREnx1PT0/o6upK035+ftJw56qULl0au3fvxtixY5GVlYWGDRvCwsICZcuWhZGREebPn49GjRrB1NQUvXv3Vmu5opg9ezaGDh0KS0tLmJqawt7ePt80xo8fj+HDh0NPT0/qkM5Pv3798PTpU2nbMzIy4OXlBQcHB5XxMzMz4eDggJcvX2LVqlXSMOp+fn4YP348Zs2ahQ4dOkjDtmcrX748GjZsiOTkZIwbN67AYdSz0/v2229ha2sLbW1tODk54eeffy5wmZIkE0KIks4EEREREREREREREREREdGHSklJgaGhIQAgNDQUnTt3RlxcnPRN8OJeLqf09HRkZmZCR0cHL1++hLu7O7755ht4enq+/wYVkUwmQ1JSUpE79f8L+MY4EREREREREREREREREX0R9u3bBz8/PwghoKWlhcDAQLU6t993uZySkpLQrl07ZGZm4s2bN+jSpQt69er1vptCxYxvjBMRERERERERERERERER0RdNo6QzQERERERERERERERERERE9DGxY5yIiIiIiIiIiIiIiIiIPjlTU1MkJCQAANq3b4+YmJgSzc+1a9c+6ffAVUlISCj274MnJCRg/fr1hcbbsmULZDIZLly4oDR/8ODBWLFihcpl5HI5UlJSiiObHx07xomIiIiIiIiIiIiIiIioRB07dgwWFhYlmgdHR0fs2bOnRPPwMajbMb5p0ya0adMGmzZtUjtthUIBQ0PDD8neJ8OOcSIiIiIiIiIiIiIiIiL66A4dOgRLS0vY2dlh2rRpSmFmZmZQKBQAgPnz58PS0hJyuRxyuRz37t0DAFy6dAnNmjWDvb097OzscPDgwTzLAu86uIODg/NN6/Xr1/D09ISVlRXs7e3Rtm1bAEBwcDDkcrmUTmBgIOzs7GBnZ4cOHTrg4cOHAICAgAC4ubmhT58+sLW1haOjI/78808AQGxsLJo2bQp7e3vY2tpi1qxZKsuiX79+cHR0lNJ+/PixUviUKVNgZ2cHa2trnDlzRq08de3aVYp35MgRuLq6AgBGjRqFmJgYyOVydO7cWWV+YmJiEB8fj23btuHAgQN48eKFyni5yWQyPH/+HFlZWRg3bhwsLS1hb2+Phg0b4s2bN3niJycnY9iwYbCxsYG9vT28vLzUWk9x0PpkayIiIiIiIiIiIiIiIiKi/6QnT55gyJAhuHDhAqysrLBhwwY8ffo0T7ykpCQsXboUiYmJ0NXVxatXr6ChoYFnz56ha9eu+PXXX9G8eXNkZWXh+fPnBa4zv7SOHz+O58+fIzIyEgDw7NmzPMveunULU6dOxfXr11G1alUsWLAAw4YNw/HjxwEAoaGhUCgUqFWrFqZPn47FixfD398fa9asQceOHTFjxox80waAFStWoHz58gCARYsWwcfHR3qrOzk5GZaWlli6dCkuX76Mzp07Iy4uDvfu3SswT/lZv349Jk6cqPTwQG6bNm3CgAEDUKVKFbRu3Rq7d+/GiBEjCkw3p5s3byIoKAi3b9+GhoYGkpOTUapUqTzxJk6cCF1dXYSHh0NDQwP//POP2uv4UHxjnIiIiIiIiIiIiIiIiIg+qsuXL8POzg5WVlYAgKFDh6rsODUyMkLdunXRv39/+Pv749mzZ9DR0cGlS5dgYWGB5s2bAwA0NDRgbGxc4DrzS8ve3h5RUVEYM2YM9uzZA21t7TzLnj17Fh4eHqhatSoAYMyY
Mfj999+RmZkJAHBxcUGtWrWk/+Pi4gAALVq0wM8//4zvvvsOp06dyvd74Tt37oSjoyNsbGywceNGpU5rLS0tDB48GADQuHFjVKlSBWFhYYXm6X1lZGRg27ZtGDJkCADAy8urSMOpA0Dt2rWRkZEBLy8vbN26Fenp6dDQyNsVfeTIEUyZMkUKy3444FNgxzgRERERERERERERERERfVIymUzlfE1NTVy+fBkTJ07EkydP0LhxY1y4cKHAtLS0tJQ6h7OH8M4vrdq1ayMyMhIeHh4ICQmBjY0NkpKSipRfHR0dpTxnZGQAAHr06IGQkBBYWFhIb4/ndvHiRaxatQrHjh3DrVu3sHz5cpXDjhe0/tzz8isDdRw5cgTPnz+Hu7s7zMzMMHbsWNy4cQO3bt1SO40yZcrg1q1b6Nu3L6Kjo2FnZ4e7d++qvfynwI5xIiIiIiIiIiIiIiIiIvqoXFxcEB4ejujoaADA5s2bkZaWlideSkoK/v77bzRv3hzff/89mjVrhrCwMDRp0gSxsbFSJ3lWVpY0TLm5uTmuXLkCALh69SpiYmIKTOvBgweQyWTo3Lkzli5dCiEE/vrrL6V8tGrVCidOnMCjR48AvBuOvE2bNtDU1CxwO2NjY1GxYkUMHDgQP/74Iy5fvpwnTlJSEgwNDWFiYoK0tDT4+/srhWdkZCAwMFDankePHkEulxeYJ3Nzc4SHh+P169fIyMjAzp07pfSMjIyQnJycb543bdqEFStWICEhAQkJCbh37x4mTZpUpLfG//nnH7x8+RJt27bFDz/8ADMzM2mo+pyyyzwrK0ta7lNhxzgRERERERERERERERERfVTly5fH5s2b0a1bN9jb2yM2NhYmJiZ54iUnJ6N79+6wtbWFnZ0d0tPTMWjQIJQrVw6//fYbpk+fDjs7OzRo0AAhISEAgPnz52Pt2rWwt7fH5s2bYW1tXWBaERERaNq0Kezt7eHg4IABAwbAzs5OKR82NjZYsmQJPDw8YGdnhwsXLuDnn38udDt//fVX2NrawsHBAZ6entJ3w3Py8PCAhYWFNDS8XC5XCs9++9re3h5DhgzBzp07YWhoWGCeGjdujPbt28PGxgaurq6oW7eulJ6dnR2sra1hY2ODzp07K63r0aNHCAoKwtdff600v1+/fti+fbvKhxdU+euvv/DVV1/Bzs4ONjY2sLGxQbt27fLE8/Pzw9u3b2Frawu5XI6ZM2cCAK5du4b27dtL8WbPnq1UdnK5XHog4H3JhBDig1IgIiIiIiIiIiIiIiIiIiL6jPGNcSIiIiIiIiIiIiIiIiIi+qKxY5yIiIiIiIiIiIiIiIiIiL5o7BgnIiIiIiIiIiIiIiIiIqIvGjvGiYiIiIiIiIiIiIiIiIjoi8aOcSIiIiIiIiIiIiIiIiIi+qKxY5yIiIiIiIiIiIiIiIiIiL5o7BgnIiIiIiIiIiIiIiIiIqIvGjvGiYiIiIiIiIiIiIiIiIjoi8aOcSIiIiIiIiIiIiIiIiIi+qKxY5yIiIiIiIiIiIiIiIiIiL5o7BgnIiIiIiIiIiIiIiIiIqIvGjvGiYiIiIiIiIiIiIiIiIjoi8aOcSIiIiIiIiIiIiIiIiIi+qKxY5yIiIiIiIiIiIiIiIiIiL5o7BgnIiIiIiIiIiIiIiIiIiomZmZmsLCwgFwuh1wux549e0o6SwRAq6QzQERERERERERERERERET0JdmzZw/kcnlJZ4Ny4BvjREREREREREREREREREQl7Nq1a2jSpAns7OzQqFEjhISEAAASEhJQtmxZzJkzBw0bNoS5uTmOHTtWwrn992HHOBERERERERERERERERFRMRo4cCBsbW0xdOhQ/PPPP4XGT0tLQ/fu3TFnzhyEh4dj+fLl6NGjB1JTUwEAycnJsLOzw/Xr17FmzRp8++23H3sTvjjsGCciIiIiIiIiIiIiIiIiKibnz59HeHg4bty4AVNTUwwaNKjQZWJiYqChoQF3d3cAQLNm
zVCxYkUoFAoAgI6ODrp37w4AcHFxQVxc3EfL/5eK3xgnIiIiIiIiIiIiIiIiIiomNWrUAABoa2tj4sSJqFev3nulI5PJpP9Lly4tTWtqaiIzM/PDM/ofwzfGiYiIiIiIiIiIiIiIiIiKwcuXL/H8+XNpeteuXXBwcCh0OQsLC2RlZeH06dMAgD/++AOPHz+GXC7/SDn97+Eb40RERERERERERERERERExeDvv/9Gjx49kJmZCSEEateujW3btknhw4YNQ+fOndG5c2el5UqVKoX9+/dj/PjxmDx5MnR0dPDrr7/CwMAA//vf/z71ZnyRZEIIUdKZICIiIiIiIiIiIiIiIiIi+lg4lDoREREREREREREREREREX3R2DFORERERERERERERERERERfNHaMExERERERERERERERERHRF40d40RERERERET/X3BwMGQyGZ4/f17safv4+EAulxdbegEBAShbtmyxpUdERERERPSppKSkwMDAAEOHDlWaHxwcnG+7afbs2dixY8dHz1tBefgY5HI5UlJSPtn6crp16xbMzMw+Wvo+Pj548+aNNP2p9mF+2DFOREREREREn8TgwYPRtWvXPPM/Zmd0QVxdXTFx4sSPkrZMJsOBAweU5k2ZMgVBQUEfZX0F5SP7T19fH3Xr1sXgwYNx/fr1T5qPkvYx9zURERERERXdnj170LBhQ+zfvx+pqalqLePr64t+/fp95Jx9egqFAoaGhiWdjY9i7ty5Sh3jJb0P2TFORERERERE9AkYGBjAxMTkk693y5YtSExMxO3bt7F27VqkpqbC2dkZ27Zt++R5ISIiIiIiAoBNmzbB29sbLVq0wJ49e9RaZvDgwVixYgUA4PDhw7Czs4NcLoeNjQ0OHjyocpnAwEA4OzujQYMGaNGiBW7evAng3Qhcbm5u6NOnD2xtbeHo6Ig///xTWi4jIwNjxoyBvb09rK2tce3aNWm+u7s7HB0dYW1tjb59++Lly5cA3j30bWNjo3I5ADh69CicnJxgb28PuVyOK1euAIDSg+JmZmaYPXs2XFxcUKtWLcyfP19aPjo6Gi4uLrC2tkb37t3Rtm1bBAQEqNzukydPolmzZmjYsCEaNWqEs2fPSmE+Pj6oW7cuGjZsiN27d0vzExISlEYlS01NhUwmk6YvXbqEZs2awd7eHnZ2dlKZT5kyBU5OTpDL5WjRogViYmIAAKNGjQIANG/eHHK5HE+ePFHah6mpqfDy8oKNjQ1sbGwwd+5caV2urq6YMmUKmjdvjjp16khpfSh2jBMREREREdFn4+XLlzAyMsKvv/6qNP/AgQPQ19dHSkoKEhISIJPJsHv3bjRp0gQ6OjqwsbHBuXPnlJY5d+4cGjVqhNKlS6Ny5cqYPn06MjIyALy7oXLu3DmsXLlSeqM6ISFBWvb69etwdHSEnp4emjRpIjXssx08eBANGjSAjo4Oateujblz50ppZw9D161bN8hkMmla1VDqmzdvhrW1tZTHcePGSWHLly+Hra0t9PX1Ub16dYwZM0btNylyKlu2LCpVqgQzMzO0bdsWv/76K/r164dx48YhKSlJirdv3z4pL2ZmZli2bJlSOm/fvoW3tzeqV6+O0qVLw9zcHJs2bQKgelj3AwcOKN1Eyd7+zZs3o0aNGjAwMMCYMWOQmZmJH3/8EZUqVUKFChWwYMECpXSeP3+OYcOGoXz58jAyMkLr1q2lm1k50w0MDISZmRnKlCmD3r17S0MRFraviYiIiIjo04qMjMRff/0Fd3d3DB06VGpXFMWsWbPg7+8PhUKB8PBwtGzZMk+ckJAQ7Nq1C+fPn8eNGzewYMEC9O3bVwoPDQ3FDz/8gIiICLi5uWHx4sVSWHR0NAYNGoSbN2/im2++wXfffQcA0NTUxM6dO3Ht2jXcunULZcqUwerVqwtd7s6dOxgyZAgCAwNx8+ZNhIaGon79+iq37fnz57h06RJCQ0OxZMkSPHz4EAAwYMAAjBgxArdv
38aCBQtw/vx5lcv/+eef8PHxwbFjx3D9+nXs3LkTffv2xdu3b3H06FH88ssvuH79Oq5du6Z22+jZs2fo2rUrFi5ciJs3b0KhUKB58+YAAG9vb4SGhkKhUGDMmDGYMGECAGD9+vUAgAsXLkChUKBChQpKac6bNw9v375FeHg4rly5ggMHDig9JBEXF4ezZ8/i1q1bOHnyJC5duqRWXgvCjnEiIiIiIiL6bOjr66N3797YsmWL0vwtW7agZ8+eSsPLTZ06FZMnT0ZYWBhcXFzQqVMnPH36FADw8OFDtG/fHk5OTrh58ybWrVuHTZs2SU/br1y5Ei4uLhg+fDgSExORmJiI6tWrS2l/9913WLZsGa5duwYtLS14eXlJYRcuXMDAgQMxYcIEREZGwt/fHwEBAVKHbmhoqJTnxMREaTq3devWYezYsRgxYgQiIiJw6NAhmJubS+EaGhpYtWoVbt++ja1bt+L333/HtGnTPqR4Jd9++y1SUlJw+vRpAO8eBOjVqxd69+6NiIgI+Pj44Pvvv1d6+2DgwIHYtWsXVq1ahaioKPj7+8PAwKBI642Li8Px48dx4sQJ7Nq1C5s2bUKHDh3w4MEDnDt3DosXL8asWbOkNycA4Ouvv8aTJ09w/PhxXL9+HQ0aNECbNm3w7NkzpXQPHDiAI0eO4MiRIzh37hwWLVoEoOB9bWZmBh8fn/csRSIiIiIieh+bNm3CwIEDoampifbt2yM+Ph5RUVFFSqNNmzaYMGECfvzxR4SHh+d5UBd490DzzZs34ezsDLlcjm+++QbPnj3D69evAUB6Kzv7/7i4OGlZc3NzODs75wkTQsDPzw8ODg6ws7PD0aNHoVAoCl3u9OnT8PDwkDrDtbW1UaZMGZXblt15b2pqitq1ayM+Ph4vXryAQqHAwIEDAQCWlpZo1qyZyuVPnDiBu3fvokWLFpDL5ejZsyc0NDRw//59BAUFoVevXjAyMoJMJsPIkSMLL2y8e1vcwsJC6gzX0NCAsbGxtG0uLi6wsbGBr6+vUnkU5MyZMxg+fDg0NDSgr6+PgQMHSm1UAPD09ISWlhZ0dXUhl8uV9s/70vrgFIiIiIiIiIjUdOTIkTydqZmZmUrTw4YNQ5MmTZCYmIjKlSvjyZMnOHbsGM6cOaMUb9y4cejRoweAd53MJ06cwKZNmzBt2jT89NNPqF69OtasWQOZTIb69evj0aNH8Pb2xuzZs1GmTBmUKlUKenp6qFSpUp58LliwQHrjYPr06ejQoQPevHkDHR0dzJ07F9OnT8egQYMAALVr18a8efMwbdo0zJkzB+XLlwfwf29q52f+/PmYPHmy9DQ9ADg5OUn/5/wmtpmZGebPn49Ro0bhp59+yjdNdWXfjMl+O2D58uVo06YNvv/+ewBAvXr1EBkZiSVLlmDw4MG4c+cO9u7di9OnT8PNzU3a7qLKysrC5s2bYWhoCCsrK7Rq1QoxMTE4duwYNDQ0YGFhgcWLF+Ps2bNwdnbGxYsXcfXqVTx58gSlS5cGACxduhQHDhzAr7/+ihEjRkjpBgQESA9ODBgwAEFBQViwYEGB+7pOnTowNTUtegESEREREdF7SU9PR2BgILS1tbFz504AwKtXr7Bp0yYsXbpU7XSWL1+O27dv4+zZsxg0aBD69euX50FiIQQGDRqEH374QWUaOjo60v+amprSKGAFhe3cuRO///47zp07ByMjI6xatQq///67WmmqS900co7QlZMQAl999ZVUvgXJmYaWlpZS+zznt8Hzc//+fYwbNw6hoaGoU6cOwsPD0aJFi0KXKywvQPGUZW58Y5yIiIiIiIg+mVatWkGhUCj9bdy4USlOo0aNYG1tja1btwIAtm/fjpo1a+ZpXLu4uEj/a2lpwdHRUXrLICoqCi4uLkoN66ZNmyI1NRUPHjwoNJ92dnbS/5UrVwYAPHnyBABw8+ZN+Pr6wsDAQPrLfhv51atXapXDkydP8OjRI7Rp0ybfOGfOnEGbNm1Q
tWpVGBoaYsCAAXj69Kna6yiIEALA/914iIqKQtOmTZXiNG3aFLGxscjMzIRCoYCmpqbK4QmLwszMTOmt/4oVK8LKygoaGhpK83KWdWpqKkxMTJTKOz4+XultgdzpZj9QUZigoCCl4euJiIiIiOjjOnToEGrXro2HDx8iISEBCQkJuHz5MgIDA5Genq52OtHR0bC2tsa4ceMwevRoXL58OU+czp07Y/v27bh//z6Adw/U5vzm9/tISkqCqakpjIyMkJKSku83vnNzd3fHyZMnER0dDeDdAwLJyclqr9fIyAj29vbYvn07ACAmJgYXL17Md11nzpxBeHi4NO/q1asAADc3N/zyyy9ISUmBEAIbNmyQ4lSqVAlCCERGRgIAtm3bJoU1adIEsbGxuHDhAoB3Zfns2TMkJydDW1sblStXhhACa9asUcqLoaFhvtvp5uaGTZs2QQiBly9fIjAwEG3btlW7TN4H3xgnIiIiIiKiT0ZfX19puHAAKjuqhw0bhrVr12L69OnYsmULhgwZku/T8B+Dtra29H/2erOysgAAqampmDt3Lrp3755nuZxPtBdEV1e3wPCEhAR07NgRo0ePxoIFC2BsbIyLFy9i6NChSEtLg56enrqbolL2AwTZwwZ+aH41NDSkzvZsqm5q5SxX4F3ZqpqXs6wrV66M4ODgPGnlHCqxoDSIiIiIiOjzsWnTJvTr109pnqWlJapWrYrDhw9Lw3MXZubMmYiJiZFGh1q3bl2eOM2bN8ePP/6Ibt26ISMjA2lpaejQoQMcHR3fO/8DBw7EwYMHYWFhgfLly6N58+a4d+9eocuZm5tjy5Yt6N+/P9LT06GpqYn169ejUaNGaq9727Zt8PLywpIlS2Bubg4nJyeVQ8ibm5tj586dGDlyJF69eoW0tDQ4ODhg586daN++Pa5evYoGDRrAyMgI7dq1k5bT0tLC6tWr0bFjR5iYmKBnz55SWLly5fDbb79h8uTJSElJgYaGBubNm4dOnTqhd+/esLa2homJCbp27aqUl8mTJ+Orr76Cnp4eTp06pRT2/fffY/z48bC1tQXw7jNavXr1Urs83gc7xomIiIiIiOiz079/f0ybNg2rVq1CZGSkNGx5TpcvX5beIs/IyMD169elt38tLS2xb98+CCGkju2QkBAYGhqiWrVqAIBSpUrlGcZdHQ0aNEBMTEyeDv6ctLW1C0zb0NAQZmZmCAoKQqtWrfKEX79+HVlZWVi2bJn0NvXevXuLnNf8rFixAkZGRtKw6JaWlggJCVGKExISgnr16kFTUxO2trbIysrCuXPnpGVyKl++PFJSUvDy5Uvo6+sDgNrflStIgwYN8PjxY2hpacHMzOy903nffU1ERERERMXr2LFjKuffuHFD+j+/tkTOt7P379+v1vp69+6N3r1755k/ePBgDB48WJru2LEjOnbsCABwdXVVyoONjY30GaoyZcrk+cxXtoKWA4D27dujffv2eZbL+ZBxzvgAlN5wr1GjBi5dugSZTIb4+Hi4uLigYcOGKvPi5uamsu0GAD4+PvDx8ZGm58+fL/0/ZMgQDBkyRJr29vaW/m/cuHGediMArFy5EitXrpSmZ82aJf0/Z84czJkzR5rOuQ8NDAywefNmlXnM/XD0r7/+qjJeUXEodSIiIiIiIvrslCtXDt27d8fUqVPRtm1bqTM7p7Vr1+K3335DdHQ0xo4di6SkJHh5eQEAxowZg7/++gvffPMNoqOjcfDgQcyZMweTJk2SOprNzMxw5coVJCQk4H//+5/abxjPnj0b27Ztw9y5c3H79m1ERUVh9+7dSo3/7E7vx48fIykpSWU6Pj4+WLZsGVatWoXY2FjcuHEDq1evBvDuCf/09HSsXr0af/75JwIDA7F+/foilWG258+f4/Hjx7h37x5Onz6Nnj17YufOnVi3bp30dsHkyZMRFBSEefPm4c6dO9i6dSvWrFmDKVOmSNszaNAgeHl54cCBA4iPj0dwcLDUWe/s
7Aw9PT3MnDkTcXFx2Llzp9pDChbEzc0NLi4u6Nq1K06dOoWEhAT88ccf+O6774o0BGJ++7pNmzZ5hvojIiIiIiL6HP3xxx+Qy+Wwt7dHp06d4Ofnh+rVq5d0tv5V2DFOREREREREn6XsYcOzO7tzW7RoERYtWgR7e3tcvHgRhw4dgqmpKQCgatWqOHbsGK5evQp7e3uMGjUKQ4cOVeq8njJlCjQ1NWFlZYXy5ctL350rjLu7O44cOYJTp07ByckJjRs3hp+fH2rWrCnFWbZsGU6fPo3q1avDwcFBZTqDBg3CihUr8NNPP8Ha2hodO3ZEbGwsAMDe3h7Lly/H4sWLYWNjgx07dmDhwoVq5S+3IUOGoHLlyqhfvz5Gjx4NAwMDXL16FX379pXiNGjQAHv37sXu3bthY2OD2bNnw9fXV+kNinXr1qFnz54YM2YM6tevj+HDh+Ply5cAAGNjY2zfvh3Hjh2Dra0tdu3apfQGwvuSyWQ4duwYWrRogSFDhqBevXro3bs37t27h4oVK6qdTn77Oi4uDv/73/8+OJ9EREREREQfW9u2bXHz5k3cvHkTt27dQp8+fUo6S/86MpH7I2BEREREREREn4HAwEB8++23ePToEUqVKiXNT0hIQK1atRAWFga5XF5yGSQiIiIiIiL4+Phg+vTp0NHRAfBumHK5XI6JEyeWSH4SEhIgl8vx/PnzEln/x+bj44Pnz59jxYoVOHToEM6ePQs/P79iSXvNmjW4du2ayhHAzMzMcODAgU/WDl+/fj1SUlIwderUYkuT3xgnIiIiIiKiz8qrV6+QmJiIRYsWYeTIkUqd4kRERERERPR5mTt3LiZOnCh1jKsr+xNH2Z+7+rfKyMiAllbJdLl27twZnTt3LpF1f6jCym3UqFHFvs5/95FGREREREREX5wff/wR9evXR6VKlTBjxoySzg4RERERERHlI7vzsnnz5pDL5Xjy5AkAICoqCm3atEG9evXQvXt3pKWlAXj3tnOPHj3g7u4OGxsbJCYmIjAwEHZ2drCzs0OHDh3w8OFDAEBAQAC6du0qrevIkSNwdXWVpufMmQNzc3M4OTlh1qxZMDMzU8rbnDlz0LBhQ5ibm+PYsWMq85+dn9atW6N+/fro1KkTnj59CgBIT0/H9OnT0ahRI8jlcvTq1QtJSUkA3r0V7+XlhRYtWsDGxgavX7+Gp6cnrKysYG9vj7Zt20rrWLJkCaytrWFra4t+/fohOTlZWrenpyc6deoEKysrtG7dGs+ePQMAREREoFmzZmjQoAGsrKwwf/58lfnPWUa+vr6Qy+WQy+WwsbGBTCbDvXv3AABLly5Fo0aN0KBBA3h4eEjzU1JS4OnpCQsLCzRr1gwRERH57Glljx8/Rq9evdCoUSPY2trm+WyZk5MT5HI5WrRogZiYGClMJpNhzpw5cHJywowZMzB48GCMHDky32Mle9SBgIAAuLm5oU+fPrC1tYWjoyP+/PNPKd3CjoVs7BgnIiIiIiKiz4qPjw/S09MRFBQEAwODPOFmZmYQQnAYdSIiIiIiohK2fv16AMCFCxegUChQoUIFAIBCocDhw4cRFRWFv//+G/v27ZOWuXTpErZt24bIyEgkJSVh6tSpOH78OMLDw9GkSRMMGzas0PUePXoU+/btQ1hYGK5evSp1pmdLTk6GnZ0drl+/jjVr1uDbb7/NN60LFy5g586diI6ORvXq1aUHtJcsWQJ9fX1cvXoVCoUiTwfw9evXcfToUURHR+PEiRN4/vw5IiMjcfPmTezevRsAcPz4cWzevBkhISGIiIiAvr4+pk+fLqVx5coVBAQEIDIyEhUqVIC/vz+Ad+3eoKAg3LhxA9evX8e+fftw+fLlAstk9uzZUCgUUCgUaNKkCby8vFCzZk3s3LkTMTExuHTpEm7cuIF+/fphzJgxAN51ppcuXRrR0dE4evQozp8/X2jZA8CgQYMwduxYXL16FWFhYbh27Rp++eUXAIC3tzdCQ0Oh
UCgwZswYTJgwQWlZTU1NhIaGYsmSJQAKPlZyCg0NxQ8//ICIiAi4ublh8eLFAAo/FnLiUOpEREREREREREREREREVGy6desGPT09AECjRo0QFxcnhbVv3x4VK1YEAJw9exYeHh6oWrUqAGDMmDHw9fVFZmZmgekHBQXh66+/hqGhIQBg6NChOHv2rBSuo6OD7t27AwBcXFyU1p9bhw4dUKlSJQDAiBEjpOUOHDiA5ORkqaM2LS1N6U3knOu3t7dHVFQUxowZg5YtW6J9+/YAgDNnzsDT0xNly5YFAIwePRpff/21lIaHhwdMTEykfGa/sf369WuMGTMGCoUCGhoa+Ouvv6BQKNC4ceMCywUA5s+fj/v37+PIkSPSdoSGhqJhw4YAoFS2QUFB8PPzg0wmQ5kyZdC3b98CywoAXr58iaCgIPz999/SvNTUVOnN8NOnT2P16tVISUlBVlaW9BZ8Ni8vL6Xpgo6VnFxcXFCrVi3p/9WrV0vbUNCxkBM7xomIiIiIiIiIiIiIiIio2OT83rimpiYyMjKkaVUjg2WTyWTS/1paWkqduG/evFFrOQAoXbq0NE9TU7PQjnZVaQkhsHr1aqVh0XPKuR21a9dGZGQkfv/9d5w5cwbTpk2DQqEoNJ/5ldPMmTNhamqKsLAwaGlpoXv37gVuf7Zt27Zh//79OH/+vPT9biEEZsyYgREjRhS6fO78qSKEAABcvnw5z3fl79+/j3HjxiE0NBR16tRBeHg4WrRooRQn9/4v6Fh5n3gFbQOHUiciIiIiIiIiIiIiIiKi92JoaCh9N7uoWrVqhRMnTuDRo0cA3g3N3qZNG2hqasLc3Bzh4eF4/fo1MjIysHPnTmm51q1bY9++fUhNTYUQAps3b37v/B87dkx6+3njxo1wc3MDAHTt2hV+fn549eoVAODVq1e4ffu2yjQePHgAmUyGzp07Y+nSpRBC4K+//oKbmxv27t2LFy9eAAD8/f3z7WjPKSkpCdWqVYOWlhZiYmJw+vTpQpc5c+YM5s2bh6NHjyp1Pnft2hXr16+X3txOT09HWFgYAMDNzQ1btmyBEAIvXrzArl27Cl2PgYEBWrVqhUWLFknzHj16hAcPHiA5ORna2tqoXLkyhBBYs2ZNoel9qKIcC3xjnIiIiIiIiIiIiIiIiIjey+TJk/HVV19BT08Pp06dKtKyNjY2WLJkCTw8PAAA1atXx88//wwAaNy4Mdq3bw8bGxtUrlwZTZs2xZUrVwAAHTt2xJUrVyCXy1G2bFm0bNlSGq68qJo3b46+ffvi4cOHqFu3LgICAgC8+1b227dv4ezsLL2F7O3tDWtr6zxpREREYMaMGRBCICMjAwMGDICdnR3s7Oxw69YtuLi4QENDA3Z2dvjpp58KzdOsWbMwYMAAbN26FXXq1EHr1q0LXWbBggV49eoV2rVrJ807duwY+vXrh6dPn6JVq1YAgIyMDHh5ecHBwQHff/89hg0bhvr166N8+fJo1qwZ3r59W+i6duzYgUmTJsHGxgYymQz6+vrw9/eHvb09evfuDWtra5iYmKBr166FpvWhinIsyET2++5ERERERERERERERERERP8CKSkpMDQ0hBACkydPxuvXr7Fu3boipeHj44Pnz59jxYoVHyeT9EmoeyzwjXEiIiIiIiIiIiIiIiIi+lcZOHAgEhIS8ObNG1hbW2P9+vUlnSUqIeoeC3xjnIiIiIiIiIiIiIiIiIiIvmgaJZ0BIiIiIiIiIiIiIiIiIiKij4kd40RERERERERERERERERU7GQyGZ4/f15oPB8fH7x580aafvbsGZo2bQq5XI4FCxZ8xBx+OitWrMDjx48/6Tpzl2tR9OzZEwEBAcWboUKsWbMGgwcPBgAcOnQI3377bbGmz45xIiIiIiIiIiIiIiIiIioxc+fOVerAPX36NAwMDKBQKPDdd9+pnU5GRsbHyF6xKImO8dzl+ql9yP7o3Lkz/Pz8ijE37BgnIiIiIiIiIiIi
IiIiovckk8kwa9YsODg4oF69etixY4fKeFOmTIGTkxPkcjlatGiBmJgYAMCoUaMAAM2bN4dcLsfevXsxdepUXL58GXK5HGfOnMGTJ0/QvXt32NrawsbGBv7+/lK6ZmZm8Pb2RqNGjTBo0CD4+PigV69e6NSpE+rVq4eOHTvi1q1bcHd3R7169dCnTx9kZWUBADZu3AgrKyvI5XLY2triypUrhW5vftuRXRY535A3NTVFQkICfH198ejRI3h6ekIul0OhUCA1NRVeXl6wsbGBjY0N5s6dKy3n6uqKyZMno0WLFqhRowa+//57HDt2DM2aNYOZmRmWL19e5HJ98uQJUlJSMHz4cDRq1Ah2dnYYMWIE0tLSAADR0dFo0qQJrK2t0bVrV7x48SLfMvD390e9evXQoEEDzJs3DzKZTKkM5syZAycnJ8yYMQMRERFo1qwZGjRoACsrK8yfP1+Km5KSAk9PT1hYWKBZs2aIiIiQwgICAtC1a1cAQHBwMGxsbDBmzBjY29vD2toa165dUys/SgQRERERERERERERERER0XsAIGbNmiWEECIuLk6UK1dOxMfHS2FJSUlCCCGePHkiLbNr1y7h7u6ulEZ2PCGE2LJli+jSpYs03atXLzF9+nQhhBB///23qFatmrh06ZIQQoiaNWuKoUOHiqysLCGEEHPmzBG1atUSz549E1lZWaJFixbC2dlZvHjxQqSnpwt7e3tx5MgRIYQQRkZG4tGjR0IIIdLS0kRKSkqh21uU7TAxMZHKombNmiIsLEwKmzZtmujbt6/IzMwUqampQi6Xi927dwshhGjZsqXo0aOHyMjIEM+ePRNGRkZi7NixIisrSzx48EDo6+u/V7kOHz5cbN26VQghRFZWlhg6dKj48ccfhRBCODo6io0bNwohhAgPDxelSpUSW7ZsybP9ERERolKlSiIxMVEIIcTs2bNFzi5nAGLu3LnS9IsXL8SbN2+EEEK8evVKyOVyad9NmTJFDBgwQGRlZYnnz5+L+vXri0GDBgkhlI+Bs2fPCk1NTXH58mUhhBDr1q0Tbdu2VSs/OfGNcSIiIiIiIiIiIiIiIiJ6b8OGDQMA1K5dGy1atMD58+fzxDl9+jRcXFxgY2MDX19fKBQKtdM/c+YMRo4cCQCoUKECunfvjjNnzkjhgwcPVnpLuG3btihXrhxkMhkaNGgAV1dXGBoaQktLCw4ODoiNjQUAtGnTBgMGDMDKlSsRHx8PAwODQvPyIduRe5uGDx8ODQ0N6OvrY+DAgTh9+rQU3rNnT2hqaqJcuXKoXbs2OnbsCJlMhqpVq6J8+fJISEgocn4OHDiAJUuWQC6Xw8HBARcuXMDdu3fx4sULKBQK6fvetra2aNasmco0fv/9d3h4eKBSpUoAgOHDh+eJ4+XlJf3/+vVrDBs2DLa2tmjcuDHu3bsn5TEoKAhDhw6FTCZDmTJl0Ldv33zzbm5uDmdnZwCAi4sL4uLi1M5PNq18Q4iIiIiIiIiIiIiIiIiIiij3UNb379/HuHHjEBoaijp16iA8PBwtWrQotvRzd2jr6OhI/2tqauaZzv729b59+3D9+nUEBwejffv2mD9/Pnr37p3vegvbDk1NTWRmZkrTRfm+d+5tUmcbilquQgjs27cP9erVU5qvatj0fIcjVyNezv0xc+ZMmJqaIiwsDFpaWujevXu+5VLQOvPbh0VJg2+MExEREREREREREREREdF727JlCwAgISEBFy5cQPPmzZXCk5OToa2tjcqVK0MIgTVr1iiFGxoaIjk5Od/03dzc8PPPPwMA/vnnH+zfvx9fffXVB+U5IyMDcXFxcHR0xJQpU9CzZ09cvXoVADBjxow8eVRnO8zNzaXvlO/fvx8vX76UwoyMjJS20c3NDZs2bYIQAi9fvkRgYCDatm1bpG0oarl27doVixcvljqVk5KScPfuXRgZGcHBwQHbtm0DANy+fRsXL15Uuc5WrVrh5MmTePLkCQBg06ZNBeYx
KSkJ1apVg5aWFmJiYpTeindzc8OWLVsghMCLFy+wa9euIm1/UfPDjnEiIiIiIiIiIiIiIiIiem+ZmZlwcHBA27ZtsWrVKpiZmSmF29raonfv3rC2toaTkxNq1KihFD558mR89dVXkMvlUgdnTqtWrUJUVBRsbW3RqlUrfPfdd9Kw2h+SZy8vL9jY2EAul+P69euYNGkSAODmzZvS0NxF2Q4/Pz9MmDABDRo0QFhYGExMTKSw8ePHY/jw4ZDL5VAoFPj++++hra0NW1tbODs7o3PnzujVq1eRtqGo5ern5wddXV3I5XLY2dmhTZs20pDs27Ztw4YNG2BjY4NZs2bl++a5ra0tZs2ahaZNm6JBgwZ48+YNypQpk28eZ82ahS1btsDOzg7Tp09H69atpbDvv/8er1+/Rv369dG+fft8h28vrAzUzY/s/38EnUrI7du3sXDhQpw9exb/+9//YGJigtatW2PmzJmwsrIq6ewRERERERERERERERER5UsmkyEpKQlly5Yt6awUi8zMTDRu3BhXrlyBhgbfMVYlJSUFhoaGAICVK1fixIkTOH78+GefH3aMl6D9+/ejT58+MDY2xtChQ1GrVi0kJCRg06ZNePbsGfbs2YMuXbqUdDaJiIiIiIiIiIiIiIiIVPrSOsapcGPHjkVISAjS09NRpUoV+Pv7o3bt2p99fviYQwmJi4vDgAEDULt2bYSHh2P+/PkYOnQo5s2bh/DwcNSqVQv9+/dHfHz8J83Xq1evPun6iIiIiIjeh4+PD+RyeYFxXF1dMXHixGJb5+DBg9G1a9diS+9jCQ4Ohkwmw/Pnz/ONExAQUKI3LBISEiCTyaBQKEosD+ocQyUh93H76tUr9OjRA0ZGRtJ+NTMzw4oVK9RK7332dVHS/9hy50Umk+HAgQMAPo/j6Ev1bznfEREREdHnQQjBTvH/mLVr10KhUOD27ds4ffp0iXaKFyU/7BgvIUuWLMGrV6+wYcMGlC9fXinM1NQU/v7+SE1NxZIlSwC8a5Tm/h4D8O5mjkwmyzN/+/btaNiwIXR1dWFsbIzevXvjr7/+Uorj6uoKGxsbXL9+HS1atICenh5mzpyJQYMGwdTUFOnp6XnSbdu2LSwsLAAA9+/fR3R09PsWARERERF9gVxdXYsUNnjwYMhksjx/Hh4eUpycHWFFsX//fsybN6/Iy30IIQQ2bNgAZ2dnGBgYoGzZsnB0dMSKFSs+2UOoTZo0QWJiYoHf9/qUVHWwVa9eHYmJibCxsSmZTAGYMmUKgoKCiiWt169fw9jYGKampnj79q1ay+T3AEPu43br1q24cOEC/vjjD2m/hoaGYsSIEWqtx9PTE3fu3FF7W9TxPg8VfIzO9s/hOCou2W17mUwGLS0tmJqaokWLFlixYoXaxxTlr6CO/uDgYPj4+BQ5jIiIiIiI/n3YMV5CDh8+DDMzMzRv3lxleIsWLWBmZobDhw8XOe0FCxZg4MCBqFu3LpYvX46JEyciKCgILVq0yHPT5enTp2jXrh3kcjlWrFiBVq1aYcCAAXj69ClOnjypFPfx48f4/fff0b9/fwDAwIEDYWlpWeT8EREREdGXJSQkBGfOnFGad+bMGfzxxx8FhmXz8PBAYmKi0t+uXbs+OF/GxsbS96U+lQEDBmDixIno0qULzp49C4VCge+//x4HDx7EqVOnPkkeSpUqhUqVKql8gLY4qXqQVl2ampqoVKkStLS0ijFHRWNgYAATE5NiSWvfvn2wtrZG/fr11XqIo6Cyy33cxsXFwdLSEjY2NtJ+LV++PPT09NTKm66uLipUqKBW3H+bz+E4Kk7W1tZITEzE/fv3cfbsWXz99ddYuHAhmjRpgpSUlJLO3hdn/fr1ePLkiTSdlpaGZcuWIT09vcAwIiIiIqIPtWLFCjx+/Liks/GfxI7xEpCcnIxHjx7B3t6+wHh2dnZ48OBBkRrA9+7dw5w5czB//nzs3r0bo0ePxuzZs3H27Fk8ePAAP/30
k1L8x48fY968eVi1ahVGjBiBLl26oHXr1qhWrRq2b9+uFHfXrl3IysqSOsaJiIiIiACgRo0a8Pf3x5gxY5CSkoIxY8Zgw4YNqF69eoFh2UqXLo1KlSop/ZUrVw4ApFGTunXrBplMlmcUpcDAQJiZmaFMmTLo3bu3Ut0595DUb9++hbe3N6pXr47SpUvD3NwcmzZtAgBkZmZi6NChqFWrFnR1dWFhYYGVK1cWqRz27t2LHTt2YNeuXZg5cyacnJxgZmaGLl264Pfff0erVq0AAFlZWfD19UW1atVQunRpyOVynDhxQkone3jo/fv3o1WrVtDT04O9vT0uXbokxbl37x46deqEcuXKQV9fH9bW1jh27BgA1W8iBwQEoEaNGtDT00O3bt3w9OnTPPk/ePAgGjRoAB0dHdSuXRtz585FRkaGFC6TybBu3Tp07twZ+vr6WLBgQaHl5uPjg61bt+LgwYPS27DBwcEqh8A+d+4cGjVqhNKlS6Ny5cqYPn260vpdXV0xfvx4TJs2DcbGxqhUqZLSm5xCCPj4+KBGjRooXbo0qlSpgvHjx+e7v3K/9Zz9RunSpUtRuXJlmJiYYOzYsWp1hG3atAn9+/dH//79pWMqp9xlN3z4cOl4KFeuHGQyGQYPHixtZ/Zx6+rqimXLluH8+fOQyWTSqAu5375+/vw5Ro4ciYoVK0JHRwc2NjY4cuQIgLxDqcfFxaFLly6oWLEiDAwM4OTklOfhlaIqrOxcXV1x7949fPvtt9JxkO3ixYto3rw5dHV1Ub16dYwfPx4vX75Ua72qjqNDhw6hbt260NHRQatWrbB169Y8v4fC1mlmZoYffvgBXl5eMDQ0RI0aNbBhwwaldT948AB9+vSBsbEx9PX14ejoiCtXrkjhhf2eVNHS0kKlSpVQpUoV2Nra4ptvvsG5c+dw69YtLF68WIr39u1bTJkyBVWrVoW+vj6cnZ0RHBwMAHjx4gV0dXVx/PhxpbR/++03GBoaSiNX/PXXX+jVqxfKli0LY2NjdOnSBQkJCfnm7e3btxg/fjwqVKgAHR0dNGvWDKGhoVJ49nnn6NGjsLOzg46ODho3boxbt25JcbKPxSNHjsDCwgJ6enro2bMnXr16ha1bt8LMzAzlypXD+PHjkZmZqdb25kz35MmTsLS0hIGBgfTAFZD/eah69ero3LkzfvvtN9y+fRutW7cG8O73WlAYEREREdGHYsd4yWHHeAnIvllX2Nsr2eFF6Rjfv38/srKy0KtXL/zvf/+T/ipVqoS6devi7NmzSvFLly6NIUOGKM3T0NBAv379cOjQIaV179ixA02aNEGtWrUAvGv4CiHUzhsRERERfZmqV6+OX375BWXKlMGNGzdQtmxZ7N27F9WrVy8wTB3ZHS9btmxBYmKiUkdMXFwcDhw4gCNHjuDIkSM4d+4cFi1alG9aAwcOxK5du7Bq1SpERUXB398fBgYGAN51VlerVg2//PILIiMjMXv2bMycORN79+5Vuxx27NgBCwsLdOnSJU+YTCaThjZfuXIlli1bhqVLlyI8PBzu7u7o3LkzYmNjlZb57rvvMGXKFCgUCtSrVw99+vSROtbGjh2Lt2/f4vz584iIiMDixYulbcntypUrGDp0KMaNGweFQoFWrVph/vz5SnEuXLiAgQMHYsKECYiMjIS/vz8CAgKwYMECpXg+Pj7o1q0bIiIi4OXlVWi5TZkyBb169VIaFaBJkyZ58vjw4UO0b98eTk5OuHnzJtatW4dNmzblyefWrVuhr6+PK1eu4Mcff4Svry9Onz4N4N1b235+fvD390dsbCwOHDgAW1vbfPeXKmfPnkVcXBzOnj2LrVu3IiAgAAEBAQUuExcXh0uXLqFXr17o1asXLly4gHv37uWJl7Ps5s6di3379gEAYmJikJiYqPJBjP3792P48OFwcXFBYmIi9u/fnydOVlYW2rVrh5CQEGzfvh2RkZFYtGgRNDU1VeY3NTUV7du3R1BQEMLC
wuDh4YFOnTrh/v37apRQ/goqu/3796NatWrw9fWVjgPgXdl5eHigR48eCA8Px549e3Dx4kWMGzfuvfIQHx+Pnj17omvXrrh58yZGjhyJ7777TimOuutctmwZHB0dERYWhjFjxmD06NGIiYkB8K4MW7ZsiYcPH+LQoUO4efMmpk2bhqysLADq/57UUb9+fbRr105p348bNw6XLl3C7t27ER4ejq+//hoeHh6IjY2FkZEROnbsiJ07dyqls2PHDnTt2hV6enpIT0+Hu7s7DA0NceHCBYSEhEidyWlpaSrzMW3aNOzbtw9bt27FjRs3YG5uDnd3dzx79kwp3tSpU7Fs2TKEhoaifPny6NSpk9LDJa9evcKqVauwe/dunDhxAsHBwejWrRuOHTuGY8eOITAwEP7+/vj111/V2t6c6S5duhSBgYE4f/487t+/jylTpgDI/zzUoUMHnDx5EqdOncKxY8ewevVqTJ48GVpaWgWGEREREdF/0+vXr+Hp6QkrKyvY29ujbdu2AJCn/n3q1Ck4OzsDADZu3AgrKyvI5XLY2triypUr8PX1xaNHj+Dp6Qm5XA6FQoH09HRMnz4djRo1glwuR69evZCUlATg3YPII0aMgJubG2rVqgUvLy9cvXoVrq6uqF27NiZNmiSte/78+bC0tIRcLodcLlfZNv3PE/TJPX/+XAAQXbp0KTBe586dhUwmE2/fvhWDBg0SNWvWzBNnzpw5IuduHD16tACQ75+dnZ0Ut2XLlqJ27doq13379m0BQGzdulUIIUR0dLQAINavX1/0DSYiIiKiL9qDBw+Ep6enGDVqlGjQoIEYNWqU8PT0FA8ePCgwTAghBg0aJDQ1NYW+vr7S34IFC6T0AYjffvtNaZ1z5swRenp64sWLF9K8qVOnCmdnZ2m6ZcuWYsKECUIIIWJiYgQAcfr0abW3a+zYsaJHjx7S9KBBgwqsw1taWorOnTsXmm6VKlWUtk8IIZycnMSYMWOEEELEx8cLAGLjxo1SeHb9PCoqSgghhK2trfDx8VGZ/tmzZwUAkZSUJIQQok+fPqJ9+/ZKcTw9PUWZMmWk6TZt2ogffvhBKU5gYKCoXLmyNA1ATJw4sdDtU6fcsrcxLCxMCCHEzJkzhYWFhcjKypLirF27VhgYGIjMzEwhxLv92axZM6V0nJychLe3txBCiGXLlol69eqJtLS0QvMoxLtjyN7eXimfNWvWFBkZGdK8r7/+Wnh6ehaYzsyZM0XXrl2l6S5duog5c+YoxVFVdrn3U7acx60QQkyYMEG0bNlSKU7NmjWFn5+fEEKIkydPCg0NDRETE6Myf1u2bFHa16pYW1uL1atXq0xflfcpO1VpDh06VIwYMUJp3oULF4SGhoZ4/fq1yuVyng9yH0fe3t7CxsZGKb3vvvtOqZzVXWf//v2l8KysLFGhQgWxbt06IYQQ/v7+wtDQUDx9+lRl+ajze8otd5nm5O3tLXR1dYUQQty7d09oamqKhw8f5lnnjBkzhBBC/Pbbb8LAwEC8fPlSCCFEcnKy0NHREcePH5fykvv39vbtW6GrqytOnjwphFD+3aampgptbW2xY8cOKX5aWpqoUqWK+PHHH4UQ/3c87969W4rz9OlToaurK/bs2SOEeHcsAhB3796V4owcOVLo6emJlJQUaZ67u7sYOXKk2turKt21a9eKihUrStOqzkPHjx8XjRs3FuPHjxc9e/YUzZo1EytWrBAZGRkFhhERERHRf9P+/ftF27Ztpens9sCpU6eEi4uLNL9z585i27ZtQgghjIyMxKNHj4QQ7+rQ2fXemjVrSu0YIYRYsGCB8PX1laZ9fX2lewSDBg0SjRs3Fq9fvxZv374VderUEV27dhVpaWkiNTVVVKhQQdy6dUs8e/ZMlClTRrx69UoIIcTLly+lNg79Hz7qWgLKlCmDKlWqIDw8vMB44eHhqFatGkqVKpXvcF05hxcD3r0tIJPJcPz4cZVvCOR+i0RXV1dlulZW
VmjYsCG2b9+OgQMHYvv27ShVqhR69epVYJ6JiIiI6L8nISEBw4YNg5ubG1xdXbFu3TqcOXNGGpY3v7CqVasCAFq1aoV169YppWlsbFzoes3MzJRGYapcubLSN2FzUigU0NTURMuWLfNNb+3atdi8eTPu37+P169fIy0tTWmo7cIINUZTevHiBR49eoSmTZsqzW/atClu3rypNM/Ozk76v3LlygCAJ0+eoH79+hg/fjxGjx6NU6dOwc3NDT169FCKn1NUVBS6deumNM/FxUVp+PabN28iJCRE6Y3WzMxMvHnzBq9evZK+Z+3o6Jgn/Q8tt+w8uri4KLV7mjZtitTUVDx48AA1atTIUyaA8j7/+uuvsWLFCtSuXRseHh5o3749OnXqVKQ3PK2trZXaUZUrV0ZERES+8TMzM7F161alt7379++PKVOmYPbs2dDQ+L9B2lSVXXFQKBSoVq0a6tWrp1b81NRU+Pj44OjRo0hMTERGRgZev379wW+MF7XsgHfHXXh4OHbs2CHNE0IgKysL8fHxsLS0LFIeYmJi4OTkpDSvUaNG77XOnMeaTCZDpUqVpGNNoVDAwcEh3/OUur8ndQkhpN9GREQEMjMz8+zvt2/fwsTEBADQvn17aGtr49ChQ+jduzf27dsHIyMjuLm5Sfm7e/dunlHs3rx5g7i4uDzrj4uLQ3p6utJ5S1tbG40aNUJUVJRSXBcXF+l/Y2NjWFhYKMXR09NDnTp1pOmKFSvCzMxM6V5FxYoVpbJWZ3tVpVvQ9SBbfHw8Dh48iMjISAQHB2PHjh1YtWqVdCzkF5bfaAxERERE9GWzt7dHVFQUxowZg5YtW6J9+/YAgK+++goTJ05EWFgYjI2NcfXqVWkUtTZt2mDAgAHo1KkT2rVrl2+77cCBA0hOTpZGFktLS1P6lFyXLl2go6MDALC1tYW7uzu0tbWhra0NKysrxMbGon79+qhbty769++Ptm3bokOHDqhWrdpHLJF/J3aMl5BOnTrB398fFy9eRLNmzfKEX7hwAQkJCdIQCOXKlVP6Jlq23MMg1KlTB0II1KpVS+0bI/kZOHAgJk2ahMTEROzcuRMdOnSQvvVIRERERJQtdycvAKkDRpXcYfr6+jA3Ny/yerW1tZWmZTKZNJRxbvk9EJpt9+7dmDJlCpYtWwYXFxcYGhpiyZIlSt8MLky9evUQHR2tdvzC5Ny+7E6x7O0bNmwY3N3dcfToUZw6dQoLFy7EsmXL8M0337zXulJTUzF37lx07949T1h24xt4t69yKo5yK4qC9nn16tURExODM2fO4PTp0xgzZgyWLFmCc+fO5VnufdJX5eTJk3j48CE8PT2V5mdmZiIoKAhfffWVNC932RWXwo7t3KZMmYLTp09j6dKlMDc3h66uLnr27JnvENrqKmrZAe+Ou5EjR6r8Fnz2wxDFTd11FrQ9hZW5ur8ndUVFRUmfNEtNTYWmpiauX7+ep4M2u3O5VKlS6NmzJ3bu3InevXtj586d8PT0lB4SSU1NRcOGDZUeDshWvnz5IuevKFSVa0Flrc725pduYQ8rjR49GgAQGRkJ4F25ZQ+/XlAYEREREf031a5dG5GRkfj9999x5swZTJs2DQqFAuXKlcP48eOxevVqVKxYEV5eXihdujSAd5/8un79OoKDg9G+fXvMnz8fvXv3zpO2EAKrV6+WhmfPLWc7QlNTM890RkYGNDU1cfnyZfzxxx8IDg5G48aNsWvXLjRv3ryYS+Lfjd8YLyFTpkyBnp4eRo4ciadPnyqFPXv2DKNGjYKRkZH0nbM6deogOTlZ6S3zxMRE/Pbbb0rLdu/eHZqampg7d26eRqAQIs+6CtKnTx/IZDJMmDABf/75J/r3768Ufv/+/WK98UdERERE/37BwcHvFVYQbW3tPCMlFZWtrS2ysrJw7tw5leEhISFo0qQJxowZAwcHB5ibm6t8c7Igffv2xZ07d3Dw4ME8YUIIJCcnw8jICFWqVEFISEie
9VtZWRVpfdWrV8eoUaOwf/9+TJ48GT///LPKeJaWlnk6qi9fvqw03aBBA8TExMDc3DzPX863nnNTp9xKlSpV6P6ztLTEpUuXlNowISEhMDQ0LNIT7rq6uujUqRNWrVqF4OBgXLp0qdC3lj/Epk2b0Lt3bygUCqW/3r17Y9OmTQUuW6pUKQB5RwErKjs7Ozx48AB37txRK35ISAgGDx6Mbt26wdbWFpUqVZJGd/iYVB0HDRo0QGRkpMrjLrt8isLCwgLXrl1TmhcaGlrs67Szs4NCocjzfe2c63if35Mq0dHROHHiBHr06AEAcHBwQGZmJp48eZIn7UqVKknL9evXDydOnMDt27fx+++/o1+/fkr5i42NRYUKFfKkUaZMmTx5qFOnDkqVKqV03kpPT0doaGie81bOc0tSUhLu3LlT5Df/c1J3ewtT0HnI1dUVPj4+RQ4jIiIiov+WBw8eQCaToXPnzli6dCmEEPjrr78AAAMGDMDJkyexZcsWjBo1CgCQkZGBuLg4ODo6YsqUKejZsyeuXr0KADAyMkJycrKUdteuXeHn54dXr14BAF69eoXbt28XKX8pKSn4+++/0bx5c3z//fdo1qwZwsLCALx7ETZ3f+J/FTvGS4i5uTm2bduG2NhY2Nra4vvvv8fmzZsxe/Zs2NraIj4+HoGBgdJT4b1794a+vj66deuGlStXYuHChXB2ds7zVnidOnUwf/587Ny5E82aNcOSJUuwfv16eHt7w8LCAlu2bFE7j+XLl4eHhwd++eUXlC1bFh06dFAKHzhw4Ac1cImIiIiIgHdD4j5+/Fjp73//+58UbmZmhqCgIDx+/BhJSUnvtQ4zMzMMGjQIXl5eOHDgAOLj4xEcHCwNb1a3bl1cu3YNJ0+exJ07d/D999/n6VArTK9eveDp6Yk+ffrghx9+wLVr13Dv3j0cOXIEbm5uOHv2LABg6tSpWLx4Mfbs2YOYmBhMnz4dCoUCEyZMUHtdEydOxMmTJxEfH48bN27g7Nmz+dbNx48fjxMnTmDp0qWIjY3FmjVrlIZRB4DZs2dj27ZtmDt3Lm7fvo2oqCjs3r0bs2bNKjAf6pSbmZkZwsPDERMTg//9739IT0/Pk86YMWPw119/4ZtvvkF0dDQOHjyIOXPmYNKkSWp3JAYEBGDTpk24desW/vzzT2zfvh26urqoWbOmWssX1T///IPDhw9j0KBBsLGxUfobOHAgDhw4kG/HKQDUrFkTMpkMR44cwT///IPU1NT3ykfLli3RokUL9OjRA6dPn0Z8fDyOHz+eZx9nq1u3Lvbv3w+FQoGbN2+ib9++hb7ZXRzMzMxw/vx5PHz4UPp9e3t7448//sC4ceOgUCgQGxuLgwcPSg+IF9XIkSMRHR0Nb29v3LlzB3v37kVAQACA/xt1oTjW2adPH1SqVAldu3ZFSEgI/vzzT+zbtw+XLl0C8P6/p4yMDDx+/BiPHj1CREQEVq9ejZYtW0Iul2Pq1KkA3o1M0a9fPwwcOBD79+9HfHw8rl69ioULF+Lo0aNSWi1atEClSpXQr18/1KpVC87OzlJYv379YGpqii5duuDChQvS+XD8+PF48OBBnnzp6+tj9OjRmDp1Kk6cOIHIyEgMHz4cr169wtChQ5Xi+vr6IigoCLdu3cLgwYNhamqKrl27ql22uam7vYVR5zxERERERFSQiIgING3aFPb29nBwcMCAAQOkTzDp6emhe/fuaNq0KapXrw7g3UPQXl5esLGxgVwux/Xr16VRosePH4/hw4dDLpdDoVDA29sbTk5OcHZ2hp2dHRo3bgyFQlGk/CUnJ6N79+6wtbWFnZ0d0tPTMWjQIADAtWvXpHz913Eo9RLUo0cP3LhxAwsXLsTGjRvx5MkTZGVlQUdHB9evX1d68trExAS//fYbJk2ahGnTpqFWrVpYuHAhYmNjcePGDaV0p0+fjnr16sHPzw9z584F8O5tkrZt26Jz585FyuPAgQNx5MgR9OrVSxr6
gYiIiIioOJ04cUL6hnY2CwsLaXSiZcuWYdKkSfj5559RtWrV9367dd26dZg5cybGjBmDp0+fokaNGpg5cyaAdx1qYWFh8PT0hEwmQ58+fTBmzBgcP35c7fRlMhl27tyJDRs2YPPmzViwYAG0tLRQt25dDBw4EO7u7gDeNYCTk5MxefJkPHnyBFZWVjh06BDq1q2r9royMzMxduxYPHjwAEZGRvDw8ICfn5/KuI0bN8bPP/+MOXPmYPbs2XBzc8OsWbMwb948KY67uzuOHDkCX19fLF68GNra2qhfvz6GDRtWYD7UKbfhw4cjODgYjo6OSE1NxdmzZ5W+lQYAVatWxbFjxzB16lTY29vD2NgYQ4cOLbQjMaeyZcti0aJFmDRpEjIzM2Fra4vDhw8rfYe4OG3btg36+vpo06ZNnrA2bdpAV1cX27dvVzlkN/Bum+fOnYvp06djyJAhGDhwoNSJW1T79u3DlClT0KdPH7x8+RLm5uZYtGiRyrjLly+Hl5cXmjRpAlNTU3h7e+PFixfvtd6i8PX1xciRI1GnTh28ffsWQgjY2dnh3Llz+O6779C8eXMIIVCnTp08Q9Orq1atWvj1118xefJkrFy5Ei4uLvjuu+8wevRoqT1bHOssVaoUTp06hcmTJ6N9+/bIyMiAlZUV1q5dC+D9f0+3b99G5cqVoampiTJlysDKygozZsxQyj8AbNmyBfPnz8fkyZPx8OFDmJqaonHjxujYsaMUJ/v3+OOPP2L27NlK69HT08P58+fh7e2N7t27IyUlBVWrVkWbNm1gZGSkMm+LFi1CVlYWBgwYgJSUFDg6OuLkyZN5Pre2aNEiTJgwAbGxsZDL5Th8+PB7vf2fkzrbWxhV5yFXV9cPyhcRERER/be0a9cO7dq1UxmWmZmJCxcuYPXq1dK80qVL4/z58yrjDxs2LE/7wNfXF76+vnni5m4n/vrrr0rTZ86ckf7PPToc8O6h7qpVq8LR0VFlXv5rZKKwjy7RJ7Vt2zYMHjwY/fv3x7Zt20o6Ozh48CC6du2K8+fP8zsERERERERE9K+yYMECrF+/XhrikD6O4OBgtGrVCklJSShbtmxJZ4eIiIiI6JM5dOgQxo8fj3bt2mHdunUlnR0qBN8Y/8wMHDgQiYmJmD59OqpVq4YffvihRPPz888/o3bt2mjWrFmJ5oOIiIiIiIioMD/99BOcnJxgYmKCkJAQLFmy5L2HZiciIiIiIipM586dizxaM5Ucdox/hry9veHt7V2iedi9ezfCw8Nx9OhRrFy5UvoeGxEREREREdHnKjY2FvPnz8ezZ89Qo0YNTJ48GTNmzCjpbBEREREREdFngEOpk0oymQwGBgbw9PTE+vXroaXFZyiIiIiIiIiIiIiIiIiI6N+JvZ2kEp+XICIiIiIiIiIiIiIiIipe/fr1Q0xMDNLS0lC9enVs2rQJlSpVKuls/SfwjXEiIiIiIiIiIiIiIiIiok/gn3/+Qfny5QEAixYtQkJCAtavX1/Cufpv4BvjRERERERERERERERERESfwM6dOxEYGIg3b97gzZs3MDU1Leks/WdolHQGiIiIiIiIiIiIiIiIiIi+dBcvXsSqVatw7Ngx3Lp1C8uXL8ebN29KOlv/GewYJyIiIiIiIiIiIiIiIiL6yJKSkmBoaAgTExOkpaXB39+/pLP0n8KOcSIiIiIiIiIiIiIiIiKij8zDwwMWFhawsLBA8+bNIZfLpbD27dvj2rVrAIBr166hffv2Utjs2bP5HfJiIBNCiJLOBBERERERERERERERERER0cfCN8aJiIiIiIiIiIiIiIiIiOiLxo5xIiIiIiIiIiIiIiIiIiL6orFjnIiIiIiIiIiIiIiIiIiIvmjsGCciIiIiIiIiIiIiIiIioi8aO8aJiIiIiIiIiIiIiIiIiOiLxo5xIiIiIiIiIiIiIiIiIiL6orFjnIiIiIiIiIiIiIiIiIiIvmjsGCciIiIiIiIiIiIiIiIioi8a
O8aJiIiIiIiIiIiIiIiIiOiLxo5xIiIiIiIiIiIiIiIiIiL6ov3nOsaDg4Mhk8nw/Pnzks6K2gICAlC2bNkC4/j4+EAulxf7ugcPHoyuXbsWe7pUuFevXqFHjx4wMjL6ZMds7v0thMCIESNgbGwMmUwGhUIBV1dXTJw4Ua303uf3Vlj6H+tYV8XMzAwrVqyQpmUyGQ4cOAAASEhIkMrkS5CWlgZzc3P88ccfH5TO9OnT8c033xQaL3fZfgo599/n6FMe2//GayF92mOkMKquUZ/id13c9RJ16ljqyH3t+lTnuM/9vPaxFMd2h4SEwNbWFtra2tIxlXteUc+VRakjfWn+zdv+PufW4r6OFiW9f/s1XJ069Jd2bivsWvOltSuK6nO75/Cp6ltFPW9+buX0X/eprnsldX740s7DxcnHxwcVK1b8LMroc61/fU7t1pyKq+2XW+623+PHj/HVV19BX19fWt/ncLwUJ3Xqo8V9HHys/fdfUhL3Yj9Hn9uxVNj54d/aVvhcrwXZvqiOcZlMVuCfj49PSWfxvXh6euLOnTslnQ1SQ3HeqNq6dSsuXLiAP/74A4mJiShTpsyHZ/D/y++EunLlSgQEBEjTJ06cQEBAAI4cOYLExETY2Nhg//79mDdvnlrradKkSbHnfcqUKQgKCirSMh/jwl+9enWpTL4E69evR61atdCkSZM8YSNHjoSmpiZ++eWXPGG5L3JTpkzB1q1b8eeff37M7P5nfa4N3/z8WytvVDhV16jQ0FCMGDFCivMxGv+5r1Ofq9xl8bEkJiaiXbt2H309X6JJkyZBLpcjPj5eOqZyzytqPaYodSSinIpyrH2M+vXn5r92bvvS2hWkLL97BEW9ZuSuAxV3u6Cgevu/rQ1SnP7tDyPRO8XVLomKisLcuXPh7++v1rXK3d0dmpqaCA0NzRP2JT/s8j737T6Fj3V/PXfbz8/PD4mJiVAoFNL6PnXdpjjvxbzvNeBzPQ7+Cz5mx+/n3NnJa/a/S0nvL60SWetHkpiYKP2/Z88ezJ49GzExMdI8AwMDXLt27ZPkJS0tDaVKlSqWtHR1daGrq1ssadG/R1xcHCwtLYv9BklaWlq+YblvsMXFxaFy5cpKnaXGxsZqr6tUqVKoVKlS0TNZAAMDAxgYGBRrmu9DU1Oz2LetpAghsGbNGvj6+uYJe/XqFXbv3o1p06Zh8+bN+PrrrwtMy9TUFO7u7li3bh2WLFnysbL8yaSnp0NbW7uks0GfmeK8xv9bqbpGlS9f/qOv99/SEfQpygJAiV2HvoTfQFxcHEaNGoVq1aoVOK8oZVyUOhJRTkWpM3+M+vXn5kvfvtw+RbviSzhvF8W/YXuLes34t9SB/i3+DccIfX7i4uIAAF26dIFMJisw7v379/HHH39g3Lhx2Lx5M5ycnD5FFj8qdX83n8t9u9w+1v313G2/uLg4NGzYEHXr1pXm/dfqNsDnexx8bnjfkT6UEAKZmZnQ0vqiuno/ui/qjfFKlSpJf2XKlIFMJlOal/NkfP36dTg6OkJPTw9NmjRR6kAHgIMHD6JBgwbQ0dFB7dq1MXfuXGRkZOS77uwn/RYsWIAqVarAwsICABAYGAhHR0cYGhqiUqVK6Nu3L548eSItl/1kxNGjR2FnZwcdHR00btwYt27dkuKoespn0aJFqFixIgwNDTF06FC8efOmwLLJzMzE0KFDUatWLejq6sLCwgIrV67ME2fSpEkoW7YsTExMMG3aNAghpPANGzagSpUqyMrKUlquS5cu8PLyAvDu4t+lSxdUrFgRBgYGcHJywpkzZ5Tim5mZ4YcffoCXlxcMDQ1Ro0YNbNiwQSnOgwcP0KdPHxgbG0NfXx+Ojo64cuWKFF7Y/pHJZPD390fHjh2hp6cHS0tLXLp0CXfv3oWrqyv09fXRpEkTqVJblHQ3btyIbt26QU9PD3Xr1sWhQ4cAvHsar1WrVgCAcuXKQSaTYfDgwfnuk3379sHa2hqlS5eGmZkZ
li1bJoW5urpi2bJlOH/+PGQyGVxdXVWmoW55z5s3DwMHDoSRkRFGjBiBWrVqAQAcHByU0s/5xOrgwYPxzTff4P79+5DJZDAzM5PylvNJwbdv38Lb2xvVq1dH6dKlYW5ujk2bNgHI++TP06dP0adPH1StWhV6enqwtbXFrl278i0jVXI/mZad56VLl6Jy5cowMTHB2LFjkZ6eLuX33r17+Pbbb6XRI7JdvHgRzZs3h66uLqpXr47x48fj5cuXauVD1dOXhw4dQt26daGjo4NWrVph69ateZ58Kmydn+L3kdv169cRFxeHDh065An75ZdfYGVlhenTp+P8+fP466+/Ci2bTp06Yffu3YXGe/XqVYHb6e3tjXr16kFPTw+1a9fG999/L+1XALh58yZatWoFQ0NDGBkZoWHDhoU+/JT9lK6uri5q166NX3/9VQrL3qd79uxBy5YtoaOjgx07diArKwu+vr6oVq0aSpcuDblcjhMnTkjLqXrCTaFQQCaTISEhQZr3888/o3r16tDT00O3bt2wfPlylU9wBgYGwszMDGXKlEHv3r2RkpIC4N2xfu7cOaxcuVI6lnOmnzuNgq492UJCQvK99gAFn6cA1U/ely1bVnqbJb9zTW7ZZRgUFJTvtVnVE/UTJ05UStPV1RXffPMNJk6ciHLlyqFixYr4+eef8fLlSwwZMgSGhoYwNzfH8ePHldK5desW2rVrBwMDA1SsWBEDBgzA//73P6V0x40bh4kTJ0oPfwDAuXPn0KhRI5QuXRqVK1fG9OnTlX5rqkarkMvl0gg2Qgj4+PigRo0aKF26NKpUqYLx48erLKOc/P39pWOpV69eSE5OVgrfuHEjLC0toaOjg/r16+Onn36SwrKP8/3796NVq1bQ09ODvb09Ll26pJRGQeeq/K5RObc3+5rRrVs3pWvI+/xuc8p9HLi6umL8+PGYNm0ajI2NUalSpTwjBD1//hwjR45ExYoVoaOjAxsbGxw5ckSt9IG8x9nLly8xcOBAGBgYoHLlynl+F7nLAii4/pBNnWtIbqo+8bF3715p3zk5OeHOnTsIDQ2Fo6MjDAwM0K5dO/zzzz95tnnu3LkoX748jIyMMGrUKKUH6d7nN6BOvREo/JoVGxuLFi1aQEdHB1ZWVjh9+nS+5ZEtKysLCxculOq99vb20vk+u5yePn0KLy8vyGQyBAQEqJyn6vweEhICV1dX6OnpoVy5cnB3d0dSUpJUTrnrSFOmTEHVqlWhr68PZ2dnBAcHS+HZdfyTJ0/C0tISBgYG8PDwUHrYFwA2b94snYsrV66McePGAQC8vLzQsWNHpbjp6emoUKGCVBdTx/PnzzFs2DBp/7du3Ro3b96UwrPrXvldn3KWe0G/xeXLl8PW1hb6+vqoXr06xowZg9TU1GIrD3W2BSh6OwoAjh07hnr16kFXVxetWrVSef0trI5XlDrzvXv30KlTJ5QrVw76+vqwtrbGsWPHVMYFCr9eq1O/zC17PSdPnoSDgwN0dXXRunVrPHnyBMePH4elpSWMjIzQt29fvHr1SlruxIkTaNasmdSm7NixY542V06ZmZnw8vJC/fr1cf/+fQCqz22FXbfUrWcVpLC8q5uXgIAA1KhRQ8rL06dPC1xv7naFOvUiADh8+DCcnJygo6MDU1NTdOvWTQpT1QYECj9OC6tDJiUloV+/fihfvjx0dXVRt25dbNmyRQr/66+/0KtXL5QtWxbGxsbo0qWL0u+lsHsOqmSfGw4cOCBdJ93d3ZXaJdnnqY0bN6JWrVrQ0dEB8K5jqkuXLjAwMICRkRF69eqFv//+Wyn9ws4Jqt6U69q1q1JbP7/fd0H3CHKmO3PmTDg7O+fZdnt7e+nh5dxt9dztgvj4eJibm2Pp0qVKaWS3S+7evVtgORdFUlISBg4ciHLlykFPTw/t2rVDbGwsgHf12/Llyyu1s+RyOSpXrixNX7x4EaVLl5bOHQWdu+/cuQOZTIbo6GilPPj5+aFO
nTrS9PvW5wtS2D2ewq576lyTcrt69SocHBygo6MDR0dHhIWF5YlT0LYWV/0rt4iICLRu3Rq6urowMTHBiBEjlK7h6tYnP1ZbraD2QH7tkqJup4+PDzp16gQA0NDQKLRjfMuWLejYsSNGjx6NXbt24fXr1wXGz+197p/99NNP0nmyYsWK6NmzpxT29u1bjB8/HhUqVICOjg6aNWum8k32nPK7lhR2ryb3fbvg4GA0atRIGlq8adOmuHfvnhRe0PFY1DZzQe1NVffX58+fjwoVKsDQ0BDDhg3D9OnTi3TPMbuccraD9+3bh23btimdM3LfOynovl5x3FtX914MUPBvrLD7UAX1r6h6s7igenxhbQV1FHa/dN26dahTpw5KlSoFCwsLBAYGKi2vTrv99u3b6NixI4yMjGBoaIjmzZsr1RfVuR+T+76jOsdZQb/vnIKDgzFkyBAkJyerHEX5Q+7FBgQEYO7cubh586aUdn4j+mVv0w8//ICKFSuibNmy8PX1RUZGBqZOnQpjY2NUq1ZNqR6pzv3V/NpJRe2XAVBg3RIo+rUyNDQUX331FUxNTVGmTBm0bNkSN27cUIqjzv2NwuoC2eV0/PhxNGzYEKVLl8bFixcLvA8CFFyPT0tLw7hx41C5cmXo6OigZs2aWLhwobRscbezC9pf73O9ei/iC7VlyxZRpkyZPPPPnj0rAAhnZ2cRHBwsbt++LZo3by6aNGkixTl//rwwMjISAQEBIi4uTpw6dUqYmZkJHx+ffNc3aNAgYWBgIAYMGCBu3bolbt26JYQQYtOmTeLYsWMiLi5OXLp0Sbi4uIh27drlyY+lpaU4deqUCA8PFx07dhRmZmYiLS1N5bbs2bNHlC5dWmzcuFFER0eL7777ThgaGgp7e/t885eWliZmz54tQkNDxZ9//im2b98u9PT0xJ49e6Q4ixcvFuXKlRP79u0TkZGRYujQocLQ0FB06dJFCCHEs2fPRKlSpcSZM2ekZZ4+fao0T6FQiPXr14uIiAhx584dMWvWLKGjoyPu3bsnLVOzZk1hbGws1q5dK2JjY8XChQuFhoaGiI6OFkIIkZKSImrXri2aN28uLly4IGJjY8WePXvEH3/8ofb+ASCqVq0q9uzZI2JiYkTXrl2FmZmZaN26tThx4oSIjIwUjRs3Fh4eHkXa7wBEtWrVxM6dO0VsbKwYP368MDAwEE+fPhUZGRli3759AoCIiYkRiYmJ4vnz5yr3x7Vr14SGhobw9fUVMTExYsuWLUJXV1ds2bJFKtfhw4cLFxcXkZiYKJ4+faoyHXXL28jISCxdulTcvXtX3L17V1y9elUAEGfOnFFKf9CgQdL+fv78ufD19RXVqlUTiYmJ4smTJ0IIIVq2bCkmTJggpd+rVy9RvXp1sX//fhEXFyfOnDkjdu/eLYT4v+M7KSlJCCHEgwcPxJIlS0RYWJiIi4sTq1atEpqamuLKlStSernTz23OnDlKx/qgQYOEkZGRGDVqlIiKihKHDx8Wenp6YsOGDVJZVqtWTfj6+orExESRmJgohBDi7t27Ql9fX/j5+Yk7d+6IkJAQ4eDgIAYPHqxUdn5+ftI0APHbb78JIYSIj48XAERYWJgQQog///xTaGtriylTpojo6Gixa9cuUbVqVaXtV3edH/v3kdvy5ctF/fr1VYY1b95crFmzRgghRI8ePYSvr2+B+0MIIaKiogQAER8fn+86C9tOIYSYN2+eCAkJEfHx8eLQoUOiYsWKYvHixVK4tbW16N+/v4iKihJ37twRe/fuFQqFIt91AhAmJibi559/FjExMWLWrFlCU1NTREZGCiH+b5+amZmJffv2iT///FM8evRILF++XBgZGYldu3aJ6OhoMW3aNKGtrS3u3LkjhMh7nAshRFhYmFIZXLx4UWhoaIglS5aImJgYsXbtWmFsbKx0bp8zZ44wMDAQ3bt3FxEREeL8+fOiUqVKYubMmUKId79JFxcXMXz4cOlYzsjIULmt
xXHtKew8lV2m2b+JbGXKlJHi5HeuyU2da3PO81O2CRMmiJYtW0rTLVu2FIaGhmLevHnizp07Yt68eUJTU1O0a9dObNiwQdy5c0eMHj1amJiYiJcvXwohhEhKShLly5cXM2bMEFFRUeLGjRviq6++Eq1atVJK18DAQEydOlVER0eL6Oho8eDBA6GnpyfGjBkjoqKixG+//SZMTU3FnDlzpOVyn0OEEMLe3l6K88svvwgjIyNx7Ngxce/ePXHlyhXp3KXKnDlzhL6+vmjdurUICwsT586dE+bm5qJv375SnO3bt4vKlStLx/C+ffuEsbGxCAgIEEL833Fev359ceTIERETEyN69uwpatasKdLT04UQhZ+r8rtG5dzeJ0+eCABiy5YtSteQov5uc8t9HLRs2VIYGRkJHx8fcefOHbF161Yhk8nEqVOnhBBCZGZmisaNGwtra2tx6tQpERcXJw4fPiyOHTsmhMhbx1LnOBs9erSoUaOGOHPmjPTbMTQ0VLp2qbp+5Fd/EEK9a4gqqq5L9evXV6rrNGzYULi6uoqLFy+KGzduCHNzczFq1CilbTYwMBCenp7i1q1b4siRI6J8+fLSuSe7nIv6G1Cn3ljYNSszM1PY2NiINm3aCIVCIc6dOyccHBxUnntymj9/vlQOcXFxYsuWLaJ06dIiODhYZGRkiMTERGFkZCRWrFghEhMTRWpqap55r169ynN+DwsLE6VLlxajR48WCoVC3Lp1S6xevVr8888/UjnlPA6GDRsmmjRpIs6fPy/u3r0rlixZIkqXLi1dP7Zs2SK0tbWFm5ubCA0NFdevXxeWlpZKv+mffvpJ6OjoiBUrVoiYmBhx9epV6dgKCQkRmpqa4tGjR1L8/fv3C319fZGSkpJv+eTm5uYmOnXqJEJDQ8WdO3fE5MmThYmJiXR8FnZ9yt72gn6LQgjh5+cnfv/9dxEfHy+CgoKEhYWFGD16tBT+oeWhzra8Tzvq/v37onTp0mLSpEkiOjpabN++XVSsWLHIdbyi1Jk7dOggvvrqKxEeHi6dt86dO6cyrjrXa3XqXbllr6dx48ZK54+WLVuKtm3bihs3bojz588LExMTsWjRImm5X3/9Vezbt0/ExsaKsLAw0alTJ2FraysyMzOFEMp16Ddv3ohu3boJBwcH6TohRP7ntvyuW+rUs9Shbt4Lysvly5eFhoaGWLx4sYiJiRErV64UZcuWLTAvudsV6tSLjhw5IjQ1NcXs2bNFZGSkUCgU4ocffpDCVbUB1TlOC6tDjh07VsjlchEaGiri4+PF6dOnxaFDh4QQ7+45WFpaCi8vLxEeHi4iIyNF3759hYWFhXj79q0QovB7DqpknxscHR3FH3/8Ia5duyYaNWqkVB7ZdSQPDw9x48YNcfPmTZGZmSnkcrlo1qyZuHbtmrh8+bJo2LCh0jVdnXOCqvZply5dxKBBg6Tp/H7fBd0jyJnurVu3BABx9+5dKc3sebGxsUKIvG11Ve2CBQsWCCsrK6W8jh8/XrRo0SLf8s2W+zjMKXcZdO7cWVhaWorz588LhUIh3N3dhbm5udSO6N69uxg7dqwQ4v/qA2XKlBFRUVFCiHfX6aZNm0rpFXbudnR0FLNmzVLKU8OGDaV571ufL0xh+6+w615h25VbSkqKKF++vOjbt6+4deuWOHz4sKhdu7bSfilsW4uj/iWE8nk4NTVVVK5cWaoHBAUFiVq1ain9BtStT36stlpB+yK/dkluhW1nSkqK2LJliwCgdG9JlaysLFGzZk1x5MgRIcS743Xbtm1KcVS1O3Iq6v2z0NBQoampKXbu3CkSEhLEjRs3xMqVK6W448ePF1WqVBHHjh0Tt2/fFoMGDRLlypXL93gUQvW1RIjC79XkvE+Unp4uypQpI6ZMmSLu3r0rIiMjRUBAgHTfsrDjsaht5oLam7nbftu3bxc6Ojpi8+bNIiYmRsydO1cYGRkV6Z5jdjnlbAd7eHiIXr16KZ0zcv6mCruv
Vxz31tW9F1PYbyy/6406dZXc9wsLq8er01YoqD5VWLnu379faGtri7Vr14qYmBixbNkyoampKX7//XcpjcLa7Q8ePBDGxsaie/fuIjQ0VMTExIjNmzdL5a7u/Zjc9x0LO84K+33n9PbtW7FixQphZGQk7bPstuGH3ot99eqVmDx5srC2tpbSfvXqlcp8DBo0SBgaGoqxY8eK6OhosWnTJgFAuLu7iwULFkjXAW1tbfHXX38JIdS7v5pfO6ko/TLq1C3f5/56UFCQCAwMFFFRUVI9t2LFiuLFixdCCPXub6hTF8guJzs7O3Hq1Clx9+5d8fTp0wLvgwhRcD1+yZIlonr16uL8+fMiISFBXLhwQezcuVPatuJuZxe0v97nevU+/rMd4zkriUePHhUAxOvXr4UQQrRp00apUSmEEIGBgaJy5cr5rm/QoEGiYsWKUoMvP6GhoQKAdELKzk/2TREh3lVadXV1pU7r3Nvi4uIixowZo5Sus7NzgTd0VBk7dqzo0aOHNF25cmXx448/StPp6emiWrVqShW1Ll26CC8vL2na399fVKlSRbpZoIq1tbVYvXq1NF2zZk3Rv39/aTorK0tUqFBBrFu3TkrT0NAw34Ndnf0DQKnhdOnSJQFAbNq0SZq3a9cuoaOj80HppqamCgDi+PHjQgjVJ3BV+vbtK7766iuleVOnTlVqxOa+Ea8uVeXdtWtXpTj5NXpzV8z9/PxEzZo1leLkrHjHxMQIAOL06dMq86JOeXTo0EFMnjxZZfqqqOoYr1mzplIH4ddffy08PT2laVWdU0OHDhUjRoxQmnfhwgWhoaEhnQuK0jHu7e0tbGxslNL77rvvlLZf3XV+7N9HbhMmTBCtW7fOM//OnTtCW1tbuuH/22+/iVq1aomsrCwpjqqO8eTkZAFAuvCqUth2qrJkyRLRsGFDadrQ0FCqXKoDgFJnkBDvzp3ZFe3sfbpixQqlOFWqVBELFixQmufk5CSdh9WpuHl6eooOHToopdGvX788HeN6enpShUmId+cFZ2dnabqw30d+3ufao855qrCO8YJusOWkzrVZ3Y7xZs2aSdMZGRlCX19fDBgwQJqXmJgoAIhLly4JId5V+tu2bauU7l9//SVVzrLTdXBwUIozc+ZMYWFhofR7WLt2rTAwMJCuiYV1jC9btkzUq1dPuolYmDlz5ghNTU3x4MEDad7x48eFhoaGdGOmTp06SpXX7G10cXERQvzfPtm4caMUfvv2bQFAulmpzrlK1TWqoHNmtqL+bnNT1TGec58L8e736e3tLYQQ4uTJk0JDQ0Pal7kVtWM8JSVFlCpVSuzdu1cKz/7tFNYxXlD9QZ1riCqqrks59+2uXbsEABEUFCTNW7hwobCwsFDaZmNjY+kGpBBCrFu3TulYft/fQGH1xsKuWSdPnhRaWlri4cOHUvjx48cL7Bh/8+aN0NPTk26AZBs6dKjo06ePNJ3zXJXfvNzn9z59+ijdyM8t5zn63r17QlNTUynv2ds8Y8YMIYSQbq7m7AhZu3atqFixojRdpUoV8d133+W7TisrK6UbkZ06dVLq5CrMhQsXhJGRkXjz5o3S/Dp16gh/f38hhPrXp4J+i6r88ssvwsTERJr+0PJQZ1vepx01Y8aMPB1N3t7eRarjFbXObGtrm+9Nl9xx1blev0+9S9W1eeHChQKAiIuLk+aNHDlSuLu755vOP//8IwCIiIgIIcT/nasuXLgg2rRpI5o1a5bnplVh57bc1y116lnvI7+8F5SXPn36iPbt2yul4+np+V4d4wXVi1xcXES/fv3yTVNVG1Cd63tuueuQnTp1EkOGDFEZNzAwMM914e3bt0JXV1ecPHlSCKHePYfcss8Nly9fluZlP4ib3UE0Z84coa2trdThderUKaGpqSnu378vzcveX1evXhVCqHdOKKxj/H3bxLnTtbe3V3oIecaMGUrnWVV1oNz5evjwoVLHWVpamjA1NVWr7pV9HOrq6gp9fX2l
Pw0NDWldd+7cEQBESEiItOz//vc/oaurK9WPVq1aJaytrYUQQhw4cEA4OzuLLl26SOccNzc3qcNUnXO3n5+fqFOnjhSWXebZv7v3rc+ro6D9V9B1T53tys3f31+YmJgo/R7XrVundH5QZ1s/tP4lhPJ5eMOGDaJcuXIiNTVVCj969KjQ0NAQjx8/FkKoX5/8WG21wuoghT1Uqe52/vbbbwIo/Fb6qVOnRPny5aWHpvz8/PK0nQrrGFeloPtn+/btE0ZGRkr1tWypqalCW1tb7NixQ5qXlpYmqlSponROzk3VtUSV3Pdqct4nevr0aYH3hwo7HovaZi6ovZm77efs7Cw9xJOtadOmH3zPMffDU0IoH4OF3ddTpaj31tW9F6Pubyz39Uadukru+4WFtWtyU9VWKKg+VVi5NmnSRAwfPlxp3tdff61Ubyus3T5jxgxRq1atfI9Hde/H5L7vWNhxVtDvW5X8yqo47sWqug+sSvY25ewzsrCwEM2bN5ems68Du3btEkKod3+1KO2k/KhTt3yf++u5ZWZmCkNDQ3H48GEhhHr3N9SpC2Rv54EDB6Q46twHKage/80334jWrVsr1eOzfax2tqr99b7Xq/dRYkOpy2QypeEyPjU7Ozvp/+whnbKHCLt58yZ8fX2lb2EYGBhg+PDhSExMVBomLjdbW9s831q5fv06OnXqhBo1asDQ0BAtW7YEAGmYuGwuLi7S/yYmJtDX10dUVJTK9URFReUZZivn8vlZu3YtGjZsiPLly8PAwAAbNmyQ8pGcnIzExESldLW0tODo6KiURr9+/bBv3z68ffsWALBjxw707t0bGhrvDqXU1FRMmTIFlpaWKFu2LAwMDBAZGYlvvvlGKZ2c5Z895H12+SsUCjg4OOT7zS1190/OdVSsWBHAu30EvBvOpU+fPnjz5g1evHhRaLr9+/dXma6+vj6MjIxUDlFckKioKDRt2lTafh8fHzRt2hSxsbHIzMxUOx1V5R0VFZXn+Mq9H4uLQqGApqamdFwXJjMzE/PmzYOtrS2MjY1hYGCAkydP5slvUVlbW0NTU1Oarly5cqH75ObNmwgICFDa3+7u7sjKykJ8fHyR8xATE5Pnm1GNGjV6r3V+it9HTq9fv5aGG8xp8+bNcHd3h6mpKQCgffv2SE5Oxu+//15gWWR/s6mg82Vh2wkAe/bsQdOmTaVPYcyaNUvpWJk0aRKGDRsGNzc3LFq0qMBhOrPlPle6uLjkOdfm/L28ePECjx49kn6v2Zo2bZrvOVqVmJiYPMdD7mng3XBYhoaG0rQ6x7Iq73PtMTY2hoWFhbRdOc9T2d7nPFUUBV2b3ycNTU1NmJiYSOd+4P+uBzmv+WfPnlX6zdSvXx8AlI6phg0bKq0nKioKLi4uSkPoNW3aFKmpqXjw4IFaef3666/x+vVr1P5/7J11WNRZ+//f5JBDKyA4qEgpYSeCimIsKsbagiI2GGuuqxjY3fW4oNiKsWujggGKgYSCgAjqmigooCB1//7gN58vH2ZgBtR193nO67q4Luac8znnPh33ifr14evrixMnTlR5LRMA1K1bF3Xq1OF+t2nTBqWlpUhOTsanT5+QlpYGHx8fXnwCAwMl6oescdC3bB/LU5N6K4vycQH49SY2NhZmZmawsrL66nCAsjJRWFjIGy+J60515Kw4fpCnD5GXqsY/YrOK9crR0REaGhrc7zZt2iAvL493lVhN6oCscaOsPispKQnm5uYwNTXlyVYVjx8/xufPn9GlSxeev3v37v3q8hYbG4vOnTvL5TYhIQElJSWwsrLiyXH16lWeHBoaGryrYMuX37dv3+Lly5dVhjl69Gju+rM3b97g3LlzvKtSZREXF4e8vDwYGBjw5ExPT+fJKU//VFVdBIBLly6hc+fOqFOnDrS1tTF8+HC8f/+eN174mvSQJy41mUfJ842sdrO6Y2Z/f38EBgaiXbt2CAgIQHx8fJXyydNfVzXuEl+fqaWlhUaNGvH8
qtimiK9VLG9WPp9TU1MxePBg1K9fH0KhkLuytuIYZPDgwfj06RMuXrwo19vJVfVb8o6zZCGv7FXJUtO5ujSqCkee9qjiHFCe/l3WGHL8+PE4dOgQnJycMHPmTERFRfH8f/z4MbS1tTn/9fX1UVBQgLS0NLnXHKShrKzM6ydtbGygq6vLG4+LRCLeO6/iPsTc3Jwzs7Oz4333LfKruvW7MoYOHYoDBw4AAIgIBw8exNChQ6vlh6mpKXr27Inff/8dQNl1+1++fMGAAQPk9uPw4cOIjY3l/ZXPo6SkJCgrK/PSzcDAgDePcHFxQWJiIjIzM3H16lW4urrC1dUVERERKCoqQlRUFHetrzxt96BBg5CRkYFbt24BKBtPNG3alBuz13Q8/7VU1e/J27+WJykpiXvmSoy0/kZWXL92/CVNLkdHR2hqanJm7dq14+YgYuQZT36vuZqsMYg8yBtPefj9998xcOBA7r3XwYMHIzIyslpj0equn3Xp0gUikQj169fH8OHDsX//fi4/09LSUFRUxBsvqKiooGXLljLXNaS10bLWasqjr68Pb29vuLu7w8PDAxs2bOA9kyOrPFZ3zlyd+aa844earDlWhax1PXnXemWtqcmDvHWsMuRdw5FnXiPPXKEqZKVrZWPminWgqnl7bGwsnJ2dpb4JXp31GGn1qqpyVlX9ri5fuxZbHRo1asT1PUBZG1++zRf3A9Upt9WZJ1WFrLFlTdbX37x5A19fXzRs2BA6OjoQCoXIy8vj0k+e9Q15xgJiypcjedZBqhrHe3t7IzY2FtbW1vD398fFixc5u+81z5bG1/RX1aXaL7IfOXIEAwcOxPHjx3lvWAFlg6D4+HhcuXKFuyNeTN26dWFmZsZL8G9JYmIijhw5Am9v7yrfixFTvgETL+iJ3+DJy8vDwoUL0bdvX4nvpCmPxJQfPAFlDaK7uzvc3d2xf/9+GBkZ4dmzZ3B3d8ft27dx5MiR76awrMihQ4cwffp0rFmzBm3atIG2tjZWrVrFe2dDHjw8PEBEOHPmDFq0aIHr169j3bp1nP306dMRFhaG1atXw9LSEurq6mjfvr3EO14VOxAFBQUu/cVKtcqQN39UVFSwdOlS2NnZcW+aSOu4ZOW7i4sLr2OqSva/G2np3b9/f947ToBk2fxWyMqriqxatQobNmzA+vXruXdjpkyZIiFvdalJnuTl5WHs2LFS3yaqW7fuV8nztWH+HfWjPIaGhkhISOCZlZSUYM+ePXj9+jU3mROb//7771UOZrOysgCAtzAljariefPmTQwdOhQLFy6Eu7s7dHR0cOjQId6bmQsWLMCQIUNw5swZnDt3DgEBATh06JBE31RdqltfxIM8Kvc+Yvl3gKrDt2hfqup7vrauVURBQUHiXciaxh2oum9WVFSUKyxpaSirz/fw8MCKFSsk/Cr/FmJN2lFZMpubmyM5ORmXLl1CWFgYJkyYgFWrVuHq1atS+ytZiN/f2rVrl8SAtHw/BsgeB32v9vF71NuvaTMrIm85qwl/1/hBWt5WNKtJuDWpA7LGjTUdc1eFuB6cOXOGt4kEAAQCQY38FFOd8pSXlwclJSXcu3dPov5paWlx/0srF+IyKE94I0aMwOzZs3Hz5k1ERUWhXr16cHZ2rpacJiYmvLfPxZR/g1Ge8luVm4yMDO6dzSVLlkBfXx83btyAj48PCgsLuYX0r0kPeePyPZDVblb3bd/Ro0fD3d0dZ86cwcWLF7Fs2TKsWbNGYrNxdagqf/7zn/9wb59WdFex/ZBVFjw8PCASibBr1y7undvGjRtLjEF69OiBffv24ebNm+jUqVO15K/Yb30r5JX975BFVjjytA8V221Z5VSeMWT37t3x9OlTnD17FmFhYejcuTMmTpyI1atXIy8vD82aNcP+/fsl/Jc1L/gWfK85r6yxQXXHGpUxePBgzJo1CzExMcjPz8fz588xcODAavszevRoDB8+HOvWrUNQUBAGDhzIU1bKwtzc
HJaWljyz6sZRrMi7evUqrl69iiVLlsDY2BgrVqzAnTt3UFRUhLZt2wKQr+02NjZGp06dcODAAbRu3RoHDhzA+PHjOXffczxfFVW1h9+rT5Inrj9i/CUv32uu9k9ap8vKysKJEydQVFSEbdu2cebitZQlS5bI5U9118+0tbURExODiIgIXLx4EfPnz8eCBQu++l3WivVGnrWaigQFBcHf3x/nz5/H4cOH8dtvvyEsLAytW7eWWR6rO2f+u+ebNUFWmyrvWu+3kEveOlYZ8o6JZMVZ3rlCVXyr/rim6wvVWY+R1h9VFW5V9bu6fcrXrsV+bVhVhS/P+ur3mCdJoyZ9pZeXF96/f48NGzZAJBJBIBCgTZs233wtVkz5ciTPOkhV4/imTZsiPT0d586dw6VLl/Dzzz/Dzc0Nx44d+6Hz7O9JtRXj7du3BwDcuHGD16nk5OTgwYMHUFZWRmRkJE8x/vz5czx//hyDBg36BiJLJzExEQsXLoSrq6tcivGqaNq0KZKTkyUmA9Xl0aNHeP/+PZYvX87tUr579y6Asp0W69evx8GDBwEAt27d4i02f/jwAba2tlL9tbW1RXR0NEaMGMGZiXfOVkZkZCTatm2LCRMmcGbldyvp6OjAxMQE0dHR6NChAwCguLgY9+7dQ9OmTTl3ampq6Nu3L/bv34/Hjx/D2tqaZx8ZGQlvb2+ubOTl5aGgoAATJ06sUr7yODg44D//+Q+ysrKk7vKqTv4sXboU/fv35xTjVVGZv0+ePOHtbqoK8Y0Bsk5T2traIjIykmcWGRkJKysric6yKqSld0ZGxjeTUxb29vYoLS3F1atX4ebmJpe8vXv35k7gl5aWIiUlBXZ2dl8lhyxUVVUl4tq0aVMkJiZ+dT0XY21tjbNnz/LMKk5AvkWY37J+iGnSpAm2bdsGIuIGsmfPnkVubi7u37/PK5MPHjzAyJEj8eHDh0o7vwcPHkBFRUXixFF1iIqKgkgkwty5czmzp0+fSrizsrKClZUVpk6disGDByMoKKjKCc+tW7ck2s4mTZpU6l4oFMLU1BSRkZG8UyCRkZHcTmLxQt+rV6+gp6cHoGwXaXmsra0lykNNJqjSynJFqup7KlK+78nOzkZKSgrX98jTThkZGfF2e6empvJ2Tn6rtkYc1oMHD3hmsbGxNVIgl6dp06YIDQ2FhYUFbxOILGxtbREaGsqrN5GRkdDW1oaZmRknc/n0ycnJkThxra6uDg8PD3h4eGDixImwsbFBQkICr28tz7Nnz/Dy5Utuh+mtW7egqKgIa2tr1K5dG6ampnjy5Em1TxiV51u1jyoqKlLzvrr19mtwcHDAX3/9hZSUFLlOjcsqZw0aNICKigqio6Ml6s7XnBSTpw/5nsTFxSE/P5+b6N+6dQtaWlq8U3YVkacOyBo3yuqzbG1t8fz5c7x69YpbmJE15rWzs4NAIMCzZ8+++vReRRwcHHD58mUsXLhQptsmTZqgpKQEb9++rZaiujza2tqwsLDA5cuXJTYdizEwMECfPn0QFBSEmzdvYuTIkdUKo2nTptwmuK+dQ1XFvXv3UFpaijVr1nDj6iNHjlTLD1npIU9cajKPsrW1xR9//MEzq/iNrHazumNmoEw5NW7cOIwbNw5z5szBrl27pC74fIt5RcXFk5ry/v17JCcnY9euXVy5v3HjhlS348ePR+PGjdGrVy+cOXPmq9vQrx1nVUf2qhCXsfLIKmM1QdweVafOyyqnCQkJco0hjYyM4OXlBS8vLzg7O2PGjBncgtrhw4dRq1YtCIVCqWHIs+YgjeLiYty9e5cbfycnJ1e5ZgL8Xx/y/PlzLj6JiYn48OEDN/+Up02oOJ4rKSnBgwcPuHZIVv2WdzxsZmYGFxcX7N+/H/n5+ejSpQtq1apVqfvK5gU9evSApqYmtm3bhvPnz+PatWtVhltdbG1tUVxcjOjoaE65La4/4nRVUFCAs7MzTp06yQ8DWgABAABJREFUhYcPH6J9+/bQ
0NDAly9fsGPHDjRv3pxbzJW3Hxo6dChmzpyJwYMH48mTJ7z1xZqO5+WhpvOZmvSvtra2CAkJQUFBAbfoLq2/kRXXrx1/SZMrODgYnz594vItMjKSm4OIqcl4UhbfKm8rm5eUR954ymL//v0wMzPDyZMneeYXL17EmjVrsGjRIrn655qsnykrK8PNzQ1ubm4ICAiArq4urly5And3d6iqqiIyMhIikQhAmbLpzp07mDJlitxxA+Rfq6lIkyZN0KRJE8yZMwdt2rThNrrIUx6rO2eWd74pHj+U7wP+jjmYrHW9mq71lkfetkueOibPOpQsZI3jv8VcQVa6isfMXl5enFlkZGS11qQdHBywZ88eFBUVSaxFfav1mMqorH5LU9zWNM/kqd/fojxUhjzrq0Dl86Tq9NmyxpY1WV+PjIzE1q1b0aNHDwBlOtF3795x9vKsb8gzFpCGvOsglY3jgbI18IEDB2LgwIHo378/unXrhqysrO82z5aWXw0aNPhm/ZUsqn2VuqmpKerVqycxSbx58yaICAMGDJCwE/8WK9X/6cyfPx979+7FwoUL8fDhQyQlJeHQoUP47bffAJSdxpOHunXrQlVVFZs2bcKTJ0/wxx9/YPHixVLdLlq0CJcvX+YWY9XU1NCnTx+pbidPnozff/8dQUFBSElJQUBAAB4+fFilLA0bNsTdu3dx4cIFpKSkYN68eRKd/eTJk7F8+XKcPHkSjx49woQJE/DhwwcJv4YOHYozZ87g999/l2joGzZsiOPHjyM2NhZxcXEYMmQISktLqzWAHTx4MIyNjdGnTx9ERkbiyZMnCA0Nxc2bNwHIzp+aUpm/ixcvllvxIhKJoKCggNOnTyMzM5PbrVORX375BZcvX+bKQ2xsLDZv3ozp06dXS+bK0lsWtWrVgrq6Os6fP483b97g48eP1QpXjIWFBby8vDBq1CicPHkS6enpiIiIqHTw0rBhQ4SFhSEqKgpJSUkYO3asxG0C3wMLCwtcu3YNL1684DqkWbNmISoqCpMmTUJsbCxSU1Nx6tSpGj/xMHbsWDx69AizZs1CSkoKjhw5guDgYAD/t2vyW4T5PepHx44dkZeXx2tHdu/ejZ49e8LR0RGNGzfm/n7++Wfo6upKPQki5vr163B2dv6q3ZoNGzbEs2fPcOjQIaSlpWHjxo04ceIEZ5+fn49JkyYhIiICT58+RWRkJO7cuVPl4hgAHD16FL///jvXdt6+fVtm+s+YMQMrVqzA4cOHkZycjNmzZyM2NhaTJ08GAFhaWsLc3BwLFixAamoqzpw5I7Gb0s/PD2fPnsXatWuRmpqKHTt24Ny5c7wriOXBwsIC0dHRyMjIwLt376TW95r2Pd7e3jA0NOT6nvLtVEpKCvbs2SPRTnXq1AmbN2/G/fv3cffuXYwbN47XXn6rtkYc1t27d7F3716kpqYiICBAQoFZEyZOnIisrCwMHjwYd+7cQVpaGi5cuICRI0dWOZieMGECnj9/Dj8/Pzx69AinTp1CQEAApk2bxk3kOnXqhJCQEFy/fh0JCQnw8vLiLYIEBwdj9+7dePDgAZ48eYJ9+/ZBXV2dG/xJQ01NDV5eXoiLi8P169fh7++Pn3/+GcbGxgCAhQsXYtmyZdi4cSNSUlKQkJCAoKAgrF27Vu40+Vbto3ji+/r1a2RnZ9e43n4NLi4u6NChA/r164ewsDBuB+z58+elupdVzrS0tODj44MZM2bgypUrXN2RdwNdZcjTh3xPCgsL4ePjg8TERJw9exYBAQGYNGlSlfGSpw4AVY8bZfVZbm5usLKy4pX58pN0aWhra2P69OmYOnUq9uzZg7S0NMTExGDTpk3Ys2fPV6QSMGfOHNy5cwcTJkxAfHw8Hj16hG3btvEmu2KsrKwwdOhQjBgxAsePH0d6ejpu376NZcuW4cyZM3KHuWDBAqxZswYbN25EamoqF5fyjB49Gnv27EFSUhJvoUce3Nzc0KZNG/Tp0wcXL15ERkYG
oqKiMHfu3Eo3VdUES0tLFBUVcX1TSEgItm/fXm1/qkoPeeJSk3nUuHHjkJqaihkzZiA5ORkHDhzg6qcYWe1mdcfMU6ZMwYULF5Ceno6YmBiEh4dX2lbK01//Xejp6cHAwAA7d+7E48ePceXKFUybNq1S935+fggMDMRPP/1UIyV0eX++dpxVXdkrQ3wibvXq1UhNTcXmzZsr7XO+hoCAABw8eBABAQFISkpCQkKC1NNe5ZFVTuUZQ86fPx+nTp3C48eP8fDhQ5w+fZorm0OHDoWhoSF69+6N69evc+Xc39+fe2JD3jWHiqioqMDPzw/R0dG4d+8evL290bp16yqvzHdzc4O9vT2GDh2KmJgY3L59GyNGjICLiwt3c588bUKnTp1w5swZnDlzBo8ePcL48eN5Msuq3/KuEYjT8NChQzh69KjMRfXK5gVKSkrw9vbGnDlz0LBhQ+4ayxEjRmDOnDlV+ikPDRs2RO/eveHr64sbN24gLi4Ow4YNQ506ddC7d2/OnaurKw4ePAgnJydoaWlBUVERHTp0wP79+3kLtvL2Q3379kVubi7Gjx+Pjh078q4hrel4Xh6qk3/lqUn/OmTIECgoKMDX15cbk4kXq6sb168Zf1Vk6NCh3BzkwYMHCA8Ph5+fH4YPH85dfQ7UbDwpi2+VtxXnJV8TT1ns3r0b/fv3562jNG7cGD4+Pnj37p3cfUJ1189Onz6NjRs3IjY2Fk+fPsXevXtRWloKa2traGpqYvz48ZgxYwbOnz+PxMRE+Pr64vPnz/Dx8ZE7bmK5qlqrqUh6ejrmzJmDmzdv4unTp7h48SJSU1O5vkNWeazOnLm6800/Pz/s3r0be/bsQWpqKgIDAxEfH//d51+y1vVqutZbHnnXYuSpY/KsQ8lDVeP4bzFXkJWuM2bMQHBwMLZt24bU1FSsXbsWx48fr9aYedKkScjJycGgQYNw9+5dpKamIiQkhHtu4Vusx0ijqvotDQsLC+Tl5eHy5ct49+6d3Neuy1O/LSwsuKei3r17xz3b8S2QZ321qnlSdfpsWWPLmqyvN2zYECEhIUhKSkJ0dDSGDh3KWxuXZ31DnrGANORZB6lqHL927VocPHgQjx49QkpKCo4ePQpjY2Po6up+t3m2tPz6lv2VTGryMPnw4cNJRUWFPn/+zJnNmzePGjduTHv37iUdHR0qKSnh7CZOnEgKCgr07t07zgwATZw4kU6cOEGNGjUiVVVVsrOzo3PnzvHCysjIoPHjx5OVlRWpqamRvr4+9e/fn9LT0zk3QUFBBEDiT1NTU0J28aPu169fJy8vL6pXrx6pqKgQAOrfvz8n4/nz56lt27akrKxMAMje3p5atGhBurq65OTkxPkXEhJCTZs2JUVFRVJRUaGBAwfSs2fPOPsDBw6QhYUFCQQCatOmDf3xxx9SZQVAVlZWpKqqSgBo4MCBXNooKSmRoqIiL22WLFlChoaGpKmpSZaWlqShoUEKCgpkZ2dHu3fvloh3QUEBeXt7k46ODunq6tL48eMJABkYGNCRI0fI1taWBAIBGRsbk5aWFunq6pKbmxtpaWmRoqIiubi4cGleUlJCJiYmBIC6detG5ubmpKqqSmZmZjRy5EhycXEhdXV1Mjc3p82bN5NIJKLyRU38u3z+KygokKGhIRfHjIwM6tevHwmFQtLQ0KDmzZtTdHQ058eff/5JZmZmpKCgQABIRUWFunbtSgUFBVz5kvbn4eFBREQBAQGc2eDBg0lHR4eEQiF16dKFWrduTerq6iQUCqlly5akr69PXl5enL8nTpyg7OxsmjJlChcXXV1dGj58OGVmZtKiRYvI2NiYAJC+vj4Xh/bt29OVK1e4OBw7dozs7OwIAOno6NCqVat4eTZ58mRycXGRyMvypKenU8eOHXnp7eLiQpMnT+al97p16yS+3bVrF5mbm3P5S0Tk5eVFvXv35tysW7eORCIR77uK/ufn59PUqVPJxMSEVFVVydLSkn7//Xci
+r/6lp2dTURE79+/p969e5OWlhbVqlWLfvvtNxoxYgQvzIr+VyQgIIAcHR253xVlJpJMu5s3b5KDgwMJBAJeWbx9+zZ16dKFtLS0SFNTkxwcHGjJkiWcfcW0E+c/UVnaA6D79+9z9qdOnSJLS0sSCATk6upK27ZtIwCUn59f4zCJiBwdHSkgIID7Lat+iNuv8uV4586dlaYpEdHPP/9Ms2fPJiKi169fk7KyMh05ckSq2/Hjx1OTJk2ISDI/iIisra3p4MGDVYYnTzxnzJhBBgYGpKWlRQMHDqR169aRjo4OERF9+fKFBg0axLU/pqamNGnSJF5aVwQAbdmyhbp06UICgYAsLCzo8OHDnL20PCUqa/MWLFhAderUIRUVFXJ0dJToq27cuEH29vakpqZGzs7OdPToUQLA66t27txJderUIXV1derTpw8FBgaSsbExZy8tLSvWweTkZK6Nquh/eSrre8RxE9fNP//8k+uDW7ZsSXFxcTx/xO2UiooK1a1bV6KdevHiBXXt2pU0NTWpYcOGdPbsWdLR0aGgoCDOjbS2piIV2woiovv370vEcf78+VS7dm3S0dGhqVOn0qRJk3h+Sms/pJW18nWZiCglJYU8PT1JV1eX1NXVycbGhqZMmUKlpaWV+ktEFBERQS1atCBVVVUyNjamWbNmUVFREWf/8eNHGjhwIAmFQjI3N6fg4GBeOT9x4gS1atWKhEIhaWpqUuvWrenSpUtS04jo/8rI1q1bydTUlNTU1Kh///6UlZXFc7d//35ycnIiVVVV0tPTow4dOtDx48eJSHo5z87OJgAUHh7Omclqq6T1URXT+o8//iBLS0tSVlYmkUgkV70FwCs/FanY5kvLm969e3P9NlFZ3zNy5EgyMDAgNTU1aty4MZ0+fZqIysaQ4nZFjKxylpubS8OGDSMNDQ2qXbs2rVy5UmbfW7HMEZFEXZGnD6mIrH5JWt2qGGdxms6fP59rc319fbkxFVHN6wARf9yYlpYm4YesPis5OZnat29PqqqqZGVlRefPn5eanuUpLS2l9evXk7W1NamoqJCRkRG5u7vT1atXOTcV01+ambT0i4iIoLZt25JAICBdXV1yd3fn7CumU2FhIc2fP58sLCxIRUWFTExMyNPTk+Lj44lIevk7ceIEb6xCRLR9+3YuLiYmJuTn5ycRX5FIRD169JBIC/E8qSpycnLIz8+PTE1NSUVFhczNzWno0KHcnEae/kmeurh27VoyMTEhdXV1cnd3p7179/LS91ukh6y4EP3fPEpLS4u8vLxo5syZEvGryJ9//snVT2dnZ/r9998lyoasdrM6Y+ZJkyZRgwYNSCAQkJGREQ0fPpybp0orl7L6a3nGXRWRp/0gkiwfYWFh3NzSwcGBIiIiZLZVa9asIW1tbYqMjCQi2W2btH5L1jhL7E/5bypSE9mlybJ7924yMzMjdXV18vDwoNWrV0ukW3kq+ivvuCg0NJTr7w0NDalv376cXWVzQFnlVNYYcvHixWRra0vq6uqkr69PvXv3pidPnnDfv3r1ikaMGEGGhoYkEAiofv365OvrSx8/fiQioqKiIpo8eTIJhULS1dWladOmScwHKyIud6GhoVS/fn0SCATk5uZGT58+5dxIa6eIiJ4+fUq9evUiTU1N0tbWpgEDBtDr1695bmS1CYWFhTR+/HjS19enWrVq0bJlyyTat6rqNxFxawQKCgrcd9LazezsbBIIBKShoUG5ubk8u4pjoKrmBWlpaQSAVq5cyZm5uLjwZK5IZXMhabJmZWXR8OHDSUdHh2vTU1JSeN+Iy+ysWbM4s3Xr1hEAOn/+PM+tPG03UdmcFQAvbcXUdDzv5eUlc91F3vyrWC7kjVd5bt68SY6OjqSqqkpOTk4UGhoqkS+y4kr09eOvimOt+Ph46tixI7cu6+vryyujNR1Pfq+5WsW8qDgvqQxZ8ZQ2LinP3bt3CQDdvn1bqn337t3J09OTiKSvZZWnuutn169f
JxcXF9LT0yN1dXVycHDgrXfk5+eTn58f1z63a9euUjnFVNaXVLVWQ8Rvk1+/fk19+vTh2keRSETz58/n6Q2qKo/VmTPLmm9KG8csWrSI6wNGjRpF/v7+1Lp1a85enjXHiulUsfwRSZbrqtb1arrWW3FsJ89aDJHsOiatv5FnrCKtb65qHF+TuUJFZK2Xbt26lerXr08qKipkZWVFe/fu5X0vz7w9Li6OunbtShoaGqStrU3Ozs68dra66zFEssuZrPotjXHjxpGBgQEB4MrF167FEpXpmPr160e6urpVrt9Ii5M8/YCs9dWq5klE0vvsisgztiSq/vp6TEwMNW/enNTU1Khhw4Z09OhRifjJs74haywgrf4RyV4HqWocv3PnTnJyciJNTU0SCoXUuXNniomJ4fz+XvNsaflVk/6qJtRIMb5jxw6JyV+nTp1ozJgx9PjxYwLAW1x3cnIiW1tbfsAAOTo6komJCS1evJjWr19P9evXJw0NDV5hPnr0KDk6OtL8+fNp586d9Ouvv5Kenh6JRCL69OkTEZUN+v39/QkA/frrrxQSEkIhISESE57yrF69mpydnWnRokW0c+dOmjx5Mqmrq1PLli15g0mxAtXOzo569+5NW7dupS1bthARUWBgICkoKNDAgQNp69attHDhQjI0NCQLCwuJglmeuLg4Gjx4MKccBkDbt2+nvLy8aqXN69evyczMjMzNzWnRokW0bds26tWrFwGQOnCpCABycHAgc3NzWr58OS1fvpx0dHSobt26tHnzZrKzs6M1a9bQb7/9RqqqqtSxY0fe935+ftSjRw9aunQp7dixg3x8fEhJSYn69+/PcydOw4phyxNHaZSUlHAd0JQpU2jHjh00adIkUlZW5jW4ISEh3OKVuExERUXxZGrSpAn17duXtm7dSqNHjyYANHPmTF54IpGI15Dm5uZS48aNSUlJiXx9fWnbtm20ePFiatGiBddAZWZmkomJCU2bNo22bdtGK1eu5Bqlip1f+Q6K8d9FYGAgmZmZ/Wgx5CIuLo5q1aolsQBTXc6ePUu2trYSihGGJKNHj6b27dv/aDEYjH8MT548IWVlZYmF1f9V/q4+RNaiHOOfT25uLgmFQgoNDZWwmz9/vswFfwbjv5GK46wrV66Qrq6uxGYyxj8feRbCGXyuXbtGKioqVa6JMcro0KEDW4/5BrDxJOO/BTc3Nxo2bNiPFoPBYDAY35kaKcYfPnxIAGjx4sVEVLbrV1NTk/bs2UNERLVr1+aUxzk5OZwCkRcwQKqqqvT48WPOLC4ujgDQpk2bOLPyp9LF3Lx5kwDwdvWId49UtQO8PNL8PXjwIAGga9eucWZiBergwYN5bjMyMkhJSYm3u5qIKCEhgZSVlSXMK7Jq1SoCwIVZXpEub9r4+PiQiYmJhCJ50KBBpKOjIzWO5QFAAoGAt7NYvOnB2NiYcnJyOPM5c+ZI7EKW5v+yZctIQUFBYve2NMW4PHGURkhICCkqKtL169d55tu3bycA3EkDIiJNTU2pu4PEMo0aNYpn7unpSQYGBjyziorx+fPnEwBut1d5xJsqiouL6cuXLzy77Oxsql27tkSYTDH+38OWLVvo9u3blJaWxt2eMXfu3B8tltwEBQVxp9hqytGjR+nWrVvfSKL/LlatWkWxsbGUmppKGzduJBUVFdq1a9ePFovB+MewefNmmjBhwo8W44fxo/oQtpD576WkpITevHlDM2fOpLp160rdlNaiRQveKQkG478VWeOs6dOn807PMv49MMW4/BQUFNDz58+pU6dONGTIkB8tzj+eDx8+UJ06db56cziDjScZ/04+ffpEa9asoQcPHlBSUhK33hsWFvajRWMwGAzGd0b+h5/LYWtrCwMDA+4dsLi4OHz69Alt27YFALRt2xaRkZGYMGECbt68iZKSEqnvi7u5uaFBgwbcbwcHBwiFQjx58oQzK38Pf1FREXJycmBpaQldXV3ExMRg+PDhNYkCz9+C
ggLk5eWhdevWAICYmBg4Ozvz3I8bN473+/jx4ygtLcXPP//Me1fQ2NgYDRs2RHh4OH799dcayQbIThsiQmhoKH7++WcQEU8Gd3d3HDp0CDExMWjXrl2V4XTu3BkWFhbc71atWgEA+vXrB21tbQnzJ0+ecO7Lp+GnT5+Qn5+Ptm3bgohw//591K1b96viWBlHjx6Fra0tbGxsePHu1KkTACA8PJwri7KomK/Ozs44ceIEcnJyIBQKpX4TGhoKR0dHeHp6StiJ36FRUlLi3pEtLS3Fhw8fUFpaiubNmyMmJkYu2Rj/PsRvEmVlZaFu3br45Zdfvsk7bn8X3t7eX+1H//79v16Q/1Ju376NlStXIjc3F/Xr18fGjRsxevToHy0Wg/GPYeLEiT9ahB/Kv70PYfz9PHv2DPXq1YOZmRmCg4OhrCw5tbt9+/YPkIzB+PuRNc5atWrVD5SOwfh7OHjwIHx8fODk5IS9e/f+aHH+8ejo6OCvv/760WIwGIwfhIKCAs6ePYslS5agoKAA1tbWCA0NhZub248WjcFgMBjfmRopxhUUFNC2bVtcu3YNpaWliIyMRK1atWBpaQmgTDG+efNmAEBkZCQASFWMS1Oc6unpITs7m/udn5+PZcuWISgoCC9evAARcXYfP36sifgAgKysLCxcuBCHDh3C27dveXbS/K1Xrx7vd2pqKogIDRs2lOq/ioqKXHK0bt2aFycxstImMzMTHz58wM6dO7Fz506pfleMlzQqhqOjowMAMDc3l2pePm+ePXuG+fPn448//uCZA/LljTz5L43U1FQkJSXByMhIqr088a5MBj09PQBl8axMMZ6WloZ+/frJ9HvPnj1Ys2YNHj16hKKiIs68Ylli/Pewbt06rFu37keLwfiHcuTIkR8tAoPB+Afzo/qQ4ODgvz1MxrfBwsJC6jyCwfhfhI2z/nvx9vb+Jht4/xdgacX4UbDxJOPfiLq6Oi5duvSjxWAwGAzGD6BGinGgTNH9559/IiEhAZGRkbwTum3btsWMGTPw4sUL3LhxA6ampqhfv76EH+ITtRUpv8Dj5+eHoKAgTJkyBW3atIGOjg4UFBQwaNAglJaW1lR8/Pzzz4iKisKMGTPg5OQELS0tlJaWolu3blL9LX86Gig7BaygoIBz585JjYeWllaNZQNkp41YxmHDhsHLy0uqWwcHhxqHIyv8kpISdOnSBVlZWZg1axZsbGygqamJFy9ewNvbW668kSf/pVFaWgp7e3usXbtWqn1Fpf73kEEW+/btg7e3N/r06YMZM2agVq1aUFJSwrJly5CWlvZVfjMYDAaDwWAwGAwGg8FgMBgMBoPBYDAYjOqhWNMPxSfAb9y4gcjISN6V3c2aNYNAIEBERASio6NlXuddFceOHYOXlxfWrFmD/v37o0uXLmjfvj0+fPjAcye+wloesrOzcfnyZcyePRsLFy6Ep6cnunTpIlV5XxkNGjQAEaFevXpwc3OT+BNfy14Z1ZFXGkZGRtDW1kZJSYnU8N3c3FCrVq2vCqMqEhISkJKSgjVr1mDWrFno3bs33NzcYGpq+t3CFNOgQQNkZWWhc+fOUuNtbW3Nuf3adK4s/AcPHlTp5tixY6hfvz6OHz+O4cOHw93dHW5ubigoKPjm8jAYDAaDwWBUB1dXV0yZMuWHyqCgoICTJ0/+UBn+DjIyMqCgoIDY2Nh/ZTgWFhZYv379N/XzfwnxRtn/RcrXcXnKZ0REBBQUFCTm+f8EgoODoaurW6WbR48eoXXr1lBTU4OTk9PfItf3aEfliSvj2/H582f069cPQqHwh5T/f3K9YzAYDAaD8d+DhYUFrK2t4eTkBGtrayxfvlyu79avX4/Xr19X6aZHjx5ITk7+FmLK5PTp03B1dZVq5+rq+tVj8+fPn6NXr16wt7eHvb09nJyccOXKFQBl47bz589/lf/SyMvL+2Y6tP79+1d6i4yCggLs7e3h6OgIOzs7BAUF8ezFutbOnTtLfHv3
7l10794d9erVQ7NmzdCkSRMEBgZ+law1Vow3b94campq2L9/P168eME7MS4QCNC0aVNs2bIFnz59knqNurwoKSlJnN7dtGkTSkpKeGaampoAINeAXnxKuKK/1Vn06du3L5SUlLBw4UIJf4gI79+/r/L76sgrDSUlJfTr1w+hoaFSlbSZmZk18rc64QP8NCQibNiw4buGC5Sd9n/x4gV27dolYZefn49Pnz5xvzU1Nb/5JK9fv36Ii4vDiRMnJOzE6SEtfaKjo3Hz5s1vKguDwWAwGAxGZXzPBW95lScLFiz425REX8s/YcPAP407d+5gzJgxP1oMxr+QV69eoXv37j9ajL+NgIAAaGpqIjk5GZcvX/6mfv+b2lEx0pT2/8Z4SONb9q179uzB9evXERUVhVevXnFP2P1dtG3bttrhsg1TDAaDwWAwasLhw4cRGxuLK1euYNmyZbh9+7bMb6pSjJeWlqK0tBRnz57lHZT8NzN+/Hh07NgRCQkJSEhIwKVLl7jnq7+XYrymFBcXV/ub69evIy4uDgcPHsTYsWPx6tUrzu7y5cvQ1dVFfHw80tPTOfOEhAR069YNEydORHp6Ou7du4fLly8jJyfnq+Sv8VXqqqqqaNGiBa5fvw6BQIBmzZrx7Nu2bYs1a9YAkP6+uLz89NNPCAkJgY6ODuzs7HDz5k1cunQJBgYGPHdOTk5QUlLCihUr8PHjRwgEAnTq1EnqqWmhUIgOHTpg5cqVKCoqQp06dXDx4kVegsuiQYMGCAwMxJw5c5CRkYE+ffpAW1sb6enpOHHiBMaMGYPp06dX+r04vebOnYtBgwZBRUUFHh4enMJcHpYvX47w8HC0atUKvr6+sLOzQ1ZWFmJiYnDp0iVkZWXJ7Vd1sbGxQYMGDTB9+nS8ePECQqEQoaGhMt8H/xYMHz4cR44cwbhx4xAeHo527dqhpKQEjx49wpEjR3DhwgU0b94cQFk6X7p0CWvXroWpqSnq1auHVq1afVX4M2bMwLFjxzBgwACMGjUKzZo1Q1ZWFv744w9s374djo6O+Omnn3D8+HF4enqiZ8+eSE9Px/bt22FnZ4e8vLxvkQwMBoPBYDD+RygsLISqquqPFoPxP4iRkdGPFoFHSUkJFBQUoKhY4/3d/2j+m+JnbGz8o0X4W0lLS0PPnj0hEolq7Adr6+Xjvymd0tLSYGtri8aNG/+Q8FVVVf/n6iqDwWAwGIwfS506dWBjY4OnT5+iZcuWeP36Nfz9/ZGRkYH8/Hz07t0bgYGBWLRoEV6+fImBAwdCXV0dwcHBOHnyJBISEpCXl4fnz58jLCwM7dq1w8mTJ+Hk5FSpX/v378fBgwdx+vRpAGWHGRs0aIATJ07A0dERISEh2Lx5M4qKiqClpYVNmzbB0dERRUVFmDx5MsLCwqCnpwdnZ+cq43b58mUsWbIE2dnZ6N27N1avXo179+5h2LBhSEpK4k5mt23bFvPmzZPYSPzXX3+hTp063G9DQ0MAQGxsLLZv346SkhJERESgb9+++PXXX9GzZ0+8f/8e+fn5cHR0xK5du6CpqYmIiAhMmjQJHTp0QGRkJIqLi7Fnzx5OZ7Zjxw6sXr0aWlpa6Nu3L0+GoUOHIjk5GYWFhTA3N8fu3bthbGyMjIwMODk5YezYsQgLC8OIESPQrVs3jBo1Ch8/fkTDhg3x+fNnucqAo6Mj9PT08Ndff8HExAQAsHv3bvj6+iI5ORm///47Fi9eDABYsWIFRo8ejZ9++on7Xl9fHytXrpQrrEqhr2DOnDkEgNq2bSthd/z4cQJA2traVFxcLGEPgCZOnChhLhKJyMvLi/udnZ1NI0eOJENDQ9LS0iJ3d3d69OiRhDsiol27dlH9+vVJSUmJAFB4eHilsv/111/k6elJurq6pKOjQwMGDKCXL18SAAoICODcBQQEEADKzMyU6k9oaCi1b9+eNDU1SVNTk2xsbGjixImUnJxcadhiFi9eTHXq1CFFRUUCQOnp6dVK
GyKiN2/e0MSJE8nc3JxUVFTI2NiYOnfuTDt37pQZvrRw0tPTCQCtWrWKZx4eHk4A6OjRo5xZYmIiubm5kZaWFhkaGpKvry/FxcURAAoKCuLcidNQVtiVxVEahYWFtGLFCmrUqBEJBALS09OjZs2a0cKFC+njx4+cu0ePHlGHDh1IXV2dAHB+V5avQUFBvLyoTKb379/TpEmTqE6dOqSqqkpmZmbk5eVF7969IyKi0tJSWrp0KYlEIhIIBNSkSRM6ffo0eXl5kUgkkkiL8mWOwWAwGAzGj6OkpISWLl1KFhYWpKamRg4ODrzxj3hMdOnSJWrWrBmpq6tTmzZt6NGjR5yb2NhYcnV1JS0tLdLW1qamTZvSnTt3iKhsDOLo6MgLc926dbzxgZeXF/Xu3ZsCAwPJxMSELCwsiIjo2bNnNGDAANLR0SE9PT3q1asXb8xSHvGYrvyfeDzj4uJCfn5+NGPGDNLT06PatWtLjEXWrFlDjRs3Jg0NDTIzM6Px48dTbm4uLw3K/0kby4jHVeX/xGNEALRr1y7q06cPqaurk6WlJZ06dYr3fUJCAnXr1o00NTWpVq1aNGzYMN7Y7ejRo9S4cWNSU1MjfX196ty5M+Xl5XH2u3btIhsbGxIIBGRtbU1btmyRmlbiNK8oqzhtZclRUlJCK1asoAYNGpCqqiqZm5tTYGAgLx9CQ0PJ1dWV1NXVycHBgaKionjppKOjQ+fPnycbGxvS1NQkd3d3evnyJS+MhQsXcmNPR0dHOnfunER+379/nzOLiIigFi1akKqqKhkbG9OsWbOoqKiIs8/JyaEhQ4aQhoYGGRsb09q1a8nFxYUmT57MuRGJRLRu3Trud3Z2Nvn4+JChoSFpa2tTx44dKTY2lrOvquxLo6pyVj5tTp06Rba2tqSkpETp6elUUFBAv/zyC5mampKGhga1bNmyyrmftPTJzs7mzRmzsrJoyJAhZGhoSGpqamRpaUm///47515W/SsuLqapU6eSjo4O6evr04wZM2jEiBHUu3fvSuWqafwyMjLop59+Il1dXdLQ0CA7Ozs6c+YMEf1f/Tx9+jTZ29uTQCCgVq1aUUJCAi/sY8eOkZ2dHamqqpJIJKLVq1fz7EUiES1ZsoRGjhxJWlpaZG5uTjt27ODsv3z5QhMnTiRjY2MSCARUt25dWrp0KWcPgE6cOMFL/4MHD1KbNm1IIBBQo0aNKCIignMvljs7O5vy8vJIW1ub1/YSEZ04cYI0NDQoJydHanqeO3eO2rVrx+VBz5496fHjx5y9PPVRnC/m5uakrq5Offr0odWrV5OOjk4luUiVtofx8fHUsWNHro3y9fXlle/K2vqKsnzPdlRaeDo6OnTixAmytLQkgUBAXbt2pWfPnvHcnTx5kpo0aUICgYDq1atHCxYs4NoXkUjEk1ckElUZD1ntirjf3LVrF1lYWJCCggIX/+3bt1PPnj1JXV2dbGxsKCoqilJTU8nFxYU0NDSoTZs2vDIgS3ZZ6VpV3yqNquqZi4sLzx8XFxepfshqV2XV5YKCApo5cyaZmZmRqqoqNWjQgP7zn/8QEb/eibl+/Tq1b9+e1NTUyMzMjPz8/Li+taLMAGpcXxkMBoPBYPzvIBKJuLlYUlISNWjQgN6+fUtERF27duXmBUVFReTu7k5HjhyR+I6obFxoYmJCr1+/lup3ZX59/vyZDAwM6NWrV0REdOXKFWratCkREd24cYO6d+9OBQUFRER07do1srOzIyKizZs3U6dOnejLly/05csXcnV1rXTM5uLiQp06daLCwkL69OkTNWvWjPbv309ERG3btqULFy4QEVFMTAxZWlpSaWmphB8HDhwgLS0tatu2LU2bNo2uXr3Ki3v5uXppaSlPFzVu3DhatmwZEZWN8ZSUlOjWrVtERLRt2zbq2rUrEZXND2rXrs2tN4h1vGLE+UJEtGzZMho7diwR/d84eM+ePZx98+bNuXFlfHw8qaqq8vSC5Sk/5oyIiCA7
Ozv68uULEZXp2nR1dSk7O5vi4uLIzMyMSkpKiIjI1taWjh8/LtVPMfPmzaNt27ZV6UZCnmq5ZjAYDAaDwWAw/osJDAwkGxsbOn/+PKWlpVFQUBAJBAJuciVeRG7VqhVFRETQw4cPydnZmbdRtFGjRjRs2DBKSkqilJQUOnLkCLfIL69iXEtLi4YPH04PHjygBw8eUGFhIdna2tKoUaMoPj6eEhMTaciQIWRtbc1NJspTXFxMoaGhBICSk5Pp1atX9OHDByIqm7AJhUJasGABpaSk0J49e0hBQYEuXrzIk+nKlSuUnp5Oly9fJmtraxo/fjwRlSnE1q9fT0KhkF69ekWvXr3iKXvEfP78mX755Rdq1KgR5+7z589EVDYpMjMzowMHDlBqair5+/uTlpYWvX//nojKFCVGRkY0Z84cSkpKopiYGOrSpQt17NiRiIhevnxJysrKtHbtWkpPT6f4+HjasmULJ8e+ffvIxMSEQkND6cmTJxQaGkr6+voUHBwsNd8/fPhAbdq0IV9fX07W4uJimXIQEc2cOZP09PQoODiYHj9+TNevX6ddu3YR0f9NHm1sbOj06dOUnJxM/fv3J5FIxCligoKCSEVFhdzc3OjOnTt07949srW1pSFDhnBhrF27loRCIR08eJAePXpEM2fOJBUVFUpJSeGFI14Q+Ouvv0hDQ4MmTJhASUlJdOLECTI0NORtYBg9ejSJRCK6dOkSJSQkkKenJ2lra1epGHdzcyMPDw+6c+cOpaSk0C+//EIGBgZcvlVV9qVRVTkrnzZt27alyMhIevToEX369IlGjx5Nbdu2pWvXrtHjx49p1apVJBAIuPSoiDyK8YkTJ5KTkxPduXOH0tPTKSwsjP744w8iIrnq34oVK0hPT49CQ0MpMTGRfHx8SFtbW6ZivCbx69mzJ3Xp0oXi4+MpLS2N/vzzT27RRNxG2dra0sWLFyk+Pp5++uknsrCwoMLCQiIiunv3LikqKtKiRYsoOTmZgoKCSF1dnbeIIRKJSF9fn7Zs2UKpqam0bNkyUlRU5DYBrVq1iszNzenatWuUkZFB169fpwMHDnDfS1OMm5mZ0bFjxygxMZFGjx5N2tra3GJORQWdr68v9ejRg5devXr1ohEjRlSanseOHaPQ0FBKTU2l+/fvk4eHB9nb23OLKvLUx1u3bpGioiKtWLGCkpOTacOGDdxm+sp49eoVNWrUiH755ReuPczLyyMTExPq27cvJSQk0OXLl6levXo8Jaq0tr4i37MdlYa4TDZv3pyioqLo7t271LJlS14fd+3aNRIKhRQcHExpaWl08eJFsrCwoAULFhBR2WKaWPH96tUrevv2bZXxkNWuBAQEkKamJnXr1o1iYmIoLi6Oi3+dOnXo8OHDlJycTH369CELCwvq1KkTnT9/nhITE6l169bUrVs3uWWXla5V9a0VkVXP3r9/T76+vtSmTRt69eoVF9+KVNWuylOXf/75ZzI3N6fjx49TWloaXbp0iQ4dOkREkvXu8ePHpKmpSevWraOUlBSKjIykJk2akLe3NyezmZkZLVq0iMtHoprVVwaDwWAwGP87iEQisrKyIhsbG1JQUKD169cTEVFeXh4pKSmRo6Mj99egQQNavHgx911FxbiPj4+E3/fv35fp15gxY2jlypVERDRixAjatGkTERHNmDGDTE1Ned8ZGxvT58+fydPTk7eGsHfv3ioV4+Xdrlu3jkaOHElERIcPHyYPDw8iIvLx8aG1a9dWmlZZWVkUGhpK06ZNI11dXU7miorxkpISmjt3Ljk5OVHjxo3J3NycBg4cSERlYzxra2vObWxsLDVo0ICIiDZs2MCN7YjKNoCXV4yvX7+emjVrRo0aNaIGDRpQq1atiKhsLqWiosLNrT5+/EjKysq8Q9GdOnWqUjHeuHFjatCgASkqKvKU3Rs3bqTBgwdzv5s1a0Znz54lIknF+PTp08nR0ZFMTU2lzp/khSnGGQwGg8FgMBgMKjtV
paGhIXF60MfHhxuklz8xLubMmTMEgPLz84mISFtbu1IFrLyK8dq1a/MU3iEhIWRtbc3bVfzlyxdSV1fndh5XRNpJMKKyCVv79u15Zi1atKBZs2ZJ9Yeo7HS2gYEB91t8qlAW0uJLVDYp+u2337jfeXl5BIA7Bb148WJuR7OY58+fc8qIe/fuEQDKyMiQGm6DBg14Sjqxn23atKlU1oqnpeWRIycnhwQCAacIr4hYESfeRU1E9PDhQwJASUlJRPR/J0LLn2rcsmUL1a5dm/ttampKS5Ys4fndokULmjBhAi8c8aLBr7/+KlFetmzZQlpaWlRSUkI5OTmkoqLCO+H34cMH0tDQqFQxfv36dRIKhdxOejENGjTgThJXVfblQVo5A8BTrj99+pSUlJToxYsXvG87d+5Mc+bMkeqvPIpxDw8PbuGiIvLUPxMTE27RgqjshIKZmZlMxXhN4mdvb89T5JVHXO/Fii+iMmWWuro6HT58mIiIhgwZQl26dOF9N2PGDO5kBFFZ3g8bNoz7XVpaSrVq1eJ24vv5+VGnTp2knnQgkq4YX758OWcvTp8VK1bw5Ba3V9HR0aSkpMSdZHjz5g0pKyvzTpnLIjMzkwBwp+XlqY+DBw+WUPANHDhQZnvn6OjI23iyc+dO0tPT491icebMGVJUVOROuEhr66XxvdpRaYjLpPh0CVHZqR4AFB0dTURlZbH87QBEZXXExMSEJ5s4/6uKhzztSkBAAKmoqPBOr0iL/82bNwkA7d69mzM7ePAgqampcb/llb2qdK2sb62IPPVs8uTJlS6uiqmqXZUVRnJyMgGgsLAwqd9XjIuPjw+NGTOG5+b69eukqKjIjXEqbpgi+jb1lcFgMBgMxn8v5RXcYWFhpKamRvHx8ZSbm0tKSkrcOKOq74gklcPl3cjy69atW2Rra0u5ubmkr6/PbUqcPn16pfPIiorxkJCQainGR40aRURlcx+RSEQxMTFkZGQkcxwp5uDBg2Rvb09EknEPCQmhNm3acLcnb9iwgZt7hoeH88bdCQkJ3JpTRcW4eH5AVDbuq1+/Pr1584aIiE6dOsX5k56ezpsTSVOMd+7cWa4T40FBQSQUCrl5kaOjI9WuXZtEIhG3Qbtfv35EVDbenT17toR/FctGdfn3P17GYDAYDAaDwWB8Ax4/fozPnz+jS5cu0NLS4v727t2LtLQ0nlsHBwfuf/GbSG/fvgUATJs2DaNHj4abmxuWL18u8a082Nvb895QjYuLw+PHj6Gtrc3Jpa+vj4KCghr5X15+cRzE8gPApUuX0LlzZ9SpUwfa2toYPnw43r9/L/ebUdWVQVNTE0KhkJMhLi4O4eHhvHywsbEBUPYmq6OjIzp37gx7e3sMGDAAu3btQnZ2NgDg06dPSEtLg4+PD+/7wMDAaqeVLDmSkpLw5csXdO7cWe64ViwvAKChoYEGDRrw3Ijtc3Jy8PLlS7Rr147nZ7t27ZCUlCQ1vKSkJLRp04Z7w0zsPi8vD3/99ReePHmCoqIitGzZkrPX0dGBtbV1lWmRl5cHAwMDXnqkp6dz6Vrdsi9POVNVVeWlX0JCAkpKSmBlZcWT4+rVqzWqC2LGjx+PQ4cOwcnJCTNnzkRUVBQv7lXVv48fP+LVq1do1aoV942ysjL3hltV1CR+/v7+CAwMRLt27RAQEID4+HgJf9u0acP9r6+vD2tra668JCUlSS1PqampKCkp4czKy6WgoABjY2OuXHp7eyM2NhbW1tbw9/fHxYsXZca1vEzi9KmsDLds2RKNGjXCnj17AAD79u2DSCRChw4dKvU/NTUVgwcPRv369SEUCmFhYQEAePbsGc9dVfUxKSmJl48V5ZaXpKQkODo6QlNTkzNr164dSktLkZyczJlVbOury9e0o5WhrKyMFi1acL9tbGygq6vL5VVcXBwWLVrE89fX1xevXr2qdh8hT7sCACKRCEZGRlXGv3bt2gDK0rS8WUFBAXJycqole1XpKi/y1jNZ
VNWuygojNjYWSkpKcHFxkSusuLg4BAcH89LH3d0dpaWlSE9Pr/S7mtRXBoPBYDAY/5u4ublh/Pjx+O2336ClpYWOHTti+fLlnP3Lly/x119/AQCEQiE+fvwol7+y/BKP8adPnw43Nzfo6+sDAHr16oV9+/Zxc4bS0lLcvXuXk3Xfvn0oKipCYWEhgoKCqpRB7DY/Px8HDhyAm5sbgLLx9bhx49CrVy94enpCV1dX6venT5/mxqREhPv373PrBBXTIjs7G4aGhhAKhcjNzUVwcLBc6dSpUyecP38er1+/BgBs376d56e2tjYMDAxQWFiIHTt2VOqPUChEkyZNsHfvXgDAw4cPcePGDblk8Pb2RufOnbF06VLcu3cPmZmZePnyJTIyMpCRkYG0tDRcuHABmZmZmDlzJnbt2oWzZ89y3xcWFqK4uFiusCpD+au+ZjAYDAaDwWAw/kvIy8sDAJw5cwZ16tTh2QkEAt5vFRUV7n+x8rG0tBQAsGDBAgwZMgRnzpzBuXPnEBAQgEOHDsHT0xOKiooo2yz7fxQVFUnIUl6ZIpatWbNm2L9/v4RbacoCWZSXXxwHsfwZGRn46aefMH78eCxZsgT6+vq4ceMGfHx8UFhYCA0NjWqHV10Z8vLy4OHhgRUrVkh8Z2JiAiUlJYSFhSEqKgoXL17Epk2bMHfuXERHR3Py7dq1S0LBpaSkVC0ZZcnx5MkTufypqrxUtBe7qVhOfjR5eXkwMTFBRESEhJ14Yl9V2a+IvOVMXV2dp+DPy8uDkpIS7t27J5GfWlpaUmVXVCzbD14+TSvWu+7du+Pp06c4e/YswsLC0LlzZ0ycOBGrV6/+5vWvPDWJ3+jRo+Hu7o4zZ87g4sWLWLZsGdasWQM/P7+vkqUiVdXRpk2bIj09HefOncOlS5fw888/w83NDceOHftm4Y8ePRpbtmzB7NmzERQUhJEjR/LSqiIeHh4QiUTYtWsXTE1NUVpaisaNG6OwsLDSeEmrj38nFdv66vI17WhNycvLw8KFC9G3b18JOzU1tWr7JatdASpPJ2l5WVX+yit7Ven6d1OddrUi6urq1QorLy8PY8eOhb+/v4Rd3bp1q/y2uvWVwWAwGAzG/y7z5s2DpaUl7t27h/3792PatGlo3LgxFBQUoKmpiR07dsDMzAz+/v7w9fWFhoaGXIrfqvwCgJEjR2LmzJk4d+4c942zszNWrlwJT09PFBcXo7CwED179kTz5s3h6+uLBw8ewM7ODnp6enB2dsa9e/cqDd/W1hbt2rVDVlYWevfujUGDBnF2Pj4++PXXXzFp0qRKv7969SpmzJgBZWVlEBGsra2xefNmAICnpydCQkLg5OSEvn37YvLkyTh16hSsra1hZGQEZ2dnPH36VGYaNW7cGAsWLICzszO0tLR44+Ju3bph3759sLa2hoGBAdzc3PDixYtK/dq7dy9GjhyJNWvWoGHDhtXaFLlixQo0a9YMOTk5GDRoEDdnB8rmAV26dEFISAimTZuGs2fPYt68eZg4cSKMjIygoqKC8ePHw8rKCgAwf/58mJqaYty4cXKHzxTjDAaDwWAwGAwGADs7OwgEAjx79kzu01WVYWVlBSsrK0ydOhWDBw9GUFAQPD09YWRkhNevX4OIuAXj2NhYmf41bdoUhw8fRq1atSAUCuWSQXwKsTon0wDg3r17KC0txZo1a7jJyZEjRyT8lsdfed1VpGnTpggNDYWFhQWUlaVPWRQUFNCuXTu0a9cO8+fPh0gkwokTJzBt2jSYmpriyZMnGDp0qNxhSpNVlhwNGzaEuro6Ll++jNGjR1cvknIiFAphamqKyMhIXrmMjIzknfguj62tLUJDQ3nlLDIyEtra2jAzM4Oenh5UVFRw584dTtnx8eNHpKSkVDqZbdq0KV6/fg1lZWXuJK40Kiv7FZGnnEmjSZMmKCkpwdu3b+Hs7CzTPfB/yutXr16hSZMmAKTXOyMjI3h5ecHLywvOzs6YMWMGVq9e
LVf9MzExQXR0NJd+xcXFuHfvHpo2bSqXjNWNn7m5OcaNG4dx48Zhzpw52LVrF08xfuvWLS5vs7OzkZKSAltbWwBl5SMyMpLnX2RkJKysrKq1eUQoFGLgwIEYOHAg+vfvj27duiErK4s7fVGRW7duSaRPVQtDw4YNw8yZM7Fx40YkJibCy8urUrfv379HcnIydu3axaWbvCcWymNra4vo6GgJuWviT3BwMD59+sQpdSMjI6GoqFjlzQzS+J7tqDSKi4tx9+5drn1JTk7Ghw8fuPLTtGlTJCcnw9LSslI/VFRUJGSurI2Vp135Vsgjuyzk7Vu/VT0DKm9XZYVhb2+P0tJSXL16lTuxVBVNmzZFYmJilelTWXmsTn1lMBgMBoPxv0VGRgbvt56eHt6/f8/93rdvn9TvRo8ezZtnOzk5Vel3rVq1KvULAGbMmIEZM2ZImA8aNIinxBajoqKCrVu3VupfeaRt9CzP1atX0bFjR97tRhVZtWoVVq1aJdWuXr16uH//Ps/s0qVLUt26urry5ruNGzfmpdPYsWMxduxY7ve8efMAlMX38OHDPL+WLFkCALCwsMCHDx94djY2Nrh582al8SlPxY3/DRs25G51ksbx48e5/1u2bIkLFy5U6nbRokVyyVAedpU6g8FgMBgMBoMBQFtbG9OnT8fUqVOxZ88epKWlISYmBps2beKuB5VFfn4+Jk2ahIiICDx9+hSRkZG4c+cOp1BwdXVFZmYmVq5cibS0NGzZsoW3W7kyhg4dCkNDQ/Tu3RvXr19Heno6IiIi4O/vz10NVhGRSAQFBQWcPn0amZmZ3Il4WVhaWqKoqAibNm3CkydPEBISwrteCyibFOXl5eHy5ct49+5dpdfnWlhYID09HbGxsXj37h2+fPkilwwTJ05EVlYWBg8ejDt37nBXaY0cORIlJSWIjo7G0qVLcffuXTx79gzHjx9HZmYml84LFy7EsmXLsHHjRqSkpCAhIQFBQUFYu3ZtpWFaWFggOjoaGRkZePfuHUpLS2XKoaamhlmzZmHmzJnclfu3bt3C7t275YqnvMyYMQMrVqzA4cOHkZycjNmzZyM2NhaTJ0+W6n7ChAl4/vw5/Pz88OjRI5w6dQoBAQGYNm0aFBUVoa2tDS8vL8yYMQPh4eF4+PAhfHx8oKioWOkJPzc3N7Rp0wZ9+vTBxYsXkZGRgaioKMydOxd3796VWfYrIk85k4aVlRWGDh2KESNG4Pjx40hPT8ft27exbNkynDlzRuo36urqaN26NZYvX46kpCRcvXoVv/32G8/N/PnzcerUKTx+/BgPHz7E6dOnOdnlqX+TJ0/G8uXLcfLkSTx69AgTJkyQWDiQB3niN2XKFFy4cAHp6emIiYlBeHi4RDovWrQIly9fxoMHD+Dt7Q1DQ0P06dMHAPDLL7/g8uXLWLx4MVJSUrBnzx5s3rwZ06dPl1vOtWvX4uDBg3j06BFSUlJw9OhRGBsbV3otIABs2bIFJ06cwKNHjzBx4kRkZ2dj1KhRlbrX09ND3759MWPGDHTt2pU76VGZWwMDA+zcuROPHz/GlStXMG3aNLnjI8bf3x/nz5/H6tWrkZqais2bN+P8+fPV9mfo0KFQU1ODl5cXHjx4gPDwcPj5+WH48OHcld/y8r3a0cpQUVGBn58foqOjce/ePXh7e6N169aconz+/PnYu3cvFi5ciIcPHyIpKQmHDh3i1SkLCwtcvnwZr1+/5p65kBYPWe3Kt0Ye2WUhb9/6LeqZrHZVVhgWFhbw8vLCqFGjcPLkSa7tqmwT0qxZsxAVFYVJkyYhNjYWqampOHXqFG8Di4WFBa5du4YXL17g3bt3nHll9bVz587cSScGg8FgMBiM/0W6deuGmTNnVrkewfibqfHr5AwGg8FgMBgMxn8ZpaWltH79erK2tiYVFRUyMjIid3d3unr1KhERhYeHEwDKzs7mvrl//z4BoPT0dPry5QsNGjSIzM3NSVVVlUxNTWnSpEmU
n5/Pud+2bRuZm5uTpqYmjRgxgpYsWUIikYiz9/Lyot69e0vI9urVKxoxYgQZGhqSQCCg+vXrk6+vL338+LHS+CxatIiMjY1JQUGBvLy8iIjIxcWFJk+ezHPXu3dvzp6IaO3atWRiYkLq6urk7u5Oe/fulYj3uHHjyMDAgABQQECA1PALCgqoX79+pKurSwAoKCiIiIgA0IkTJ3hudXR0OHsiopSUFPL09CRdXV1SV1cnGxsbmjJlCpWWllJiYiK5u7uTkZERCQQCsrKyok2bNvH8279/Pzk5OZGqqirp6elRhw4d6Pjx45WmVXJyMrVu3ZrU1dW5/JQlBxFRSUkJBQYGkkgkIhUVFapbty4tXbqUiIjS09MJAN2/f58LJzs7mwBQeHg4EREFBQWRjo4OT5YTJ05Q+alaSUkJLViwgOrUqUMqKirk6OhI586d4+ylhRMREUEtWrQgVVVVMjY2plmzZlFRURFnn5OTQ0OGDCENDQ0yNjamtWvXUsuWLWn27NmcG5FIROvWreN94+fnR6ampqSiokLm5uY0dOhQevbsmVxlvyKyypm0tCEiKiwspPnz55OFhQWpqKiQiYkJeXp6Unx8fKVhJSYmUps2bUhdXZ2cnJzo4sWLvHxYvHgx2drakrq6Ounr61Pv3r3pyZMn3Pey6l9RURFNnjyZhEIh6erq0rRp02jEiBFS67KYmsZv0qRJ1KBBAxIIBGRkZETDhw+nd+/eEdH/tVF//vknNWrUiFRVVally5YUFxfHC+PYsWNkZ2fHldlVq1bx7CvmPRGRo6MjV9d37txJTk5OpKmpSUKhkDp37kwxMTGc2/J1XFw+Dxw4QC1btiRVVVWys7OjK1eucO6lta1ERJcvXyYAdOTIkUrTUUxYWBjZ2tqSQCAgBwcHioiIkCpHVfWRiGj37t1kZmZG6urq5OHhQatXr5aaT5WljZj4+Hjq2LEjqampkb6+Pvn6+lJubi5nX1lbX5Hv1Y5KQ1wmQ0NDqX79+iQQCMjNzY2ePn3Kc3f+/Hlq27Ytqaurk1AopJYtW9LOnTs5+z/++IMsLS1JWVmZ698qi0dV7QoRUUBAADk6OkrIWjH+0vJXWrmSJbs86Sqtb5WGrHo2efJkcnFxqfR7edpVWWHk5+fT1KlTycTEhFRVVcnS0pJ+//33StPn9u3b1KVLF9LS0iJNTU1ycHCgJUuWcPY3b94kBwcHEggEVHFJUVp9FYlElY4RGAwGg8FgMBiMH4EC0T/s8bpvgIWFBVxdXWW+OxAcHIyRI0ciPT39b7m2qzooKCggICAACxYs+NGiSODq6gpA9vUQ/xSklYfU1FRMnDgR0dHRyMnJwYkTJ/Dhw4calQdvb29ERERIXAlSU9kYDAaDwWAwGIy/m0+fPqFOnTpYs2YNfHx8frQ4jBoSERGBjh07Ijs7u8rT2/8WQkJCMHXqVLx8+ZK7wprBYPwzYfWVwWAwGAwGg/FvgF2l/gM5e/bsP1Lx/W8kKioKCxYskPu6Qi8vLyQkJGDJkiUICQlB8+bNv6+ADAaDwWAwGAzGP4j79+/j4MGD3JMB4vfYe/fu/YMlYzCAz58/Iy0tDcuXL8fYsWOZko3B+AfD6iuDwWAwGAwG49+E8o8W4HuQnJwMRcV/vs7/7Nmz2LJli1TleH5+PpSV/5nZc/HixR8tggRRUVFYuHAhvL29JU5GVCwP+fn5uHnzJubOnct7K2v48OEYNGgQBAJBtcLetWsXSktLv0p+BoPBYDAYDAbj72b16tVITk6GqqoqmjVrhuvXr8PQ0PBHi8VgYOXKlViyZAk6dOiAOXPm/GhxGAxGFbD6ymAwGAwGg8H4N/HP1Lx+JdVVbH4rPn36BE1NzW/il5qa2jfx53vwT9r9K0+aVywPmZmZACChQFdSUoKSklK1ZVBRUan2NwwGg8FgMBgMxo+kSZMmuHfv3o8Wg/GNcXV1xX/Da2kLFixgt6sxGP8SWH1lMBgMBoPBYPyb+Kpj
1S9evICPjw9MTU0hEAhQr149jB8/HoWFhZybJ0+eYMCAAdDX14eGhgZat26NM2fO8PyJiIiAgoICjhw5giVLlsDMzAxqamro3LkzHj9+zHObmpqKfv36wdjYGGpqajAzM8OgQYPw8eNHzo2FhQW8vb153z18+BCdOnWCuro6zMzMEBgYWOkp33PnzsHZ2RmamprQ1tZGz5498fDhQ54bb29vaGlpIS0tDT169IC2tjZ3/eD169cxYMAA1K1bFwKBAObm5pg6dSry8/N532/ZsgVA2Xvi4j8xCgoKEhOL+/fvo3v37hAKhdDS0kLnzp1x69Ytnpvg4GAoKCggMjIS06ZNg5GRETQ1NeHp6ckphMXcvXsX7u7uMDQ0hLq6OurVq4dRo0ZJTZPyuLq6cu+MA9XLP2k8ffoUEyZMgLW1NdTV1WFgYIABAwZIvNktjtvVq1cxYcIE1KpVC2ZmZliwYAFmzJgBAKhXrx6XluLvy5eHBQsWQCQSAQBmzJgBBQUF7j1xsf8Vwz137hxcXFygra0NoVCIFi1a4MCBA5y9t7e3xJvkq1evRtu2bWFgYAB1dXU0a9YMx44dk5kWDAaDwWAwGAwGg8FgMBgMBoPBYDAYDAbj21PjE+MvX75Ey5Yt8eHDB4wZMwY2NjZ48eIFjh07hs+fP0NVVRVv3rxB27Zt8fnzZ/j7+8PAwAB79uxBr169cOzYMXh6evL8XL58ORQVFTF9+nR8/PgRK1euxNChQxEdHQ0AKCwshLu7O758+QI/Pz8YGxvjxYsXOH36ND58+AAdHR2psr5+/RodO3ZEcXExZs+eDU1NTezcuRPq6uoSbkNCQuDl5QV3d3esWLECnz9/xrZt29C+fXvcv3+fpwAtLi6Gu7s72rdvj9WrV0NDQwMAcPToUXz+/Bnjx4+HgYEBbt++jU2bNuGvv/7C0aNHAQBjx47Fy5cvERYWhpCQEJnp/fDhQzg7O0MoFGLmzJlQUVHBjh074OrqiqtXr6JVq1Y8935+ftDT00NAQAAyMjKwfv16TJo0CYcPHwYAvH37Fl27doWRkRFmz54NXV1dZGRk4Pjx4zJlqQxZ+VcZd+7cQVRUFAYNGgQzMzNkZGRg27ZtcHV1RWJiIpeuYiZMmAAjIyPMnz8fnz59Qvfu3ZGSkoKDBw9i3bp13PWPRkZGEmH17dsXurq6mDp1KgYPHowePXpAS0urUtmCg4MxatQoNGrUCHPmzIGuri7u37+P8+fPY8iQIZV+t2HDBvTq1QtDhw5FYWEhDh06hAEDBuD06dPo2bNnlenBYDAYDAaDwWAwGAwGg8FgMBgMBoPBYDC+LTVWjM+ZMwevX79GdHQ0mjdvzpkvWrSIu7pt+fLlePPmDa5fv4727dsDAHx9feHg4IBp06ahd+/evLefCwoKEBsby13Vraenh8mTJ+PBgwdo3LgxEhMTkZ6ejqNHj6J///7cd/Pnz69S1hUrViAzMxPR0dFo2bIlAMDLywsNGzbkucvLy4O/vz9Gjx6NnTt3cuZeXl6wtrbG0qVLeeZfvnzBgAEDsGzZMonwyivdx4wZA0tLS/z666949uwZ6tatizZt2sDKygphYWEYNmxYlfIDwG+//YaioiLcuHED9evXBwCMGDEC1tbWmDlzJq5evcpzb2BggIsXL3Kn0EtLS7Fx40Z8/PgROjo6iIqKQnZ2Ni5evMjLv8DAQJmyVIas/KuMnj178vITADw8PNCmTRuEhoZi+PDhPDt9fX1cvnyZd+1506ZNcfDgQfTp00fi9HZ5HBwcIBQKMXXqVDRt2rTKtP/48SP8/f3RsmVLRERE8K63l3U9YUpKCq8MTJo0CU2bNsXatWuZYpzBYDAYDAaDwWAwGAwGg8FgMBgMBoPB+Jup0VXqpaWlOHnyJDw8PHhKVTFiZezZs2fRsmVLTikOAFpaWhgzZgwyMjKQmJjI+27kyJG896udnZ0BlF3HDoA7EX7hwgV8/vxZbnnPnj2L1q1b
c0pxoOw0sfjqczFhYWH48OEDBg8ejHfv3nF/SkpKaNWqFcLDwyX8Hj9+vIRZeYXop0+f8O7dO7Rt2xZEhPv378stt5iSkhJcvHgRffr04ZTiAGBiYoIhQ4bgxo0byMnJ4X0zZswY3tXszs7OKCkpwdOnTwH83/vap0+fRlFRUbVlkoas/KuM8ulVVFSE9+/fw9LSErq6uoiJiZFw7+vrW6O3wKtLWFgYcnNzMXv2bIk338unrTTKxyk7OxsfP36Es7Oz1PgwGAwGg8FgMBgMBoPBYDAYDAaDwWAwGIzvS40U45mZmcjJyanyFDBQ9na0tbW1hLmtrS1nX566devyfuvp6QEoUywCZe9HT5s2Df/5z39gaGgId3d3bNmyhfe+eGVyVDwdDkBCttTUVABAp06dYGRkxPu7ePEi3r59y3OvrKwMMzMzCX+fPXsGb29v6OvrQ0tLC0ZGRnBxcQEAmbJKIzMzE58/f640LUtLS/H8+XOeuay0dHFxQb9+/bBw4UIYGhqid+/eCAoKwpcvX6otn7xhVkZ+fj7mz58Pc3NzCAQCGBoawsjICB8+fJCaXvXq1auxjNUhLS0NAGSWc2mcPn0arVu3hpqaGvT19WFkZIRt27bVKP8ZDAaDwWAwGAwGg8FgMBgMBoPBYDAYDMbXUeOr1L8HlZ0CLn9t9Zo1a+Dt7Y1Tp07h4sWL8Pf3x7Jly3Dr1i2pSurqUFpaCqDsnXFjY2MJe2VlfnIJBALeVfBA2enuLl26ICsrC7NmzYKNjQ00NTXx4sULeHt7c2F8b2SlpYKCAo4dO4Zbt27hzz//xIULFzBq1CisWbMGt27dqvLd7ZqGWRl+fn4ICgrClClT0KZNG+jo6EBBQQGDBg2Sml7S3ob/J3H9+nX06tULHTp0wNatW2FiYgIVFRUEBQXhwIEDP1o8BoPBYDAYDAaDwWAwGAwGg8FgMBgMBuN/jhopxo2MjCAUCvHgwYMq3YlEIiQnJ0uYP3r0iLOvCfb29rC3t8dvv/2GqKgotGvXDtu3b6/0fWyRSMSdBi9PRdkaNGgAAKhVqxbc3NxqJFtCQgJSUlKwZ88ejBgxgjMPCwuTcCvrOm4xRkZG0NDQqDQtFRUVYW5uXiN5W7dujdatW2PJkiU4cOAAhg4dikOHDmH06NE18q8mHDt2DF5eXlizZg1nVlBQgA8fPsjth7xpWR3E5eHBgwewtLSU+7vQ0FCoqanhwoULEAgEnHlQUNA3l5HBYDAYDAaDwWAwGAwGg8FgMBgMBoPBYMimRlepKyoqok+fPvjzzz9x9+5dCXvxCeEePXrg9u3buHnzJmf36dMn7Ny5ExYWFrCzs6tWuDk5OSguLuaZ2dvbQ1FRscorwHv06IFbt27h9u3bnFlmZib279/Pc+fu7g6hUIilS5dKfXc7MzNTpoziU9PlT0kTETZs2CDhVlNTEwBkKoCVlJTQtWtXnDp1ChkZGZz5mzdvcODAAbRv3x5CoVCmbOXJzs6WOMnt5OQEAF91nXpNUFJSkpBl06ZNKCkpkdsPedOyOnTt2hXa2tpYtmwZCgoKeHZVnYJXUlKCgoICT/6MjAycPHnym8nGYDAYDAaDwWAwGAwGg8FgMBgMBoPBYDDkp8ZXqS9duhQXL16Ei4sLxowZA1tbW7x69QpHjx7FjRs3oKuri9mzZ+PgwYPo3r07/P39oa+vjz179iA9PR2hoaES15DL4sqVK5g0aRIGDBgAKysrFBcXIyQkBEpKSujXr1+l382cORMhISHo1q0bJk+eDE1NTezcuRMikQjx8fGcO6FQiG3btmH48OFo2rQpBg0aBCMjIzx79gxnzpxBu3btsHnz5ipltLGxQYMGDTB9+nS8ePECQqEQoaGhUt/ZbtasGQDA398f7u7uUFJSwqBBg6T6GxgYiLCwMLRv3x4TJkyAsrIyduzYgS9fvmDlypXyJB+PPXv2YOvWrfD09ESDBg2Qm5uLXbt2QSgUokePHtX272v4
6aefEBISAh0dHdjZ2eHmzZu4dOkSDAwM5PZDnJZz587FoEGDoKKiAg8PD05hXhOEQiHWrVuH0aNHo0WLFhgyZAj09PQQFxeHz58/Y8+ePVK/69mzJ9auXYtu3bphyJAhePv2LbZs2QJLS0teeWMwGAwGg8FgMBgMBoPBYDAYDAaDwfhvwt/fH3/88QeePn2K+/fvc4cyvwYFBQVkZ2dDV1f3q/36XmRkZOD8+fMYN27cdw3n9OnTWL16NSIiImS67d+/P3766Sd4e3vXODxXV1dMmTIFffr0qbEf35vY2Fg8evSoUh1reWqsGK9Tpw6io6Mxb9487N+/Hzk5OahTpw66d+8ODQ0NAEDt2rURFRWFWbNmYdOmTSgoKICDgwP+/PNP9OzZs9phOjo6wt3dHX/++SdevHgBDQ0NODo64ty5c2jdunWl35mYmCA8PBx+fn5Yvnw5DAwMMG7cOJiamsLHx4fndsiQITA1NcXy5cuxatUqfPnyBXXq1IGzszNGjhwpU0YVFRX8+eef3Nvnampq8PT0xKRJk+Do6Mhz27dvX/j5+eHQoUPYt28fiKjSTGvUqBGuX7+OOXPmYNmyZSgtLUWrVq2wb98+tGrVSo7U4+Pi4oLbt2/j0KFDePPmDXR0dNCyZUvs378f9erVq7Z/X8OGDRugpKSE/fv3o6CgAO3atcOlS5fg7u4utx8tWrTA4sWLsX37dpw/fx6lpaVIT0//KsU4APj4+KBWrVpYvnw5Fi9eDBUVFdjY2GDq1KmVftOpUyfs3r0by5cvx5QpU1CvXj2sWLECGRkZTDHOYDAYDAaDwWAwGAwGg8FgMBgMBuO/lv79+2PmzJlo3779jxblbyUjIwPbt2//7opxhiSxsbE4efKkXIpxBarqTmgGg8FgMBgMBoPBYDAYDAaDwWAwGAwGg8GoBhYWFjh58qTcJ8ZTU1MxZcoUvH37Fl++fMGYMWMwadIkAPwT43fv3oW/vz/y8vKgpqaGdevWoV27dsjIyICTkxNGjx6NixcvoqSkBBs2bICbmxtn5+fnhzNnziA3NxfBwcE4duwYwsPDUVxcjEOHDqFx48YAgJCQEGzevBlFRUXQ0tLCpk2b4OjoiODgYOzbtw9GRkZ48OABBAIBjhw5gvr168PGxgZPnz6FtbU16tatiz/++IMXv4SEBIwfPx6fP39GQUEBhgwZgt9++w0AsGDBAiQlJeHz589IS0uDsbExjh07Bn19fRQVFWHy5MkICwuDnp4enJ2dce/ePaknxh89eoRRo0bh48ePaNiwIT5//owhQ4bA29sbubm5mDZtGuLi4lBQUIDWrVtj8+bNUFVVhaurK+zt7XHr1i1kZ2ejd+/eWL16NRQUFHgnxt++fYtx48YhNTUVRAQ/Pz+MHTsWx44dw86dO3Hx4kUAQElJCerXr49z587h7du3mDRpEpydnREZGQkiwv79+7F27Vrcu3cPGhoaOH78OOrUqQMAWL16NY4cOYLi4mLUqlULO3bsgEgkqjSNiouL0bx5c3z8+BH16tVD69atsX379krLWY3eGGcwGAwGg8FgMBgMBoPBYDAYDAaDwWAwGIyvpaSkBIMHD8aaNWtw584d3Lp1Czt37sSdO3d47goLC9G3b18EBAQgPj4ea9euRb9+/ZCXlwcA+PjxI2xtbREfH4/du3djyJAhyM3N5eyaNWuGmJgYzJ49G+7u7ujVqxdiY2Ph5eWFhQsXAgAiIyNx8OBBXLt2DTExMViyZAmGDBnCyXDnzh0sXboUCQkJcHNzw4oVKwAA27dvh7W1NWJjYyWU4kDZRoHLly8jJiYG9+7dQ2hoKG7dusXZR0dHIzg4GImJiZxCGAB27tyJ5ORkPHz4EDdu3EBMTEyl6Th8+HD4+Pjg4cOHWLx4Ma5evcrZ/fLLL3B2dsbt27cRFxeH0tJSbNiwgbNPTExEVFQU4uPjcfXqVRw8eFDCfz8/P1hbWyMhIQFXrlxBYGAgbt26BU9P
T6SkpCA5ORkA8Mcff8DS0hJ2dnYAyhT2o0ePRnx8PPr06YNOnTph9uzZSEhIQPPmzbF+/XoAwIEDB5CcnIybN28iJiYGQ4cOxYQJE6pMo1q1amHRokXo2LEjYmNjq1SKA0wxzmAwGAwGg8FgMBgMBoPBYDAYDAaDwWAwfhBixe+gQYPg5OSEtm3bIjc3F4mJiRLuFBUVuad427dvj9q1ayM2NhYAoKyszL2n3bp1a5iamuL+/fsAADU1Ne6d7ObNm0NLSwsdO3YEALRs2RKpqakAgFOnTiEuLg6tWrXiTplnZWUhPz8fANCmTRvuSeI2bdogLS1Nrjjm5+dj9OjRsLe3R+vWrfH06VNObgDo1q0bDAwMJPy9fPkyRowYAVVVVaiqqmLUqFFS/c/JyUFsbCwXf3t7e9519idPnsSqVavg5OSEJk2a4Pr163j8+DFnP2LECKioqEBDQwPDhg3DpUuXJMK4dOkSxo4dCwCoVasW+vbti0uXLkFJSQkTJkzAli1bAABbtmzhTvsDgKWlJZo1awagLO0tLS1hY2MDgJ/2J0+exKVLl9CsWTM4OTlh5cqVePbsmcw0qg41fmOcwWAwGAwGg8FgMBgMBoPBYDAYDAaDwWAwvgYigr6+Pk9RLC8KCgpy2QsEAs5MSUkJampqvN/FxcWcLF5eXli6dKlU/yr7Tha//vorDA0Ncf/+fSgrK6Nv374oKCiotr+y4luZWyJCaGgorKysqv2tPG58fX1hZ2eHESNG4PHjx+jVqxdnVzFuVaX9nDlzMGbMGKnh1TTty8NOjDMYDAaDwWAwGAwGg8FgMBgMBoPBYDAYjB+CtbU1hEIhgoKCOLPHjx8jKytLwl1paSnCwsIAAFFRUXj9+jX3jnlxcTFCQkIAALdv38bLly/lfuNcTK9evbBv3z7upHJpaSnu3r0r8zuhUIiPHz9Wap+dnQ0zMzMoKysjOTmZi4Ms3NzcsG/fPhQVFaGwsJCXRhXDb9KkCfbu3QsA3NXrYvr06YMVK1ZwyuTs7GzeiXFxGPn5+Thw4ADc3NykyrJr1y4AQGZmJo4fP44uXboAAPT09NC7d294enpi7NixUFJSkit+5enTpw+2b9/O5XtRURF34r8qZKV9eZhinMFgMBgMBoPBYDAYDAaDwWAwGAwGg8FgfDVjx46FmZkZ/vrrL7i7u8PS0pKzGz16tNT3t5WVlXH69GkcP34cDg4OaNSoEXx8fLjry8Woqqri+PHjCAgIgIODA6ZMmYJjx45BS0sLAKCjo4MHDx7A0dERI0eOxIEDB6CtrV0t+Z2dnbFy5Up4enrC0dERjRo1wqFDh2R+J5a7cePGvNPSYn777TcEBQXBwcEBs2fPRqdOneSSx9fXFw0bNoSdnR3at29fpaJ/79692LlzJxo3bozffvsNHTp04OzWrVsHdXV1ODk5wcHBAZ07d0ZGRgZnb2tri3bt2sHe3h7Ozs4YNGiQhP8bN25EUlIS7O3t0bFjR8ydOxetWrXiyZqZmQlfX1+54laRoUOHwtvbGx07doSjoyOcnJxw5coVmd917twZX758gYODA8aNG1elWwUiohpJx2AwGAwGg8FgMBgMBoPBYDAYDAaDwWAwGD+YjIwMODk54cOHDz9alH8drq6umDJlCvcGe01ZvXo1kpKSsHv37m8j2HegWifGg4ODoaCgwNtB8HeyYMECiTvtLSwsuIfkASAiIgIKCgqIiIj4e4X7B1IxbX4k/yRZGAwGg8FgMBgMBoPBYDAYDAaDwWAwGAzGt6FRo0YIDg7GwoULf7QoVfK3XaWemJiIBQsW/DClOoPxIzhw4ADWr18vYf727Vv89ttvaNy4MTQ1NdGgQQMEBgZybzswGAwGg8FgMBiMH8eCBQuq/Q7dP4GMjAwoKCggNjb2u/j/b00XBoPBYDAYDAaD8d+PhYUFOy1eQyIiIr76tPjDhw/x4MEDmJmZfRuhvhPVUowPHz4c+fn5EIlE1Q4oMTER
Cxcu/O6K8Q4dOiA/P593bz6D8aOoTDG+detW7NmzB3369MH69evRvHlzzJs3D4GBgX+/kAwGg8FgMBgMxg/A1dW1WnbXrl2Dh4cHTE1NoaCggJMnT9YoXAUFBe5PWVkZdevWxbRp0/DlyxfOzfTp03H58uUa+f9Px9XVlZcG4j95Nun+m9KluuWLwWAwGAwGg8FgMBj//VRLMa6kpAQ1NTWJ68z/SSgqKkJNTQ2Kin/bYXgGo9p4enoiNTUVgYGB8PX1xeHDh9GmTRvs37//R4vGYDAYDAaDwWB8NyIjI3Hp0iWe2aVLlxAVFVWlHQB8+vQJjo6O2LJly1fLERQUhFevXiE9PR1bt25FSEgIb5OqlpYWDAwMvjqcfyq+vr549eoV709ZWVnmd7LSpbCw8FuKWW1SUlJw6NAhnllMTAxOnz5dpR2DwWAwGAwGg8H4ewkICICNjQ1atWpVqRsXFxdYWlqCiHjmCgoK3+RkeI8ePZCcnPzV/nzvMLZv345Vq1Zxv318fGBnZwdPT0/88ccfmDp16teKKZUPHz5g+fLlNfo2NjZWYv5VVb59bTq5urrKvXn+q98Yt7CwwE8//YQbN26gZcuWUFNTQ/369bF3717edwMGDAAAdOzYkduRXv4d8HPnzsHZ2RmamprQ1tZGz5498fDhw+qIB6DyN8a3bNmC+vXrQ11dHS1btsT169fh6uoqsVP8y5cvCAgIgKWlJQQCAczNzTFz5kze6QGgLAMnTZqEkydPonHjxhAIBGjUqBHOnz8vIdOLFy/g4+MDU1NTCAQC1KtXD+PHj+ctGnz48AFTpkyBubk5BAIBLC0tsWLFCpSWlsqMMxEhMDAQZmZm0NDQQMeOHStNO1nhFBUVQV9fHyNHjpT4NicnB2pqapg+fXq100saT548wYABA6Cvrw8NDQ20bt0aZ86c4bkR5+fhw4fx66+/wtjYGJqamujVqxeeP3/Oc+vq6orGjRsjPj4eLi4u0NDQgKWlJY4dOwYAuHr1Klq1agV1dXVYW1tLLLoBZXk1atQo1K5dm8vT33//XapMR44cwZIlS2BmZgY1NTV07twZjx8/5slz5swZPH36lCvzFhYWAABHR0eoqanx/FVTU/vhC0kMBoPBYDAYDMb3pG7dutixYwcmTJiA3NxcTJgwATt37oS5uXmVdgDQvXt3BAYGwtPT86vl0NXVhbGxMczNzfHTTz+hd+/eiImJ4ewrXhl+584ddOnSBYaGhtDR0YGLiwvPPRFhwYIFqFu3LgQCAUxNTeHv719p+Glpaejduzdq164NLS0ttGjRQmJ+YmFhgaVLl2LUqFHQ1tZG3bp1sXPnTp6b27dvo0mTJlBTU0Pz5s1x//59ueKvoaEBY2Nj3h8AzJo1C1ZWVtDQ0ED9+vUxb948FBUVVZou3t7e6NOnD5YsWQJTU1NYW1tz17kfP34cHTt2hIaGBhwdHXHz5k2eDDdu3ICzszPU1dVhbm4Of39/fPr0ibPfunUrGjZsCDU1NdSuXRv9+/fn7I4dOwZ7e3uoq6vDwMAAbm5u+PTpEwwNDREeHo6ff/4ZHz58wPz58zFnzhzUr1+/SjsGg8FgMBgMBoPx97Jy5UqEh4cjOjpaqn1qaipSU1MhEAhw9erVGoVRUlJSpf3Zs2dhbW1dI7/l5VuEMW7cOMyYMQMA8ObNGxw6dAgJCQk4ceIEevXqhXXr1n0LUSX41orxqvg78kLMNzlW/fjxY/Tv3x9dunTBmjVroKenB29vb04526FDB25R4Ndff0VISAhCQkJga2sLAAgJCUHPnj2hpaWFFStWYN68eUhMTET79u2/ydXr27Ztw6RJk2BmZoaVK1fC2dkZffr0wV9//cVzV1pail69emH16tXw8PDApk2b0KdPH6xbtw4DBw6U8PfGjRuYMGECBg0ahJUrV6KgoAD9+vXD+/fvOTcvX75Ey5YtcejQIQwcOBAbN27E8OHDcfXqVXz+/BkA8PnzZ7i4
uGDfvn0YMWIENm7ciHbt2mHOnDmYNm2azPjNnz8f8+bNg6OjI1atWoX69euja9euvEUFecNRUVGBp6cnTp48KaGkPXnyJL58+YJBgwbVKL3K8+bNG7Rt2xYXLlzAhAkTsGTJEhQUFKBXr144ceKEhPslS5bgzJkzmDVrFvz9/REWFgY3Nzfk5+fz3GVnZ+Onn35Cq1atsHLlSggEAgwaNAiHDx/GoEGD0KNHDyxfvhyfPn1C//79kZuby5OpdevWuHTpEiZNmoQNGzbA0tISPj4+Uq9DX758OU6cOIHp06djzpw5uHXrFoYOHcrZz507F05OTjA0NOTKvDR/AOCPP/5AREQERo0aVWW6MRgMBoPBYDAY/2bMzc1x9OhR6OjoICYmBrq6ujhy5AjMzc2rtPuepKSk4MqVK1WeVMjNzYWXlxdu3LiBW7duoWHDhujRowc3nwgNDcW6deuwY8cOpKam4uTJk7C3t6/Uv7y8PPTo0QOXL1/G/fv30a1bN3h4eODZs2c8d2vWrOEU3hMmTMD48eO5XfR5eXn46aefYGdnh3v37mHBggW8Tcw1QVtbG8HBwUhMTMSGDRuwa9cumYssly9fRnJyMsLCwninr+fOnYvp06cjNjYWVlZWGDx4MHdde1paGrp164Z+/fohPj4ehw8fxo0bNzBp0iQAwN27d+Hv749FixYhOTkZ58+f555Le/XqFQYPHoxRo0YhKSkJERER6Nu3L4gI+vr62LFjB9zc3BAXF4e0tDRcuHABdnZ2VdoxGAwGg8FgMBiMb8+FCxfQtGlTODg4wMXFBYmJiQCAtm3boqCgAF27dq10Q/Hvv/+OYcOGYfTo0di9e7dc4QUHB6Njx47o168f7O3tcfv2baxduxYtWrSAk5MTWrRowduwa2FhgdjYWABAYGAgbG1t4eTkBCcnJzx9+hRA2SbpTp06oXnz5mjSpAmOHj0qNez//Oc/sLOzg5OTE+zt7TmFf/kwHj16hDZt2qBRo0bo27cvunbtiuDgYABlm47Hjh2Lzp07w8rKCn379uV0dAsWLMCUKVPw4cMHdOzYEQUFBWjWrBmWL1+O4OBg3tvgQUFBcHJygqOjI5o3b46MjAwUFxfD3d0dzZs3R6NGjTBkyBBOfxgREYHGjRtjwoQJcHR0RKNGjXD37l0AZQr53NxcODk5oXnz5lLjHRISglatWqFp06bo0KED4uLi8PbtW8yfPx/h4eFwcnLCuHHjOPdbt25Fy5YtUa9ePQQFBUnNixcvXqB///6wt7eHg4MD5s2bB6Ds6eJWrVqhSZMmcHR0xJ9//llleagUqgZBQUEEgNLT0zkzkUhEAOjatWuc2du3b0kgENAvv/zCmR09epQAUHh4OM/P3Nxc0tXVJV9fX57569evSUdHh2ceEBBAFUUWiUTk5eXF/Q4PD+eF8+XLFzIwMKAWLVpQUVER5y44OJgAkIuLC2cWEhJCioqKdP36dV4Y27dvJwAUGRnJmQEgVVVVevz4MWcWFxdHAGjTpk2c2YgRI0hRUZHu3LlDFSktLSUiosWLF5OmpialpKTw7GfPnk1KSkr07NkziW/FvH37llRVValnz56cf0REv/76KwHgpY284Vy4cIEA0J9//slz16NHD6pfvz73uzrpVTGfpkyZQgB43+bm5lK9evXIwsKCSkpKiOj/8rNOnTqUk5PDuT1y5AgBoA0bNnBmLi4uBIAOHDjAmT169IgAkKKiIt26dYszF8cxKCiIM/Px8SETExN69+4dLz6DBg0iHR0d+vz5M08mW1tb+vLlC+duw4YNBIASEhI4s549e5JIJKKquHLlCgkEAurZsycVFxdX6ZbBYDAYDAaDwfg389dff9HAgQNp3Lhx1LRpUxo3bhwNHDiQ/vrrryrtKgKATpw4USMZAJCamhppamqSQCAgAPTTTz9RYWEh5yYgIIAcHR0r9aOkpIS0tbW5OdOaNWvIysqK50d1adSoEW8uKRKJaNiwYdzv
0tJSqlWrFm3bto2IiHbs2EEGBgaUn5/Pudm2bRsBoPv371cajouLC6moqJCmpib3N23aNKluV61aRc2aNeN+V0wXLy8vql27Nm9elJ6eTgDoP//5D2f28OFDAkBJSUlEVDb3GjNmDC+s69evk6KiIuXn51NoaCgJhULeHFDMvXv3CABlZGRI2GVlZdH48eNpwIAB5OjoSPPmzaNu3brRo0ePqrRjMBgMBoPBYDAY35Y3b96Qvr4+xcfHExHRvn37yNbWltNjAaDs7Gyp3xYXF5OJiQklJSVRZmYm6ejo0IcPHzj7yr4NCgoidXV13hj/7du33P83b94ka2tr7rdIJKL79+9TVlYWTwf06dMnys/Pp+zsbHJycqKXL18SEVFmZiaZm5tLnaMKhULOXWFhIeXm5vLCICJq3rw5/f7770RElJiYSAKBgNNReXl5UcuWLenTp09UXFxMbdu25XRdAQEBNHnyZCIqm2/p6Ojw4ty7d28iKtNdWVhYcHJ8+vSJPn36RKWlpZzeq7S0lMaNG0fLli3jvlFSUuL0Z9u2baOuXbtKDasiN27coO7du1NBQQEREV27do3s7Owk5BIDgFavXk1ERElJSaSlpcXpbcunk6urKy1dupT7TpyH796948pPeno61a5dmwvbxcVF7jWCb3Ji3M7ODs7OztxvIyMjWFtb48mTJzK/DQsLw4cPHzB48GC8e/eO+1NSUkKrVq0QHh7+VbLdvXsX79+/h6+vL+/NtKFDh0JPT4/n9ujRo7C1tYWNjQ1Plk6dOgGAhCxubm5o0KAB99vBwQFCoZCLd2lpKU6ePAkPDw+puynEb7UfPXoUzs7O0NPT44Xr5uaGkpISXLt2rdL4Xbp0CYWFhfDz8+O9/T5lyhQJt/KG06lTJxgaGuLw4cPct9nZ2QgLC+OdBK9uepXn7NmzaNmyJdq3b8+ZaWlpYcyYMcjIyOB2DokZMWIEtLW1ud/9+/eHiYkJzp49y3OnpaXFnWgHAGtra+jq6sLW1pZ3AkT8vziviAihoaHw8PAAEfHi4+7ujo8fP/KuSgSAkSNHQlVVlfstrgPylHsxmZmZ6NevH5ycnHD06FEoKSnJ/S2DwWAwGAwGg/FvIyMjA6NHj8a2bdugra2Nbdu2YfTo0cjIyKjS7luzbt06xMbGIi4ujnt/evjw4ZW6f/PmDXx9fdGwYUPo6OhAKBQiLy+PO+E9YMAA5Ofno379+vD19cWJEye409HSyMvLw/Tp02FrawtdXV1oaWkhKSlJ4sS4g4MD97+CggKMjY3x9u1bAEBSUhIcHBx4TzS1adNGrvgPHToUsbGx3N+cOXMAAIcPH0a7du1gbGwMLS0t/PbbbxIyVcTe3p43L5Imu4mJCQBwssfFxSE4OBhaWlrcn7u7O0pLS5Geno4uXbpAJBKhfv36GD58OPbv38/duObo6IjOnTvD3t4eAwYMwK5du5Cdnc357+zsjCNHjkBXVxeLFi3CkiVLkJKSUqUdg8FgMBgMBoPB+LZER0fD3t6eu0lr6NChePnyJV68eCHz27Nnz8LCwgI2NjYwNDSEm5sbDhw4IFe4bdu25V3Jff/+fbi4uKBx48YYN24ckpOTJW4iFgqFaNiwIYYNG4YdO3YgKysLampqiIqKwpMnT9C9e3c4OTnBzc0NAKS+hd25c2cMHz4cGzZsQHp6OrS0tHj2OTk5iI2NxYgRIwAAtra2PP0YAHh6ekJDQwNKSkpo2bIl0tLS5IqzmDNnzmD48OHc/EtDQwMaGhogIqxbtw5NmjSBg4MDzpw5w53OBgBLS0tOZ9amTRu5wz116hTi4uLQqlUrODk5wc/PD1lZWRLpWx7xrcs2NjZQVlbG69evefZ5eXm4ceMGfvnlF87MyMgIAJCeno7u3bujcePG6NOnD7KyspCeni6XrOVRlu1ENnXr1pUw09PT4yanVZGamgoAnDK1IkKh8KtkE193YGlpyTNXVlbm3nsuL0tSUhKXyBUR
T+LFyIp3ZmYmcnJy0Lhx4yplTE1NRXx8vNzhlkccv4YNG/LMjYyMJBT/8oajrKyMfv364cCBA/jy5QsEAgGOHz+OoqIinmK8uulVUW5pVxWKr9d/+vQpL90qxk9BQQGWlpYSi2RmZma8DQIAoKOjI3H9oo6ODgDw8urDhw/YuXOnxLt9lcWnYv6L01ueci/mzJkzyM7Oxrp166Curi73dwwGg8FgMBgMxr+Rdu3aSZiJFxekUZXd12BsbMzNEa2trZGbm4vBgwcjMDBQYu4IAF5eXnj//j02bNgAkUgEgUCANm3acFfbmZubIzk5GZcuXUJYWBgmTJiAVatW4erVq1BRUZHwb/r06QgLC8Pq1athaWkJdXV19O/fX+I5q4rfKigooLS09Kvjr6OjIxHPmzdvYujQoVi4cCHc3d2ho6ODQ4cOYc2aNVX6pampKdW8vOziOZpY9ry8PIwdO1bqtYl169aFqqoqYmJiEBERgYsXL2L+/PlYsGAB7ty5A11dXYSFhSEqKgoXL17Epk2bMHfuXERHR8Pa2lriXbqmTZuiadOmAFClHYPBYDAYDAaDwfhnsHv3bqSkpHA6vPz8fGRkZGD8+PEyvy2vkC4sLETfvn0RHh6OFi1aICcnBzo6Ovjy5QtPH6OkpIRbt24hKioKERERaN26NQ4ePAgiQqNGjRAVFSUz3NDQUNy7dw8RERHo0aMHAgMDeYc4pVFRl1V+07OSklKVm62rw4EDB3DlyhVcvXoVQqEQGzduxJUrV746XCKCl5cXli5dKrcsXxPHQYMGYfny5ejfvz8AQF9fHwUFBXJ/L+abKMYrO+VadjK+asQT45CQEBgbG0vYlz/l/b0pLS2Fvb091q5dK9W+onL1a+JdMdwuXbpg5syZUu2trKyq5d+3CGfQoEHYsWMHzp07hz59+uDIkSOwsbGBo6Mjz7/qpNffQWV5IiuvxOVw2LBh8PLykuq2/IkHefyUB/F79OIdPAwGg8FgMBgMxv8KERERNbL7HojH9pXtbI+MjMTWrVvRo0cPAMDz58/x7t07nht1dXV4eHjAw8MDEydOhI2NDRISEqQqXiMjI+Ht7Q1PT08AZYri6p6Mt7W1RUhICAoKCrjFhVu3blXLj/JERUVBJBJh7ty5nJl4I/a3pmnTpkhMTJS6CUGMsrIy3Nzc4ObmhoCAAOjq6uLKlSvo27cvFBQU0K5dO7Rr1w7z58+HSCTCiRMnMG3aNO77f1L5YjAYDAaDwWAw/tdo3bo1EhIS8ODBAzRu3BiHDh1CnTp1UKdOnSq/e/PmDS5fvoznz59DV1cXQJn+xszMDHFxcTwdlSwKCgpQWFjIHXLctGmTVHe5ubnIzc2Fs7MznJ2d8fDhQ9y/fx/Dhw9Heno6Ll26xG3ajo2NhZ2dHe/WrOLiYmRkZKB58+Zo3rw53r17h9u3b/MU40KhEI6Ojti3bx+8vLyQnJyMGzducCeovwUeHh7w9vbG+PHjYWJiwt26lZ2dDUNDQwiFQuTm5iI4OFjqwd+KCIVC5Ofno7CwUOotYb169cLQoUMxbtw41K1bF6WlpYiJiUHz5s0hFArx8ePHasdBS0sLHTp0wJo1a7ibzTIzM2FkZITs7GzUq1cPALBv375qHVItz9+mda6480GM+CryWrVqfZfTACKRCADw+PFjdOzYkTMXF9Tyys4GDRogLi4OnTt3rlTe6mBkZAShUIgHDx5U6a5BgwbIy8urUfzF8UtNTUX9+vU588zMTIlCUZ1wOnToABMTExw+fBjt27fHlStXeAskYv9qml4ikUjqdROPHj3ixUuM+GYBMUSEx48fSyira4qRkRG0tbVRUlLyTcuhrHRxcnLCxIkTv/pmBAaDwWAwGAwG47+dvLw8PH78mPudnp6O2NhY6Ovrc5P6OXPm4MWLF9i7d2+Vfn348AGvX79GaWkpUlNTsWjRIlhZWXE3WFWkYcOGCAkJQfPmzZGTk4MZM2bwThgEBwejpKQE
rVq1goaGBvbt2wd1dXWJeU15/44fPw4PDw8oKChg3rx51T4JPmTIEMydOxe+vr6YM2cOMjIysHr16mr5UVGmZ8+e4dChQ2jRogXOnDmDEydO1Ni/qpg1axZat26NSZMmYfTo0dDU1ERiYiLCwsKwefNmnD59Gk+ePEGHDh2gp6eHs2fPorS0FNbW1oiOjsbly5fRtWtX1KpVC9HR0cjMzKw07xgMBoPBYDAYDMbfj5GREfbv348RI0aguLgYenp6OHr0qEydyZ49e9C1a1dOKQ4AioqKGDRoEHbv3o2NGzfKLYNQKERgYCBatmwJQ0PDSk9wf/z4Ef3798enT5+goKCAhg0bwsvLCzo6Ojhz5gymT5+OX375BUVFRahbty5OnjzJ+76kpASjRo1CVlYWlJWVYWRkhKCgIIlw9u7di1GjRmHVqlWwtLREixYtePH8Wjp06ICAgAC4u7tDQUEBqqqqOHbsGEaMGIFTp07B2toaRkZGcHZ2lmsTtL6+PkaMGAEHBwdoaWnh7t27PHtnZ2esXLkSnp6eKC4uRmFhIXr27InmzZujc+fOWL16NRwcHNC2bVts375d7niEhITAz88PjRo1goqKCnr37o2FCxdiw4YN6N+/P3R1ddGpUye5lPtSkesl8v9PUFAQAaD09HTOTCQSUc+ePSXcuri4kIuLC/f73LlzBEDi8fOPHz+SUCgkFxcXKiwslPBH/Kg6UdkD8xVFFolE5OXlxf0ODw8nABQeHk5ERF++fCEDAwNq0aIF94g7EVFwcDAB4MkoNtuxY4eEHJ8/f6a8vDzuNwCaOHGihLuK8owYMYIUFRXpzp07Em7Fj8QvWLCAAND58+cl3GRnZ/Pkrsjbt29JRUWFevbsyflHRPTrr78SAJ4s1Q3Hz8+PNDU1ae3atQSAEhMTefbVSa+K6TJlyhQCQFFRUZxZXl4e1a9fnywsLKikpISI/i8/69SpQzk5OZzbI0eOEABav349Z+bi4kKNGjWSkKWyMloxD729vUlVVZUSEhIk3JYvh2KZjh49ynOTnp5OACgoKIgzGzhwIOnq6kr4JyYzM5OSkpKkln0Gg8FgMBgMBoPxf4jH4RX/ys8zvLy8eHM8aZT/VkFBgUxMTGjgwIGUlpbGuQkICCBHR0fud0xMDDVv3pzU1NSoYcOGdPToURKJRLRu3ToiIjpx4gS1atWKhEIhaWpqUuvWrenSpUuVypCenk4dO3YkdXV1Mjc3p82bN5OLiwtNnjyZc1PefzGOjo4UEBDA/b558yY5OjqSqqoqOTk5UWhoKAGg+/fvVxp2xXDKM2PGDDIwMCAtLS0aOHAgrVu3jnR0dCpNFy8vL+rdu7dE3CrKkJ2dzZunExHdvn2bunTpQlpaWqSpqUkODg60ZMkSIiK6fv06ubi4kJ6eHqmrq5ODgwMdPnyYiIgSExPJ3d2djIyMSCAQkJWVFW3atKnS+DIYDAaDwWAwGAzGP4Hc3FxOj/fkyROqXbs2PXv27AdL9b+HApH89z4HBwdj5MiRSE9P5+72t7CwQOPGjXH69GmeW1dXVwD/d0XZ69evYWZmhhYtWmDcuHEQCATo1KkTatWqhQMHDmD48OGws7PDoEGDYGRkhGfPnuHMmTNo164dNm/eDABYsGABFi5cyLuq2sLCAq6urggODubC69ixI8LDwzkZNm/eDD8/Pzg7O+Pnn39GRkYGgoODoaurC3Nzc4SHhwMou47Bw8MD586dw8CBA9GuXTuUlJTg0aNHOHLkCC5cuIDmzZsDKDsJPHHiRE62yuR58eIFd6pgzJgxsLW1xatXr3D06FHcuHEDurq6+Pz5M5ydnREfHw9vb280a9YMnz59QkJCAo4dO4aMjAwYGhpWmi+//vorli1bhh49eqBHjx64f/8+zp07x+3OEMtS3XAiIyPRvn17aGtrw8LCAvHx8bxwq5NeFdPlzZs3cHR0REFBAfz9/aGvr489e/YgLi4OoaGh3JWC4vy0t7eH
goICRo4ciTdv3mD9+vXc1RkaGhpcmXv37p3ECf3KymjFPHzz5g1atWqFzMxM+Pr6ws7ODllZWYiJicGlS5eQlZXFk+no0aPcWwYAkJGRgXr16iEoKAje3t4AgFWrVmHmzJmYOnUqWrRoAS0tLXh4eHDfiMt0+TrFYDAYDAaDwWAwGAwGg8FgMBgMBoPB+O/h4sWLmDFjBoCyU+Zz587F4MGDf7BU/3v8bVepGxsbY/v27Vi2bBl8fHxQUlKC8PBw1KpVC0OGDIGpqSmWL1+OVatW4cuXL6hTpw6cnZ0xcuTIrw570qRJICKsWbMG06dPh6OjI/744w/4+/vzHnpXVFTEyZMnsW7dOuzduxcnTpyAhoYG6tevj8mTJ9fore86deogOjoa8+bNw/79+5GTk4M6deqge/funEJXQ0MDV69exdKlS3H06FHs3bsXQqEQVlZWWLhwIXR0dKoMIzAwEGpqati+fTvCw8PRqlUrXLx4ET179uS5q244bdu2hbm5OZ4/f46BAwdKhPs16VW7dm1ERUVh1qxZ2LRpEwoKCuDg4IA///xTQm6gTPkfHx+PZcuWITc3F507d8bWrVu5NPwW1K5dG7dv38aiRYtw/PhxbN26FQYGBmjUqBFW/D/27j4+5/r////t2BYjQ85KOhmN2dmxYzZjy2QsQ1ksrCzn50K8IyqxJCVCqKacTCLyVs5PcpqTjA3HRljIKJLeNsvZ7Oz4/eHn9TV2Rph87tfLxeWy1+v5fD6ej9fzOC7vy6X343g+X2PH3lLMvn37YrVamTVrFhMnTuTJJ5/MVRgXERERERERERERERGR+1vTpk1p2rRpcafxf95N7Ri/n+Tk5FC5cmXCw8P58ssvizsdyUd+u7NFRERERERERERERERE8lOpUiXi4+Pv6Im93bt3JzIykuDg4AL7xcTEUL9+fWrXrl1ozKioKM6ePcukSZNuU5Zy1V3bMV6c0tPTKVmyJCaTybj31VdfkZKSYhy3LiIiIiIiIiIiIiIiIiJSVNOnTy9Sv6uveC5KYfzfJCsrCweHf0+52a64E7gbYmNjqVOnDmPGjGHatGn06tWL7t274+npSdu2bYs7PREREREREREREREREREBIiMj8fPzw2w289xzz3Hq1CkAkpOTKV++PCNHjsTX1xcXFxdWrlxpjFu6dClubm6YzWbeeOONfOPHxsbi6+uLxWLB09OTzz//HIDTp08THh6Ol5cXnp6eTJs2zRhz4MABQkNDMZvNmM1moqOjAWjUqBGLFy8G4Ny5c/To0QN/f3/MZjM9e/YkIyOD6dOnEx8fz6BBg7BYLKxcuZK9e/fSoEED6tSpg7u7O6NHjy7S2uSXx+HDhwkJCcFsNmOxWIycAEwmE2PGjMHf35/q1asza9asQuOdOnWKdu3a4e/vj5eXF8OHDzfGODs7M3ToUPz9/enUqRNRUVFERETQsmVL3N3dady4MSkpKQWudXH5P1EYd3Z25vHHH2fy5Mn079+fJUuW0LFjR9avX0+JEiWKOz0RERERERERERERERERASZNmkR8fDyJiYkEBQURFRVltKWlpWE2m9m1axdTp05l0KBBwJWidpcuXVi0aBGJiYm4uLhw5syZPON/8MEHDB48GKvVyr59+3jppZcA6N+/P66uruzdu5cNGzYwevRoYmNjycrK4oUXXqBz584kJiaSmJiY5+t/X3/9dYKCgti5cycJCQnk5OTwySef0L17d/z8/Jg4cSJWq5UWLVrg7OzM+vXr2b17N7t27WLRokXExsYWuC4F5REZGUnbtm1JTExk4cKFdOvWjWPHjhljS5Ysyc6dO1m1ahUDBgwgKyurwHidOnXi1VdfZefOnezZs4f4+HgWLlxoxDtz5gw7duxg7ty5AOzYsYOYmBj2799PlSpVjB8V5LfWxeXfs7f9H3B2dmbp0qXFnYbcgkaNGmGz2Yo7DREREREREREREREREbkL5s2bx5w5c0hPTyc9PZ1K
lSoZbY6OjoSHhwMQEBDAkSNHgCs7k81mM+7u7gB069aN/v375xk/ODiY9957j0OHDtG4cWMaNGgAwLp169i1axcAVapUITw8nHXr1uHk5ER6ejovv/yyEePanK5avHgx27dvZ8KECQBcunQJe3v7PHO4dOkSffv2xWq1Ymdnx2+//YbVaqV+/fr5rktSUlKeeZw7d47du3ezbds2AGrWrEmDBg3YsmULTz75JHClcA5Qu3ZtHBwcOHXqFGlpaXnGu3DhAuvXr+fPP/807p8/f56kpCTjunPnzrleYd2sWTMqVqwIXPlc9u7dW+BaF5f/E4VxEREREREREREREREREbm3bd26lcmTJ7N9+3aqVKnC0qVLGTFihNFesmRJoyBrb29PdnZ2nnGuLdpeb+DAgbzwwgusW7eOt956C09PTz777LObipEXm83GokWLqFWrVqF933rrLSpVqsSePXtwcHAgPDyc9PT0m5qvINfn7ujoaPxtb29PVlZWvmOvbliNjY3NNe5aZcqUKVL8oq713fJ/4ih1EREREREREREREREREbm3paam4uTkRMWKFcnIyMj1nu+CBAQEkJiYyMGDBwGYOXMmGRkZefZNSkqievXq9OjRg7feess4wjwkJIQvv/wSgL/++ovvvvuOZ599FldXV0qXLs0333xjxPjf//53Q9xWrVoxduxYoyicmprK4cOHAShbtixpaWm5nvOxxx7DwcGBpKQk1q5dW+gz5peHk5MTderUMd4dfvjwYbZu3UrDhg1vKV6ZMmUIDg7mww8/NO6fPHmS33//vdAcr5ffWhcXFcZFREREREREREREREREpNg1a9YMV1dXXF1dCQoKwmKxFGlc5cqVmTlzJq1bt8bb25tDhw4ZR3tfb+rUqXh4eODj48Pw4cP5+OOPAZg8eTIHDhzAy8uL4OBg3n77berVq4eDgwNLlixh1qxZeHl54e3tzaJFi26IO3HiREqVKoXFYsFsNtOkSROSk5MB6NmzJ2PGjMFisbBy5UqGDx/OrFmzMJvNDBs2jMaNGxf6jAXlMXfuXBYsWIC3tzdt2rRh+vTpPPHEE/8o3uHDh/H09MTLy4vw8PB839lekPzWeunSpXTv3v2m4/1TJpte4CwiIiIiIiIiIiIiIiIiIvcx7RgXEREREREREREREREREZH7mgrjIiIiIiIiIiIiIiIiIiJyX1NhXERERERERERERERERERE7msqjIuIiIiIiIiIiIiIiIhIsbNYLFgsFtzd3bG3tzeuIyIi2LRpExaLpbhTvG8sXbqUQYMG/aMYJpOJs2fP3p6E7gKTzWazFXcSIiIiIiIiIiIiIiIiIiIAycnJWCyWXEXXTZs2MXDgQKxWa7Hlda/IysrCwcGhuNPAZDKRmppK+fLlizuVItGOcRERERERERERERERERG552VlZdG3b1+8vb3x8PAgPj7eaFuzZg0NGjTA19cXf39/Nm7cCMChQ4d4+umn8fb2xsvLi+HDhwOQmZnJsGHD8Pf3x2Kx0K5dO1JTU/Ocd8WKFdStWxdvb28sFgs7duww5qxTpw5ms5lnnnmG/fv3A1eK+J6envnmml+8uLg4GjdujJ+fHz4+PixcuBC48kOB8uXLM3ToUOrUqcPUqVNp1KgRgwcPJigoiKeeeorevXsb8adPn467uzsWiwUvLy8j/rViYmJo1arVP8r3Ws7Ozrl+tODn58emTZvy/iCLSfH/lEBEREREREREREREREREpBAHDx5kxowZfPbZZ0RHR/P222+zZs0afv31V6KiolizZg1ly5bl8OHDBAUFkZyczNSpU3n++ed58803AUhJSQFg3LhxPPjgg+zcuROA9957j+HDh/Ppp5/mmvOXX36hS5cubN68mdq1a5OZmcnFixc5ffo07du3Z9OmTXh5eTF37lzatGnDzz//XGCu+cU7e/YsPXv2ZOXKlVStWpX//e9/1KlTh8DAQADS0tLw8PBg7NixACxevJgj
R46wceNGMjMzcXd3Z/v27QQEBPD6669z8OBBqlatSmZmJpcvX77ltc0v338j7RgXERERERERERERERERkXuei4sL9erVAyAgIIAjR44AsHr1ag4fPkzDhg2xWCy0adMGOzs7jh8/TsOGDfnyyy95++23+eGHH4xjvxcvXszXX39tvMf8m2++4ejRozfMuXbtWpo1a0bt2rUBeOCBByhXrhw7duzAy8sLLy8vACIjIzl58iQnTpwoMNf84v3000/8+uuvNG/eHIvFQkhICABJSUlGv1deeSVXbhERETg4OFCqVCksFosxR5MmTejQoQOffPIJR48epUyZMre8tvnl+2+kHeMiIiIiIiIiIiIiIiIics9zdHQ0/ra3tycrKwsAm83Gs88+y7x5824YU7NmTQIDA1m7di1Tp05l0qRJrFy5EpvNxpQpU2jatOldzTU/NpsNDw8PfvrppxvakpOTKV26NHZ2ufc85zfHokWL2LVrF5s2baJFixaMHj2al1566bbmez0HBweys7ON6/T09Jsafzdox7iIiIiIiIiIiIiIiIiI/GuFhoaybt06EhMTjXtXj0g/dOgQDz/8MB07duSjjz4iNjYWgFatWjFx4kTjWPCLFy8ax6BfH3vNmjUcPHgQuPJu8rS0NOrXr8/evXvZt28fAPPnz6datWpUq1at0FzzihcYGMjRo0dZt26d0ddqtZKRkXFTa5GVlcWRI0fw8/Nj8ODBtGnTxliLW5FfvtdzcXEx3j2+c+dOY6f7vUSFcRERERERkfuQzWajZ8+eVKhQAZPJhNVqLfJYZ2dnJk2adMdyu1mdO3emVatWxZ3GHbdp0yZMJhNnz57Nt09UVBQWi+W2zRkTE2McI3gvSk5OLvT7W5R1u9NMJhOLFy8utvnv9c9RREREROROc3FxYd68efTq1Qtvb2/c3NyM/67973//i5eXFz4+PkRERBAdHQ3A0KFDqVu3LvXq1cNsNlO/fv08/9vDxcWFWbNm8corr+Dt7U29evVISkqicuXKzJ07l44dO2I2m/n8889ZuHAhJpOp0FzzivfQQw+xYsUKxowZg7e3N+7u7gwbNoycnJybWovs7Gy6du2Kp6cnFouFXbt28Z///OemYhQl3+uNHj2aTz/9FG9vb2bOnImHh4fRFh0dzYgRI4zrFi1aEB8fD0B8fDwtWrS45fxuhslms9nuykwiIiIiIiL3kd9++42RI0eyevVq/ve//1G1alVatWrFiBEjqFixYnGnx6pVq3jhhRfYtGkTNWrUoFKlSjg45H6bVkxMDAMHDryhoOjs7MzAgQMZOHDg3Uu4AGlpadhstvuq8NeoUSMsFkuuHyBs2rSJ4OBgUlNT833W8+fPc/ny5dv2HcvvO3CvyM7O5q+//srz+3tVUdbtdomKimLx4sU3/J9lp06d4qGHHqJkyZJ3dP78XLp0iXPnzlGlSpVimV9EREREROTfQDvGRUREREREbtKvv/6Kn58fhw4d4ptvvuHw4cNER0ezfv16AgICSElJKe4UOXLkCFWrViUwMJBHHnkk36LivSw7O5ucnBzKlStXLEXxmz2u7m4oU6bMPfHDi7vF3t7+rnx//+ln/cgjjxRbURygVKlSKoqLiIiIiIgUQoVxERERERGRm/Tqq69SokQJfvjhB5555hmeeOIJmjdvzrp16zhx4gRvv/220dfZ2ZkxY8bQtWtXnJyceOKJJ/jiiy9yxfvtt99o164d5cuXp0KFCrzwwgskJycXmMOPP/6Iv78/JUuWpGrVqgwbNoysrCzgytHj/fv35/jx45hMJpydnW8Yv2nTJrp06UJaWhomkwmTyURUVJTRfvHixdua89XjrlesWIHZbMbR0ZH69esb72KD/3cc9NKlS3F3d6dkyZIcP378hqPUGzVqRP/+/Rk4cCAPPfQQDz/8MF9++SUXLlygS5cuODk54eLiwqpVq4wx2dnZdOvWjerVq1OqVClcXV355JNPcuV4dZ73
33+fRx99FFdXV0aNGoWnp+cNz2OxWHjnnXfyfd59+/bRvHlzypQpw8MPP0yHDh343//+Z8zz448/8sknnxhrf+3a7dq1Cz8/P0qXLk1gYGCuI+ryOkr96hF1V78L/fr1M9omTJiAl5cXDz74II8//jh9+/bl/Pnz+eadl99//52XX36ZChUq8OCDD+Ln52e8Nw7g888/56mnnqJEiRK4uroyZ86cXONNJhPTp0+ndevWlC5dmpo1a7J06VKjPTU1lcjISCpXrkypUqWoWbMms2bNAvI+Sn3lypXUqlWLUqVKERwcnOf3buvWrQQFBVGqVCkef/xxBgwYwIULF4x2Z2dn3nvvPTp27EjZsmXp2bMncOUoxVq1alG6dGlq1KjBO++8Q2ZmJnDl+/nuu++SkJBgfG4xMTHGM157lPrevXtp3LgxpUqVomLFivTs2TPXul/9ro0fP56qVatSsWJFXn31VWMugM8++4yaNWvi6OjIww8/TJs2bfL9jK4/Sv3q92TOnDk4OztTrlw5XnrpJc6dO5dvDBERERERkfudCuMiIiIiIiI3ISUlhTVr1tC3b19KlSqVq+2RRx4hMjKSBQsWcO1bqz7++GP8/PzYs2cPffv2pU+fPkaxMzMzk9DQUJycnNiyZQvbtm2jTJkyNGvWLN9drCdOnKBFixbUrVuXhIQEPv/8c2bMmMHo0aMB+OSTTxg1ahSPPfYYf/zxB3FxcTfECAwMZNKkSZQtW5Y//viDP/74g8GDB9+xnK8aMmQIH3/8MXFxcVSuXJmWLVvmKgZevHiRsWPHMn36dH7++ed8d8HOnj2bSpUqsXPnTvr370+fPn1o27YtgYGB7N69m6ZNm9KhQwcuXrwIQE5ODo899hgLFy5k//79jBgxgrfeeotvv/02V9z169eTlJTE2rVrWb58OV27duXAgQO51nDPnj0kJibSpUuXPHM7e/YsjRs3xsfHh/j4eFavXs2ff/5Ju3btjM8nICCAHj16GGv/+OOPG+PffvttPv74Y+Lj43FwcKBr1675rufnn3/Oq6++Ss+ePdm7dy9Lly7FxcXFaLezs2Py5Mn8/PPPzJ49mw0bNvDGG2/kG+9658+f55lnnuHEiRMsXbqUhIQE3njjDeMdd99//z2vvfYar7/+Ovv27aNXr1506dKFjRs35orz7rvv0q5dOxITE2nRogWRkZHGyQrvvPMO+/fvZ9WqVRw4cIDPP/+cSpUq5ZnPb7/9Rnh4OC1btsRqtdK9e3eGDRuWq8+RI0do1qwZL774IomJiSxYsICtW7fm+sEAwPjx4/H29mbPnj3GjxycnJyIiYlh//79fPLJJ3z55ZdMnDgRgIiICF5//XU8PDyMzy0iIuKGHC9cuEBoaCgPPfQQcXFxLFy4kHXr1t0w/8aNGzly5AgbN25k9uzZxMTEGIX2+Ph4BgwYwKhRo0hKSmL16tU0bNiwKB9ZrnVYvHgxy5cvZ/ny5fz44498+OGHNxVDRERERETkvmITERERERGRIouNjbUBtu+//z7P9gkTJtgA259//mmz2Wy2J5980vbKK68Y7Tk5ObYqVarYPv/8c5vNZrPNmTPH5urqasvJyTH6XL582VaqVCnbmjVr8pzjrbfeumHMp59+aitTpowtOzvbZrPZbBMnTrQ9+eSTBT7LrFmzbOXKlbvh/p3IeePGjTbANn/+fOPemTNnbKVKlbItWLDAyAewWa3WXGM7depke+GFF4zrZ555xtagQQPjOisry/bggw/aOnToYNz7448/bIBt+/bt+T7/q6++anvxxRdzzfPwww/bLl++nKtf8+bNbX369DGu+/fvb2vUqFG+cd977z1b06ZNc9377bffbIAtKSnJeIbXXnstV5+ra7Ru3Trj3ooVK2yA7dKlSzabzWYbOXKkzdvb22h/9NFHbW+//Xa+uVxv4cKFtooVKxrX+X0Hrpo2bZrNycnJdubMmTzbAwMDbT169Mh1r23btrYWLVoY
14Bt+PDhxvX58+dtgG3VqlU2m81ma9mypa1Lly55xj969KgNsO3Zs8dms9lsb775ps3d3T1Xn6FDh9oAW2pqqs1ms9m6detm69mzZ64+W7ZssdnZ2Rnr+OSTT9patWqV73NfNW7cOJuvr69xff36X/uMV/834YsvvrA99NBDtvPnzxvtK1assNnZ2dlOnTpls9mufNeefPJJW1ZWltGnbdu2toiICJvNZrMtWrTIVrZsWdvff/9daI42242f48iRI22lS5fONX7IkCG2evXqFSmeiIiIiIjI/Ug7xkVERERERG6B7Zod4YUxm83G3yaTiUceeYTTp08DkJCQwOHDh3FycqJMmTKUKVOGChUqkJ6ezpEjR/KMd+DAAQICAjCZTMa9p59+mvPnz/P777/f4hPd2ZyvCggIMP6uUKECrq6uHDhwwLhXokSJXHMXJT97e3sqVqyIl5eXce/hhx8GMHIG+PTTT/H19aVy5cqUKVOGL774guPHj+eK6+XlRYkSJXLd69GjB9988w3p6elkZGQwb968AndxJyQksHHjRmNtypQpQ+3atQEKXZ/rn61q1ao3PMdVp0+f5uTJkzRp0iTfWOvWraNJkyZUq1YNJycnOnTowJkzZ4yd9IWxWq34+PhQoUKFPNsPHDjA008/neve008/neszvf6ZHnzwQcqWLWs8U58+fZg/fz4Wi4U33niDn376Kd98Dhw4QL169XLdu/Y7BVfWPyYmJtf6h4aGkpOTw9GjR41+fn5+N8RfsGABTz/9NI888ghlypRh+PDhN3xHCnPgwAG8vb158MEHjXtPP/00OTk5uY7F9/DwwN7e3riuWrWqsSbPPvssTz75JDVq1KBDhw7MnTu3yJ/ZVc7Ozjg5OeUZX0RERERE5P8ih+JOQERERERE5N/ExcUFk8nEgQMHaN269Q3tBw4c4KGHHqJy5crGvQceeCBXH5PJZBxFff78eXx9fZk7d+4Nsa6NcbcVV86lSpXKVfC/mfyuvXc1xtWc58+fz+DBg/n4448JCAjAycmJcePG5XpXNpCrmHlVy5YtKVmyJN9//z0lSpQgMzOzwPc9nz9/npYtWzJ27Ngb2q4Wuov6bNc/x7WuP8r/esnJyTz//PP06dOH999/nwoVKrB161a6detGRkYGpUuXLjSXwuYoqoK+T82bN+fYsWOsXLmStWvX0qRJE1599VXGjx9/S3OdP3+eXr16MWDAgBvannjiCePv6z/r7du3ExkZybvvvktoaCjlypVj/vz5fPzxx7eUR2EKWhMnJyd2797Npk2b+OGHHxgxYgRRUVHExcXlepf4rcYXERERERH5v0iFcRERERERkZtQsWJFnn32WT777DMGDRqUq3B46tQp5s6dS8eOHYtU3AWoU6cOCxYsoEqVKpQtW7ZIY9zc3Fi0aBE2m82YZ9u2bTg5OfHYY48V+VlKlChBdnZ2kfv/k5yvio2NNYqTqamp/PLLL7i5ud10Djdr27ZtBAYG0rdvX+NeUXZvAzg4ONCpUydmzZpFiRIleOmllwosGNepU4dFixbh7OyMg0Pe/9l9q2t/LScnJ5ydnVm/fj3BwcE3tO/atYucnBw+/vhj7OyuHBh3/TvVC2M2m5k+fTopKSl57hp3c3Nj27ZtdOrUybi3bds23N3db2qeypUr06lTJzp16kRQUBBDhgzJszDu5ubG0qVLc92LjY3NdV2nTh3279+f613rRfHTTz/x5JNP8vbbbxv3jh07lqtPUT43Nzc3YmJiuHDhglF837ZtG3Z2dri6uhY5HwcHB0JCQggJCWHkyJGUL1+eDRs2EB4efhNPJSIiIiIiIlfpKHUREREREZGbNHXqVC5fvkxoaCibN2/mt99+Y/Xq1Tz77LNUq1aN999/v8ixIiMjqVSpEi+88AJbtmzh6NGjbNq0iQEDBuR7LHrfvn357bff6N+/PwcPHmTJkiWMHDmS//znP0YBtCicnZ05f/4869ev53//+1+R
j2q+lZyvGjVqFOvXr2ffvn107tyZSpUq0apVqyLnfKtq1qxJfHw8a9as4ZdffuGdd94hLi6uyOO7d+/Ohg0bWL16dYHHqAO8+uqrpKSk8PLLLxMXF8eRI0dYs2YNXbp0MYqqzs7O7Nixg+TkZP73v//d8k7eqKgoPv74YyZPnsyhQ4fYvXs3U6ZMAa6cbpCZmcmUKVP49ddfmTNnDtHR0TcV/+WXX+aRRx6hVatWbNu2jV9//ZVFixaxfft2AIYMGUJMTAyff/45hw4dYsKECXz33XcMHjy4yHOMGDGCJUuWcPjwYX7++WeWL1+e748levfuzaFDhxgyZAhJSUnMmzePmJiYXH2GDh3KTz/9RL9+/bBarRw6dIglS5bQr1+/AvOoWbMmx48fZ/78+Rw5coTJkyfz/fff5+rj7OzM0aNHsVqt/O9//+Py5cs3xImMjMTR0ZFOnTqxb98+Nm7cSP/+/enQoYNxxH9hli9fzuTJk7FarRw7doyvvvqKnJycmyqs36wTJ05Qu3Ztdu7cadzr2LEjb775pnH9/fffG68FEBERERER+bdRYVxEREREROQmXS2y1qhRg3bt2vHUU0/Rs2dPgoOD2b59e77vY85L6dKl2bx5M0888QTh4eG4ubnRrVs30tPT892NXa1aNVauXMnOnTvx9vamd+/edOvWjeHDh9/UcwQGBtK7d28iIiKoXLkyH3300R3L+aoPP/yQ1157DV9fX06dOsWyZctueKf3ndCrVy/Cw8OJiIigXr16nDlzJtfu8cLUrFmTwMBAateufcM7rq/36KOPsm3bNrKzs2natCleXl4MHDiQ8uXLGz9cGDx4MPb29ri7u1O5cuWbfo/1VZ06dWLSpEl89tlneHh48Pzzz3Po0CEAvL29mTBhAmPHjsXT05O5c+fywQcf3FT8EiVK8MMPP1ClShVatGiBl5cXH374ofFu7FatWvHJJ58wfvx4PDw8mDZtGrNmzaJRo0Y3Ncebb76J2WymYcOG2NvbM3/+/Dz7PvHEEyxatIjFixfj7e1NdHQ0Y8aMydXHbDbz448/8ssvvxAUFISPjw8jRozg0UcfLTCPsLAwBg0aRL9+/bBYLPz000+88847ufq8+OKLNGvWjODgYCpXrsw333xzQ5zSpUuzZs0aUlJSqFu3Lm3atKFJkyZMnTq1yGtSvnx5vvvuOxo3boybmxvR0dF88803eHh4FDnGzcrMzCQpKSnXD2SOHz/OH3/8YVynpaXlek+6iIiIiIjIv4nJZrPZijsJERERERERub9t2rSJ4OBgUlNTi/yO5HuJzWajZs2a9O3bl//85z/FnY6IiIiIiIiI3CS9Y1xERERERESkAH/99Rfz58/n1KlTdOnSpbjTEREREREREZFboMK4iIiIiIiISAGqVKlCpUqV+OKLL3jooYeKOx0RERERERERuQU6Sl1ERERERERERERERERERO5rdsWdgIiIiIiIiIiIiIiIiIiIyJ2kwriIiIiIiIiIiIiIiIiI3PPS09Np1aoVtWrVwtvbm2effZbDhw8Xd1p3RUxMDK1atbrtcfft24ezs/Ntj3svUmFcRERERERERERERERERP4VevbsSVJSEgkJCbzwwgt07969uFMy5OTkkJOTU9xp5CsrK+uejHW3qDAuIiIiIiIiIiIiIiIiIvc8R0dHWrRogclkAqB+/fokJycXaezo0aNxc3PDYrFgsVg4duzYDX3Wr19PQEAAPj4+eHh4MGPGDKMtLS2N7t274+npibe3N127dgUgKiqKF198kdDQUDw9Pfnjjz+YM2cOZrMZs9nMc889x4kTJwCIjY3F19cXi8WCp6cnn3/+OQDTp0/H3d0di8WCl5cXO3bsyPMZ/v77b8LCwnB3d6dhw4bGs2dnZzNkyBA8PT3x9PSkf//+ZGRkANC5c2e6du1Kw4YN8fT0NHKuWbMmvr6+zJ8/P9cca9asoUGDBvj6+uLv78/GjRsB2LRpEx4e
HnTr1g2LxcL3339f5LzvFQ7FnYCIiIiIiIiIiIiIiIiIyM365JNPeOGFFwrtl5qayvjx4/njjz8oVaoUFy9exM7uxv3DderUYevWrdjb25OSkoKPjw+hoaE89thjDBw4kFKlSpGYmIidnR1//fWXMW779u3s2bOHhx9+mH379jFkyBB27dpFtWrVeP/99+nevTurVq3igw8+YPDgwbz88stGXgCvv/46Bw8epGrVqmRmZnL58uU8n2Pbtm1YrVbc3Nz46KOP6NmzJz/88ANffPEFcXFx7Nq1C3t7e8LCwpg4cSJDhw4FYNeuXWzduhUnJydWrFjBwoUL2bVrF05OTnTo0MGI/+uvvxIVFcWaNWsoW7Yshw8fJigoyCjAHzhwgM8++8z4wUC5cuWKlPe9QjvGRURERERERERERERERORfZcyYMRw+fJgPPvig0L5ly5alZs2avPLKK0ybNo2UlBQcHR1v6HfmzBnatm2Lp6cnjRs35syZM+zbtw+A5cuXM3jwYKOgXrlyZWNcixYtePjhhwHYuHEjzZo1o1q1agD07duXDRs2kJ2dTXBwMO+99x6jRo1i69atPPTQQwA0adKEDh068Mknn3D06FHKlCmT53MEBgbi5uYGXDlSftOmTWRnZ7Nu3To6d+5MyZIlcXBwoEePHqxdu9YY17ZtW5ycnIAru+LbtWtH2bJlMZlM9OrVy+i3evVqDh8+TMOGDbFYLLRp0wY7OzuOHz8OQI0aNXjmmWeM/kXN+16hwriIiIiIiIiIiIiIiIiI/GuMHz+e7777jlWrVlG6dOlC+9vb2xMbG8vAgQM5ffo09evXZ8uWLTf06927Nw0aNGDv3r1YrVZq1apFenp6ofELKghfPfYdYODAgaxYsYKqVavy1ltv0bdvXwAWLVrEhx9+SGZmJi1atLjhePObde2cN5OfzWbj2WefxWq1Gv9OnDhBzZo184xzu/O+01QYFxEREREREREREREREZF/hQkTJvDNN9+wdu1aypcvX6Qx586d488//yQoKIh33nmHBg0asGfPnhv6paam8uSTT2Iymdi8eTMJCQlGW1hYGOPHjycnJwcg11Hq1woODmb16tWcPHkSgOjoaJo0aYK9vT1JSUlUr16dHj168NZbbxEbG0tWVhZHjhzBz8+PwYMH06ZNG3bu3Jln7O3bt3Pw4EHgynvJg4ODsbe3JyQkhK+++oqMjAyysrKYPn06TZs2zTNGSEgICxcu5Ny5c9hsNr744gujLTQ0lHXr1pGYmGjcyy+Xm8n7XqF3jIuIiIiIiIiIiIiIiIjIPe/333/n9ddfp0aNGgQHBwNQsmRJduzYAcCIESN49NFH6d27d65xaWlptGnThgsXLmAymahZsyadOnW6If6HH35I3759ee+997BYLNSrV89omzhxIoMGDcLLy4sHHniAunXr8uWXX94Qw9PTk3HjxtGsWTMAHn/8caPf1KlT2bBhAyVKlMDe3p6PP/6Y7OxsunbtSkpKCg4ODlSuXJlZs2bl+fyBgYEMHTqUw4cPU7FiRb766ivgyrHqR44coU6dOgA0atSIgQMH5hmjRYsW7Ny5kzp16lC2bFmaN29utLm4uDBv3jx69erFxYsXycjIwMfHh3nz5t0Qp6C88/scipvJZrPZijsJERERERERERERERERERGRO0VHqYuIiIiIiIiIiIiIiIiIyH1NhXEREREREREREREREREREbmvqTAuIiIiIiIiIiIiIiIiIiL3NRXGRURERERERERERERERKRYOTs74+rqisViwd3dnU8//bTQMTExMbRq1arQtvj4eCIiIm4qn5iYGMqVK4fFYjH+vfrqqzcV436xadMmVq9efcfnGTx4MFFRUXcsvsMdiywiIiIiIiIiIiIiIiIiUkQLFizAYrFw7NgxzGYzQUFBmM3mfxzXz8+PBQsW3PS44OBgFi9e/I/n/7fbtGkTZ8+epVmzZsWdyj+iHeMiIiIiIiIiIiIi
IiIics948skncXV15ZdffiEqKoqBAwcabVOnTqVz587G9d9//01YWBju7u40bNiQ5OTkG+Jt2rQJi8ViXK9YsYK6devi7e2NxWJhx44dN5VfVFQUERERtGzZEnd3dxo3bkxKSgoAmZmZ9O3bl1q1alG/fn1ef/11GjVqBMCpU6cIDg7G19cXDw8P+vXrR05OTqHjAObMmUO9evWoU6cODRs2JCEhAbiysz0kJISXX34Zd3d3AgMD2b9/P61bt8bNzY2mTZty/vx5Y45hw4bh7++PxWKhXbt2pKamAtC5c2d69epFkyZNqFWrFuHh4WRkZGC1WomOjmbu3LlYLBZGjRp1w3rMmzePevXq4ePjg7e3N8uWLTPaGjVqxODBgwkKCuKpp56id+/eRtsff/xBaGgo7u7uhISE8Pvvv9/U53CzVBgXERERERERERERERERkXvG3r17OXjwIN7e3oX23bZtG2PHjmX//v08//zz9OzZs8D+v/zyC126dGHOnDkkJCQQFxdH7dq18+y7cePGXEepT5w40WjbsWMHMTEx7N+/nypVqjBt2jQAvvjiCw4dOsTPP//Mli1bSExMNMaUL1+eZcuWsWvXLhITE0lOTubbb78tdNy2bdv45ptv2Lx5M7t37+b999+nffv2RntcXJyxBk899RQtW7YkOjqaAwcOUKJECWbPng3AuHHjePDBB9m5cydWqxUvLy+GDx9uxLFarSxbtowDBw7w559/smjRIiwWC7179yYyMhKr1cqIESNuWKfQ0FBiY2PZs2cPS5YsoUePHly+fNloP3LkCBs3bmTfvn2sWbOG7du3AzBgwAD8/f3Zv38/s2fPZv369QV+dv+UjlIXERERERERERERERERkWIXERFBqVKlKF26NDNnzqRmzZqFjgkMDMTNzQ2Anj17Mnz4cLKzs/Ptv3btWpo1a2YUwx944AHKlSuXZ9+CjlJv1qwZFStWBCAgIIC9e/cCsH79el555RUeeOABADp16sT06dMByMnJYejQoWzduhWbzcbp06fx9PTkpZdeKnDckiVLSEhIoF69esb8KSkpXLp0yZj/iSeeAK4cG5+ZmcnDDz8MQN26dTl06BAAixcvJi0tjUWLFgGQkZGBs7OzEbN169aULl0aAH9/f44cOZLvOl7r6NGjREZG8vvvv+Pg4EBKSgpHjx411jgiIgIHBwccHBywWCwcOXKEgIAA1q9fz/jx4wGoVq0aYWFhRZrvVqkwLiIiIiIiIiIiIiIiIiLF7uo7xq/l4OCQq9Cdnp5+l7PKm6Ojo/G3vb09WVlZefYzmUzG3xMmTOD06dPs2LEDR0dH/vOf/+T7PNeOs9lsdOrUiTFjxhQpl/xys9lsTJkyhaZNm/6jZ7reSy+9xIcffkibNm0AqFChQq7nupW1uhN0lLqIiIiIiIiIiIiIiIiI3JNcXFyIj48nOzubixcvGrudr9q+fTsHDx4EYPr06QQHB2Nvb59vvNDQUNasWWOMyczMJC0t7bbl27hxY+bNm0dmZiaZmZl89dVXRltqaiqPPPIIjo6OnDp1ioULFxZpXFhYGF9//TXHjx8Hruw8j4+Pv+ncWrVqxcSJE7l48SIAFy9e5Oeffy50XNmyZQtco9TUVKpXrw7A119/bby3vDAhISHMnDkTuPK+8aVLlxZp3K3SjnERERERERERERERERERuSeFh4ezcOFC3NzceOyxx/Dx8TEKu3DlKPWhQ4dy+PBhKlasmKugnBcXFxdmzZrFK6+8QmZmJvb29kRHR+Pv739D36vvGL/K1dWVBQsWFBi/V69e7N27F3d3dx566CH8/Pw4efIkAK+99hpt2rTBw8ODRx99lJCQkCKNCwoK4qOPPqJ169ZkZWWRkZHBc889h5+fX6Hrd62hQ4dy+fJl6tWrZ+zOHjp0KB4eHgWOa926NXPmzMFisRAeHn7De8Y/+eQT2rRpQ/ny5WncuLFxrHthPvnkEzp37oy7uzvVqlWj
cePGN/U8N8tks9lsd3QGEREREREREREREREREZH/I86dO4eTkxOZmZlERkbi6+vL0KFD79g4KRoVxkVEREREREREREREREREbpN69epx+fJl0tPTadCgAVOmTKFUqVJ3bJwUjQrjIiIiIiIiIiIiIiIiIiJyX7Mr7gRERERERERERERERERERO41zs7OuLq6YrFYjH979+7Ns2+lSpVITk6+I3nExMRw8OBB43rp0qUMGjTojsx1P3Mo7gRERERERERERERERERERO5FCxYswGKxFGsOMTExlC9fntq1awMQFhZGWFhYseb0b6Qd4yIiIiIiIiIiIiIiIiIiN2Hp0qW4ublhNpt54403crU5OztjtVqNaz8/PzZt2gTAiRMnaNOmDV5eXpjNZt555x0A5s2bR7169fDx8cHb25tly5YBMH36dOLj4xk0aBAWi4WVK1cSExNDq1atjPjjxo3Dw8MDLy8vIiMjSUtLAyAqKoqIiAhatmyJu7s7jRs3JiUl5c4tyj1OhXERERERERERERERERERkTxERETkOkr90qVLnD59mi5durBo0SISExNxcXHhzJkzRYr3yiuv4Ovry969e0lMTGTAgAEAhIaGEhsby549e1iyZAk9evTg8uXLdO/eHT8/PyZOnIjVaqVFixa54q1atYqZM2eybds29u7dy4MPPsiwYcOM9h07dhATE8P+/fupUqUK06ZNu32L8y+jo9RFRERERERERERERERERPKQ11Hqa9euxWw24+7uDkC3bt3o379/obHOnz/P1q1bWbNmjXGvcuXKABw9epTIyEh+//13HBwcSElJ4ejRo8bx6flZt24dERERlC9fHoA+ffrQtm1bo71Zs2ZUrFgRgICAgHzfkf5/gXaMi4iIiIiIiIiIiIiIiIjcIpPJlOvawcGB7Oxs4zo9Pb3QGC+99BLdu3dn3759WK1WypQpU6RxheXi6Oho/G1vb09WVtZNx7xfqDAuIiIiIiIiIiIiIiIiIlJEAQEBJCYmcvDgQQBmzpxJRkaG0e7i4sKOHTsA2LlzJ0lJSQCUKVOGhg0b8vHHHxt9//rrLwBSU1OpXr06AF9//TWpqalGn7JlyxrvDb9eSEgI3377LX///TcA06ZNo2nTprfrUe8rKoyLiIiIiIiIiIiIiIiIiOTh+neMb9y4kcqVKzNz5kxat26Nt7c3hw4dMo4rBxg9ejSffvop3t7ezJw5Ew8PD6Ntzpw5xMfH4+HhgcViYerUqQB88skntGnTBh8fH/bs2cMTTzxhjOnZsydjxozBYrGwcuXKXPk1b96cLl26EBAQgJeXF3///TcffPDBHV6VfyeTzWazFXcSIiIiIiIiIiIiIiIiIiIid4p2jIuIiIiIiIiIiIiIiIiIyH1NhXEREREREREREREREREREbmvqTAuIiIiIiIiIiIiIiIiIiL3NRXGRUREREREREREREREROSe4uzsjKurKxaLxfi3d+/eYsmle/fubNy48bbGnDRpEqdOnTKuo6KiGDhw4G2d42Y4OztjtVoLbbuVtbBYLJw7d+4fZvjPORR3AiIiIiIiIiIiIiIiIiIi11uwYAEWi6W402D69Om3PeakSZNo1KgRjzzyyG2PfSfdylrkV3C/27RjXERERERERERERERERET+NUwmE2PGjMHf35/q1asza9Yso+2nn37CYrHg5eVF165d8fb2ZtOmTQBMmDCBunXrYrFYqFu3Ltu3by/SuEaNGrF48WIAOnfuTK9evWjSpAm1atUiPDycjIwMAM6dO0dERAS1a9cmKCiIXr160blz5xvyHzVqFCdPniQiIgKLxWIUjv/44w9atmyJu7s7jRs3JiUlxRgzfvx4/P39qVOnDs2aNePYsWP5rs3w4cPx8fGhVq1azJ07N1fb2bNnjetKlSqRnJxsXM+dOxdfX19cXFwYN25cnvGvXYu0tDS6d++Op6cn3t7edO3aNd+crs7r7OzMiBEjCAgIoHr1
6owePTrPMXeCCuMiIiIiIiIiIiIiIiIics+5Wji++u/SpUtGW8mSJdm5cyerVq1iwIABZGVlkZGRQUREBBMnTmTv3r106NCBxMREY0yHDh2Ii4vDarUyZcoUunTpAlDouOtZrVaWLVvGgQMH+PPPP1m0aBFwpeBdqlQpDhw4wMqVK/npp5/yHD9ixAgeffRRFixYgNVqNXbF79ixg5iYGPbv30+VKlWYNm0aAPPmzSMpKYnt27eze/duIiMj6du3b775mUwm9uzZw+rVq+nfv3+u4ndB/vzzT+Lj44mNjWXKlCn55n/VwIEDKVGiBImJiSQkJDB27NgizXP27Fm2b99OXFwc48aN48SJE0Ua90/pKHURERERERERERERERERuecUdJR6ZGQkALVr18bBwYFTp06RkpKCg4MDwcHBAAQHB/PUU08ZY/bs2cP777/PmTNncHBwICkpiUuXLnHo0KECx12vdevWlC5dGgB/f3+OHDkCwPr165k4cSImkwknJyciIiI4fPhwkZ+3WbNmVKxYEYCAgADjneqLFy8mLi4OX19fALKzswuM0717dwBq1KhBw4YN2bx5M87OzoXO361bN0wmE5UqVSI8PJx169YRGBiYb//ly5ezY8cO7Oyu7MWuXLlyoXMAtG/fHriyY71GjRocPXqUatWqFWnsP6HCuIiIiIiIiIiIiIiIiIj8qzg6Ohp/29vbk5WVlWc/k8kEXNkVHh4ezsaNG6lbty5///035cqV4/LlywWOux1zF1V+cW02G2+++SY9e/a8qXjX52Fvb5+rqJ6enl6kcbdbUdfvdtNR6iIiIiIiIiIiIiIiIiLyr+fq6kpmZiY//vgjAD/++KOxYzs9PZ2MjAyeeOIJAKZMmVKkcTejcePGzJ49G5vNxvnz5/n222/z7Vu2bFnS0tKKFLdVq1ZER0cb7xzPzMxkz549+fa/+s715ORktmzZQlBQEAAuLi7s2LEDgO+++44LFy7kGhcTEwNASkoK33//PU2aNCkwr7CwMMaPH09OTg4Af/31V5Gep7hox7iIiIiIiIiIiIiIiIiI3HMiIiIoVaqUcT1x4kTjuPO8lCxZkvnz5/Pqq6+Sk5ODr68vrq6ulC9fnrJlyzJ69Gj8/f2pVKkSL730UpHG3YwRI0bQrVs33NzcqFSpEt7e3vnGGDBgAD169KB06dJGQTo/kZGRnDlzxnj2rKwsunbtio+PT579s7Oz8fHx4cKFC0yePNk4Rn3ixIkMGDCA4cOH89xzzxnHtl9VuXJlfH19SUtLo1+/fgUeo3413qBBg/Dy8uKBBx6gbt26fPnllwWOKU4mm81mK+4kRERERERERERERERERET+qXPnzuHk5ARAXFwcYWFhHDlyxHgn+O0ed63MzEyys7NxdHTkwoULhIaG0r9/fyIiIm79gW6SyWQiNTX1pov6/xdox7iIiIiIiIiIiIiIiIiI3BcWLVrExIkTsdlsODg4MGfOnCIVt2913LVSU1Np3rw52dnZpKen88ILL9CuXbtbfRS5zbRjXERERERERERERERERERE7mt2xZ2AiIiIiIiIiIiIiIiIiIjInaTCuIiIiIiIiIiIiIiIiIjcdZUqVSI5ORmAFi1akJSUVKz5xMfH39X3geclOTn5tr8fPDk5mejo6EL7zZo1C5PJxJYtW3Ld79y5M5MmTcpzjMVi4dy5c7cjzTtOhXERERERERERERERERERKVYrV67E1dW1WHPw8/NjwYIFxZrDnVDUwviMGTNo0qQJM2bMKHJsq9WKk5PTP0nvrlFhXERERERERERERERERETuuKVLl+Lm5obZbOaNN97I1ebs7IzVagVg9OjRuLm5YbFYsFgsHDt2DIDt27fToEEDvL29MZvNLFmy5IaxcKXAvWnTpnxjXbp0iYiICNzd3fH29qZp06YAbNq0CYvFYsSZM2cOZrMZs9nMc889x4kTJwCIiYkhJCSEl19+GS8vL/z8/Pj1118B
OHToEE8//TTe3t54eXkxfPjwPNciMjISPz8/I/apU6dytQ8ePBiz2YyHhwfr1q0rUk6tWrUy+i1fvpxGjRoB0Lt3b5KSkrBYLISFheWZT1JSEkePHuWrr75i8eLF/P3333n2u57JZOLs2bPk5OTQr18/3Nzc8Pb2xtfXl/T09Bv6p6Wl0b17dzw9PfH29qZr165Fmud2cLhrM4mIiIiIiIiIiIiIiIjI/0mnT5+mS5cubNmyBXd3d7744gvOnDlzQ7/U1FTGjx/PH3/8QalSpbh48SJ2dnakpKTQqlUr/vvf/xIUFEROTg5nz54tcM78Yq1atYqzZ8+yf/9+AFJSUm4Yu2/fPoYMGcKuXbuoVq0a77//Pt27d2fVqlUAxMXFYbVaqV69OsOGDWPs2LFMmzaNqVOn8vzzz/Pmm2/mGxtg0qRJVK5cGYAPP/yQqKgoY1d3Wloabm5ujB8/ntjYWMLCwjhy5AjHjh0rMKf8REdHM3DgwFw/HrjejBkz6NChA48++iiNGzdm/vz59OzZs8C410pISGD9+vX8/PPP2NnZkZaWRokSJW7oN3DgQEqVKkViYiJ2dnb89ddfRZ7jn9KOcRERERERERERERERERG5o2JjYzGbzbi7uwPQrVu3PAunZcuWpWbNmrzyyitMmzaNlJQUHB0d2b59O66urgQFBQFgZ2dHhQoVCpwzv1je3t4cOHCAvn37smDBAh544IEbxm7cuJFmzZpRrVo1APr27cuGDRvIzs4GICAggOrVqxt/HzlyBICGDRvy5Zdf8vbbb/PDDz/k+77wefPm4efnh6enJ9OnT89VtHZwcKBz584A1K9fn0cffZQ9e/YUmtOtysrK4quvvqJLly4AdO3a9aaOUweoUaMGWVlZdO3aldmzZ5OZmYmd3Y2l6OXLlzN48GCj7eqPA+4GFcZFRERERERERERERERE5K4ymUx53re3tyc2NpaBAwdy+vRp6tevz5YtWwqM5eDgkKs4fPUI7/xi1ahRg/3799OsWTO2bduGp6cnqampN5Wvo6NjrpyzsrIAePHFF9m2bRuurq7G7vHrbd26lcmTJ7Ny5Ur27dvHhAkT8jx2vKD5r7+X3xoUxfLlyzl79iyhoaE4Ozvz6quvsnv3bvbt21fkGOXKlWPfvn20b9+egwcPYjabOXz4cJHH3w0qjIuIiIiIiIiIiIiIiIjIHRUQEEBiYiIHDx4EYObMmWRkZNzQ79y5c/z5558EBQXxzjvv0KBBA/bs2UNgYCCHDh0yiuQ5OTnGMeUuLi7s2LEDgJ07d5KUlFRgrN9//x2TyURYWBjjx4/HZrPx22+/5cojODiY1atXc/LkSeDKceRNmjTB3t6+wOc8dOgQDz/8MB07duSjjz4iNjb2hj6pqak4OTlRsWJFMjIymDZtWq72rKws5syZYzzPyZMnsVgsBebk4uJCYmIily5dIisri3nz5hnxypYtS1paWr45z5gxg0mTJpGcnExycjLHjh3jP//5z03tGv/rr7+4cOECTZs2ZcyYMTg7OxtH1V/r6prn5OQY4+4WFcZFRERERERERERERERE5I6qXLkyM2fOpHXr1nh7e3Po0CEqVqx4Q7+0tDTCw8Px8vLCbDaTmZlJp06deOihh/j+++8ZNmwYZrOZOnXqsG3bNgBGjx7Np59+ire3NzNnzsTDw6PAWHv37uXpp5/G29sbHx8fOnTogNlszpWHp6cn48aNo1mzZpjNZrZs2cKXX35Z6HP+97//xcvLCx8fHyIiIoz3hl+rWbNmuLq6GkfDWyyWXO1Xd197e3vTpUsX5s2bh5OTU4E51a9fnxYtWuDp6UmjRo2oWbOmEc9sNuPh4YGnpydhYWG55jp58iTr16+nbdu2ue5HRkby9ddf5/njhbz89ttvPPvss5jNZjw9PfH09KR58+Y39Js4cSKXL1/Gy8sLi8XCW2+9BUB8fDwtWrQw+o0YMSLX2lksFuMHAbfKZLPZbP8ogoiI
iIiIiIiIiIiIiIiIyD1MO8ZFREREREREREREREREROS+psK4iIiIiIiIiIiIiIiIiIjc11QYFxERERERERERERERERGR+5oK4yIiIiIiIiIiIiIiIiIicl9TYVxERERERERERERERERERO5rKoyLiIiIiIiIiIiIiIiIiMh9TYVxERERERERERERERERERG5r6kwLiIiIiIiIiIiIiIiIiIi9zUVxkVERERERERERERERERE5L6mwriIiIiIiIiIiIiIiIiIiNzXVBgXEREREREREREREREREZH7mgrjIiIiIiIiIiIiIiIiIiJyX1NhXERERERERERERERERERE7msqjIuIiIiIiIiIiIiIiIiIyH3NobgTEBERERERERERERERERH5v6Bp06acOnUKOzs7nJycmDx5Mj4+PsWd1v8JJpvNZivuJERERERERERERERERERE7ndnz56lfPnyAHz//fdERUWRkJBQvEn9H6Gj1EVERERERERERERERERE7oKrRXGAtLQ0TCZT8SXzf4yOUhcRERERERERERERERERuUs6duzIxo0bAVi5cmUxZ/N/h45SFxERERERERERERERERG5y2bPns2CBQtUHL9LVBgXERERERERERERERERESkGpUqV4vfff6dixYrFncp9T+8YFxERERERERERERERERG5w86ePcvJkyeN68WLF1OxYkUqVKhQjFn936F3jIuIiIiIiIiIiIiIiIiI3GFpaWm0bduWS5cuYWdnR+XKlVm+fDkmkwmA7t27ExYWRlhYWDFnen/SUeoiIiIiIiIiIiIiIiIiInJf01HqIiIiIiIiIiIiIiIiIiJyX1NhXERERERERERERERERERE7msqjIuIiIiIiIiIiIiIiIjIPcvZ2RlXV1csFgvu7u58+umn+fZt0aIFSUlJdzG7O6dSpUokJyffcD8zM5MBAwbg4eGBt7c37u7uTJgw4a7lFR0dzbhx4wCwWq3Mnz//rs39TzgUdwIiIiIiIiIiIiIiIiIiIgVZsGABFouFY8eOYTabCQoKwmw2G+05OTkArFy5srhSvGs++eQTTp48SUJCAg4ODqSnp3PkyJG7MndWVha9e/c2rq1WK4sXL+all166K/P/E9oxLiIiIiIiIiIiIiIiIiL/Ck8++SSurq788ssvREVF8eKLLxIaGoqnpyd//PEHzs7OWK1Wtm3bhpeXV66xjRo1YsmSJWRlZREaGoqfnx8eHh60b9+eCxcuGP1mzZqFxWLB29sbPz8/kpOT6devH2PGjDH6JCUl8fjjj5OVlXVDjpGRkfj5+WE2m3nuuec4deoUAMnJyZQvX56RI0fi6+uLi4tLrkL+0qVLcXNzw2w288Ybb+S7Br///jtVqlTBweHKHmhHR0c8PDyM9jlz5lCvXj3q1KlDw4YNSUhIMNrGjh2Ll5cX3t7e1K9fn4sXL7Jp0yYsFovRZ9++fTg7O+fKeejQodSpU4epU6cSFRXFwIEDOX36NCNGjGDjxo1YLBZ69+7N+PHj6dmzpxHr7NmzVKpUiZSUlHyf525RYVxERERERERERERERERE/hX27t3LwYMH8fb2BmD79u189dVX7N+/n2rVqhn9nn76aS5fvkx8fDwAv/76K0lJSTz33HPY29szb9484uPj2bdvH+XKlWPKlCkAbNq0iVGjRrFq1SoSEhLYvHkzVapUoX///nzxxRdkZ2cD8Nlnn9GzZ0+jOH2tSZMmER8fT2JiIkFBQURFRRltaWlpmM1mdu3axdSpUxk0aBAAp0+fpkuXLixatIjExERcXFw4c+ZMnmvQo0cPli1bhpubGz169GD+/PlGXtu2beObb75h8+bN7N69m/fff5/27dsDMHv2bBYtWsTWrVtJSEhg1apVlCxZstA1T0tLw8PDg927dzNw4EDjfpUqVRg1ahTBwcFYrVaio6Pp3r07ixcv5uzZs8CVHxm88MILVKhQodB57jQdpS4iIiIiIiIi
IiIiIiIi97SIiAhKlSpF6dKlmTlzJjVr1gSuvFP84YcfznNMly5dmDVrFn5+fsyePZvIyEgcHBzIyclh4sSJrFixgqysLNLS0ggMDARgxYoVdOjQgapVqwJQunRpAFxdXXF3d2fJkiWEhobyzTffsHfv3jznnTdvHnPmzCE9PZ309HQqVapktDk6OhIeHg5AQECAcQR6bGwsZrMZd3d3ALp160b//v3zjO/h4cGRI0fYunUrP/30EyNHjmTOnDmsWLGCJUuWkJCQQL169Yz+KSkpXLp0ieXLl9O7d2/KlSsHwEMPPVSElYcHHniAV155pUh9y5cvT5s2bZg5cyaDBg3i888/Z8GCBUUae6epMC4iIiIiIiIiIiIiIiIi97Sr7xi/XpkyZfId06lTJ7y9vRk/fjxfffUVy5cvB64Urjds2MCPP/5I2bJlmTx5Mhs2bCg0h9dee42xY8fy119/8eyzz+ZZkN+6dSuTJ09m+/btVKlShaVLlzJixAijvWTJkphMJgDs7e2Nnd7Xu9onPyVKlKBx48Y0btyY7t27U7VqVVJSUrDZbHTq1CnXse+FcXBwyJVHenp6rvbSpUtjZ1f0g8gHDBhAWFgYbm5uVK5cGR8fnyKPvZN0lLqIiIiIiIiIiIiIiIiI3HceffRR6taty6BBg6hSpYrxHu7U1FQqVapE2bJlOXfuHDExMcaYli1b8vXXX/PHH38AcPHiRS5evAhA06ZNOXXqFKNHj6Zfv355zpmamoqTkxMVK1YkIyODadOmFSnXgIAAEhMTOXjwIAAzZ84kIyMjz76bN2828gPYtWsXFSpUoHz58oSFhfH1119z/PhxAHJycozj5MPCwoiOjiYtLQ248v7v7OxsatSowbFjx/jrr7+AK+8oL6qyZcsa8a6qXbs2NWrUoGfPnvmuU3FQYVxERERERERERERERERE7ktdunRh2rRpdOnSxbjXsWNHLl68iKurK82bNycoKMhoa9iwISNHjiQ0NBRvb2+eeeYZo2BsMpno1q0bVapUISAgIM/5mjVrhqurK66urgQFBeW5yz0vlStXZubMmbRu3Rpvb28OHTpExYoV8+x7/PhxnnvuOdzd3bFYLHzwwQcsWbIEOzs7goKC+Oijj4w4Hh4ezJ8/H4AOHTrw4osvEhgYiLe3Ny1atODy5cs8+uijvPHGG/j7+1O/fv2beh94kyZNuHz5Mmazmd69exv3e/ToQVZWFm3atClyrDvNZLPZbMWdhIiIiIiIiIiIiIiIiIjIve75558nIiKCDh06FHcq97R+/frx8MMP88477xR3KgbtGBcRERERERERERERERERKUB8fDwuLi7Y2dnRvn374k7nnnXy5Elq167N7t27GThwYHGnk4t2jIuIiIiIiIiIiIiIiIiIyH1NO8ZFREREREREREREREREROS+psK4iIiIiIiIiIiIiIiIiPwj586do0yZMnTr1i3X/U2bNmGxWO7o3Js2bWL16tV3dI5b1a1bN9zd3WndujWLFy8mNja2WPOJiooqtiPOnZ2dsVqtxTI3qDAuIiIiIiIiIiIiIiIiIv/QggUL8PX15bvvvuP8+fN3de57tTD+559/Mn/+fPbu3cv3339/VwrjWVlZdzT+3XY7n0eFcRERERERERERERERERH5R2bMmMHQoUNp2LAhCxYsKNKY6dOn4+7ujsViwcvLix07dvDf//6Xpk2bGn2ys7N58skn2b9/P4cOHeLpp5/G29sbLy8vhg8fjtVqJTo6mrlz52KxWBg1ahQAa9asoUGDBvj6+uLv78/GjRuBK0V0T09P+vTpg9lsxsvLi8TERDp37oyXlxf16tXjxIkTAMTGxuLr64vFYsHT05PPP/88z+eYM2cOZrMZs9nMc889x4kTJzh79izBwcGkp6fj6+vLmDFjWLp0KePGjcNisTB9+nRjbL169ahTpw4NGzYkISEBgJiYGEJCQnj55Zfx8vLCz8+PX3/9Nc/5GzVq
xIABAwgICDDWbvz48fj7+1OnTh2aNWvGsWPHbhjn5eXFTz/9ZFx/8cUXRERE5DnHuHHj8PDwwMvLi8jISNLS0oArO9AjIiJo2bIl7u7uNG7cmJSUlHw+8f9nwoQJ1K1bF4vFQt26ddm+fbvR5uzszNChQ/H396dTp06cO3eOiIgIateuTVBQEL169aJz585G/6I8K4BDoVmJiIiIiIiIiIiIiIiIiORj//79/Pbbb4SGhpKVlcWHH354w5HqeXn99dc5ePAgVatWJTMzk8uXL+Pn58fgwYNJSkrC1dWVpUuX4uLigru7O6+99hrPP/88b775JgApKSlUqFCB3r17c/bsWSZNmgTAr7/+SlRUFGvWrKFs2bIcPnyYoKAgkpOTATh48CCzZ8/m888/55133qFx48Zs3bqV2rVr8+qrrzJp0iTGjRvHBx98wODBg3n55ZcBSE1NveEZ9u3bx5AhQ9i1axfVqlXj/fffp3v37qxatYqVK1disViM48N/+eUXLBaLcZT5tm3b+Oabb9i8eTMlS5Zky5YttG/fnp9//hmAuLg4rFYr1atXZ9iwYYwdO5Zp06bluZa//PILmzdv5oEHHmDevHkkJSWxfft27O3tmTNnDn379mXFihW5xgwYMICpU6cSGBgIwKeffsrUqVNviL1q1SpmzpzJ9u3bKV++PD179mTYsGHGDwV27NjBrl27qFixIi+99BLTpk0zPqP8dOjQgf/85z/AlR8gdO7cmYMHDxrtZ86cYceOHZhMJoYMGUKpUqU4cOAA58+fJzAwEF9fX4AiPyuoMC4iIiIiIiIiIiIiIiIi/8CMGTPo2LEj9vb2tGjRgl69enHgwAHc3NwKHNekSRM6dOhAy5Ytad68ObVq1QKgb9++fPrpp0yePJlPP/2Ufv36AdCwYUOGDBnC+fPneeaZZwgJCckz7urVqzl8+DANGzY07tnZ2XH8+HEAXFxcjMKqn58fLi4u1K5dGwB/f3++//57AIKDg3nvvfc4dOgQjRs3pkGDBjfMtXHjRpo1a0a1atWM3EeNGkV2dnah67ZkyRISEhKoV6+ecS8lJYVLly4BEBAQQPXq1Y2/p0yZkm+sV155hQceeACAxYsXExcXZzxjfrm88sorjBgxgj///JNDhw5hMpkICgq6od+6deuIiIigfPnyAPTp04e2bdsa7c2aNaNixYpGnnv37i302ffs2cP777/PmTNncHBwICkpiUuXLlGqVCkAOnfujMlkAmD9+vVMnDgRk8mEk5MTERERHD58+KaeFVQYFxEREREREREREREREZFblJmZyZw5c4ydygAXL15kxowZjB8/vsCxixYtYteuXWzatIkWLVowevRoXnrpJXr06IG7uzsdO3bk8OHDhIWFAfDiiy8SGBjI2rVrmTp1KpMmTWLlypU3xLXZbDz77LNGPtc6ceIEjo6OxrW9vf0N11ffaz1w4EBeeOEF1q1bx1tvvYWnpyefffZZgc90tZhbFDabjU6dOjFmzJg82/PLKy9lypTJFffNN9+kZ8+eBc5fqlQpOnfuzLRp0zhw4ACvvvpqkfK+/hlvJk+AjIwMwsPD2bhxI3Xr1uXvv/+mXLlyXL582SiMX/s8Bc1f1GcFvWNcRERERERERERERERERG7R0qVLqVGjBidOnCA5OZnk5GRiY2OZM2cOmZmZ+Y7LysriyJEjxtHpbdq0YefOnQA89NBDvPDCC7Ru3ZpevXphb28PwKFDh3j44Yfp2LEjH330EbGxsQCULVvWeOc1QGhoKOvWrSMxMdG4dzX2zUhKSqJ69er06NGDt956y5jvWsHBwaxevZqTJ08CEB0dTZMmTYycr3V9nmFhYXz99dfGTvacnBzi4+NvOs/rtWrViujoaONd35mZmezZsyfPvq+++ipffPEFGzZsIDIyMs8+ISEhfPvtt/z9998ATJs2Ldd74G9Weno6GRkZPPHEEwAF7oQHaNy4MbNnz8Zms3H+/Hm+/fZb
o+1mnlU7xkVERERERERERERERETklsyYMeOGgqqbmxvVqlVj2bJlVKhQIc9x2dnZdO3alZSUFBwcHKhcuTKzZs0y2nv06EFMTAw9evQw7v33v//l66+/pkSJEuTk5BAdHQ1A69atmTNnDhaLhfDwcEaMGMG8efPo1asXFy9eJCMjAx8fnzx3kBdk6tSpbNiwgRIlSmBvb8/HH398Qx9PT0/GjRtHs2bNAHj88cf58ssv84zXoUMHOnfuzOLFi3n11Vfp3r07H330Ea1btyYrK4uMjAyee+45/Pz8birP60VGRnLmzBmCg4OBKz9C6Nq1Kz4+Pjf0feyxx/Dx8aFWrVqULl06z3jNmzdn3759BAQEYGdnh9lsLnTnfEHKli3L6NGj8ff3p1KlSrz00ksF9h8xYgTdunXDzc2NSpUq4e3tbRzrfjPParLZbLZbzlpERERERERERERERERE5DYbP348Bw4cYMaMGcWdyn3twoULuLq6smXLFuN95veazMxMsrOzcXR05MKFC4SGhtK/f38iIiJuKo52jIuIiIiIiIiIiIiIiIjIPcPDwwOTycTq1auLO5X7WnR0NO+//z59+/a9Z4viAKmpqTRv3pzs7GzS09N54YUXaNeu3U3H0Y5xERERERERERERERERERG5r9kVdwIiIiIiIiIiIiIiIiIiIiJ3kgrjIiIiIiIiIiIiIiIiIiLXSE5OJjo6+rbGjI2NxcvLCx8fH9asWVNg3+XLl9OoUaNCY1qtVubPn3+bMiy6ESNGMHfu3AL7dO/enY0bN96ljAqnwriIiIiIiIiIiIiIiIiIyDXuRGF89uzZtG/fnj179hAaGnpbYt6pwnhWVlaB7aNGjSIyMrLAPtOnTyc4OPh2pvWPqDAuIiIiIiIiIiIiIiIiIsUiMjISPz8/zGYzzz33HKdOnQKuFKbLly/PO++8Q506dahZsybbtm1j0KBBWCwWPD092bdvnxFn3LhxeHh44OXlRWRkJGlpaQBERUUxcOBAo9/UqVPp3LkzADExMYSEhPDyyy/j5eWFn58fv/76KwC9e/cmKSkJi8VCWFgYOTk59OvXDzc3N7y9vfH19SU9Pf2G5zl9+jTh4eF4eXnh6enJtGnTAPjwww9ZsGABU6dOxWKxcPbs2VzjMjMz6du3LzVr1sTf3z/XTutTp04RHByMr68vHh4e9OvXj5ycHE6fPs2IESPYuHEjFouF3r17F7imBbm63kOHDqVOnTpMnTqV9evXExAQgI+PDx4eHsyYMcPo37lzZyZNmgTAsmXLMJvNxueyZMkSABo1asTixYuN/r169aJJkybUqlWL8PBwMjIyADh37hwRERHUrl2boKAgevXqZXxGt5MK4yIiIiIiIiIiIiIiIiJSLCZNmkR8fDyJiYkEBQURFRVltKWlpeHr68vu3bsZNmwYoaGhhIWFYbVa6dSpE++++y4Aq1atYubMmWzbto29e/fy4IMPMmzYsCLNHxcXx5gxY9i7dy8hISGMHTsWgOjoaFxdXbFarSxdupSEhATWr1/Pzz//TEJCAhs2bKBEiRI3xOvfvz+urq7s3buXDRs2MHr0aGJjYxk2bBhhYWEMGTIEq9VK+fLlc4374osvSEpK4ueff2br1q3s3r3baCtfvjzLli1j165dJCYmkpyczLfffkuVKlUYNWoUwcHBWK1WY4d7QWtakLS0NDw8PNi9ezcDBw6kTp06bN26lT179rBlyxZGjRrF77//fsO44cOHM23aNKxWK4mJiTzzzDN5xrdarSxbtowDBw7w559/smjRIuDK7vNSpUpx4MABVq5cyU8//VSkfG+WCuMiIiIiIiIiIiIiIiIiUizmzZuHn58fnp6eTJ8+HavVarQ5OjrSqlUrAPz8/ChTpoxxNLe/vz+HDh0CYN26dURERBjF5j59+rB27doizR8QEED16tWNv48cOZJnvxo1apCVlUXXrl2ZPXs2mZmZ2NndWGpdt24dvXr1AqBK
lSqEh4ezbt26QvNYv349HTt2pESJEpQoUYKuXbsabTk5OQwdOhRvb298fHyIj4/PtU7XK2hNC/LAAw/wyiuvGNdnzpyhbdu2eHp60rhxY86cOZNrl/5VTZo04bXXXuOjjz4iMTHxhqL/Va1bt6Z06dLY29vj7+9vrPX69evp0qULJpMJJycnIiIiipTvzVJhXERERERERERERERERETuuq1btzJ58mRWrlzJvn37mDBhQq7jyUuWLGn8bW9vj6OjY67r/N6DbTKZjL8dHBzIzs42rq8//ryoMcuVK8e+ffto3749Bw8exGw2c/jw4UKf8dpcbsa14yZMmMDp06fZsWMHiYmJtG/fPs9j3KHwNS1I6dKlcxX7e/fuTYMGDdi7dy9Wq5VatWrlGWvChAnMmjWL0qVL06lTJz766KM849/K53c7qTAuIiIiIiIiIiIiIiIiInddamoqTk5OVKxYkYyMDON93DcrJCSEb7/9lr///huAadOm0bRpUwBcXFyIj48nOzubixcvGsd3F6Zs2bLGe8oB/vrrLy5cuEDTpk0ZM2YMzs7O7N+/P89cvvzyS2PMd999x7PPPlukZ/j666/JzMwkIyODWbNmGW2pqak88sgjODo6curUKRYuXJhvnoWtaZMmTdi5c2eR1iA1NZUnn3wSk8nE5s2bSUhIyLPfwYMHjXef9+nTh9jY2CLFv6px48bMnj0bm83G+fPn+fbbb29qfFGpMC4iIiIiIiIiIiIiIiIid12zZs1wdXXF1dWVoKAgLBbLLcVp3rw5Xbp0ISAgAC8vL/7++28++OADAMLDw3n00Udxc3Pj+eefx8fHp0gxzWYzHh4eeHp6EhYWxm+//cazzz6L2WzG09MTT09PmjdvfsO4yZMnc+DAAby8vAgODubtt9+mXr16hc7Xo0cPatasibu7Ow0aNMi1Fq+99ho7duzAw8ODDh06EBISYrQ1adKEy5cvYzab6d27d4Frmp2dTUJCAo899liR1uDDDz9k2LBhWCwWZs6cme9zvPXWW3h4eODj48OcOXOK/E7zq0aMGMG5c+dwc3OjWbNmeHt753sc+z9hstlsttseVURERERERERERERERERE7hlxcXFMmzaN6dOnF3cquWRmZpKdnY2joyMXLlwgNDSU/v373/Z3jaswLiIiIiIiIiIiIiIiIiIixeL06dM0b96c7Oxs0tPTeeGFF/jwww9v+7vGVRgXEREREREREREREREREZH7mt4xLiIiIiIiIiIiIiIiIiJSDGJiYmjVqtVtjblp0yZWr1592+JZLBbOnTt32+IVFxXGRURERERERERERERERETykJWVVdwp3LTbVRi/+uxWqxUnJ6d/HK+4qTAuIiIiIiIiIiIiIiIiIveE7du306BBA7y9vTGbzSxZsgSA+Ph4AgMDMZvN+Pv7s23bNgCSk5MpX748I0eOxNfXFxcXF1auXFlovEOHDvHcc89Rt25dzGYzU6dONcaYTCZGjhxJ3bp1efPNN+ncuTO9evWiSZMm1KpVi/DwcDIyMgBYtmwZZrMZi8WCp6enEf9ap06dIjg4GF9fXzw8POjXrx85OTlG+99//01YWBju7u40bNiQ5ORkALKzsxkyZAienp54enrSv39/Y97OnTszadIkI8bgwYOJiorCarUSHR3N3LlzsVgsjBo16oZ8OnfuTNeuXQkMDKRWrVp06tSJS5cu5Wpr2LAhnp6exnqcPXuWuXPn8vzzzxtxbDYbNWrUICEhodBnvBeoMC4iIiIiIiIiIiIiIiIixS4lJYVWrVrxwQcfkJCQgNVqJSgoiIyMDMLDwxk5ciSJiYlMmDCBF198kfPnzwOQlpaG2Wxm165dTJ06lUGDBhUYLzs7m5dffpmPP/6YuLg4YmNj+eKLL4iLizNysbe3Jy4ujnHjxgFXdk0vW7aMAwcO8Oeff7Jo0SIAhg8fzrRp07BarSQmJvLMM8/c8Fzly5dn2bJl7Nq1i8TERJKT
k/n222+N9m3btjF27Fj279/P888/T8+ePQGMnHbt2oXVauXIkSNMnDixwDW0WCz07t2byMhIrFYrI0aMyLPfjh07WLNmDQcOHCAlJSVX3F27drFixQoOHjyYa0x4eDixsbGcOnUKuLIz/aGHHsLb27vQZ7wXqDAuIiIiIiIiIiIiIiIiIsVu+/btuLq6EhQUBICdnR0VKlQgKSkJOzs7QkNDAWjQoAEPP/wwVqsVAEdHR8LDwwEICAjgyJEjhcb7+eefeemll7BYLAQGBnLu3Dn2799v5NK1a9dcubVu3ZrSpUtjb2+Pv7+/MUeTJk147bXX+Oijj0hMTKR8+fI3PFdOTg5Dhw7F29sbHx8f4uPjjdwBAgMDcXNzA6Bnz55s2rSJ7Oxs1q1bR+fOnSlZsiQODg706NGDtWvX/sNVvqJdu3Y4OTlhb29Pt27dWLdundHWtm3bPI9OL1WqFC+++CJz5swBrrwfvUuXLkV6xnuBQ3EnICIiIiIiIiIiIiIiIiJyM0wmk/F3yZIljWt7e3uys7MLHGuz2ahQoUKBhdsyZcrkunZ0dDT+tre3N96/PWHCBH7++Wc2btxIp06diIyM5I033sg1dsKECZw+fZodO3bg6OjIf/7zH9LT04v0nNe69pkdHBxyPWd6evoNOd9q7ILidO3alS5dutCnTx+WL19u7DS/Xc94J2nHuIiIiIiIiIiIiIiIiIgUu8DAQA4dOsSWLVuAK7uQU1JScHV1JScnx9gt/dNPP3Hq1CksFsstxytbtiyzZs0y+h4+fJiUlJSbzvngwYPGO7X79OlDbGzsDX1SU1N55JFHcHR05NSpUyxcuDBX+/bt241jy6dPn05wcDD29vaEhITw1VdfkZGRQVZWFtOnT6dp06YAuLi4sHPnTgDOnDmT673qZcuWJS0trcC8//vf/3L+/Hmys7OZNWsWISEhRXreevXqAVfeaR4SEkKFChWK9Iz3AhXGRURERERERERERERERKTYPfTQQ3z//fcMGzYMs9lMnTp12LZtGyVKlOC7775j5MiRmM1mBg4cyH//+99Cd0jnF8/BwYHly5fz3XffYTab8fDwoFu3bly6dOmmc37rrbfw8PDAx8eHOXPmEBUVdUOf1157jR07duDh4UGHDh1uKEIHBgYydOhQPDw8WLp0KdOmTQOuHKtep04d6tSpg8ViwdnZmYEDBxptf/31F25ubnTs2JH69esb8Vq3bo3VasVisTBq1Kg8865bty6hoaG4ublRvnx5I25RdOnShWnTphnHqBf2jCdPnsz1I4bo6Ohc7z5v0aIF8fHxRZ7/VplsNpvtjs8iIiIiIiIiIiIiIiIiIiLFrnPnzlgslpsqht8PtGNcRERERERERERERERERETua9oxLiIiIiIiIiIiIiIiIiIi9zXtGBcRERERERERERERERERkfuaCuMiIiIiIiIiIiIiIiIiInJfU2FcRERERERERERERERERETuayqMi4iIiIiIiIiIiIiIiIjIfU2FcRERERERERERERERERERua+pMC4iIiIiIiIiIiIiIiIiIvc1FcZFREREREREREREREREROS+psK4iIiIiIiIiIiIiIiIiIjc11QYFxERERERERERERERERGR+5oK4yIiIiIiIiIiIiIiIiIicl9TYVxERERERERERERERERERO5rKoyLiIiIiIiIiIiIiIiIiMh9TYVxEREREREREREREREREZHbaNasWZhMJhYvXlzcqcj/T4VxEREREREREREREREREZHbJDk5mS+//JL69esXdypyDRXGRURERERERERERERERERug5ycHLp3786UKVMoWbJkkccdPnyYkJAQzGYzFosl105zk8nEmDFj8Pf3p3r16syaNesOZH7/U2FcREREREREREREREREROQ2mDBhAk8//TS+vr43NS4yMpK2bduSmJjIwoUL6datG8eOHTPaS5Ysyc6dO1m1ahUDBgwgKyvrdqd+
33Mo7gRERERERERERERERERERP7t9u3bx6JFi9i8efNNjTt37hy7d+9m27ZtANSsWZMGDRqwZcsWnnzySeBK4Rygdu3aODg4cOrUKR577LHb+wD3ORXGRURERERERERERERERET+oS1btpCcnEzNmjUBOHXqFD179uSPP/6gT58+NxXLZDLlunZ0dDT+tre3147xW6Cj1EVERERERERERERERERE/qE+ffrwxx9/kJycTHJyMvXr1+eLL74otCju5OREnTp1jHeHHz58mK1bt9KwYcO7kfb/GSqMi4iIiIiIiIiIiIiIiIjcYSNGjCA6OjrPtrlz57JgwQK8vb1p06YN06dP54knnrjLGd7fTDabzVbcSYiIiIiIiIiIiIiIiIiIiNwp2jEuIiIiIiIiIiIiIiIiIiL3NRXGRURERERERERERERERETkvqbCuIiIiIiIiIiIiIiIiIiI3NdUGBcRERERERERERERERGRu+bcuXOUKVOGbt265bq/adMmLBZLnmNGjBjB3Llz73huBeVwJ1gsFs6dO3fX5rvWvn37cHZ2vmPxo6KiSE9PN67v1meYHxXGRUREREREREREREREROSuWbBgAb6+vnz33XecP3++SGNGjRpFZGTkHc7s7rNarTg5ORV3GnfEu+++m6swXtyfoQrjIiIiIiIiIiIiIiIiInLXzJgxg6FDh9KwYUMWLFhQpDGdO3dm0qRJACxbtgyz2YzFYsHT05MlS5bkOWbOnDnUq1ePOnXq0LBhQxISEgCIiYkhJCSEl19+GS8vL/z8/Pj111+NcVlZWfTt2xdvb288PDyIj4837oeGhuLn54eHhwft27fnwoULwJWd5p6ennmOA1ixYgV169bF29sbi8XCjh07ADCZTJw9exYAZ2dnRowYQUBAANWrV2f06NHG+IMHDxIQEICHhwfh4eE0bdqUmJiYPJ97zZo1NGjQAF9fX/z9/dm4caPRFhUVRc2aNfH19WX+/PnG/eTkZMqXL29cnz9/HpPJZFxv376dBg0a4O3tjdlsNtZ88ODB1K1bF4vFQsOGDUlKSgKgd+/eAAQFBWGxWDh9+nSuz/D8+fN07doVT09PPD09effdd425GjVqxODBgwkKCuKpp54yYv1TKoyLiIiIiIiIiIiIiIiIyF2xf/9+fvvtN0JDQ+nWrRszZsy46RjDhw9n2rRpWK1WEhMTeeaZZ27os23bNr755hs2b97M7t27ef/992nfvr3RHhcXx5gxY9i7dy8hISGMHTvWaDt48CCdOnUiISGB/v378/bbbwNgb2/PvHnziI+PZ9++fZQrV44pU6YUOu6XX36hS5cuzJkzh4SEBOLi4qhdu3aez3b27Fm2b99OXFwc48aN48SJEwB06NCBnj178vPPP/P++++zefPmPMf/+uuvREVFsXLlSnbt2sW8efNo3749ly9fZsWKFSxcuJBdu3YRHx9PcnJykdY7JSWFVq1a8cEHH5CQkIDVaiUoKAiAoUOHEhcXh9VqpW/fvrz22msAREdHA7BlyxasVitVqlTJFfO9997j8uXLJCYmsmPHDhYvXpzrRxJHjhxh48aN7Nu3jzVr1rB9+/Yi5VoQFcZFRERERERERERERERE5K6YMWMGHTt2xN7enhYtWnD06FEOHDhwUzGaNGnCa6+9xkcffURiYmKunc5XLVmyhISEBOrVq4fFYqF///6kpKRw6dIlAGNX9tW/jxw5Yox1cXGhXr16N7TZbDYmTpyIj48PZrOZFStWYLVaCx23du1amjVrZhTDH3jgAcqVK5fns10t3leqVIkaNWpw9OhR/v77b6xWKx07dgTAzc2NBg0a5Dl+9erVHD58mIYNG2KxWGjTpg12dnYcP36c9evX065dO8qWLYvJZKJXr16FLzZXdou7uroaxXA7OzsqVKhgPFtAQACenp6MGjUq13oUZN26dfTo0QM7OzsefPBBOnbsyNq1a432iIgIHBwcKFWqFBaLJdfn
c6sc/nEEEREREREREREREREREZFCZGZmMmfOHB544AHmzZsHwMWLF5kxYwbjx48vcpwJEybw888/s3HjRjp16kRkZCRvvPFGrj42m41OnToxZsyYPGM4Ojoaf9vb25OVlVVo27x589iwYQM//vgjZcuWZfLkyWzYsKFIMYuqqDGuPeb8WjabjWeffdZY34JcG8PBwYHs7Gzj+tp3g+fn+PHj9OvXj7i4OJ566ikSExNp2LBhoeMKywVuz1peTzvGRUREREREREREREREROSOW7p0KTVq1ODEiRMkJyeTnJxMbGwsc+bMITMzs8hxDh48iIeHB/369aNPnz7Exsbe0CcsLIyvv/6a48ePA5CTk5Prnd+3IjU1lUqVKlG2bFnOnTuX7zu+rxcaGsqaNWs4ePAgcOUHAmlpaUWet2zZsnh7e/P1118DkJSUxNatW/Oda926dSQmJhr3du7cCUBISAgLFy7k3Llz2Gw2vvjiC6PPI488gs1mY//+/QB89dVXRltgYCCHDh1iy5YtwJW1TElJIS0tjQceeICqVatis9mYOnVqrlycnJzyfc6QkBBmzJiBzWbjwoULzJkzh6ZNmxZ5TW6FdoyLiIiIiIiIiIiIiIiIyB03Y8YMIiMjc91zc3OjWrVqLFu2zDieuzBvvfUWSUlJlChRgtKlS/P555/f0CcoKIiPPvqI1q1bk5WVRUZGBs899xx+fn63nH/Hjh1ZsmQJrq6uVK5cmaCgII4dO1boOBcXF2bNmsUrr7xCZmYm9vb2REdH4+/vX+S5v/rqK7p27cq4ceNwcXGhbt26eR4h7+Liwrx58+jVqxcXL14kIyMDHx8f5s2bR4sWLdi5cyd16tShbNmyNG/e3Bjn4ODAlClTeP7556lYsSJt2rQx2h566CG+//57Xn/9dc6dO4ednR3vvfceLVu25KWXXsLDw4OKFSvSqlWrXLm8/vrrPPvss5QuXZoffvghV9s777zDgAED8PLyAqBt27a0a9euyOtxK0w2m812R2cQEREREREREREREREREZFbdv78eR588EFMJhNHjx4lICCAuLg4Hn/88eJO7V9DO8ZFRERERERERERERERERO5hP/30E0OGDAEgOzubiRMnqih+k7RjXERERERERERERERERERE7mt2xZ2AiIiIiIiIiIiIiIiIiIjInaTCuIiIiIiIiIiIiIiIiIjckqioKNLT043rzp07M2nSpGLLJzk5mfLlyxfb/HdaVFQUAwcOBGDp0qUMGjTotsWeOnUqnTt3zrPN2dkZq9V62+YqTHR0NOPGjbutMfWOcRERERERERERERERERG5Je+++y4DBw7E0dHxpsbl5OQAYGf3797Hm5WVhYND8ZRcw8LCCAsLK5a5/6nC1q137963fc5/9zdNRERERERERERERERERIrF1eJlUFAQFouF06dPA3DgwAGaNGlCrVq1CA8PJyMjA7iy2/nFF18kNDQUT09P/vjjD+bMmYPZbMZsNvPcc89x4sQJAGJiYmjVqpUx1/Lly2nUqJFxPXLkSFxcXKhbty7Dhw/H2dk5V24jR47E19cXFxcXVq5cmWf+V/Np3LgxtWvXpmXLlpw5cwaAzMxMhg0bhr+/PxaLhXbt2pGamgpc2RXftWtXGjZsiKenJ5cuXSIiIgJ3d3e8vb1p2rSpMce4cePw8PDAy8uLyMhI0tLSjLkjIiJo2bIl7u7uNG7cmJSUFAD27t1LgwYNqFOnDu7u7owePTrP/K9do1GjRmGxWLBYLHh6emIymTh27BgA48ePx9/fnzp16tCsWTPj/rlz54iIiMDV1ZUGDRqwd+/efD7p3E6dOkW7du3w9/fHy8uL4cOHG22DBw+mbt26WCwWGjZsSFJSktFmMpkYOXIkdevW5c0336Rz58706tUr3+/K1Z3xMTExhISE8PLLL+Pl5YWfnx+//vqrEbew78JVKoyLiIiIiIiIiIiIiIiIyE2Ljo4GYMuWLVitVqpU
qQKA1Wpl2bJlHDhwgD///JNFixYZY7Zv385XX33F/v37SU1NZciQIaxatYrExEQCAwPp3r17ofOuWLGCRYsWsWfPHnbu3GkU069KS0vDbDaza9cupk6dWuBx41u2bGHevHkcPHiQxx9/nDfffBO4UtB+8MEH2blzJ1ar9YYC8K5du1ixYgUHDx5k9erVnD17lv3795OQkMD8+fMBWLVqFTNnzmTbtm3s3buXBx98kGHDhhkxduzYQUxMDPv376dKlSpMmzYNuHJs+fr169m9eze7du1i0aJFxMbGFrgmI0aMwGq1YrVaCQwMpGvXrjz55JPMmzePpKQktm/fzu7du4mMjKRv377AlWJ6yZIlOXjwICtWrGDz5s2Frj1Ap06dePXVV9m5cyd79uwhPj6ehQsXAjB06FDi4uKwWq307duX1157LddYe3t74uLijGPSC/quXCsuLo4xY8awd+9eQkJCGDt2LFD4d+FaOkpdRERERERERERERERERG6b1q1bU7p0aQD8/f05cuSI0daiRQsefvhhADZu3EizZs2oVq0aAH379mXUqFFkZ2cXGH/9+vW0bdsWJycnALp168bGjRuNdkdHR8LDwwEICAjINf/1nnvuOR555BEAevbsaYxbvHgxaWlpRqE2IyMj107ka+f39vbmwIED9O3bl2eeeYYWLVoAsG7dOiIiIox3nvfp04e2bdsaMZo1a0bFihWNPK/u2L506RJ9+/bFarViZ2fHb7/9htVqpX79+gWuC8Do0aM5fvw4y5cvN54jLi4OX19fgFxru379eiZOnIjJZKJcuXK0b9++wLUCuHDhAuvXr+fPP/807p0/f97YGb527VqmTJnCuXPnyMnJMXbBX9W1a9dc1wV9V64VEBBA9erVjb+nTJliPENB34VrqTAuIiIiIiIiIiIiIiIiIrfNte8bt7e3Jysry7guU6ZMvuNMJpPxt4ODQ64ibnp6epHGAZQsWdK4Z29vX2ihPa9YNpuNKVOm5DoW/VrXPkeNGjXYv38/GzZsYN26dbzxxhtYrdZC88xvnd566y0qVarEnj17cHBwIDw8vMDnv+qrr77iu+++Y/Pmzcb7u202G2+++SY9e/YsdPz1+eXFZrMBEBsbe8N75Y8fP06/fv2Ii4vjqaeeIjExkYYNG+bqc/3nX9B35Vb6FfQMOkpdRERERERERERERERERG6Jk5OT8d7smxUcHMzq1as5efIkcOVo9iZNmmBvb4+LiwuJiYlcunSJrKws5s2bZ4xr3LgxixYt4vz589hsNmbOnHnL+a9cudLY/Tx9+nRCQkIAaNWqFRMnTuTixYsAXLx4kZ9//jnPGL///jsmk4mwsDDGjx+PzWbjt99+IyQkhG+//Za///4bgGnTpuVbaL9Wamoqjz32GA4ODiQlJbF27dpCx6xbt4733nuPFStW5Co+t2rViujoaGPndmZmJnv27AEgJCSEWbNmYbPZ+Pvvv/nmm28KnadMmTIEBwfz4YcfGvdOnjzJ77//TlpaGg888ABVq1bFZrMxderUQuP9UzfzXdCOcRERERERERERERERERG5Ja+//jrPPvsspUuX5ocffripsZ6enowbN45mzZoB8Pjjj/Pll18CUL9+fVq0aIGnpydVq1bl6aefZseOHQA8//zz7NixA4vFQvny5XnmmWeM48pvVlBQEO3bt+fEiRPUrFmTmJgY4Mq7si9fvky9evWMXchDhw7Fw8Pjhhh79+7lzTffxGazkZWVRYcOHTCbzZjNZvbt20dAQAB2dnaYzWY+++yzQnMaPnw4HTp0YPbs2Tz11FM0bty40DHvv/8+Fy9epHnz5sa9lStXEhkZyZkzZwgODgYgKyuLrl274uPjwzvvvEP37t2pXbs2lStXpkGDBly+fLnQuebOnct//vMfPD09MZlMPPjgg0ybNg1vb29eeuklPDw8qFixIq1atSo01j91M98Fk+3qfncRERERERERERERERERkX+B
c+fO4eTkhM1m4/XXX+fSpUt8/vnnNxUjKiqKs2fPMmnSpDuTpNwVRf0uaMe4iIiIiIiIiIiIiIiIiPyrdOzYkeTkZNLT0/Hw8CA6Orq4U5JiUtTvgnaMi4iIiIiIiIiIiIiIiIjIfc2uuBMQERERERERERERERERkfuPyWTi7NmzhfaLiooiPT3duE5JSeHpp5/GYrHw/vvv38EM755JkyZx6tSpuzrn9et6M9q0aWO8b/1umTp1Kp07dwZg6dKlDBo06LbGV2FcRERERERERERERERERIrNu+++m6uAu3btWsqUKYPVauXtt98ucpysrKw7kd5tURyF8evX9W77J59HWFgYEydOvI3ZqDAuIiIiIiIiIiIiIiIiIrfIZDIxfPhwfHx8qFWrFnPnzs2z3+DBg6lbty4Wi4WGDRuSlJQEQO/evQEICgrCYrHw7bffMmTIEGJjY7FYLKxbt47Tp08THh6Ol5cXnp6eTJs2zYjr7OzM0KFD8ff3p1OnTkRFRdGuXTtatmxJrVq1eP7559m3bx+hoaHUqlWLl19+mZycHACmT5+Ou7s7FosFLy8vduzYUejz5vccV9fi2h3ylSpVIjk5mVGjRnHy5Mn/r707jau6zP8//j4C7guKmmaLmrkABw7KIo5QAgpqERopaiqDS471MydJsNEydcptUtMmqAgSQp2yzKw0Nfy55J4nVNRREq1cf4KIIiLw/d/o33lEgGKjaWdez1ucc22f64J7b67vVwMHDpTFYpHVatXFixcVExMjd3d3ubu76+WXX7aNe/jhhzVhwgQFBgbqvvvu05QpU/T555+re/fuat26tV577bUbPtczZ86ooKBAo0aNkq+vrzw8PDR69GgVFxdLkg4ePKhu3brJzc1NERERunDhQpVnkJiYqPbt26tz586aPn26TCZTuTN46aWX5OPjo0mTJmnv3r3q3r27OnfuLFdXV82YMcPWt6CgQAMHDlSHDh3UvXt37d2719aWkpKiiIgISdKGDRvk7u6usWPHytPTU25ubtq1a1e16inHAAAAAAAAAAAAAIDfQJIxefJkwzAMIzs722jcuLFx9OhRW1teXp5hGIZx5swZ25glS5YYoaGh5eb4uZ9hGEZycrLx2GOP2T4PGDDAiI+PNwzDME6fPm3cc889xtatWw3DMIz777/fGDFihFFWVmYYhmG89NJLRps2bYzc3FyjrKzMCAwMNPz8/IwLFy4YV69eNTw9PY1Vq1YZhmEYDRs2NE6cOGEYhmEUFxcbBQUF193vjezDxcXFdhb333+/sWfPHlvbxIkTjcGDBxulpaXGxYsXDYvFYixdutQwDMN46KGHjMcff9woKSkxcnNzjYYNGxpPP/20UVZWZvzwww9GvXr1ftO5jho1ynjvvfcMwzCMsrIyY8SIEcbs2bMNwzAMb29v45133jEMwzAyMzONmjVrGsnJyRX2v3fvXqNFixbGyZMnDcMwjBdffNH4ZeQsyXj55Zdtny9cuGAUFRUZhmEYhYWFhsVisf3uYmNjjaFDhxplZWXG+fPnjY4dOxrDhw83DKP830BGRobh4OBgbNu2zTAMw3jzzTeNXr16VaueX+LGOAAAAAAAAAAAAIDfbOTIkZKktm3bKjAwUBs3bqzQZ+3atfL395e7u7umTZsmq9Va7fnXrVunp556SpLUvHlz9e/fX+vWrbO1R0dHl7sl3KtXLzVu3Fgmk0mdO3fWww8/rAYNGsjR0VFeXl46fPiwJCk4OFhDhw7VggULdPToUdWvX/+6tfwn+/j1nkaNGqUaNWqoXr16GjZsmNauXWtrj4yMlIODgxo3bqy2bdvqkUcekclkUqtWrdSsWTPl5OTccD0rVqzQnDlzZLFY5OXlpU2bNunIkSO6cOGCrFar7f3eZrNZ3bt3r3SOr776SmFhYWrRooUkadSoURX6xMTE2H6+fPmyRo4cKbPZrK5du+rYsWO2GtevX68RI0bIZDKpUaNGGjx4cJW1
t2vXTn5+fpIkf39/ZWdnV7uenzlW2QIAAAAAAAAAAAAAN+jXj7I+fvy4nnnmGe3cuVMPPPCAMjMzFRgYeNPm/3WgXbt2bdvPDg4OFT7//O7r5cuXa/fu3dqwYYP69OmjGTNmKCoqqsp1r7cPBwcHlZaW2j7fyPu9f72n6uzhRs/VMAwtX75c7du3L/d9ZY9Nr/Jx5NXo98vfxwsvvKCmTZtqz549cnR0VP/+/as8l2utWdXv8Ebm4MY4AAAAAAAAAAAAgN8sOTlZkpSTk6NNmzYpICCgXHt+fr6cnJzUsmVLGYahRYsWlWtv0KCB8vPzq5w/JCREb7/9tiTp7Nmz+uijj9SzZ8//qOaSkhJlZ2fL29tbsbGxioyM1I4dOyRJkyZNqlBjdfbRrl0723vKP/roI126dMnW1rBhw3J7DAkJUVJSkgzD0KVLl5SamqpevXrd0B5u9FwjIiI0a9YsW6icl5enI0eOqGHDhvLy8tLixYslSfv379fmzZsrXbNHjx5as2aNzpw5I0lKSkq6Zo15eXm655575OjoqEOHDpW7FR8SEqLk5GQZhqELFy5oyZIlN7T/G62HYBwAAAAAAAAAAADAb1ZaWiovLy/16tVLr7/+ulq3bl2u3Ww2KyoqSm5ubvLx8dF9991Xrn3ChAnq2bOnLBaLLeD8pddff10HDhyQ2WxWjx499Le//c32WO3/pOaYmBi5u7vLYrFo9+7deu655yRJ3377re3R3Deyj3nz5unZZ59V586dtWfPHrm4uNjaxo0bp1GjRslischqtWrKlClycnKS2WyWn5+fwsPDNWDAgBvaw42e67x581SnTh1ZLBZ5eHgoODjY9kj2xYsX66233pK7u7smT55c5c1zs9msyZMn609/+pM6d+6soqIiNWrUqMoaJ0+erOTkZHl4eCg+Pl5BQUG2tilTpujy5cvq2LGj+vTpU+Xj2693BtWtx/T/X4IOAAAAAAAAAAAAADfEZDIpLy9Pzs7Ot7uUm6K0tFRdu3bV9u3bVaMGd4wrU1BQoAYNGkiSFixYoNWrV+uLL7644+vhHeMAAAAAAAAAAAAAoJ/eX71z587bXcYdLT4+Xlu2bNHVq1d19913KzEx8Q9RDzfGAQAAAAAAAAAAAAB2jfv/AAAAAAAAAAAAAAC7RjAOAAAAAAAAAAAAAL+D+fPn69SpU7e7jP9KBOMAAAAAAAAAAAAA8DsgGL99CMYBAAAAAAAAAAAAoAqXL1/WwIED5erqKk9PT/Xq1UuS9Mgjjyg9Pd3W78svv5Sfn58k6Z133pGrq6ssFovMZrO2b9+uadOm6cSJExo4cKAsFousVquuXr2q+Ph4+fr6ymKxaMCAAcrLy5MkRUdHa/To0QoJCVGbNm0UExOjHTt26OGHH1bbtm313HPP2daeMWOGOnXqJIvFIovFomPHjv2OJ/THYDIMw7jdRQAAAAAAAAAAAADAnejjjz9WQkKC1qxZI0nKzc1VkyZNtHbtWr300kv6+uuvJUmPPfaYIiMjNXToUDVq1EgHDx5Uy5YtdfXqVV25ckX169dX69attWLFClksFknSK6+8otLSUk2ZMkWSNH36dJ06dUpvvPGGoqOjdejQIWVkZKhGjRpydXWV2WzWv/71LxUXF6tt27b66quvdPfdd6tNmzY6efKk6tSpo8LCQtWoUUO1a9e+Led1p+LGOAAAAAAAAAAAAABUwdPTUwcOHNDYsWO1bNkyOTk5SZJ69uyp/Px87dmzR8eOHdOOHTs0YMAASVJwcLCGDh2qBQsW6OjRo6pfv36lc69YsUJpaWm2m95LlizR0aNHbe2PPfaYateurZo1a8psNis0NFROTk6qV6+eXF1ddfjwYTVs2FAPPvignnzySSUmJio3N5dQvBIE4wAAAAAAAAAAAABQhbZt2yorK0thYWHasmWL3N3dbY87HzdunBYuXKiEhATFxMSoVq1akqTly5dr5syZunr1qvr06aOlS5dWOrdh
GFq4cKGsVqusVquysrL0+eef29p/GXA7ODhU+FxSUiIHBwdt27ZN48eP15kzZ9S1a1dt2rTpVhzFHxrBOAAAAAAAAAAAAABU4YcffpDJZFJ4eLjmzp0rwzD0/fffS5KGDh2qNWvWKDk5WWPGjJEklZSUKDs7W97e3oqNjVVkZKR27NghSWrYsKHy8/Ntc0dERGjevHkqLCyUJBUWFmr//v03VF9BQYFOnz6tgIAATZkyRd27d9eePXskScOGDdPHH3/8H5+BPXC83QUAAAAAAAAAAAAAwJ1q7969mjRpkgzDUElJiYYOHSoPDw9JUt26ddW/f3+dOHFC9957rySptLRUMTExys3NlaOjo5o1a6bk5GRJP90wHzVqlOrWrauUlBTFxcXpypUr8vPzk8lkkiTFxcXJzc2t2vXl5+crMjJSly5dkslk0oMPPqjhw4dLknbt2qVx48bdzOP4wzIZhmHc7iIAAAAAAAAAAAAA4I+mtLRUXbp00cKFCxUQEHC7yynn7NmzGjx4sNauXXu7S7kj8Ch1AAAAAAAAAAAAALhBK1eu1AMPPCB/f/87LhSXpGbNmhGK/wI3xgEAAAAAAAAAAAAAdo0b4wAAAAAAAAAAAAAAu0YwDgAAAAAAAAAAAACwawTjAAAAAAAAAAAAAPA7GDJkiLy9veXh4aG+ffvq1KlTt7uk/xq8YxwAAAAAAAAAAAAAfgdnz55Vs2bNJEkzZ85UTk6OEhISbnNV/x0cb3cBAAAAAAAAAAAAAPDfID09XampqSoqKlJRUZGaNm16u0v6r8Gj1AEAAAAAAAAAAADgFtu8ebNef/11ff7559q3b59ee+01FRUV3e6y/msQjAMAAAAAAAAAAADALZaXl6cGDRrIxcVFxcXFSkxMvN0l/VchGAcAAAAAAAAAAACAWywsLEwdOnRQhw4dFBAQIIvFYmvr06ePdu3aJUnatWuX+vTpY2t78cUXeQ/5TWAyDMO43UUAAAAAAAAAAAAAAHCrcGMcAAAAAAAAAAAAAGDXCMYBAAAAAAAAAAAAAHaNYBwAAAAAAAAAAAAAYNcIxgEAAAAAAAAAAAAAdo1gHAAAAAAAAAAAAABg1wjGAQAAAAAAAAAAAAB2jWAcAAAAAAAAAAAAAGDXCMYBAAAAAAAAAAAAAHaNYBwAAAAAAAAAAAAAYNcIxgEAAAAAAAAAAAAAdo1gHAAAAAAAAAAAAABg1wjGAQAAAAAAAAAAAAB2jWAcAAAAAAAAAAAAAGDXCMYBAAAAAAAAAAAAAHaNYBwAAAAAAAAAAAAAYNcIxgEAAAAAAAAAAAD8Jq1bt1aHDh1ksVhksVi0bNmyW7LOrl27NHDgwOv2S0lJUURExE1f/0FU5G4AACHvSURBVOGHH9aKFStuylwJCQmaM2fOTZnrj27q1KkaP37877KW4++yCgAAAAAAAAAAAAC7tGzZMlksllu6hre39y0L3SWppKREjo6/T3Q6ZsyYWzr/77mXPxJujAMAAAAAAAAAAAD43Xz55Zfq1auXJOnChQtycnLSW2+9JUlavHixYmJiKozZsGFDufA9NTVVHh4e8vDwUN++ffXjjz/a2i5cuKDw8HC5uroqMDBQOTk5ldbRunVrxcXFydfXV8OHD9fFixcVExMjd3d3ubu76+WXX650XEFBgUaNGiVfX195eHho9OjRKi4uliTNmDFDnTp1st2gP3bsWIXxv7wlnZKSopCQEA0aNEhms1ne3t767rvvKl33tddek4+PjywWi3x8fLR169Yq93L16lXFx8fL19dXFotFAwYMUF5eniQpPT1dfn5+8vLykqenpz799FPbPNWp/9NPP5WHh4csFovc3d31ySefSKp4qz4yMlIpKSmSpPz8fI0cOVLu7u7y9PQs9zs+efKkHn30Ubm6uiooKEi5ubmV7j82Nta2/8DAQB06dEiSdPnyZQ0cOFCurq7y9PS0/W39Gv8qAAAAAAAAAAAAAOA3GzZsmAzDkK+v
r2bOnKlmzZpds39AQICioqJ05coVZWRkyMfHR+vWrdPo0aO1du1a9e7d+5rj9+3bp+eff167d+9Wq1at9Pe//10jR47UF198IUnasmWLrFarOnXqpNmzZ2v06NH68ssvK53r3Llz2r59u0wmk+Li4nTlyhVlZmbq8uXL6t69uzp27FjhEe4TJkxQQECA3n77bRmGoVGjRmnBggUaOXKk5s6dq5MnT6pOnToqLCxUjRrXv6e8c+dOWa1WtWnTRvHx8Zo1a5YSExMr9Bs6dKiee+45SdK2bdsUHR2tgwcPVrqXV155RfXq1dOOHTskSdOnT9fkyZP1xhtvKDQ0VIMGDZLJZFJOTo66du2qY8eOqbCwsFr1T548WYmJifL391dZWZkuXLhw3T2OHz9ederUUWZmpmrUqKGzZ8/a2rZv367du3fLxcVFUVFRSkxM1KRJkyrMERcXp7lz50qSli5dqmeffVarV6/W6tWrdf78eWVlZUlSlcE6N8YBAAAAAAAAAAAA/CYbN25UZmamvvnmGzVt2lTDhw+/7pg6derIYrFoy5YtWrduneLj4/XNN9+orKxMX331lYKCgq45PiMjQ2FhYWrVqpUkaezYsfrqq69UWloqSerWrZs6deokSRo9erQ2bNhga/u16OhomUwmSdK6des0atQo1ahRQ/Xq1dOwYcO0du3aCmNWrFihOXPmyGKxyMvLS5s2bdKRI0fUsGFDPfjgg3ryySeVmJio3Nxc1a5d+7rn4e/vrzZt2th+zs7OrrTfnj179NBDD8nd3V1jxozRoUOHdPny5Ur3smLFCqWlpdlufi9ZskRHjx6VJB09elS9e/eWu7u7IiIilJubq6NHj1a7/uDgYD377LOaPXu2MjMz5ezsfN09rlq1SrGxsbag/Zf/PBEWFiYXF5fr7n/t2rXy9/eXu7u7pk2bJqvVKkny9PTUgQMHNHbsWC1btkxOTk6VjufGOAAAAAAAAAAAAIDf5L777pMkOTk5afz48Wrfvn21xoWEhGjdunXauHGjZs6cKbPZrLS0NDVu3FgtWrS4oRp+DoN/i/r169/wvIZhaPny5ZXuddu2bfr666+1YcMGde3aVUuWLFFAQMA1a/hl+Ozg4KCSkpIKfYqLi9W/f3/bDfsLFy6oUaNGunLliurUqVNhL4ZhaOHChZU+VjwqKkozZ85UZGSkJKlJkyYqKiqSg4NDtep/7bXXtH//fmVkZGj48OEaMmSIJk6cKEdHx3L/gFBUVHTNfd/I/o8fP65nnnlGO3fu1AMPPKDMzEwFBgZKktq2bausrCx99dVXWrdunSZOnCir1arGjRuXm4Mb4wAAAAAAAAAAAABu2KVLl3T+/Hnb5yVLlsjLy6taY0NCQpSeni5nZ2fVq1dPISEhevHFFxUSEnLdsT169NDq1at14sQJSVJCQoKCg4Pl4OAgSdq6davtEePvvPOOevToYWu7Xk1JSUkyDEOXLl1SampqpcFyRESEZs2aZQtw8/LydOTIERUUFOj06dMKCAjQlClT1L17d+3Zs6da53E9RUVFKi4utv0jwsKFC6/ZPyIiQvPmzVNhYaEkqbCwUPv377fV+/MN9bS0NNu7x6tb/8GDB+Xm5qZnnnlGf/nLX7Rt2zZJUrt27bR9+3ZJP91K37x5s21MeHi45s6dq7KyMkkq9yj16sjPz5eTk5NatmwpwzC0aNEiW9sPP/wgk8lkW8MwDH3//fcV5uDGOAAAAAAAAAAAAIAbdvr0aT3++OMqLS2VYRhq27atFi9ebGsfOXKkwsPDFR4eXmGst7e38vPzFRwcLEnq2bOnnnnmGdvnyvx8g9vd3V1z5sxRWFiYJOnee+/V22+/bevXrVs3xcXF6ciRI3JxcSlX07VMmTJF48aNk9lsliQ98cQTGjBgQIV+8+bNU3x8vCwWi2rUqCFHR0fNnj1btWvXVmRkpC5duiSTyaQHH3ywWo+Wr46GDRtqxowZ8vX1VdOmTRUVFXXN
/j+/L93Pz892bnFxcXJzc9OCBQsUGRkpZ2dnBQUF2cL2/Pz8atX/wgsv6NChQ6pZs6bq1q2rN998U5I0ceJEDRw4UGazWW5ubvLz8yt3Zn/9619lNpvl5OQkHx+fcr+z6zGbzYqKipKbm5tcXFwUERFha9u7d68mTZokwzBUUlKioUOHysPDo8IcJsMwjGqvCAAAAAAAAAAAAAC/s3/96196++23K33nN1Ad3BgHAAAAAAAAAAAAcMf65z//qXnz5umNN9643aXgD4wb4wAAAAAAAAAAAAAAu1bjdhcAAAAAAAAAAAAAADdTQUGB6tevrxEjRpT7fsOGDbJYLNWaIzY2VlOnTr35xUnKycmRs7PzLZn7ZrJarVq6dOntLuOmIBgHAAAAAAAAAAAAYFeWLVumLl266KOPPtLFixdvdzl/WATjAAAAAAAAAAAAAHCHSkpKUlxcnAIDA7Vs2bJqjTl58qRCQ0Pl6uqqkJAQ/fDDD7a2q1evKj4+Xr6+vrJYLBowYIDy8vL0/fffq3nz5iouLrb1jY6O1oIFCyRJO3fuVFBQkLy9veXl5aUPPvig0rXXrFmjzp07y8PDQw899JCysrIk/XTD3d3dXcOGDZO7u7u6dOkiq9Varu0vf/mLPDw8ZDablZmZqejoaJnNZvn5+enHH3+0rTF37lz5+vqqc+fOCgsL07FjxyRJU6dO1cCBA/Xoo4/K1dVVQUFBys3N1ZkzZ/Tiiy8qIyNDFotFY8aMqf4v4A5EMA4AAAAAAAAAAADAbmRlZen7779XaGioRowYoaSkpGqNGzdunHx9fZWVlaX33ntP69evt7XNmTNH9erV044dO2S1WmU2mzV58mTde++9slgsWrlypSTp4sWLWrlypZ588kmdP39eo0eP1vvvv69du3Zp7dq1mjBhQrmwWpLOnDmjwYMH67333lNmZqZGjx6tyMhIGYYhSdq/f7+GDx+uffv2KS4uTlFRUba2gwcPauTIkcrMzFRERISCgoIUHx+vvXv3ytvbW/Pnz5ckpaen69ChQ9q6dau++eYbDRkyRGPHjrXVsH37dqWkpCgrK0vNmzdXYmKimjdvrmnTpqlHjx6yWq1KSEj4zb+TO4Hj7S4AAAAAAAAAAAAAAG6WpKQkDRs2TA4ODurTp4+eeuopHThwQJ06dbrmuPXr12vu3LmSpFatWik8PNzWtmLFCuXn52v58uWSpOLiYrVu3VqS9Oc//1nJycmKjIzUBx98oKCgILm4uOjzzz/Xd999p969e5db59ChQ2rbtq3t8/bt22U2m2U2myVJQ4YM0dNPP20L0Fu3bq3g4GBJ0oABAzR69Gh9//33kqR27dqpS5cukiRvb2+1a9dOHTt2lCT5+vrq448/ttW/c+dOW9/S0tJyNYWFhcnFxUWS5O/vr7179177kP+ACMYBAAAAAAAAAAAA2IWrV68qNTVVTk5OSk9PlyQVFhYqKSnJFnpXl8lksv1sGIYWLlyoXr16VejXr18/jRs3TidPnlRKSoomTpxoG+Pm5qavv/66wpicnJwbquXXdf1cW+3atW3fOzg4VPhcUlJiq2XSpEkaPXp0pXNWNc6e8Ch1AAAAAAAAAAAAAHZh5cqVatu2rX788Ufl5OQoJydH27ZtU2pqqq5evXrNsSEhIXr33Xcl/fS+8Z8fjy5JERERmjdvngoLCyX9FLbv379f0k+h8hNPPKGpU6cqOztbYWFhkqRu3brp6NGjWrdunW0eq9Va7n3kktS1a1ft3btX+/btkyQtXbpUrVq1UqtWrST9FKJnZGRIkj788EPddddduueee27oXCIiIpSQkKDc3FxJP/0DwZ49e647rmHDhsrPzy/33bBhw2w30f9IuDEOAAAAAAAAAAAAwC4kJSVpyJAh5b7r1KmTWrVqpU8//VRNmjSpcuyCBQsUHR0tV1dXtWrVSkFBQba2uLg4XblyRX5+frbb2nFxcXJzc5P00+PUfX19FRcXJwcHB0lS
48aN9dlnnyk2NlYTJkzQ1atXdd9992nFihXl1m3WrJnef/99DRs2TCUlJWrcuLE++OAD2zpubm5KSUnRuHHjVLNmTS1ZsqTcbfbqGDJkiM6dO6cePXpIkkpKShQTEyMvL69rjgsODtbcuXPl4eGhbt26KSEhQbt27dK4ceNuaP07gcn4+c3sAAAAAAAAAAAAAIA7xoYNGzR+/HhZrdbbXYok6ezZsxo8eLDWrl17u0u5YTxKHQAAAAAAAAAAAABwXc2aNftDhuISN8YBAAAAAAAAAAAAAHaOG+MAAAAAAAAAAAAAALtGMA4AAAAAAAAAAADgpmjatKlycnKu22/q1KkqKiq65fWsXLlSf/3rX2/5Or9FRkaGTCaTUlNTy30/depUjR8//obn+/WZRkdHa/78+ZX2TUhI0Jw5c254jd/LokWLFB0dXWW7YRhq06aNgoODy32fk5MjZ2fnSscQjAMAAAAAAAAAAAD4Xb388su/SzAeHh6uefPm3fJ1foukpCQFBwcrKSnppsx3I2c6ZswYPf/88zdl3dth/fr1cnZ2VmZmpo4ePVqtMQTjAAAAAAAAAAAAAH6TlStXqlOnTvLw8NDEiRPLtcXGxsrHx0cWi0WBgYE6dOiQpJ9CWUkKCAiQxWLRmTNnlJ6eLj8/P3l5ecnT01OffvpppesdPnxYf/rTn+Tp6Smz2azJkydLkoqLi/X888/L3d1dnp6eCgsLkySlpKQoIiLCNj41NVV+fn7q3LmzAgMD9e2339r6hYSEaNCgQTKbzfL29tZ3331nG5ecnCyLxSJPT095e3vbbsWvWbNG3bt3V5cuXeTr66uMjIxqndv58+f12WefKS0tTVlZWTpy5Ei1xh0+fFh9+/aVj4+PPDw8tGjRoirPVJIOHDig4OBgtW/fXv3791dxcbGkirfSZ82aJbPZLE9PT3Xt2lWFhYVVnvW1rF+/Xv7+/vLy8pKbm1u50D86OlpPPfVUpfUUFBRo4MCB6tChg7p37669e/dec52kpCSNGjVKgwcP1rvvvluts3OsVi8AAAAAAAAAAAAA+IUzZ87oz3/+szZt2iRXV1e99dZbOnfunK09Li5Oc+fOlSQtXbpUzz77rFavXq2EhAQlJiZq06ZNtsdeh4aGatCgQTKZTMrJyVHXrl117Ngx1apVq9yaixYt0iOPPKJJkyZJknJzcyVJr776qv79739r9+7dqlWrls6ePVuh3i1btmjJkiXauHGjatWqpU2bNmnw4MHav3+/JGnnzp2yWq1q06aN4uPjNWvWLCUmJmrDhg2aNm2avv76a7Vs2VKFhYWSpO+++05Tp07VmjVr1LBhQx05ckQBAQHKycmpUPevpaenKzQ0VC1atNCTTz6pd999V6+88so1x5SWlmrQoEFKS0tTx44dVVhYqK5du8rPz6/SM5Ukq9WqjIwM1apVS4GBgVq+fLkGDRpUbt733ntPy5cv1+bNm9WoUSPl5eWpVq1aVZ71tXTu3FmbN2+Wg4ODcnNz5eXlpdDQUN1zzz3XrGfatGmqVauWDh48qAsXLtj2VZnc3FytXr1ab775po4fP66+ffvq5ZdfVo0a174TTjAOAAAAAAAAAAAA4IZt27ZNHh4ecnV1lSSNGDFC//M//2NrX7t2rRYuXKiCggKVlZVdM1g9evSohgwZoh9++EGOjo7Kzc3V0aNH1bFjx3L9AgMD9fzzz+vixYt66KGHFBISIklatWqVZs2aZQukmzVrVmGNTz75RN9++225wDU3N1eXL1+WJPn7+6tNmza2nxcuXChJ+uyzzzR06FC1bNlSklS3bl1J0urVq3XkyBEFBgba5qtRo4aOHz+uBx988Jpnl5SUpFdffVWSFBMTo9DQUE2fPl0ODg5Vjjl06JD279+vqKgo23cFBQXKysqSj49PpWP69etnq9fX11fZ2dkV+qxatUpjxoxRo0aNJEmNGzeWVPVZX8u5c+c0YsQI/fvf/5ajo6POnTunffv2
2YLxqupZv3695s2bJ5PJpEaNGmnw4MGV1ipJ77//vnr37i1nZ2c5Ozvrrrvu0po1a9S7d+9r1kYwDgAAAAAAAAAAAOA/ZjKZbD8fP35czzzzjHbu3KkHHnhAmZmZ5QLkX4uKitLMmTMVGRkpSWrSpEml78t+/PHH1a1bN61du1aLFi3S/Pnz9fnnn1erPsMwNHz48CpvZteuXdv2s4ODg0pKSq47X8+ePZWenl6t9X9mtVqVmZmpUaNG2c7s//7v//TFF1/okUceueZ6TZo0kdVqrfZaN7qnX/otZz1mzBj16dNHy5cvl8lkUufOncv9Hqtbzy//ln4tKSlJp06dUuvWrSX99M8BSUlJ1w3Gecc4AAAAAAAAAAAAgBvm7++vzMxMHTx4UJL07rvv2t4ZnZ+fLycnJ7Vs2VKGYdjehf2zBg0aKD8/3/Y5Ly/Pdls7LS1NeXl5la55+PBh3XXXXRo2bJhmz56tbdu2SZLCw8O1YMECXblyRZIqfZR6eHi40tLSdPz4cUlSWVmZdu3add19Pvroo0pLS9PJkyclSYWFhSosLFRoaKjWrVunzMxMW98dO3bYfu7YsaN+/PHHCvMlJSVpwoQJOnbsmHJycpSTk6P58+eXex93ZTp06KCGDRsqOTnZ9t2RI0dsN/F/fabVFR4eroSEBNvY8+fPq7S0tMqz3rFjh4KDgyudKy8vT/fff79MJpM2btxoe4f79YSEhCg5OVmGYejChQtasmRJpf12796ts2fP6sSJE7azy87O1po1ayr9nf8SN8YBAAAAAAAAAAAA3LBmzZrp3XffVb9+/VSzZk2FhYXJxcVFkmQ2mxUVFSU3Nze5uLgoIiKi3NgJEyaoZ8+eqlu3rr788kstWLBAkZGRcnZ2VlBQkO67775K1/zwww+VlpammjVrqqysTAkJCZJ+ep/53/72N3Xu3FlOTk66++67K9xuDggI0OzZs9WvXz+VlJSouLhYffv2lbe39zX3GRgYqJdeekmhoaEymUyqWbOmPvzwQ7Vr107p6el66qmnVFhYqOLiYnl5eSk9PV1nzpzRuXPn1KRJk3JzFRUV6f3339f//u//lvt+wIABio2N1enTp6usw9HRUatWrdL48eM1b948lZaWqmnTprYb678+0+oaOnSoTpw4oW7dusnR0VH16tXTunXrqjzrnJwc1alTp9K5Zs6cqbFjx2r69OmyWCxVvif816ZMmaKRI0eqY8eOatasmbp37277J4dfSkpKUlRUVLn3iTs7O6tnz55KTU1V//79q1zDZBiGUa1qAAAAAAAAAAAAAADX9cEHH+jQoUOaPHny7S7lpnv66ac1aNAgde/e/XaXckMIxgEAAAAAAAAAAAAAdo13jAMAAAAAAAAAAAAA7BrBOAAAAAAAAAAAAIA7wtSpU1VUVPSbxp44cUIBAQHV6tunTx8dOnToN61zPStXrpTFYpHFYlGLFi3UrFkz2+f3339f0dHRmj9//i1Z+2ZJSUmp8F74PzoepQ4AAAAAAAAAAADgjmAymZSXlydnZ+cKbSUlJXJ0dPz9i/oPTJ06VefPny8XhEdHR8tisWj8+PG3ra6ysjJJUo0ald+jTklJ0YoVK7RixYrfsapbixvjAAAAAAAAAAAAAG67MWPGSJICAgJksVh05swZRUdHKyYmRoGBgXJ3d5ckDRkyRN7e3vLw8FDfvn116tQpSVJOTk65QN1kMumVV16Rr6+v2rRpo+TkZFtb69atZbVaJUkPP/ywYmNjFRAQoAceeMBWhySdPHlSvXr1kqurq3r16qWoqChNnTr1P97rgQMHFBwcrPbt26t///4qLi6WJF29elXx8fHy9fWVxWLRgAEDlJeXJ0l655135OrqKovFIrPZrO3bt0uSDh8+rL59+8rHx0ceHh5atGhRpWtOnTpVjz/+uEJDQ+Xu7q6TJ08qNTVVHh4etrP88ccfKx2bmpoqPz8/de7cWYGBgfr2228lSdu2bVOXLl1k
sVjk7u6uN9988z8+m1uFYBwAAAAAAAAAAADAbZeQkCBJ2rRpk6xWq5o3by5J2r17tz777DMdPHhQkjR//nzt2rVLmZmZCggIuGZQXatWLe3YsUNffPGFxo0bp5KSkkr7ZWdnKyMjQ/v27dOaNWu0detWSdK4cePk7++vrKwsLV68WBs2bLgpe7Varfr000914MABnT59WsuXL5ckzZkzR/Xq1dOOHTtktVplNps1efJkSdKECRO0fv16Wa1WffPNN3Jzc1NpaakGDRqkf/zjH9q5c6e2bdumt956Szt37qx03a1bt2rx4sXKyspSXl6enn/+eX3xxRfKzMxUt27dNHLkyApjtmzZoiVLlmjjxo365ptv9Pe//12DBw+WJL366quKjY2V1WrVvn37FBUVdVPO51b4Yz1rAAAAAAAAAAAAAMB/lSeeeEINGjSwfU5PT1dqaqqKiopUVFSkpk2bVjl2yJAhkqSOHTvK0dFRp06d0j333FOh38CBA+Xo6ChHR0dZLBZlZ2fL399f69ev19y5cyVJLVq00COPPHJT9tSvXz/VrVtXkuTr66vs7GxJ0ooVK5Sfn28LyouLi9W6dWtJUnBwsIYOHapHH31UvXv3Vvv27ZWVlaX9+/eXC6QLCgqUlZUlHx+fCuv26dNHd911lyQpIyNDYWFhatWqlSRp7NixmjZtmkpLS8uN+eSTT/Ttt9/Kz8/P9l1ubq4uX76sHj16aPr06Tp8+LCCgoLUvXv3m3I+twLBOAAAAAAAAAAAAIA7Vv369W0/b968Wa+//rq2bt2q5s2ba+XKlXrxxRerHFu7dm3bzw4ODlXeGK9uP5PJdKPl39B6hmFo4cKF6tWrV4Uxy5cv1+7du7Vhwwb16dNHM2bMkNlsVpMmTWyPhb+eX57lr1W1N8MwNHz4cL3yyisV2saPH6/HHntM69at0wsvvCB3d3f985//rFYtvzcepQ4AAAAAAAAAAADgjtCgQQPl5+dX2Z6Xl6cGDRrIxcVFxcXFSkxMvKX1BAUFKSUlRZJ0+vRprVq16pauFxERoXnz5qmwsFCSVFhYqP3796ukpETZ2dny9vZWbGysIiMjtWPHDnXo0EENGzYs9/70I0eOKDc397pr9ejRQ6tXr9aJEyck/fQo++DgYDk4OJTrFx4errS0NB0/flySVFZWpl27dkmSDh06pDZt2mjUqFF64YUXtG3btptyDrcCwTgAAAAAAAAAAACAO8KECRPUs2dPWSwWnTlzpkJ7WFiYOnTooA4dOiggIEAWi+WW1rNgwQJt2rRJrq6uGjJkiPz8/OTs7CxJ2rVrl/r06XNT14uLi5OPj4/8/Pzk4eGhrl27ymq1qrS0VDExMXJ3d5fFYtHu3bv13HPPydHRUatWrdJHH30kDw8Pubm5acSIEbp8+fJ113J3d9ecOXMUFhYmDw8Pbdq0SW+//XaFfgEBAZo9e7b69esnT09Pubm5aenSpZKkRYsWyc3NTV5eXpo8ebL+8Y9/VLpWnz59bGH6r8/txRdftL1f/lYyGYZh3PJVAAAAAAAAAAAAAOAP5vLly3JycpKjo6POnTunrl27Ki0trdz7tvHHwDvGAQAAAAAAAAAAAKAShw8f1rBhw2QYhoqLizV27FhC8T8obowDAAAAAAAAAAAAAOwa7xgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAA
AAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANg1gnEAAAAAAAAAAAAAgF0jGAcAAAAAAAAAAAAA2DWCcQAAAAAAAAAAAACAXSMYBwAAAAAAAAAAAADYNYJxAAAAAAAAAAAAAIBdIxgHAAAAAAAAAAAAANi1/wdhnb18htqFFQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Extracting text from data/AI_Information.pdf...\n", + "Extracted 15 pages with content\n", + "Created 4 text chunks\n", + "Created 4 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 3 text chunks\n", + "Created 4 text chunks\n", + "Created 3 text chunks\n", + "Creating embeddings for chunks...\n", + "Vector store created with 48 chunks\n", + "\n", + "\n", + "===== Evaluating Query 1/1 =====\n", + "Query: How does neural network architecture impact AI performance?\n", + "\n", + "=== Processing query with HyDE: How does neural network architecture impact AI performance? ===\n", + "\n", + "Generating hypothetical document...\n", + "Generated hypothetical document of 3438 characters\n", + "Creating embedding for hypothetical document...\n", + "Retrieving 5 most similar chunks...\n", + "Generating final response...\n", + "\n", + "=== Processing query with Standard RAG: How does neural network architecture impact AI performance? ===\n", + "\n", + "Creating embedding for query...\n", + "Retrieving 5 most similar chunks...\n", + "Generating final response...\n", + "\n", + "=== OVERALL ANALYSIS ===\n", + "**Overall Analysis: HyDE RAG vs Standard RAG**\n", + "\n", + "Based on the evaluation of Query 1, we can observe that both HyDE RAG and standard RAG provide accurate information about the impact of neural network architecture on AI performance. However, a more in-depth analysis of the strengths and weaknesses of each approach reveals the following:\n", + "\n", + "**Strengths and Weaknesses of HyDE RAG:**\n", + "\n", + "Strengths:\n", + "\n", + "1. 
**Improved contextual understanding**: HyDE RAG's use of document embedding allows it to capture the context and relationships between different concepts, leading to more accurate and informative responses.\n", + "2. **Better handling of complex queries**: HyDE RAG's ability to represent documents as vectors enables it to capture the nuances of complex queries, such as the impact of neural network architecture on AI performance.\n", + "\n", + "Weaknesses:\n", + "\n", + "1. **Higher computational requirements**: HyDE RAG's use of document embedding requires more computational resources, which can lead to slower response times.\n", + "2. **Overfitting to training data**: HyDE RAG's reliance on document embedding can lead to overfitting to the training data, which can result in poor performance on unseen queries.\n", + "\n", + "**Strengths and Weaknesses of Standard RAG:**\n", + "\n", + "Strengths:\n", + "\n", + "1. **Faster response times**: Standard RAG's use of direct query embedding allows it to respond quickly, even to complex queries.\n", + "2. **Robustness to overfitting**: Standard RAG's direct query embedding approach is less prone to overfitting, as it does not rely on document representation.\n", + "\n", + "Weaknesses:\n", + "\n", + "1. **Limited contextual understanding**: Standard RAG's direct query embedding approach can lead to limited contextual understanding, resulting in less accurate and informative responses.\n", + "2. **Difficulty handling complex queries**: Standard RAG's direct query embedding approach can struggle to capture the nuances of complex queries, such as the impact of neural network architecture on AI performance.\n", + "\n", + "**When HyDE RAG Outperforms Standard RAG:**\n", + "\n", + "HyDE RAG is likely to outperform Standard RAG in the following scenarios:\n", + "\n", + "1. 
**Complex queries**: HyDE RAG's ability to capture the nuances of complex queries, such as the impact of neural network architecture on AI performance, makes it a better choice for these types of queries.\n", + "2. **Contextual understanding**: HyDE RAG's improved contextual understanding, enabled by document embedding, makes it a better choice when the query requires a deep understanding of the context and relationships between different concepts.\n", + "\n", + "**When Standard RAG Outperforms HyDE RAG:**\n", + "\n", + "Standard RAG is likely to outperform HyDE RAG in the following scenarios:\n", + "\n", + "1. **Simple queries**: Standard RAG's direct query embedding approach can respond quickly and accurately to simple queries, making it a better choice for these types of queries.\n", + "2. **Real-time applications**: Standard RAG's faster response times make it a better choice for real-time applications, such as search engines or chatbots.\n", + "\n", + "**Recommendations:**\n", + "\n", + "1. **Use HyDE RAG for complex queries**: When dealing with complex queries that require a deep understanding of the context and relationships between different concepts, HyDE RAG is a better choice.\n", + "2. **Use Standard RAG for simple queries**: When dealing with simple queries that require a quick and accurate response, Standard RAG is a better choice.\n", + "3. **Use HyDE RAG for applications requiring contextual understanding**: When the application requires a deep understanding of the context and relationships between different concepts, HyDE RAG is a better choice.\n", + "4. 
**Use Standard RAG for applications requiring real-time responses**: When the application requires a quick and accurate response, Standard RAG is a better choice.\n" + ] + } + ], + "source": [ + "# Path to the AI information document\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Process document and create vector store\n", + "# This loads the document, extracts text, chunks it, and creates embeddings\n", + "vector_store = process_document(pdf_path)\n", + "\n", + "# Example 1: Direct comparison for a single query related to AI\n", + "query = \"What are the main ethical considerations in artificial intelligence development?\"\n", + "\n", + "# Run HyDE RAG approach\n", + "# This generates a hypothetical document answering the query, embeds it, \n", + "# and uses that embedding to retrieve relevant chunks\n", + "hyde_result = hyde_rag(query, vector_store)\n", + "print(\"\\n=== HyDE Response ===\")\n", + "print(hyde_result[\"response\"])\n", + "\n", + "# Run standard RAG approach for comparison\n", + "# This directly embeds the query and uses it to retrieve relevant chunks\n", + "standard_result = standard_rag(query, vector_store)\n", + "print(\"\\n=== Standard RAG Response ===\")\n", + "print(standard_result[\"response\"])\n", + "\n", + "# Visualize the differences between HyDE and standard RAG approaches\n", + "# Shows the query, hypothetical document, and retrieved chunks side by side\n", + "visualize_results(query, hyde_result, standard_result)\n", + "\n", + "# Example 2: Run full evaluation with multiple AI-related queries\n", + "test_queries = [\n", + " \"How does neural network architecture impact AI performance?\"\n", + "]\n", + "\n", + "# Optional reference answers for better evaluation\n", + "reference_answers = [\n", + " \"Neural network architecture significantly impacts AI performance through factors like depth (number of layers), width (neurons per layer), connectivity patterns, and activation functions. 
Different architectures like CNNs, RNNs, and Transformers are optimized for specific tasks such as image recognition, sequence processing, and natural language understanding respectively.\",\n", + "]\n", + "\n", + "# Run comprehensive evaluation comparing HyDE and standard RAG approaches\n", + "evaluation_results = run_evaluation(\n", + " pdf_path=pdf_path,\n", + " test_queries=test_queries,\n", + " reference_answers=reference_answers\n", + ")\n", + "\n", + "# Print the overall analysis of which approach performs better across queries\n", + "print(\"\\n=== OVERALL ANALYSIS ===\")\n", + "print(evaluation_results[\"overall_analysis\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/20_crag.ipynb b/20_crag.ipynb new file mode 100644 index 0000000..1bca015 --- /dev/null +++ b/20_crag.ipynb @@ -0,0 +1,1189 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Corrective RAG (CRAG) Implementation\n", + "\n", + "In this notebook, I implement Corrective RAG - an advanced approach that dynamically evaluates retrieved information and corrects the retrieval process when necessary, using web search as a fallback.\n", + "\n", + "CRAG improves on traditional RAG by:\n", + "\n", + "- Evaluating retrieved content before using it\n", + "- Dynamically switching between knowledge sources based on relevance\n", + "- Correcting the retrieval with web search when local knowledge is insufficient\n", + "- Combining information from multiple sources when appropriate" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## Setting Up the Environment\n", + "We begin by importing necessary libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import json\n", + "import fitz # PyMuPDF\n", + "from openai import OpenAI\n", + "import requests\n", + "from typing import List, Dict, Tuple, Any\n", + "import re\n", + "from urllib.parse import quote_plus\n", + "import time" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the OpenAI API Client\n", + "We initialize the OpenAI client to generate embeddings and responses." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the OpenAI client with the base URL and API key\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\",\n", + " api_key=os.getenv(\"OPENAI_API_KEY\") # Retrieve the API key from environment variables\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_from_pdf(pdf_path):\n", + " \"\"\"\n", + " Extract text content from a PDF file.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " \n", + " Returns:\n", + " str: Extracted text content\n", + " \"\"\"\n", + " print(f\"Extracting text from {pdf_path}...\")\n", + " \n", + " # Open the PDF file\n", + " pdf = fitz.open(pdf_path)\n", + " text = \"\"\n", + " \n", + " # Iterate through each page in the PDF\n", + " for page_num in range(len(pdf)):\n", + " page = pdf[page_num]\n", + " # Extract text from the current page and append it to the text variable\n", + " text += page.get_text()\n", + " \n", + " return text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "def chunk_text(text, chunk_size=1000, overlap=200):\n", + " \"\"\"\n", + " Split text into overlapping chunks for efficient retrieval and processing.\n", + " \n", + " This function divides a large text into smaller, manageable chunks with\n", + " specified overlap between consecutive chunks. Chunking is critical for RAG\n", + " systems as it allows for more precise retrieval of relevant information.\n", + " \n", + " Args:\n", + " text (str): Input text to be chunked\n", + " chunk_size (int): Maximum size of each chunk in characters\n", + " overlap (int): Number of overlapping characters between consecutive chunks\n", + " to maintain context across chunk boundaries\n", + " \n", + " Returns:\n", + " List[Dict]: List of text chunks, each containing:\n", + " - text: The chunk content\n", + " - metadata: Dictionary with positional information and source type\n", + " \"\"\"\n", + " chunks = []\n", + " \n", + " # Iterate through the text with a sliding window approach\n", + " # Moving by (chunk_size - overlap) ensures proper overlap between chunks\n", + " for i in range(0, len(text), chunk_size - overlap):\n", + " # Extract the current chunk, limited by chunk_size\n", + " chunk_text = text[i:i + chunk_size]\n", + " \n", + " # Only add non-empty chunks\n", + " if chunk_text:\n", + " chunks.append({\n", + " \"text\": chunk_text, # The actual text content\n", + " \"metadata\": {\n", + " \"start_pos\": i, # Starting position in the original text\n", + " \"end_pos\": i + len(chunk_text), # Ending position\n", + " \"source_type\": \"document\" # Indicates the source of this text\n", + " }\n", + " })\n", + " \n", + " print(f\"Created {len(chunks)} text chunks\")\n", + " return chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Vector Store Implementation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class 
SimpleVectorStore:\n", + " \"\"\"\n", + " A simple vector store implementation using NumPy.\n", + " \"\"\"\n", + " def __init__(self):\n", + " # Initialize lists to store vectors, texts, and metadata\n", + " self.vectors = []\n", + " self.texts = []\n", + " self.metadata = []\n", + " \n", + " def add_item(self, text, embedding, metadata=None):\n", + " \"\"\"\n", + " Add an item to the vector store.\n", + " \n", + " Args:\n", + " text (str): The text content\n", + " embedding (List[float]): The embedding vector\n", + " metadata (Dict, optional): Additional metadata\n", + " \"\"\"\n", + " # Append the embedding, text, and metadata to their respective lists\n", + " self.vectors.append(np.array(embedding))\n", + " self.texts.append(text)\n", + " self.metadata.append(metadata or {})\n", + " \n", + " def add_items(self, items, embeddings):\n", + " \"\"\"\n", + " Add multiple items to the vector store.\n", + " \n", + " Args:\n", + " items (List[Dict]): List of items with text and metadata\n", + " embeddings (List[List[float]]): List of embedding vectors\n", + " \"\"\"\n", + " # Iterate over items and embeddings and add them to the store\n", + " for i, (item, embedding) in enumerate(zip(items, embeddings)):\n", + " self.add_item(\n", + " text=item[\"text\"],\n", + " embedding=embedding,\n", + " metadata=item.get(\"metadata\", {})\n", + " )\n", + " \n", + " def similarity_search(self, query_embedding, k=5):\n", + " \"\"\"\n", + " Find the most similar items to a query embedding.\n", + " \n", + " Args:\n", + " query_embedding (List[float]): Query embedding vector\n", + " k (int): Number of results to return\n", + " \n", + " Returns:\n", + " List[Dict]: Top k most similar items\n", + " \"\"\"\n", + " # Return an empty list if there are no vectors in the store\n", + " if not self.vectors:\n", + " return []\n", + " \n", + " # Convert query embedding to numpy array\n", + " query_vector = np.array(query_embedding)\n", + " \n", + " # Calculate similarities using cosine 
similarity\n", + " similarities = []\n", + " for i, vector in enumerate(self.vectors):\n", + " similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n", + " similarities.append((i, similarity))\n", + " \n", + " # Sort by similarity (descending)\n", + " similarities.sort(key=lambda x: x[1], reverse=True)\n", + " \n", + " # Return top k results\n", + " results = []\n", + " for i in range(min(k, len(similarities))):\n", + " idx, score = similarities[i]\n", + " results.append({\n", + " \"text\": self.texts[idx],\n", + " \"metadata\": self.metadata[idx],\n", + " \"similarity\": float(score)\n", + " })\n", + " \n", + " return results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_embeddings(texts, model=\"text-embedding-3-small\"):\n", + " \"\"\"\n", + " Create vector embeddings for text inputs using OpenAI's embedding models.\n", + " \n", + " Embeddings are dense vector representations of text that capture semantic meaning,\n", + " allowing for similarity comparisons. In RAG systems, embeddings are essential\n", + " for matching queries with relevant document chunks.\n", + " \n", + " Args:\n", + " texts (str or List[str]): Input text(s) to be embedded. Can be a single string\n", + " or a list of strings.\n", + " model (str): The embedding model name to use. 
Defaults to \"text-embedding-3-small\".\n", + " \n", + " Returns:\n", + " List[List[float]]: If input is a list, returns a list of embedding vectors.\n", + " If input is a single string, returns a single embedding vector.\n", + " \"\"\"\n", + " # Handle both single string and list inputs by converting single strings to a list\n", + " input_texts = texts if isinstance(texts, list) else [texts]\n", + " \n", + " # Process in batches to avoid API rate limits and payload size restrictions\n", + " # OpenAI API typically has limits on request size and rate\n", + " batch_size = 100\n", + " all_embeddings = []\n", + " \n", + " # Process each batch of texts\n", + " for i in range(0, len(input_texts), batch_size):\n", + " # Extract the current batch of texts\n", + " batch = input_texts[i:i + batch_size]\n", + " \n", + " # Make API call to generate embeddings for the current batch\n", + " response = client.embeddings.create(\n", + " model=model,\n", + " input=batch\n", + " )\n", + " \n", + " # Extract the embedding vectors from the response\n", + " batch_embeddings = [item.embedding for item in response.data]\n", + " all_embeddings.extend(batch_embeddings)\n", + " \n", + " # If the original input was a single string, return just the first embedding\n", + " if isinstance(texts, str):\n", + " return all_embeddings[0]\n", + " \n", + " # Otherwise return the full list of embeddings\n", + " return all_embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Processing Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_document(pdf_path, chunk_size=1000, chunk_overlap=200):\n", + " \"\"\"\n", + " Process a document into a vector store.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF file\n", + " chunk_size (int): Size of each chunk in characters\n", + " chunk_overlap (int): Overlap between chunks in characters\n", + " \n", + " Returns:\n", + 
" SimpleVectorStore: Vector store containing document chunks\n", + " \"\"\"\n", + " # Extract text from the PDF file\n", + " text = extract_text_from_pdf(pdf_path)\n", + " \n", + " # Split the extracted text into chunks with specified size and overlap\n", + " chunks = chunk_text(text, chunk_size, chunk_overlap)\n", + " \n", + " # Create embeddings for each chunk of text\n", + " print(\"Creating embeddings for chunks...\")\n", + " chunk_texts = [chunk[\"text\"] for chunk in chunks]\n", + " chunk_embeddings = create_embeddings(chunk_texts)\n", + " \n", + " # Initialize a new vector store\n", + " vector_store = SimpleVectorStore()\n", + " \n", + " # Add the chunks and their embeddings to the vector store\n", + " vector_store.add_items(chunks, chunk_embeddings)\n", + " \n", + " print(f\"Vector store created with {len(chunks)} chunks\")\n", + " return vector_store" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Relevance Evaluation Function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_document_relevance(query, document):\n", + " \"\"\"\n", + " Evaluate the relevance of a document to a query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " document (str): Document text\n", + " \n", + " Returns:\n", + " float: Relevance score (0-1)\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to evaluate relevance\n", + " system_prompt = \"\"\"\n", + " You are an expert at evaluating document relevance. 
\n", + " Rate how relevant the given document is to the query on a scale from 0 to 1.\n", + " 0 means completely irrelevant, 1 means perfectly relevant.\n", + " Provide ONLY the score as a float between 0 and 1.\n", + " \"\"\"\n", + " \n", + " # Define the user prompt with the query and document\n", + " user_prompt = f\"Query: {query}\\n\\nDocument: {document}\"\n", + " \n", + " try:\n", + " # Make a request to the OpenAI API to evaluate the relevance\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": user_prompt} # User message with the query and document\n", + " ],\n", + " temperature=0, # Set the temperature for response generation\n", + " max_tokens=5 # Very short response needed\n", + " )\n", + " \n", + " # Extract the score from the response\n", + " score_text = response.choices[0].message.content.strip()\n", + " # Use regex to find the float value in the response\n", + " score_match = re.search(r'(\\d+(\\.\\d+)?)', score_text)\n", + " if score_match:\n", + " return float(score_match.group(1)) # Return the extracted score as a float\n", + " return 0.5 # Default to middle value if parsing fails\n", + " \n", + " except Exception as e:\n", + " # Print the error message and return a default value on error\n", + " print(f\"Error evaluating document relevance: {e}\")\n", + " return 0.5 # Default to middle value on error" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Web Search Function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def duck_duck_go_search(query, num_results=3):\n", + " \"\"\"\n", + " Perform a web search using DuckDuckGo.\n", + " \n", + " Args:\n", + " query (str): Search query\n", + " num_results (int): Number of results to 
return\n", + " \n", + " Returns:\n", + " Tuple[str, List[Dict]]: Combined search results text and source metadata\n", + " \"\"\"\n", + " # Encode the query for URL\n", + " encoded_query = quote_plus(query)\n", + " \n", + " # DuckDuckGo search API endpoint (unofficial)\n", + " url = f\"https://api.duckduckgo.com/?q={encoded_query}&format=json\"\n", + " \n", + " try:\n", + " # Perform the web search request\n", + " response = requests.get(url, headers={\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n", + " })\n", + " data = response.json()\n", + " \n", + " # Initialize variables to store results text and sources\n", + " results_text = \"\"\n", + " sources = []\n", + " \n", + " # Add abstract if available\n", + " if data.get(\"AbstractText\"):\n", + " results_text += f\"{data['AbstractText']}\\n\\n\"\n", + " sources.append({\n", + " \"title\": data.get(\"AbstractSource\", \"Wikipedia\"),\n", + " \"url\": data.get(\"AbstractURL\", \"\")\n", + " })\n", + " \n", + " # Add related topics\n", + " for topic in data.get(\"RelatedTopics\", [])[:num_results]:\n", + " if \"Text\" in topic and \"FirstURL\" in topic:\n", + " results_text += f\"{topic['Text']}\\n\\n\"\n", + " sources.append({\n", + " \"title\": topic.get(\"Text\", \"\").split(\" - \")[0],\n", + " \"url\": topic.get(\"FirstURL\", \"\")\n", + " })\n", + " \n", + " return results_text, sources\n", + " \n", + " except Exception as e:\n", + " # Print error message if the main search fails\n", + " print(f\"Error performing web search: {e}\")\n", + " \n", + " # Fallback to a backup search API\n", + " try:\n", + " backup_url = f\"https://serpapi.com/search.json?q={encoded_query}&engine=duckduckgo\"\n", + " response = requests.get(backup_url)\n", + " data = response.json()\n", + " \n", + " # Initialize variables to store results text and sources\n", + " results_text = \"\"\n", + " sources = []\n", + " \n", + " # Extract 
results from the backup API\n", + " for result in data.get(\"organic_results\", [])[:num_results]:\n", + " results_text += f\"{result.get('title', '')}: {result.get('snippet', '')}\\n\\n\"\n", + " sources.append({\n", + " \"title\": result.get(\"title\", \"\"),\n", + " \"url\": result.get(\"link\", \"\")\n", + " })\n", + " \n", + " return results_text, sources\n", + " except Exception as backup_error:\n", + " # Print error message if the backup search also fails\n", + " print(f\"Backup search also failed: {backup_error}\")\n", + " return \"Failed to retrieve search results.\", []" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def rewrite_search_query(query):\n", + " \"\"\"\n", + " Rewrite a query to be more suitable for web search.\n", + " \n", + " Args:\n", + " query (str): Original query\n", + " \n", + " Returns:\n", + " str: Rewritten query\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to rewrite the query\n", + " system_prompt = \"\"\"\n", + " You are an expert at creating effective search queries.\n", + " Rewrite the given query to make it more suitable for a web search engine.\n", + " Focus on keywords and facts, remove unnecessary words, and make it concise.\n", + " \"\"\"\n", + " \n", + " try:\n", + " # Make a request to the OpenAI API to rewrite the query\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": f\"Original query: {query}\\n\\nRewritten query:\"} # User message with the original query\n", + " ],\n", + " temperature=0.3, # Set the temperature for response generation\n", + " max_tokens=50 # Limit the response length\n", + " )\n", + " \n", + " # Return the rewritten query from the response\n", + " return 
response.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " # Print the error message and return the original query on error\n", + " print(f\"Error rewriting search query: {e}\")\n", + " return query # Return original query on error" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def perform_web_search(query):\n", + " \"\"\"\n", + " Perform web search with query rewriting.\n", + " \n", + " Args:\n", + " query (str): Original user query\n", + " \n", + " Returns:\n", + " Tuple[str, List[Dict]]: Search results text and source metadata\n", + " \"\"\"\n", + " # Rewrite the query to improve search results\n", + " rewritten_query = rewrite_search_query(query)\n", + " print(f\"Rewritten search query: {rewritten_query}\")\n", + " \n", + " # Perform the web search using the rewritten query\n", + " results_text, sources = duck_duck_go_search(rewritten_query)\n", + " \n", + " # Return the search results text and source metadata\n", + " return results_text, sources" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Knowledge Refinement Function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def refine_knowledge(text):\n", + " \"\"\"\n", + " Extract and refine key information from text.\n", + " \n", + " Args:\n", + " text (str): Input text to refine\n", + " \n", + " Returns:\n", + " str: Refined key points from the text\n", + " \"\"\"\n", + " # Define the system prompt to instruct the model on how to extract key information\n", + " system_prompt = \"\"\"\n", + " Extract the key information from the following text as a set of clear, concise bullet points.\n", + " Focus on the most relevant facts and important details.\n", + " Format your response as a bulleted list with each point on a new line starting with \"• \".\n", + " \"\"\"\n", + " \n", + " try:\n", + " # Make a request to the OpenAI API 
to refine the text\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\", # Specify the model to use\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt}, # System message to guide the assistant\n", + " {\"role\": \"user\", \"content\": f\"Text to refine:\\n\\n{text}\"} # User message with the text to refine\n", + " ],\n", + " temperature=0.3 # Set the temperature for response generation\n", + " )\n", + " \n", + " # Return the refined key points from the response\n", + " return response.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " # Print the error message and return the original text on error\n", + " print(f\"Error refining knowledge: {e}\")\n", + " return text # Return original text on error" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Core CRAG Process" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def crag_process(query, vector_store, k=3):\n", + " \"\"\"\n", + " Run the Corrective RAG process.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " k (int): Number of initial documents to retrieve\n", + " \n", + " Returns:\n", + " Dict: Process results including response and debug info\n", + " \"\"\"\n", + " print(f\"\\n=== Processing query with CRAG: {query} ===\\n\")\n", + " \n", + " # Step 1: Create query embedding and retrieve documents\n", + " print(\"Retrieving initial documents...\")\n", + " query_embedding = create_embeddings(query)\n", + " retrieved_docs = vector_store.similarity_search(query_embedding, k=k)\n", + " \n", + " # Step 2: Evaluate document relevance\n", + " print(\"Evaluating document relevance...\")\n", + " relevance_scores = []\n", + " for doc in retrieved_docs:\n", + " score = evaluate_document_relevance(query, doc[\"text\"])\n", + " relevance_scores.append(score)\n", + " 
doc[\"relevance\"] = score\n", + " print(f\"Document scored {score:.2f} relevance\")\n", + " \n", + " # Step 3: Determine action based on best relevance score\n", + " max_score = max(relevance_scores) if relevance_scores else 0\n", + " best_doc_idx = relevance_scores.index(max_score) if relevance_scores else -1\n", + " \n", + " # Track sources for attribution\n", + " sources = []\n", + " final_knowledge = \"\"\n", + " \n", + " # Step 4: Execute the appropriate knowledge acquisition strategy\n", + " if max_score > 0.7:\n", + " # Case 1: High relevance - Use document directly\n", + " print(f\"High relevance ({max_score:.2f}) - Using document directly\")\n", + " best_doc = retrieved_docs[best_doc_idx][\"text\"]\n", + " final_knowledge = best_doc\n", + " sources.append({\n", + " \"title\": \"Document\",\n", + " \"url\": \"\"\n", + " })\n", + " \n", + " elif max_score < 0.3:\n", + " # Case 2: Low relevance - Use web search\n", + " print(f\"Low relevance ({max_score:.2f}) - Performing web search\")\n", + " web_results, web_sources = perform_web_search(query)\n", + " final_knowledge = refine_knowledge(web_results)\n", + " sources.extend(web_sources)\n", + " \n", + " else:\n", + " # Case 3: Medium relevance - Combine document with web search\n", + " print(f\"Medium relevance ({max_score:.2f}) - Combining document with web search\")\n", + " best_doc = retrieved_docs[best_doc_idx][\"text\"]\n", + " refined_doc = refine_knowledge(best_doc)\n", + " \n", + " # Get web results\n", + " web_results, web_sources = perform_web_search(query)\n", + " refined_web = refine_knowledge(web_results)\n", + " \n", + " # Combine knowledge\n", + " final_knowledge = f\"From document:\\n{refined_doc}\\n\\nFrom web search:\\n{refined_web}\"\n", + " \n", + " # Add sources\n", + " sources.append({\n", + " \"title\": \"Document\",\n", + " \"url\": \"\"\n", + " })\n", + " sources.extend(web_sources)\n", + " \n", + " # Step 5: Generate final response\n", + " print(\"Generating final response...\")\n", 
+ " response = generate_response(query, final_knowledge, sources)\n", + " \n", + " # Return comprehensive results\n", + " return {\n", + " \"query\": query,\n", + " \"response\": response,\n", + " \"retrieved_docs\": retrieved_docs,\n", + " \"relevance_scores\": relevance_scores,\n", + " \"max_relevance\": max_score,\n", + " \"final_knowledge\": final_knowledge,\n", + " \"sources\": sources\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Response Generation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(query, knowledge, sources):\n", + " \"\"\"\n", + " Generate a response based on the query and knowledge.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " knowledge (str): Knowledge to base the response on\n", + " sources (List[Dict]): List of sources with title and URL\n", + " \n", + " Returns:\n", + " str: Generated response\n", + " \"\"\"\n", + " # Format sources for inclusion in prompt\n", + " sources_text = \"\"\n", + " for source in sources:\n", + " title = source.get(\"title\", \"Unknown Source\")\n", + " url = source.get(\"url\", \"\")\n", + " if url:\n", + " sources_text += f\"- {title}: {url}\\n\"\n", + " else:\n", + " sources_text += f\"- {title}\\n\"\n", + " \n", + " # Define the system prompt to instruct the model on how to generate the response\n", + " system_prompt = \"\"\"\n", + " You are a helpful AI assistant. 
Generate a comprehensive, informative response to the query based on the provided knowledge.\n", + " Include all relevant information while keeping your answer clear and concise.\n", + " If the knowledge doesn't fully answer the query, acknowledge this limitation.\n", + " Include source attribution at the end of your response.\n", + " \"\"\"\n", + " \n", + " # Define the user prompt with the query, knowledge, and sources\n", + " user_prompt = f\"\"\"\n", + " Query: {query}\n", + " \n", + " Knowledge:\n", + " {knowledge}\n", + " \n", + " Sources:\n", + " {sources_text}\n", + " \n", + " Please provide an informative response to the query based on this information.\n", + " Include the sources at the end of your response.\n", + " \"\"\"\n", + " \n", + " try:\n", + " # Make a request to the OpenAI API to generate the response\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-4\", # Using GPT-4 for high-quality responses\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0.2\n", + " )\n", + " \n", + " # Return the generated response\n", + " return response.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " # Print the error message and return an error response\n", + " print(f\"Error generating response: {e}\")\n", + " return f\"I apologize, but I encountered an error while generating a response to your query: '{query}'. 
The error was: {str(e)}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_crag_response(query, response, reference_answer=None):\n", + " \"\"\"\n", + " Evaluate the quality of a CRAG response.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " response (str): Generated response\n", + " reference_answer (str, optional): Reference answer for comparison\n", + " \n", + " Returns:\n", + " Dict: Evaluation metrics\n", + " \"\"\"\n", + " # System prompt for the evaluation criteria\n", + " system_prompt = \"\"\"\n", + " You are an expert at evaluating the quality of responses to questions.\n", + " Please evaluate the provided response based on the following criteria:\n", + " \n", + " 1. Relevance (0-10): How directly does the response address the query?\n", + " 2. Accuracy (0-10): How factually correct is the information?\n", + " 3. Completeness (0-10): How thoroughly does the response answer all aspects of the query?\n", + " 4. Clarity (0-10): How clear and easy to understand is the response?\n", + " 5. 
Source Quality (0-10): How well does the response cite relevant sources?\n", + " \n", + " Return your evaluation as a JSON object with scores for each criterion and a brief explanation for each score.\n", + " Also include an \"overall_score\" (0-10) and a brief \"summary\" of your evaluation.\n", + " \"\"\"\n", + " \n", + " # User prompt with the query and response to be evaluated\n", + " user_prompt = f\"\"\"\n", + " Query: {query}\n", + " \n", + " Response to evaluate:\n", + " {response}\n", + " \"\"\"\n", + " \n", + " # Include reference answer in the prompt if provided\n", + " if reference_answer:\n", + " user_prompt += f\"\"\"\n", + " Reference answer (for comparison):\n", + " {reference_answer}\n", + " \"\"\"\n", + " \n", + " try:\n", + " # Request evaluation from the GPT-4 model\n", + " evaluation_response = client.chat.completions.create(\n", + " model=\"gpt-4\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " response_format={\"type\": \"json_object\"},\n", + " temperature=0\n", + " )\n", + " \n", + " # Parse the evaluation response\n", + " evaluation = json.loads(evaluation_response.choices[0].message.content)\n", + " return evaluation\n", + " except Exception as e:\n", + " # Handle any errors during the evaluation process\n", + " print(f\"Error evaluating response: {e}\")\n", + " return {\n", + " \"error\": str(e),\n", + " \"overall_score\": 0,\n", + " \"summary\": \"Evaluation failed due to an error.\"\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_crag_vs_standard_rag(query, vector_store, reference_answer=None):\n", + " \"\"\"\n", + " Compare CRAG against standard RAG for a query.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " vector_store (SimpleVectorStore): Vector store with document chunks\n", + " reference_answer (str, optional): Reference 
answer for comparison\n", + " \n", + " Returns:\n", + " Dict: Comparison results\n", + " \"\"\"\n", + " # Run CRAG process\n", + " print(\"\\n=== Running CRAG ===\")\n", + " crag_result = crag_process(query, vector_store)\n", + " crag_response = crag_result[\"response\"]\n", + " \n", + " # Run standard RAG (directly retrieve and respond)\n", + " print(\"\\n=== Running standard RAG ===\")\n", + " query_embedding = create_embeddings(query)\n", + " retrieved_docs = vector_store.similarity_search(query_embedding, k=3)\n", + " combined_text = \"\\n\\n\".join([doc[\"text\"] for doc in retrieved_docs])\n", + " standard_sources = [{\"title\": \"Document\", \"url\": \"\"}]\n", + " standard_response = generate_response(query, combined_text, standard_sources)\n", + " \n", + " # Evaluate both approaches\n", + " print(\"\\n=== Evaluating CRAG response ===\")\n", + " crag_eval = evaluate_crag_response(query, crag_response, reference_answer)\n", + " \n", + " print(\"\\n=== Evaluating standard RAG response ===\")\n", + " standard_eval = evaluate_crag_response(query, standard_response, reference_answer)\n", + " \n", + " # Compare approaches\n", + " print(\"\\n=== Comparing approaches ===\")\n", + " comparison = compare_responses(query, crag_response, standard_response, reference_answer)\n", + " \n", + " return {\n", + " \"query\": query,\n", + " \"crag_response\": crag_response,\n", + " \"standard_response\": standard_response,\n", + " \"reference_answer\": reference_answer,\n", + " \"crag_evaluation\": crag_eval,\n", + " \"standard_evaluation\": standard_eval,\n", + " \"comparison\": comparison\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_responses(query, crag_response, standard_response, reference_answer=None):\n", + " \"\"\"\n", + " Compare CRAG and standard RAG responses.\n", + " \n", + " Args:\n", + " query (str): User query\n", + " crag_response (str): CRAG response\n", + " 
standard_response (str): Standard RAG response\n", + " reference_answer (str, optional): Reference answer\n", + " \n", + " Returns:\n", + " str: Comparison analysis\n", + " \"\"\"\n", + " # System prompt for comparing the two approaches\n", + " system_prompt = \"\"\"\n", + " You are an expert evaluator comparing two response generation approaches:\n", + " \n", + " 1. CRAG (Corrective RAG): A system that evaluates document relevance and dynamically switches to web search when needed.\n", + " 2. Standard RAG: A system that directly retrieves documents based on embedding similarity and uses them for response generation.\n", + " \n", + " Compare the responses from these two systems based on:\n", + " - Accuracy and factual correctness\n", + " - Relevance to the query\n", + " - Completeness of the answer\n", + " - Clarity and organization\n", + " - Source attribution quality\n", + " \n", + " Explain which approach performed better for this specific query and why.\n", + " \"\"\"\n", + " \n", + " # User prompt with the query and responses to be compared\n", + " user_prompt = f\"\"\"\n", + " Query: {query}\n", + " \n", + " CRAG Response:\n", + " {crag_response}\n", + " \n", + " Standard RAG Response:\n", + " {standard_response}\n", + " \"\"\"\n", + " \n", + " # Include reference answer in the prompt if provided\n", + " if reference_answer:\n", + " user_prompt += f\"\"\"\n", + " Reference Answer:\n", + " {reference_answer}\n", + " \"\"\"\n", + " \n", + " try:\n", + " # Request comparison from the GPT-4 model\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-4\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " # Return the comparison analysis\n", + " return response.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " # Handle any errors during the comparison process\n", + " print(f\"Error 
comparing responses: {e}\")\n", + " return f\"Error comparing responses: {str(e)}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Evaluation Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def run_crag_evaluation(pdf_path, test_queries, reference_answers=None):\n", + " \"\"\"\n", + " Run a complete evaluation of CRAG with multiple test queries.\n", + " \n", + " Args:\n", + " pdf_path (str): Path to the PDF document\n", + " test_queries (List[str]): List of test queries\n", + " reference_answers (List[str], optional): Reference answers for queries\n", + " \n", + " Returns:\n", + " Dict: Complete evaluation results\n", + " \"\"\"\n", + " # Process document and create vector store\n", + " vector_store = process_document(pdf_path)\n", + " \n", + " results = []\n", + " \n", + " for i, query in enumerate(test_queries):\n", + " print(f\"\\n\\n===== Evaluating Query {i+1}/{len(test_queries)} =====\")\n", + " print(f\"Query: {query}\")\n", + " \n", + " # Get reference answer if available\n", + " reference = None\n", + " if reference_answers and i < len(reference_answers):\n", + " reference = reference_answers[i]\n", + " \n", + " # Run comparison between CRAG and standard RAG\n", + " result = compare_crag_vs_standard_rag(query, vector_store, reference)\n", + " results.append(result)\n", + " \n", + " # Display comparison results\n", + " print(\"\\n=== Comparison ===\")\n", + " print(result[\"comparison\"])\n", + " \n", + " # Generate overall analysis from individual results\n", + " overall_analysis = generate_overall_analysis(results)\n", + " \n", + " return {\n", + " \"results\": results,\n", + " \"overall_analysis\": overall_analysis\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_overall_analysis(results):\n", + " \"\"\"\n", + " Generate an overall analysis of 
evaluation results.\n", + " \n", + " Args:\n", + " results (List[Dict]): Results from individual query evaluations\n", + " \n", + " Returns:\n", + " str: Overall analysis\n", + " \"\"\"\n", + " # System prompt for the analysis\n", + " system_prompt = \"\"\"\n", + " You are an expert at evaluating information retrieval and response generation systems.\n", + " Based on multiple test queries, provide an overall analysis comparing CRAG (Corrective RAG) \n", + " with standard RAG.\n", + " \n", + " Focus on:\n", + " 1. When CRAG performs better and why\n", + " 2. When standard RAG performs better and why\n", + " 3. The overall strengths and weaknesses of each approach\n", + " 4. Recommendations for when to use each approach\n", + " \"\"\"\n", + " \n", + " # Create summary of evaluations\n", + " evaluations_summary = \"\"\n", + " for i, result in enumerate(results):\n", + " evaluations_summary += f\"Query {i+1}: {result['query']}\\n\"\n", + " if 'crag_evaluation' in result and 'overall_score' in result['crag_evaluation']:\n", + " crag_score = result['crag_evaluation'].get('overall_score', 'N/A')\n", + " evaluations_summary += f\"CRAG score: {crag_score}\\n\"\n", + " if 'standard_evaluation' in result and 'overall_score' in result['standard_evaluation']:\n", + " std_score = result['standard_evaluation'].get('overall_score', 'N/A')\n", + " evaluations_summary += f\"Standard RAG score: {std_score}\\n\"\n", + " evaluations_summary += f\"Comparison summary: {result['comparison'][:200]}...\\n\\n\"\n", + " \n", + " # User prompt for the analysis\n", + " user_prompt = f\"\"\"\n", + " Based on the following evaluations comparing CRAG vs standard RAG across {len(results)} queries, \n", + " provide an overall analysis of these two approaches:\n", + " \n", + " {evaluations_summary}\n", + " \n", + " Please provide a comprehensive analysis of the relative strengths and weaknesses of CRAG \n", + " compared to standard RAG, focusing on when and why one approach outperforms the other.\n", 
+ " \"\"\"\n", + " \n", + " try:\n", + " # Generate the overall analysis using GPT-4\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-4\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=0\n", + " )\n", + " \n", + " return response.choices[0].message.content.strip()\n", + " except Exception as e:\n", + " print(f\"Error generating overall analysis: {e}\")\n", + " return f\"Error generating overall analysis: {str(e)}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation of CRAG with Test Queries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Path to the AI information PDF document\n", + "pdf_path = \"data/AI_Information.pdf\"\n", + "\n", + "# Run comprehensive evaluation with multiple AI-related queries\n", + "test_queries = [\n", + " \"How does machine learning differ from traditional programming?\",\n", + "]\n", + "\n", + "# Optional reference answers for better quality evaluation\n", + "reference_answers = [\n", + " \"Machine learning differs from traditional programming by having computers learn patterns from data rather than following explicit instructions. 
In traditional programming, developers write specific rules for the computer to follow, while in machine learning\",\n", + "]\n", + "\n", + "# Run the full evaluation comparing CRAG vs standard RAG\n", + "evaluation_results = run_crag_evaluation(pdf_path, test_queries, reference_answers)\n", + "print(\"\\n=== Overall Analysis of CRAG vs Standard RAG ===\")\n", + "print(evaluation_results[\"overall_analysis\"])\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-new-specific-rag", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/21_rag_with_rl.ipynb b/21_rag_with_rl.ipynb new file mode 100644 index 0000000..87302d8 --- /dev/null +++ b/21_rag_with_rl.ipynb @@ -0,0 +1,2172 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "markdown" + } + }, + "source": [ + "# Simple RAG with RL\n", + "\n", + "[![Python 3.7+](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/release/python-370/) [![Nebius AI](https://img.shields.io/badge/Nebius%20AI-LLM-brightgreen)](https://cloud.nebius.ai/services/llm-embedding) [![OpenAI](https://img.shields.io/badge/OpenAI-API-lightgrey)](https://openai.com/) [![Medium](https://img.shields.io/badge/Medium-Blog-black?logo=medium)](https://medium.com/@fareedkhandev/maximizing-simple-rag-performance-using-rl-rewards-in-python-d4c14cbadf59)\n", + "\n", + "A simple RAG works in three simple steps:\n", + "\n", + "1. **Indexing**: Break documents into chunks and convert to vector embeddings.\n", + "\n", + "2. **Retrieval**: When a question is asked, find the most relevant chunks.\n", + "\n", + "3. 
**Generation**: Combine the question with retrieved chunks and let the AI generate an answer using this information.\n", + "\n", + "The actual problem is to generate an answer to a given question using the provided documents. Simple RAG often fails to generate accurate answers due to the lack of context in the retrieved chunks. In this notebook, we will use the `RL RAG` approach to generate answers to the given questions using the provided documents." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Table of Contents\n", + "\n", + "- [Setting Up the Environment](#setting-up-the-environment)\n", + "- [Data Preprocessing](#data-preprocessing)\n", + "- [Document Embedding Generation](#document-embedding-generation)\n", + "- [Vector Store Implementation](#vector-store-implementation)\n", + "- [Simple Retrieval Implementation](#simple-retrieval-implementation)\n", + " - [Cosine Similarity](#cosine-similarity)\n", + " - [Similarity Search](#similarity-search)\n", + " - [LLM Response Generation](#llm-response-generation)\n", + " - [Basic RAG Pipeline](#basic-rag-pipeline)\n", + " - [Evaluation of Basic RAG](#evaluate-the-basic-rag-pipeline)\n", + "- [Reinforcement Learning for RAG](#reinforcement-learning-for-rag)\n", + " - [State, Action Space, and Reward Methodology](#state-action-space-and-reward-methodology)\n", + " - [Policy Network](#policy-network)\n", + " - [Single RL Step](#single-rl-step)\n", + " - [Training Parameters and Policy Update](#training-parameters-and-policy-update)\n", + " - [Training Loop](#training-loop)\n", + " - [Performance Comparison Logic](#performance-comparison-logic)\n", + "- [Evaluation Framework](#evaluation-framework)\n", + "- [Evaluating RL vs Simple RAG](#evaluating-rl-vs-simple-rag)\n", + "- [Saving Comparison Results](#saving-the-comparison-results)\n", + "- [Conclusion](#what-can-we-conclude)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up the Environment\n", + 
"\n", + "First, we need to import the necessary libraries and set up the environment. We will be using HuggingFace Models hosted under **Nebius** platform. Obviously, you can use your own models as long as they are compatible with OpenAI's API." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Importing the os module for interacting with the operating system\n", + "import os\n", + "\n", + "# Importing the OpenAI module for working with OpenAI's API\n", + "from openai import OpenAI\n", + "\n", + "# Importing numpy for numerical operations\n", + "import numpy as np\n", + "\n", + "# Importing json for working with JSON data\n", + "import json\n", + "\n", + "# Typing module for type hints\n", + "from typing import Dict, List, Tuple, Optional, Union" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we need to initialize the client responsible for response and embedding generation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set up the API connection using the OpenAI client\n", + "# Replace the base_url and api_key with your own values\n", + "\n", + "client = OpenAI(\n", + " base_url=\"https://api.studio.nebius.com/v1/\", # Base URL for (eg. ollama api, anyother llm api provider)\n", + " api_key= os.environ[\"OPENAI_API_KEY\"] # API key for authentication \n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data Preprocessing\n", + "Now that we have moved onto the data preprocessing stage, we need to load the data and preprocess it. Let's create a function that will load all the `.txt` files from a directory and return a list of documents." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to load documents from a directory\n", + "def load_documents(directory_path: str) -> List[str]:\n", + " \"\"\"\n", + " Load all text documents from the specified directory.\n", + "\n", + " Args:\n", + " directory_path (str): Path to the directory containing text files.\n", + "\n", + " Returns:\n", + " List[str]: A list of strings, where each string is the content of a text file.\n", + " \"\"\"\n", + " documents = [] # Initialize an empty list to store document contents\n", + " for filename in os.listdir(directory_path): # Iterate through all files in the directory\n", + " if filename.endswith(\".txt\"): # Check if the file has a .txt extension\n", + " # Open the file in read mode with UTF-8 encoding and append its content to the list\n", + " with open(os.path.join(directory_path, filename), 'r', encoding='utf-8') as file:\n", + " documents.append(file.read())\n", + " return documents # Return the list of document contents" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to create a function that performs chunking of the documents once they are loaded. We are using a `chunk_size` of `100` characters, but you can adjust it as per your requirements." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to split documents into chunks\n", + "def split_into_chunks(documents: List[str], chunk_size: int = 30) -> List[str]:\n", + " \"\"\"\n", + " Split documents into smaller chunks of specified size.\n", + "\n", + " Args:\n", + " documents (List[str]): A list of document strings to be split into chunks.\n", + " chunk_size (int): The maximum number of words in each chunk. 
Default is 100.\n", + "\n", + " Returns:\n", + " List[str]: A list of chunks, where each chunk is a string containing up to `chunk_size` words.\n", + " \"\"\"\n", + " chunks = [] # Initialize an empty list to store the chunks\n", + " for doc in documents: # Iterate through each document\n", + " words = doc.split() # Split the document into words\n", + " # Create chunks of the specified size\n", + " for i in range(0, len(words), chunk_size):\n", + " chunk = \" \".join(words[i:i + chunk_size]) # Join words to form a chunk\n", + " chunks.append(chunk) # Add the chunk to the list\n", + " return chunks # Return the list of chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This step is **optional**, where we preprocess each chunk by removing special characters, converting to lowercase, etc." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to preprocess text (e.g., lowercasing, removing special characters)\n", + "def preprocess_text(text: str) -> str:\n", + " \"\"\"\n", + " Preprocess the input text by converting it to lowercase and removing special characters.\n", + "\n", + " Args:\n", + " text (str): The input text to preprocess.\n", + "\n", + " Returns:\n", + " str: The preprocessed text with only alphanumeric characters and spaces.\n", + " \"\"\"\n", + " # Convert the text to lowercase\n", + " text = text.lower()\n", + " # Remove special characters, keeping only alphanumeric characters and spaces\n", + " text = ''.join(char for char in text if char.isalnum() or char.isspace())\n", + " return text" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "However, if you are using the previous preprocessing step, you can simply create a function to preprocess the entire document." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to preprocess all chunks\n", + "def preprocess_chunks(chunks: List[str]) -> List[str]:\n", + "    \"\"\"\n", + "    Apply preprocessing to all text chunks.\n", + "\n", + "    Args:\n", + "        chunks (List[str]): A list of text chunks to preprocess.\n", + "\n", + "    Returns:\n", + "        List[str]: A list of preprocessed text chunks.\n", + "    \"\"\"\n", + "    # Apply the preprocess_text function to each chunk in the list\n", + "    return [preprocess_text(chunk) for chunk in chunks]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have implemented all the functions for data preprocessing, we can load the documents from the directory, split them into chunks, and preprocess the chunks." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# Specify the directory path containing the text files\n", + "directory_path = \"data\"\n", + "\n", + "# Load all text documents from the specified directory\n", + "documents = load_documents(directory_path)\n", + "\n", + "# Split the loaded documents into smaller chunks of text\n", + "chunks = split_into_chunks(documents)\n", + "\n", + "# Preprocess the chunks (e.g., lowercasing, removing special characters)\n", + "preprocessed_chunks = preprocess_chunks(chunks)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print the first 50 characters of the first two chunks" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chunk 1: quantum computing principles progress and possibil ... \n", + "--------------------------------------------------\n", + "Chunk 2: process information in binary digits bits quantum ... 
\n", + "--------------------------------------------------\n" + ] + } + ], + "source": [ + "# Print the first 2 preprocessed chunks, displaying only the first 50 characters of each chunk\n", + "for i in range(2):\n", + "    # Use slicing to limit the output to the first 50\n", + "    print(f\"Chunk {i+1}: {preprocessed_chunks[i][:50]} ... \")\n", + "    print(\"-\" * 50) # Print a separator line" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Document Embedding Generation\n", + "\n", + "In the previous step, we chunked our document. Now it's time to generate embeddings for the chunk dataset. When working with RAG, our knowledge base is typically quite large. Therefore, we need to perform embedding generation in batches. Let's create a core function to generate embeddings for the chunks in batches.\n", + "\n", + "The embedding model we are using is `BAAI/bge-en-icl`." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to generate embeddings for a single batch of text chunks\n", + "def generate_embeddings_batch(chunks_batch: List[str], model: str = \"BAAI/bge-en-icl\") -> List[List[float]]:\n", + "    \"\"\"\n", + "    Generate embeddings for a batch of text chunks using the OpenAI client.\n", + "\n", + "    Args:\n", + "        chunks_batch (List[str]): A batch of text chunks to generate embeddings for.\n", + "        model (str): The model to use for embedding generation. 
Default is \"BAAI/bge-en-icl\".\n", + "\n", + " Returns:\n", + " List[List[float]]: A list of embeddings, where each embedding is a list of floats.\n", + " \"\"\"\n", + " # Use the OpenAI client to create embeddings for the input batch\n", + " response = client.embeddings.create(\n", + " model=model, # Specify the model to use for embedding generation\n", + " input=chunks_batch # Provide the batch of text chunks as input\n", + " )\n", + " # Extract embeddings from the response and return them\n", + " embeddings = [item.embedding for item in response.data]\n", + " return embeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we will define a function to generate embeddings for all text chunks in batches. This function will take a list of text chunks as input and generate embeddings for each batch of chunks using the OpenAI client. The function will return a list of embeddings corresponding to all the text chunks." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to generate embeddings for all chunks with batching\n", + "def generate_embeddings(chunks: List[str], batch_size: int = 10) -> np.ndarray:\n", + " \"\"\"\n", + " Generate embeddings for all text chunks in batches.\n", + "\n", + " Args:\n", + " chunks (List[str]): A list of text chunks to generate embeddings for.\n", + " batch_size (int): The number of chunks to process in each batch. 
Default is 10.\n", + "\n", + " Returns:\n", + " np.ndarray: A NumPy array containing embeddings for all chunks.\n", + " \"\"\"\n", + " all_embeddings = [] # Initialize an empty list to store all embeddings\n", + "\n", + " # Iterate through the chunks in batches\n", + " for i in range(0, len(chunks), batch_size):\n", + " # Extract the current batch of chunks\n", + " batch = chunks[i:i + batch_size]\n", + " # Generate embeddings for the current batch\n", + " embeddings = generate_embeddings_batch(batch)\n", + " # Extend the list of all embeddings with the embeddings from the current batch\n", + " all_embeddings.extend(embeddings)\n", + "\n", + " # Convert the list of embeddings to a NumPy array and return it\n", + " return np.array(all_embeddings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create another function to save the embeddings to a file in JSON format." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to save embeddings to a file\n", + "def save_embeddings(embeddings: np.ndarray, output_file: str) -> None:\n", + " \"\"\"\n", + " Save embeddings to a JSON file.\n", + "\n", + " Args:\n", + " embeddings (np.ndarray): A NumPy array containing the embeddings to save.\n", + " output_file (str): The path to the output JSON file where embeddings will be saved.\n", + "\n", + " Returns:\n", + " None\n", + " \"\"\"\n", + " # Open the specified file in write mode with UTF-8 encoding\n", + " with open(output_file, 'w', encoding='utf-8') as file:\n", + " # Convert the NumPy array to a list and save it as JSON\n", + " json.dump(embeddings.tolist(), file)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have implemented all the functions for embedding generation, we can proceed to generate embeddings for the preprocessed text chunks and save them to a JSON file." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "# Ensure the chunks are preprocessed before generating embeddings\n", + "preprocessed_chunks = preprocess_chunks(chunks)\n", + "\n", + "# Generate embeddings for the preprocessed chunks\n", + "embeddings = generate_embeddings(preprocessed_chunks)\n", + "\n", + "# Save the generated embeddings to a JSON file named \"embeddings.json\"\n", + "save_embeddings(embeddings, \"embeddings.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Vector Store Implementation\n", + "Since we are not using any python libraries for vector storage, we will implement a simple vector store using a dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize an in-memory vector store as a dictionary\n", + "# The keys will be unique identifiers (integers), and the values will be dictionaries containing embeddings and corresponding text chunks\n", + "vector_store: dict[int, dict[str, object]] = {}\n", + "\n", + "# Function to add embeddings and corresponding text chunks to the vector store\n", + "def add_to_vector_store(embeddings: np.ndarray, chunks: List[str]) -> None:\n", + " \"\"\"\n", + " Add embeddings and their corresponding text chunks to the vector store.\n", + "\n", + " Args:\n", + " embeddings (np.ndarray): A NumPy array containing the embeddings to add.\n", + " chunks (List[str]): A list of text chunks corresponding to the embeddings.\n", + "\n", + " Returns:\n", + " None\n", + " \"\"\"\n", + " # Iterate over embeddings and chunks simultaneously\n", + " for embedding, chunk in zip(embeddings, chunks):\n", + " # Add each embedding and its corresponding chunk to the vector store\n", + " # Use the current length of the vector store as the unique key\n", + " vector_store[len(vector_store)] = {\"embedding\": embedding, \"chunk\": chunk}" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "## Simple Retrieval Implementation\n", + "\n", + "We do know for retrieving the most similar text chunks to a given query, we can use the cosine similarity between the query embedding and the embeddings of all text chunks. The higher the cosine similarity, the more similar the text chunks are. We can then sort the chunks based on their similarity scores and return the top-k most similar chunks.\n", + " \n", + "So, let's implement a simple cosine similarity-based retrieval function." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "tex" + } + }, + "source": [ + "The cosine similarity between two vectors $A$ and $B$ is calculated as:\n", + "\n", + "$$\\text{cosine similarity} = \\frac{A \\cdot B}{||A|| \\times ||B||} = \\frac{\\sum_{i=1}^{n} A_i B_i}{\\sqrt{\\sum_{i=1}^{n} A_i^2} \\times \\sqrt{\\sum_{i=1}^{n} B_i^2}}$$\n", + "\n", + "Where:\n", + "- $A \\cdot B$ is the dot product of vectors $A$ and $B$\n", + "- $||A||$ and $||B||$ are the Euclidean norms (magnitudes) of the vectors\n", + "- $n$ is the dimension of the vectors" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to compute cosine similarity between two vectors\n", + "def cosine_similarity(vec1: np.ndarray, vec2: np.ndarray) -> float:\n", + " \"\"\"\n", + " Compute the cosine similarity between two vectors.\n", + "\n", + " Args:\n", + " vec1 (np.ndarray): The first vector.\n", + " vec2 (np.ndarray): The second vector.\n", + "\n", + " Returns:\n", + " float: The cosine similarity between the two vectors, ranging from -1 to 1.\n", + " \"\"\"\n", + " # Compute the dot product of the two vectors\n", + " dot_product = np.dot(vec1, vec2)\n", + " # Compute the magnitude (norm) of the first vector\n", + " norm_vec1 = np.linalg.norm(vec1)\n", + " # Compute the magnitude (norm) of the second vector\n", + " norm_vec2 = np.linalg.norm(vec2)\n", 
+ " # Return the cosine similarity as the ratio of the dot product to the product of the norms\n", + " return dot_product / (norm_vec1 * norm_vec2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When we calculate the cosine similarity between a query and all the chunks, we can perform a similarity search. Based on the `top_k` parameter, we retrieve the top k most similar chunks." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to perform similarity search in the vector store\n", + "def similarity_search(query_embedding: np.ndarray, top_k: int = 5) -> List[str]:\n", + " \"\"\"\n", + " Perform similarity search in the vector store and return the top_k most similar chunks.\n", + "\n", + " Args:\n", + " query_embedding (np.ndarray): The embedding vector of the query.\n", + " top_k (int): The number of most similar chunks to retrieve. Default is 5.\n", + "\n", + " Returns:\n", + " List[str]: A list of the top_k most similar text chunks.\n", + " \"\"\"\n", + " similarities = [] # Initialize a list to store similarity scores and corresponding keys\n", + "\n", + " # Iterate through all items in the vector store\n", + " for key, value in vector_store.items():\n", + " # Compute the cosine similarity between the query embedding and the stored embedding\n", + " similarity = cosine_similarity(query_embedding, value[\"embedding\"])\n", + " # Append the key and similarity score as a tuple to the list\n", + " similarities.append((key, similarity))\n", + "\n", + " # Sort the list of similarities in descending order based on the similarity score\n", + " similarities = sorted(similarities, key=lambda x: x[1], reverse=True)\n", + "\n", + " # Retrieve the top_k most similar chunks based on their keys\n", + " return [vector_store[key][\"chunk\"] for key, _ in similarities[:top_k]]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once we have the similarity 
search function ready, we can simply code a retrieval function on top of it that will provide the relevant chunks based on the query." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to retrieve relevant document chunks for a query\n", + "def retrieve_relevant_chunks(query_text: str, top_k: int = 5) -> List[str]:\n", + " \"\"\"\n", + " Retrieve the most relevant document chunks for a given query text.\n", + "\n", + " Args:\n", + " query_text (str): The query text for which relevant chunks are to be retrieved.\n", + " top_k (int): The number of most relevant chunks to retrieve. Default is 5.\n", + "\n", + " Returns:\n", + " List[str]: A list of the top_k most relevant text chunks.\n", + " \"\"\"\n", + " # Generate embedding for the query text using the embedding model\n", + " query_embedding = generate_embeddings([query_text])[0]\n", + " \n", + " # Perform similarity search to find the most relevant chunks\n", + " relevant_chunks = similarity_search(query_embedding, top_k=top_k)\n", + " \n", + " # Return the list of relevant chunks\n", + " return relevant_chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have implemented all the functions for retrieval, we can proceed to test the retrieval system with a sample query." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chunk 1: quantum computing principles progress and possibil ... \n", + "--------------------------------------------------\n", + "Chunk 2: through distinct stages 1 nisq era current 2 error ... \n", + "--------------------------------------------------\n", + "Chunk 3: quantum advantage and practical applications quant ... \n", + "--------------------------------------------------\n", + "Chunk 4: process information in binary digits bits quantum ... 
\n", + "--------------------------------------------------\n", + "Chunk 5: measuring the correct answer quantum gates and cir ... \n", + "--------------------------------------------------\n" + ] + } + ], + "source": [ + "# Add the generated embeddings and their corresponding preprocessed chunks to the vector store\n", + "add_to_vector_store(embeddings, preprocessed_chunks)\n", + "\n", + "# Define a query text for which we want to retrieve relevant document chunks\n", + "query_text = \"What is Quantum Computing?\"\n", + "\n", + "# Retrieve the most relevant chunks from the vector store based on the query text\n", + "relevant_chunks = retrieve_relevant_chunks(query_text)\n", + "\n", + "# Print the first 50 characters of each retrieved relevant chunk\n", + "for idx, chunk in enumerate(relevant_chunks):\n", + " print(f\"Chunk {idx + 1}: {chunk[:50]} ... \")\n", + " print(\"-\" * 50) # Print a separator line" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## LLM Response Generation\n", + "\n", + "When we have a query and a set of relevant document chunks, we can use a large language model (LLM) to generate a response based on the query and the retrieved information. In this section, we will use the OpenAI API to generate a response to a query by providing the query text and the relevant document chunks as context to the LLM.\n", + "\n", + "First we need a function to construct the input prompt for the LLM, which includes the query text and the relevant document chunks as context." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to construct a prompt with context\n", + "def construct_prompt(query: str, context_chunks: List[str]) -> str:\n", + " \"\"\"\n", + " Construct a prompt by combining the query with the retrieved context chunks.\n", + "\n", + " Args:\n", + " query (str): The query text for which the prompt is being constructed.\n", + " context_chunks (List[str]): A list of relevant context chunks to include in the prompt.\n", + "\n", + " Returns:\n", + " str: The constructed prompt to be used as input for the LLM.\n", + " \"\"\"\n", + " # Combine all context chunks into a single string, separated by newlines\n", + " context = \"\\n\".join(context_chunks)\n", + " \n", + " # Define the system message to guide the LLM's behavior\n", + " system_message = (\n", + " \"You are a helpful assistant. Only use the provided context to answer the question. \"\n", + " \"If the context doesn't contain the information needed, say 'I don't have enough information to answer this question.'\"\n", + " )\n", + " \n", + " # Construct the final prompt by combining the system message, context, and query\n", + " prompt = f\"System: {system_message}\\n\\nContext:\\n{context}\\n\\nQuestion:\\n{query}\\n\\nAnswer:\"\n", + " \n", + " return prompt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To generate an LLM response, we need to implement a function that takes the constructed input prompt and sends it to the OpenAI API for response generation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to generate a response using the OpenAI chat model\n", + "def generate_response(\n", + " prompt: str,\n", + " model: str = \"google/gemma-2-2b-it\",\n", + " max_tokens: int = 512,\n", + " temperature: float = 1,\n", + " top_p: float = 0.9,\n", + " top_k: int = 50\n", + ") -> str:\n", + " \"\"\"\n", + " Generate a response from the OpenAI chat model based on the constructed prompt.\n", + "\n", + " Args:\n", + " prompt (str): The input prompt to provide to the chat model.\n", + " model (str): The model to use for generating the response. Default is \"google/gemma-2-2b-it\".\n", + " max_tokens (int): Maximum number of tokens in the response. Default is 512.\n", + " temperature (float): Sampling temperature for response diversity. Default is 0.5.\n", + " top_p (float): Probability mass for nucleus sampling. Default is 0.9.\n", + " top_k (int): Number of highest probability tokens to consider. 
Default is 50.\n", + "\n", + " Returns:\n", + " str: The generated response from the chat model.\n", + " \"\"\"\n", + " # Use the OpenAI client to create a chat completion\n", + " response = client.chat.completions.create(\n", + " model=model, # Specify the model to use for generating the response\n", + " max_tokens=max_tokens, # Maximum number of tokens in the response\n", + " temperature=temperature, # Sampling temperature for response diversity\n", + " top_p=top_p, # Probability mass for nucleus sampling\n", + " extra_body={ # Additional parameters for the request\n", + " \"top_k\": top_k # Number of highest probability tokens to consider\n", + " },\n", + " messages=[ # List of messages to provide context for the chat model\n", + " {\n", + " \"role\": \"user\", # Role of the message sender (user in this case)\n", + " \"content\": [ # Content of the message\n", + " {\n", + " \"type\": \"text\", # Type of content (text in this case)\n", + " \"text\": prompt # The actual prompt text\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + " )\n", + " # Return the content of the first choice in the response\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Basic RAG Pipeline\n", + "\n", + "We cannot run small pieces of code repeatedly. Therefore, we need to create a simple RAG pipeline that takes only one parameter, which is our query, and returns the LLM response." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to implement the basic Retrieval-Augmented Generation (RAG) pipeline\n", + "def basic_rag_pipeline(query: str) -> str:\n", + " \"\"\"\n", + " Implement the basic Retrieval-Augmented Generation (RAG) pipeline:\n", + " retrieve relevant chunks, construct a prompt, and generate a response.\n", + "\n", + " Args:\n", + " query (str): The input query for which a response is to be generated.\n", + "\n", + " Returns:\n", + " str: The generated response from the LLM based on the query and retrieved context.\n", + " \"\"\"\n", + " # Step 1: Retrieve the most relevant chunks for the given query\n", + " relevant_chunks: List[str] = retrieve_relevant_chunks(query)\n", + " \n", + " # Step 2: Construct a prompt using the query and the retrieved chunks\n", + " prompt: str = construct_prompt(query, relevant_chunks)\n", + " \n", + " # Step 3: Generate a response from the LLM using the constructed prompt\n", + " response: str = generate_response(prompt)\n", + " \n", + " # Return the generated response\n", + " return response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluate the basic RAG pipeline\n", + "\n", + "Now that we have coded the basic RAG pipeline, we can use it for evaluation. Our evaluation queries contain different targeted segments, such as `factual_queries` and `complex_nature`. We are going to test the factual knowledge of our RAG pipeline.\n", + "\n", + "Let's load our evaluation queries and their expected answers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sample Query: What is the mathematical representation of a qubit in superposition?\n", + "\n", + "Expected Answer: |ψ⟩ = α|0⟩ + β|1⟩, where α and β are complex numbers satisfying |α|² + |β|² = 1, representing the probability amplitudes for measuring the qubit in state |0⟩ or |1⟩ respectively.\n", + "\n" + ] + } + ], + "source": [ + "# Open the validation data file in read mode and load its content as a dictionary\n", + "with open('data/val_rl.json', 'r') as file:\n", + " validation_data = json.load(file)\n", + "\n", + "# Test the basic RAG pipeline with a sample query\n", + "sample_query = validation_data['basic_factual_questions'][0]['question'] # Extract the query text\n", + "expected_answer = validation_data['basic_factual_questions'][0]['answer'] # Extract the ground truth answer\n", + "\n", + "# print the sample query and expected answer\n", + "print(f\"Sample Query: {sample_query}\\n\")\n", + "print(f\"Expected Answer: {expected_answer}\\n\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's test the basic RAG pipeline with this eval query and see how well it performs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔍 Running the Retrieval-Augmented Generation (RAG) pipeline...\n", + "📥 Query: What is the mathematical representation of a qubit in superposition?\n", + "\n", + "🤖 AI Response:\n", + "--------------------------------------------------\n", + "ψ α0 β1\n", + "--------------------------------------------------\n", + "✅ Ground Truth Answer:\n", + "--------------------------------------------------\n", + "|ψ⟩ = α|0⟩ + β|1⟩, where α and β are complex numbers satisfying |α|² + |β|² = 1, representing the probability amplitudes for measuring the qubit in state |0⟩ or |1⟩ respectively.\n", + "--------------------------------------------------\n" + ] + } + ], + "source": [ + "# Print a message to indicate the start of the RAG pipeline\n", + "print(\"🔍 Running the Retrieval-Augmented Generation (RAG) pipeline...\")\n", + "print(f\"📥 Query: {sample_query}\\n\")\n", + "\n", + "# Run the RAG pipeline and get the response\n", + "response = basic_rag_pipeline(sample_query)\n", + "\n", + "# Print the response with better formatting\n", + "print(\"🤖 AI Response:\")\n", + "print(\"-\" * 50)\n", + "print(response.strip())\n", + "print(\"-\" * 50)\n", + "\n", + "# Print the ground truth answer for comparison\n", + "print(\"✅ Ground Truth Answer:\")\n", + "print(\"-\" * 50)\n", + "print(expected_answer)\n", + "print(\"-\" * 50)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The simple RAG pipeline doesn't seem to perform well in its current state. The generated response is not only irrelevant to the ground truth but also misses critical information.\n", + "\n", + "But don't worry! In the upcoming steps, we will implement a Reinforcement Learning-based RAG pipeline to address these shortcomings. 
This will help us improve the retrieval and generation process, making the responses more accurate and contextually relevant.\n", + "\n", + "Stay tuned as we take our RAG pipeline to the next level! 🚀" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "vscode": { + "languageId": "julia" + } + }, + "source": [ + "## Reinforcement Learning for RAG\n", + "\n", + "Reinforcement Learning (RL) is a type of machine learning where an agent learns to make decisions by taking actions in an environment to maximize some notion of cumulative reward. Unlike supervised learning, the agent is not explicitly told which actions to take, but instead must discover which actions yield the most reward through trial and error.\n", + "\n", + "Follow are the main components of a reinforcement learning system:\n", + "\n", + "1. **Agent**: The learner or decision-maker\n", + "2. **Environment**: The world with which the agent interacts\n", + "3. **State (S)**: The current situation of the agent in the environment\n", + "4. **Action (A)**: A set of possible moves the agent can make\n", + "5. **Reward (R)**: Feedback from the environment after each action\n", + "6. 
**Policy (π)**: Strategy that the agent follows to determine the next action\n", + "\n", + "The goal in reinforcement learning is to learn a policy π that maximizes the expected cumulative reward:\n", + "\n", + "$$\\pi^* = \\arg\\max_\\pi \\mathbb{E}\\left[ \\sum_{t=0}^{T} \\gamma^t R_t \\right]$$\n", + "\n", + "Where:\n", + "- $\\pi^*$ is the optimal policy\n", + "- $\\gamma$ is the discount factor (0 ≤ γ ≤ 1)\n", + "- $R_t$ is the reward at time step t\n", + "- $T$ is the final time step\n", + "\n", + "In the context of RAG systems, reinforcement learning can be used to:\n", + "- Improve retrieval by learning which documents are most helpful\n", + "- Refine prompt construction based on user feedback\n", + "- Optimize the generation process by learning from successful responses" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## State, Action Space, and Reward Methodology\n", + "\n", + "The very first step when coding an RL algorithm is to define three things:\n", + "\n", + "- **State**: It is the current situation of the environment. In our case, the initial state is our simple RAG pipeline (query, context, response).\n", + "- **Action Space**: It is the decision that the agent takes based on the state. In our case, the actions can include changing the model, modifying the context, altering the query, etc.\n", + "- **Reward**: It is the feedback that the agent receives after taking an action. In our case, the reward can be the similarity between the generated response and the ground truth answer." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our state will be changing constantly as we perform training. For that, we need to save the state after each `training episode` so that our RL agent can learn from it and avoid making the same mistakes again." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to define the state representation for reinforcement learning\n", + "def define_state(\n", + " query: str, \n", + " context_chunks: List[str], \n", + " rewritten_query: str = None, \n", + " previous_responses: List[str] = None, \n", + " previous_rewards: List[float] = None\n", + ") -> dict:\n", + " \"\"\"\n", + " Define the state representation for the reinforcement learning agent.\n", + " \n", + " Args:\n", + " query (str): The original user query.\n", + " context_chunks (List[str]): Retrieved context chunks from the knowledge base.\n", + " rewritten_query (str, optional): A reformulated version of the original query.\n", + " previous_responses (List[str], optional): List of previously generated responses.\n", + " previous_rewards (List[float], optional): List of rewards received for previous actions.\n", + " \n", + " Returns:\n", + " dict: A dictionary representing the current state with all relevant information.\n", + " \"\"\"\n", + " state = {\n", + " \"original_query\": query, # The initial query from the user\n", + " \"current_query\": rewritten_query if rewritten_query else query, # Current version of the query (may be rewritten)\n", + " \"context\": context_chunks, # Retrieved context chunks from the knowledge base\n", + " \"previous_responses\": previous_responses if previous_responses else [], # History of generated responses\n", + " \"previous_rewards\": previous_rewards if previous_rewards else [] # History of received rewards\n", + " }\n", + " return state" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have defined the state representation for the RL agent, including the user query, retrieved context chunks, rewritten query (if any), and histories of responses and rewards. This state will guide the agent in generating better responses. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we need to define the action space for the reinforcement learning agent. The action space consists of the set of possible actions that the agent can take at each step. In this case, we define four actions:\n", + "- `rewrite_query`: Reformulate the original query to improve retrieval\n", + "- `expand_context`: Retrieve additional context chunks\n", + "- `filter_context`: Remove irrelevant context chunks\n", + "- `generate_response`: Generate a response based on the current query and context" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to define the action space for reinforcement learning\n", + "def define_action_space() -> List[str]:\n", + " \"\"\"\n", + " Define the set of possible actions the reinforcement learning agent can take.\n", + " \n", + " Actions include:\n", + " - rewrite_query: Reformulate the original query to improve retrieval\n", + " - expand_context: Retrieve additional context chunks\n", + " - filter_context: Remove irrelevant context chunks\n", + " - generate_response: Generate a response based on current query and context\n", + " \n", + " Returns:\n", + " List[str]: A list of available actions.\n", + " \"\"\"\n", + "\n", + " # Define the set of actions the agent can take\n", + " actions = [\"rewrite_query\", \"expand_context\", \"filter_context\", \"generate_response\"]\n", + " return actions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Obviously, when our RL agent takes an action, it will be based on the current state and the action space. It will be rewarded based on the quality of the response generated by the RAG pipeline. The reward function will be based on the cosine similarity between the generated response and the ground truth answer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to calculate the reward based on response quality\n", + "def calculate_reward(response: str, ground_truth: str) -> float:\n", + " \"\"\"\n", + " Calculate a reward value by comparing the generated response to the ground truth.\n", + " \n", + " Uses cosine similarity between the embeddings of the response and ground truth\n", + " to determine how close the response is to the expected answer.\n", + " \n", + " Args:\n", + " response (str): The generated response from the RAG pipeline.\n", + " ground_truth (str): The expected correct answer.\n", + " \n", + " Returns:\n", + " float: A reward value between -1 and 1, where higher values indicate \n", + " greater similarity to the ground truth.\n", + " \"\"\"\n", + " # Generate embeddings for both the response and ground truth\n", + " response_embedding = generate_embeddings([response])[0]\n", + " ground_truth_embedding = generate_embeddings([ground_truth])[0]\n", + " \n", + " # Calculate cosine similarity between the embeddings as the reward\n", + " similarity = cosine_similarity(response_embedding, ground_truth_embedding)\n", + " return similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our goal is to maximize the reward by generating responses that are similar to the ground truth answer. Higher reward values indicate that the generated response is more aligned with the expected answer." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Action Function Logic\n", + "\n", + "Now that we have defined the action space, we need to implement the logic for each action. 
This logic will determine how the RAG pipeline should be modified based on the action taken by the RL agent.\n", + "\n", + "Just to revisit, the four actions are:\n", + "- `rewrite_query`: Reformulate the original query to improve retrieval\n", + "- `expand_context`: Retrieve additional context chunks\n", + "- `filter_context`: Remove irrelevant context chunks\n", + "- `generate_response`: Generate a response based on the current query and context" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create our first action logic for the agent. The first action we will implement is the `rewrite_query` action, which involves reformulating the original user query to improve retrieval performance. This action is crucial for enhancing the relevance of the retrieved context and generating more accurate responses." + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to rewrite the query for better document retrieval\n", + "def rewrite_query(\n", + " query: str, \n", + " context_chunks: List[str], \n", + " model: str = \"google/gemma-2-2b-it\", \n", + " max_tokens: int = 100, \n", + " temperature: float = 0.3\n", + ") -> str:\n", + " \"\"\"\n", + " Use the LLM to rewrite the query for better document retrieval.\n", + "\n", + " Args:\n", + " query (str): The original query text.\n", + " context_chunks (List[str]): A list of context chunks retrieved so far.\n", + " model (str): The model to use for generating the rewritten query. Default is \"google/gemma-2-2b-it\".\n", + " max_tokens (int): Maximum number of tokens in the rewritten query. Default is 100.\n", + " temperature (float): Sampling temperature for response diversity. 
Default is 0.3.\n", + "\n", + " Returns:\n", + " str: The rewritten query optimized for document retrieval.\n", + " \"\"\"\n", + " # Construct a prompt for the LLM to rewrite the query\n", + " rewrite_prompt = f\"\"\"\n", + " You are a query optimization assistant. Your task is to rewrite the given query to make it more effective \n", + " for retrieving relevant information. The query will be used for document retrieval.\n", + " \n", + " Original query: {query}\n", + " \n", + " Based on the context retrieved so far:\n", + " {' '.join(context_chunks[:2]) if context_chunks else 'No context available yet'}\n", + " \n", + " Rewrite the query to be more specific and targeted to retrieve better information.\n", + " Rewritten query:\n", + " \"\"\"\n", + " \n", + " # Use the LLM to generate a rewritten query\n", + " response = client.chat.completions.create(\n", + " model=model, # Specify the model to use for generating the response\n", + " max_tokens=max_tokens, # Maximum number of tokens in the response\n", + " temperature=temperature, # Sampling temperature for response diversity\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": rewrite_prompt\n", + " }\n", + " ]\n", + " )\n", + " \n", + " # Extract and return the rewritten query from the response\n", + " rewritten_query = response.choices[0].message.content.strip()\n", + " return rewritten_query" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This action is crucial for enhancing the relevance of the retrieved context and generating more accurate responses." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's code our next action logic, which is to expand the context by retrieving additional chunks. We will use the existing function `retrieve_relevant_chunks` to get more context chunks and then filter out any duplicates from the current context. 
We will limit the number of new chunks to be added to the context to a specified top_k value." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to expand the context by retrieving additional chunks\n", + "def expand_context(query: str, current_chunks: List[str], top_k: int = 3) -> List[str]:\n", + " \"\"\"\n", + " Expand the context by retrieving additional chunks.\n", + "\n", + " Args:\n", + " query (str): The query text for which additional context is needed.\n", + " current_chunks (List[str]): The current list of context chunks.\n", + " top_k (int): The number of additional chunks to retrieve. Default is 3.\n", + "\n", + " Returns:\n", + " List[str]: The expanded list of context chunks including new unique chunks.\n", + " \"\"\"\n", + " # Retrieve more chunks than currently available\n", + " additional_chunks = retrieve_relevant_chunks(query, top_k=top_k + len(current_chunks))\n", + " \n", + " # Filter out chunks that are already in the current context\n", + " new_chunks = []\n", + " for chunk in additional_chunks:\n", + " if chunk not in current_chunks:\n", + " new_chunks.append(chunk)\n", + " \n", + " # Add new unique chunks to the current context, limited to top_k\n", + " expanded_context = current_chunks + new_chunks[:top_k]\n", + " return expanded_context" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to filter the context to keep only the most relevant chunks for the query. This filtering step is crucial to ensure that the context provided to the language model is concise and focused on the most relevant information." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to filter the context to keep only the most relevant chunks\n", + "def filter_context(query: str, context_chunks: List[str]) -> List[str]:\n", + " \"\"\"\n", + " Filter the context to keep only the most relevant chunks.\n", + "\n", + " Args:\n", + " query (str): The query text for which relevance is calculated.\n", + " context_chunks (List[str]): The list of context chunks to filter.\n", + "\n", + " Returns:\n", + " List[str]: A filtered list of the most relevant context chunks.\n", + " \"\"\"\n", + " if not context_chunks:\n", + " return []\n", + " \n", + " # Generate embeddings for the query and each chunk\n", + " query_embedding = generate_embeddings([query])[0]\n", + " chunk_embeddings = [generate_embeddings([chunk])[0] for chunk in context_chunks]\n", + " \n", + " # Calculate relevance scores for each chunk\n", + " relevance_scores = []\n", + " for chunk_embedding in chunk_embeddings:\n", + " score = cosine_similarity(query_embedding, chunk_embedding)\n", + " relevance_scores.append(score)\n", + " \n", + " # Sort chunks by relevance scores in descending order\n", + " sorted_chunks = [x for _, x in sorted(zip(relevance_scores, context_chunks), reverse=True)]\n", + " \n", + " # Keep the top 5 most relevant chunks or fewer if less than 5 are available\n", + " filtered_chunks = sorted_chunks[:min(5, len(sorted_chunks))]\n", + " \n", + " return filtered_chunks" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This action will help the agent explore more information relevant to the query." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Policy Network\n", + "\n", + "Previously, we defined our state, actions, and reward logic. 
Next, we need to create a policy network that will select an action based on the current state.\n", + "\n", + "A policy network is a function that takes the current state and the action space as input and returns the selected action based on the state.\n", + "\n", + "The policy network can use a simple heuristic to select an action based on the current state. For example, if there are no previous responses, the policy network can prioritize rewriting the query. If the context has too many chunks, the policy network can choose to filter the context." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to define a policy network to select an action based on the state\n", + "def policy_network(\n", + " state: dict, \n", + " action_space: List[str], \n", + " epsilon: float = 0.2\n", + ") -> str:\n", + " \"\"\"\n", + " Define a policy network to select an action based on the current state using an epsilon-greedy strategy.\n", + "\n", + " Args:\n", + " state (dict): The current state of the environment, including query, context, responses, and rewards.\n", + " action_space (List[str]): The list of possible actions the agent can take.\n", + " epsilon (float): The probability of choosing a random action for exploration. Default is 0.2.\n", + "\n", + " Returns:\n", + " str: The selected action from the action space.\n", + " \"\"\"\n", + " # Use epsilon-greedy strategy: random exploration vs. 
exploitation\n", + " if np.random.random() < epsilon:\n", + " # Exploration: randomly select an action from the action space\n", + " action = np.random.choice(action_space)\n", + " else:\n", + " # Exploitation: select the best action based on the current state using a simple heuristic\n", + "\n", + " # If there are no previous responses, prioritize rewriting the query\n", + " if len(state[\"previous_responses\"]) == 0:\n", + " action = \"rewrite_query\"\n", + " # If there are previous responses but the rewards are low, try expanding the context\n", + " elif state[\"previous_rewards\"] and max(state[\"previous_rewards\"]) < 0.7:\n", + " action = \"expand_context\"\n", + " # If the context has too many chunks, try filtering the context\n", + " elif len(state[\"context\"]) > 5:\n", + " action = \"filter_context\"\n", + " # Otherwise, generate a response\n", + " else:\n", + " action = \"generate_response\"\n", + " \n", + " return action" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So our policy network works like this:\n", + "- If there are no previous responses, prioritize rewriting the query.\n", + "- If there are previous responses but the rewards are low, try expanding the context.\n", + "- If the context has too many chunks, try filtering the context.\n", + "- Otherwise, generate a response." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Single RL Step\n", + "\n", + "We have coded an important component of the RL pipeline. For any developer who has done any kind of training, there exists a training loop where each iteration is a single step in which the RL agent takes an action, rewards are calculated, states are updated, and so on. So, we need to code a single step of our training loop. Let's do that." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to perform a single RL step\n", + "def rl_step(\n", + " state: dict, \n", + " action_space: List[str], \n", + " ground_truth: str\n", + ") -> tuple[dict, str, float, str]:\n", + " \"\"\"\n", + " Perform a single RL step: select an action, execute it, and calculate the reward.\n", + "\n", + " Args:\n", + " state (dict): The current state of the environment, including query, context, responses, and rewards.\n", + " action_space (List[str]): The list of possible actions the agent can take.\n", + " ground_truth (str): The expected correct answer to calculate the reward.\n", + "\n", + " Returns:\n", + " tuple: A tuple containing:\n", + " - state (dict): The updated state after executing the action.\n", + " - action (str): The action selected by the policy network.\n", + " - reward (float): The reward received for the action.\n", + " - response (str): The response generated (if applicable).\n", + " \"\"\"\n", + " # Select an action using the policy network\n", + " action: str = policy_network(state, action_space)\n", + " response: str = None # Initialize response as None\n", + " reward: float = 0 # Initialize reward as 0\n", + "\n", + " # Execute the selected action\n", + " if action == \"rewrite_query\":\n", + " # Rewrite the query to improve retrieval\n", + " rewritten_query: str = rewrite_query(state[\"original_query\"], state[\"context\"])\n", + " state[\"current_query\"] = rewritten_query # Update the current query in the state\n", + " # Retrieve new context based on the rewritten query\n", + " new_context: List[str] = retrieve_relevant_chunks(rewritten_query)\n", + " state[\"context\"] = new_context # Update the context in the state\n", + "\n", + " elif action == \"expand_context\":\n", + " # Expand the context by retrieving additional chunks\n", + " expanded_context: List[str] = expand_context(state[\"current_query\"], state[\"context\"])\n", 
+ " state[\"context\"] = expanded_context # Update the context in the state\n", + "\n", + " elif action == \"filter_context\":\n", + " # Filter the context to keep only the most relevant chunks\n", + " filtered_context: List[str] = filter_context(state[\"current_query\"], state[\"context\"])\n", + " state[\"context\"] = filtered_context # Update the context in the state\n", + "\n", + " elif action == \"generate_response\":\n", + " # Construct a prompt using the current query and context\n", + " prompt: str = construct_prompt(state[\"current_query\"], state[\"context\"])\n", + " # Generate a response using the LLM\n", + " response: str = generate_response(prompt)\n", + " # Calculate the reward based on the similarity between the response and the ground truth\n", + " reward: float = calculate_reward(response, ground_truth)\n", + " # Update the state with the new response and reward\n", + " state[\"previous_responses\"].append(response)\n", + " state[\"previous_rewards\"].append(reward)\n", + "\n", + " # Return the updated state, selected action, reward, and response\n", + " return state, action, reward, response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In our single step function, we first select an action using the policy network. The policy network uses an epsilon-greedy strategy to balance exploration and exploitation. If the random number is less than epsilon, we choose a random action from the action space for exploration. Otherwise, we select the best action based on the current state using a simple heuristic." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training Parameters and Policy Update\n", + "\n", + "We need to define some training parameters for our training loop and also define a function to update the policy based on the rewards received.\n", + "\n", + "Though the training parameters function is **optional**, it can be used for advanced implementations of the RL pipeline." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to initialize training parameters\n", + "def initialize_training_params() -> Dict[str, Union[float, int]]:\n", + " \"\"\"\n", + " Initialize training parameters such as learning rate, number of episodes, and discount factor.\n", + "\n", + " Returns:\n", + " Dict[str, Union[float, int]]: A dictionary containing the initialized training parameters.\n", + " \"\"\"\n", + " params = {\n", + " \"learning_rate\": 0.01, # Learning rate for policy updates\n", + " \"num_episodes\": 100, # Total number of training episodes\n", + " \"discount_factor\": 0.99 # Discount factor for future rewards\n", + " }\n", + " return params" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Similar to how our state changes after each step in the RL process, the policy also needs to be updated based on the rewards received. The update_policy function takes the current policy, state, action, reward, and learning rate as input and returns the updated policy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to update policy based on reward\n", + "def update_policy(\n", + " policy: Dict[str, Dict[str, Union[float, str]]], \n", + " state: Dict[str, object], \n", + " action: str, \n", + " reward: float, \n", + " learning_rate: float\n", + ") -> Dict[str, Dict[str, Union[float, str]]]:\n", + " \"\"\"\n", + " Update the policy based on the reward received.\n", + "\n", + " Args:\n", + " policy (Dict[str, Dict[str, Union[float, str]]]): The current policy to be updated.\n", + " state (Dict[str, object]): The current state of the environment.\n", + " action (str): The action taken by the agent.\n", + " reward (float): The reward received for the action.\n", + " learning_rate (float): The learning rate for updating the policy.\n", + "\n", + " Returns:\n", + " Dict[str, Dict[str, Union[float, str]]]: The updated policy.\n", + " \"\"\"\n", + " # Example: Simple policy update (to be replaced with a proper RL algorithm)\n", + " policy[state[\"query\"]] = {\n", + " \"action\": action, # Store the action taken\n", + " \"reward\": reward # Store the reward received\n", + " }\n", + " return policy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the above `update_policy` logic, we store the action taken and the reward received for each query in the policy dictionary. In a more advanced RL algorithm, the policy update would involve more sophisticated methods such as policy gradients or Q-learning." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we need to implement progress tracking logic to monitor the training process. This will help us understand how the model is learning and improving over time." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to track training progress\n", + "def track_progress(\n", + " episode: int, \n", + " reward: float, \n", + " rewards_history: List[float]\n", + ") -> List[float]:\n", + " \"\"\"\n", + " Track the training progress by storing rewards for each episode.\n", + "\n", + " Args:\n", + " episode (int): The current episode number.\n", + " reward (float): The reward received in the current episode.\n", + " rewards_history (List[float]): A list to store the rewards for all episodes.\n", + "\n", + " Returns:\n", + " List[float]: The updated rewards history.\n", + " \"\"\"\n", + " # Append the current reward to the rewards history\n", + " rewards_history.append(reward)\n", + " \n", + " # Print progress every 10 episodes\n", + " print(f\"Episode {episode}: Reward = {reward}\")\n", + " \n", + " return rewards_history" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training Loop\n", + "\n", + "Now that we have coded every part of the training loop, we can put it all together in a single function that implements the training loop for the RL-enhanced RAG system." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to implement the training loop\n", + "def training_loop(\n", + " query_text: str, \n", + " ground_truth: str, \n", + " params: Optional[Dict[str, Union[float, int]]] = None\n", + ") -> Tuple[Dict[str, Dict[str, Union[float, str]]], List[float], List[List[str]], Optional[str]]:\n", + " \"\"\"\n", + " Implement the training loop for RL-enhanced RAG.\n", + "\n", + " Args:\n", + " query_text (str): The input query text for the RAG pipeline.\n", + " ground_truth (str): The expected correct answer for the query.\n", + " params (Optional[Dict[str, Union[float, int]]]): Training parameters such as learning rate, \n", + " number of episodes, and discount factor. If None, default parameters are initialized.\n", + "\n", + " Returns:\n", + " Tuple: A tuple containing:\n", + " - policy (Dict[str, Dict[str, Union[float, str]]]): The updated policy after training.\n", + " - rewards_history (List[float]): A list of rewards received in each episode.\n", + " - actions_history (List[List[str]]): A list of actions taken in each episode.\n", + " - best_response (Optional[str]): The best response generated during training.\n", + " \"\"\"\n", + " # Initialize training parameters if not provided\n", + " if params is None:\n", + " params = initialize_training_params()\n", + " \n", + " # Initialize variables to track progress\n", + " rewards_history: List[float] = [] # List to store rewards for each episode\n", + " actions_history: List[List[str]] = [] # List to store actions taken in each episode\n", + " policy: Dict[str, Dict[str, Union[float, str]]] = {} # Policy dictionary to store actions and rewards\n", + " action_space: List[str] = define_action_space() # Define the action space\n", + " best_response: Optional[str] = None # Variable to store the best response\n", + " best_reward: float = -1 # Initialize the best reward to a very low value\n", + " \n", + " # Get 
initial performance from the simple RAG pipeline for comparison\n", + " simple_response: str = basic_rag_pipeline(query_text)\n", + " simple_reward: float = calculate_reward(simple_response, ground_truth)\n", + " print(f\"Simple RAG reward: {simple_reward:.4f}\")\n", + "\n", + " # Start the training loop\n", + " for episode in range(params[\"num_episodes\"]):\n", + " # Reset the environment with the same query\n", + " context_chunks: List[str] = retrieve_relevant_chunks(query_text)\n", + " state: Dict[str, object] = define_state(query_text, context_chunks)\n", + " episode_reward: float = 0 # Initialize the reward for the current episode\n", + " episode_actions: List[str] = [] # Initialize the list of actions for the current episode\n", + " \n", + " # Maximum number of steps per episode to prevent infinite loops\n", + " for step in range(10):\n", + " # Perform a single RL step\n", + " state, action, reward, response = rl_step(state, action_space, ground_truth)\n", + " episode_actions.append(action) # Record the action taken\n", + " \n", + " # If a response is generated, end the episode\n", + " if response:\n", + " episode_reward = reward # Update the episode reward\n", + " \n", + " # Track the best response and reward\n", + " if reward > best_reward:\n", + " best_reward = reward\n", + " best_response = response\n", + " \n", + " break # Exit the loop as the episode ends\n", + " \n", + " # Update rewards and actions history\n", + " rewards_history.append(episode_reward)\n", + " actions_history.append(episode_actions)\n", + " \n", + " # Print progress every 5 episodes\n", + " if episode % 5 == 0:\n", + " print(f\"Episode {episode}: Reward = {episode_reward:.4f}, Actions = {episode_actions}\")\n", + " \n", + " # Compare the best RL-enhanced RAG reward with the simple RAG reward\n", + " improvement: float = best_reward - simple_reward\n", + " print(f\"\\nTraining completed:\")\n", + " print(f\"Simple RAG reward: {simple_reward:.4f}\")\n", + " print(f\"Best RL-enhanced 
RAG reward: {best_reward:.4f}\")\n", + " print(f\"Improvement: {improvement:.4f} ({improvement * 100:.2f}%)\")\n", + "\n", + " return policy, rewards_history, actions_history, best_response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This function will take the input query text, the expected ground truth answer, and optionally some training parameters. It will return the updated policy, a list of rewards received in each episode, a list of actions taken in each episode, and the best response generated during training.\n", + "\n", + "In more detail, the `training_loop` function will:\n", + "- Initialize training parameters if not provided.\n", + "- Get the initial performance from the simple RAG pipeline for comparison.\n", + "- Start the training loop for the specified number of episodes.\n", + "- Perform a single RL step in each episode.\n", + "- Update rewards and actions history for each episode.\n", + "- Print progress every 5 episodes.\n", + "- Compare the best RL-enhanced RAG reward with the simple RAG reward.\n", + "- Return the updated policy, rewards history, actions history, and the best response generated during training." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Performance Comparison Logic\n", + "\n", + "Although we can manually compare the simple RAG pipeline with the RL-based RAG pipeline, a function can definitely help us in this regard. So, let's define a function to compare the performance of the simple RAG pipeline with the RL-enhanced RAG pipeline." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to compare Simple RAG vs RL-Enhanced RAG\n", + "def compare_rag_approaches(query_text: str, ground_truth: str) -> Tuple[str, str, float, float]:\n", + " \"\"\"\n", + " Compare the outputs of simple RAG versus RL-enhanced RAG.\n", + "\n", + " Args:\n", + " query_text (str): The input query text for the RAG pipeline.\n", + " ground_truth (str): The expected correct answer for the query.\n", + "\n", + " Returns:\n", + " Tuple[str, str, float, float]: A tuple containing:\n", + " - simple_response (str): The response generated by the simple RAG pipeline.\n", + " - best_rl_response (str): The best response generated by the RL-enhanced RAG pipeline.\n", + " - simple_similarity (float): The similarity score of the simple RAG response to the ground truth.\n", + " - rl_similarity (float): The similarity score of the RL-enhanced RAG response to the ground truth.\n", + " \"\"\"\n", + " print(\"=\" * 80)\n", + " print(f\"Query: {query_text}\")\n", + " print(\"=\" * 80)\n", + " \n", + " # Step 1: Generate a response using the simple RAG pipeline\n", + " # The basic RAG pipeline retrieves relevant chunks and generates a response without reinforcement learning.\n", + " simple_response: str = basic_rag_pipeline(query_text)\n", + " # Calculate the similarity score between the simple RAG response and the ground truth.\n", + " simple_similarity: float = calculate_reward(simple_response, ground_truth)\n", + " \n", + " print(\"\\nSimple RAG Output:\")\n", + " print(\"-\" * 40)\n", + " print(simple_response)\n", + " print(f\"Similarity to ground truth: {simple_similarity:.4f}\")\n", + " \n", + " # Step 2: Train the RL-enhanced RAG model\n", + " print(\"\\nTraining RL-enhanced RAG model...\")\n", + " # Initialize training parameters (e.g., learning rate, number of episodes, discount factor).\n", + " params: Dict[str, float | int] = initialize_training_params()\n", + 
" # Set the number of episodes to a smaller value for demonstration purposes.\n", + " params[\"num_episodes\"] = 5\n", + " \n", + " # Run the training loop for the RL-enhanced RAG model.\n", + " # This loop trains the model to optimize its responses using reinforcement learning.\n", + " _, rewards_history, actions_history, best_rl_response = training_loop(\n", + " query_text, ground_truth, params\n", + " )\n", + " \n", + " # If no response was generated during training, generate one using the current query and context.\n", + " if best_rl_response is None:\n", + " # Retrieve relevant chunks for the query.\n", + " context_chunks: List[str] = retrieve_relevant_chunks(query_text)\n", + " # Construct a prompt using the query and retrieved context.\n", + " prompt: str = construct_prompt(query_text, context_chunks)\n", + " # Generate a response using the language model.\n", + " best_rl_response: str = generate_response(prompt)\n", + " \n", + " # Calculate the similarity score between the RL-enhanced RAG response and the ground truth.\n", + " rl_similarity: float = calculate_reward(best_rl_response, ground_truth)\n", + " \n", + " print(\"\\nRL-enhanced RAG Output:\")\n", + " print(\"-\" * 40)\n", + " print(best_rl_response)\n", + " print(f\"Similarity to ground truth: {rl_similarity:.4f}\")\n", + " \n", + " # Step 3: Evaluate and compare the results\n", + " # Calculate the improvement in similarity score achieved by the RL-enhanced RAG model.\n", + " improvement: float = rl_similarity - simple_similarity\n", + " \n", + " print(\"\\nEvaluation Results:\")\n", + " print(\"-\" * 40)\n", + " print(f\"Simple RAG similarity to ground truth: {simple_similarity:.4f}\")\n", + " print(f\"RL-enhanced RAG similarity to ground truth: {rl_similarity:.4f}\")\n", + " print(f\"Improvement: {improvement * 100:.2f}%\")\n", + " \n", + " # Step 4: Plot the reward history (if there are enough episodes and matplotlib is available)\n", + " if len(rewards_history) > 1:\n", + " try:\n", + " import 
matplotlib.pyplot as plt\n", + " # Create a plot to visualize the reward history during RL training.\n", + " plt.figure(figsize=(10, 6))\n", + " plt.plot(rewards_history)\n", + " plt.title('Reward History During RL Training')\n", + " plt.xlabel('Episode')\n", + " plt.ylabel('Reward')\n", + " plt.grid(True)\n", + " plt.show()\n", + " except ImportError:\n", + " # If matplotlib is not available, print a message instead of plotting.\n", + " print(\"Matplotlib not available for plotting rewards\")\n", + " \n", + " # Return the results: responses and similarity scores for both approaches.\n", + " return simple_response, best_rl_response, simple_similarity, rl_similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So our performance comparison logic is not very complicated but is based on 4 steps:\n", + "1. Generate a response using the simple RAG pipeline.\n", + "2. Train the RL-enhanced RAG model using the training loop.\n", + "3. Evaluate and compare the results.\n", + "4. Plot the reward history (if available)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluation Framework (**Optional**)\n", + "\n", + "This step is optional but in case you want to evaluate all the eval queries in the validation data, you can use the following code.\n", + "\n", + "First, to check the relevance of the retrieved chunks and the ground truth, we need to have a function that evaluates the relevance of the retrieved chunks." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to evaluate relevance of retrieved chunks\n", + "def evaluate_relevance(retrieved_chunks: List[str], ground_truth_chunks: List[str]) -> float:\n", + " \"\"\"\n", + " Evaluate the relevance of retrieved chunks by comparing them to ground truth chunks.\n", + "\n", + " Args:\n", + " retrieved_chunks (List[str]): A list of text chunks retrieved by the system.\n", + " ground_truth_chunks (List[str]): A list of ground truth text chunks for comparison.\n", + "\n", + " Returns:\n", + " float: The average relevance score between the retrieved chunks and the ground truth chunks.\n", + " \"\"\"\n", + " relevance_scores: List[float] = [] # Initialize a list to store relevance scores\n", + "\n", + " # Iterate through pairs of retrieved and ground truth chunks\n", + " for retrieved, ground_truth in zip(retrieved_chunks, ground_truth_chunks):\n", + " # Calculate the cosine similarity between the embeddings of the retrieved and ground truth chunks\n", + " relevance: float = cosine_similarity(\n", + " generate_embeddings([retrieved])[0],\n", + " generate_embeddings([ground_truth])[0]\n", + " )\n", + " # Append the relevance score to the list\n", + " relevance_scores.append(relevance)\n", + "\n", + " # Return the average relevance score\n", + " return np.mean(relevance_scores)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To evaluate the accuracy of the generated responses, we can use the cosine similarity between the embeddings of the generated responses and the ground truth. So let's define a function to evaluate the accuracy of the responses based on this similarity metric." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to evaluate the accuracy of generated responses\n", + "def evaluate_accuracy(responses: List[str], ground_truth_responses: List[str]) -> float:\n", + " \"\"\"\n", + " Evaluate the accuracy of generated responses by comparing them to ground truth responses.\n", + "\n", + " Args:\n", + " responses (List[str]): A list of generated responses to evaluate.\n", + " ground_truth_responses (List[str]): A list of ground truth responses to compare against.\n", + "\n", + " Returns:\n", + " float: The average accuracy score, calculated as the mean cosine similarity \n", + " between the embeddings of the generated responses and the ground truth responses.\n", + " \"\"\"\n", + " accuracy_scores: List[float] = [] # Initialize a list to store accuracy scores\n", + "\n", + " # Iterate through each pair of generated response and ground truth response\n", + " for response, ground_truth in zip(responses, ground_truth_responses):\n", + " # Calculate the cosine similarity between the embeddings of the response and ground truth\n", + " accuracy: float = cosine_similarity(\n", + " generate_embeddings([response])[0],\n", + " generate_embeddings([ground_truth])[0]\n", + " )\n", + " # Append the accuracy score to the list\n", + " accuracy_scores.append(accuracy)\n", + "\n", + " # Return the mean of the accuracy scores\n", + " return np.mean(accuracy_scores)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We also need to measure the response quality and assign a relevant score for it to be used in the reinforcement learning process." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to evaluate response quality\n", + "def evaluate_response_quality(responses: List[str]) -> float:\n", + " \"\"\"\n", + " Evaluate the quality of responses using a heuristic or external model.\n", + "\n", + " Args:\n", + " responses (List[str]): A list of generated responses to evaluate.\n", + "\n", + " Returns:\n", + " float: The average quality score of the responses, ranging from 0 to 1.\n", + " \"\"\"\n", + " quality_scores: List[float] = [] # Initialize a list to store quality scores for each response\n", + "\n", + " for response in responses:\n", + " # Example heuristic: Calculate a quality score based on response length\n", + " # Normalize the length by a maximum of 100 words and cap the score at 1.0\n", + " quality: float = len(response.split()) / 100\n", + " quality_scores.append(min(quality, 1.0)) # Append the capped quality score to the list\n", + "\n", + " # Return the average quality score across all responses\n", + " return np.mean(quality_scores)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we can evaluate the performance of the RL-enhanced RAG model on the validation dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to evaluate RAG performance\n", + "def evaluate_rag_performance(\n", + " queries: List[str], \n", + " ground_truth_chunks: List[str], \n", + " ground_truth_responses: List[str]\n", + ") -> Dict[str, float]:\n", + " \"\"\"\n", + " Evaluate the performance of the RAG pipeline using relevance, accuracy, and response quality metrics.\n", + "\n", + " Args:\n", + " queries (List[str]): A list of query strings to evaluate.\n", + " ground_truth_chunks (List[str]): A list of ground truth text chunks corresponding to the queries.\n", + " ground_truth_responses (List[str]): A list of ground truth responses 
corresponding to the queries.\n", + "\n", + " Returns:\n", + " Dict[str, float]: A dictionary containing the average relevance, accuracy, and quality scores.\n", + " \"\"\"\n", + " # Initialize lists to store scores for each metric\n", + " relevance_scores: List[float] = []\n", + " accuracy_scores: List[float] = []\n", + " quality_scores: List[float] = []\n", + "\n", + " # Iterate through each query and its corresponding ground truth data\n", + " for query, ground_truth_chunk, ground_truth_response in zip(queries, ground_truth_chunks, ground_truth_responses):\n", + " # Retrieve relevant chunks for the query\n", + " retrieved_chunks: List[str] = retrieve_relevant_chunks(query)\n", + " \n", + " # Evaluate the relevance of the retrieved chunks compared to the ground truth chunk\n", + " relevance: float = evaluate_relevance(retrieved_chunks, [ground_truth_chunk])\n", + " relevance_scores.append(relevance)\n", + "\n", + " # Generate a response using the basic RAG pipeline\n", + " response: str = basic_rag_pipeline(query)\n", + " \n", + " # Evaluate the accuracy of the generated response compared to the ground truth response\n", + " accuracy: float = evaluate_accuracy([response], [ground_truth_response])\n", + " accuracy_scores.append(accuracy)\n", + "\n", + " # Evaluate the quality of the generated response\n", + " quality: float = evaluate_response_quality([response])\n", + " quality_scores.append(quality)\n", + "\n", + " # Calculate the average scores for each metric\n", + " avg_relevance: float = np.mean(relevance_scores)\n", + " avg_accuracy: float = np.mean(accuracy_scores)\n", + " avg_quality: float = np.mean(quality_scores)\n", + "\n", + " # Return the average scores as a dictionary\n", + " return {\n", + " \"average_relevance\": avg_relevance,\n", + " \"average_accuracy\": avg_accuracy,\n", + " \"average_quality\": avg_quality\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluating (RL vs Simple) RAG\n", + "\n", + "Ah, 
the moment of truth! Let's evaluate the performance of the simple RAG pipeline against the RL-enhanced RAG pipeline on our factual query, where the simple RAG previously failed to provide the correct answer. Let's see if the RL-enhanced RAG pipeline can perform better." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's revisit our evaluation query and see what the simple RAG pipeline generates for it." + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔍 Running the Retrieval-Augmented Generation (RAG) pipeline...\n", + "📥 Query: What is the mathematical representation of a qubit in superposition?\n", + "\n", + "🤖 AI Response:\n", + "--------------------------------------------------\n", + "ψ α0 β1\n", + "--------------------------------------------------\n", + "✅ Ground Truth Answer:\n", + "--------------------------------------------------\n", + "|ψ⟩ = α|0⟩ + β|1⟩, where α and β are complex numbers satisfying |α|² + |β|² = 1, representing the probability amplitudes for measuring the qubit in state |0⟩ or |1⟩ respectively.\n", + "--------------------------------------------------\n" + ] + } + ], + "source": [ + "# Print a message to indicate the start of the RAG pipeline\n", + "print(\"🔍 Running the Retrieval-Augmented Generation (RAG) pipeline...\")\n", + "print(f\"📥 Query: {sample_query}\\n\")\n", + "\n", + "# Run the RAG pipeline and get the response\n", + "response = basic_rag_pipeline(sample_query)\n", + "\n", + "# Print the response with better formatting\n", + "print(\"🤖 AI Response:\")\n", + "print(\"-\" * 50)\n", + "print(response.strip())\n", + "print(\"-\" * 50)\n", + "\n", + "# Print the ground truth answer for comparison\n", + "print(\"✅ Ground Truth Answer:\")\n", + "print(\"-\" * 50)\n", + "print(expected_answer)\n", + "print(\"-\" * 50)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================================================================\n", + "Query: What is the mathematical representation of a qubit in superposition?\n", + "================================================================================\n", + "\n", + "Simple RAG Output:\n", + "----------------------------------------\n", + "ψ α0 β1 \n", + "\n", + "Similarity to ground truth: 0.6726\n", + "\n", + "Training RL-enhanced RAG model...\n", + "Simple RAG reward: 0.6772\n", + "Episode 0: Reward = 0.0000, Actions = ['rewrite_query', 'rewrite_query', np.str_('rewrite_query'), 'rewrite_query', np.str_('rewrite_query'), 'rewrite_query', 'rewrite_query', 'rewrite_query', np.str_('expand_context'), 'rewrite_query']\n", + "\n", + "Training completed:\n", + "Simple RAG reward: 0.6772\n", + "Best RL-enhanced RAG reward: 0.8652\n", + "Improvement: 0.1879 (18.79%)\n", + "\n", + "RL-enhanced RAG Output:\n", + "----------------------------------------\n", + "The mathematical representation of a qubit in superposition is: \n", + "ψ = α0 + β1 \n", + "\n", + "Where:\n", + "\n", + "* α and β are complex numbers.\n", + "* α² + β² = 1 \n", + "\n", + "\n", + "Let me know if you would like a deeper explanation of any of these terms! 
\n", + "\n", + "Similarity to ground truth: 0.8652\n", + "\n", + "Evaluation Results:\n", + "----------------------------------------\n", + "Simple RAG similarity to ground truth: 0.6726\n", + "RL-enhanced RAG similarity to ground truth: 0.8652\n", + "Improvement: 19.26%\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA04AAAIjCAYAAAA0vUuxAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8ekN5oAAAACXBIWXMAAA9hAAAPYQGoP6dpAACF8UlEQVR4nO3dB3yUVbr48SeT3iGQRhLS6NJBelexra676lpREUEp+3fXLVf37up6vXfde3fXu3sFQRCwi2t3FbHRe0dAWkIagRQI6aTNzP9zzmTGAIEQyOSd8vt+PiFvJpPJw8lk8j7vc85zfKxWq1UAAAAAABdkuvCnAAAAAAAKiRMAAAAAtIDECQAAAABaQOIEAAAAAC0gcQIAAACAFpA4AQAAAEALSJwAAAAAoAUkTgAAAADQAhInAAAAAGgBiRMAuBkfHx/54x//2OaP+9BDD0lKSkqbP663efXVV/XPKDs72+hQ3NqVPB/V74f6GQBAWyJxAuBxJ6z2Nz8/P0lISNAnYPn5+eJN1Em7GoO//vWvFz2xPHny5BV9n++//14/lismCern3vT5EBYWJmlpaXLHHXfIBx98IBaLRTyN/edqf/P399fJx//7f/9PSktLz7u/+tyPfvSjVn2Ppo9/sbfVq1e34f8MAIznZ3QAANDW/uM//kNSU1OlpqZGNm/erBOq9evXy759+yQoKMjo8FzWokWLWp1MqMTp2WeflQkTJrhktSowMFBeeeUVfXzmzBnJycmRf/3rXzp5UjF/8sknEhER0abfc8qUKXL33Xfr722U+fPn60SxqqpKvv32W3nxxRdl586d+vfgSr3xxhtnffz666/L119/fd7tvXv3bvfno93vf/97efLJJ6/o+wPAuUicAHicG2+8UYYOHaqPH3nkEencubP893//t3z66afys5/9TFydOtkNDQ1t9++rqhOuorq6WkJCQq74cVTV8f777z/rtv/8z/+UP//5z/LUU0/J9OnT5d1335W2/Ln5+vrqNyOpxFA975VHH31UJ3Lq/7l161YZNmzYFT32ueOpLk6oxOnc26/0Z3olz0f1c1dvANCWmKoHwOONHTtWv8/MzDzr9oMHD+oTzKioKF2JUsmWSq7s1NQmdQL8f//3f47b1NQ2k8kknTp1EqvV6rh95syZEhcX5/h43bp1cuedd0rXrl115SEpKUl++ctf6qrHudPJVGVAxXbTTTdJeHi43HffffpztbW1+muio6P17bfeeqscO3bMCSP0QyznVo2WLVsmQ4YM0d9fVWb69esn//jHP/TnVCVP/R+ViRMnNjtF66WXXpKrrrpKj0GXLl1k9uzZ500ZU5Wfvn37yo4dO2TcuHH65Pp3v/udPPjgg/rkv76+/rxYJ0+eLD179rzs/6uqRqjHeO+99+Tw4cMtrh9T46LG59xpoWvWrJFZs2ZJTEyMJCYmnvW5ptMX7VPiVMVHJS7q+aamDapqzbm+++47GT9+vAQHB+vHVIne0qVLr2jd1IV+B5zlQj9TRVX5br75Zv18UM+L9PR0ee6558RsNl/0+dh0+unChQv116mvv/rqq2Xbtm0trnFSH8+ZM0c+/vhjHZv6WvXcXLFixXnxq+ewej1QPyf1fV5++WXWTQGg4gTA89lPNjt27Oi4bf/+/TJ69Gi9BkqdRKtKwT//+U+57bbb
9PqXn/zkJ9KhQwd9grV27Vq9RkRRJ77q5KmkpERPU1MnXvZEyX5yqqgTcnWFXSVUKslSV/rVdCmV+KjPNdXQ0CDXX3+9jBkzRp8U2q/Kq2rZm2++Kffee6+MGjVKVq5cqU84W0PF0Nw6JnV7S1QV4Z577pFrrrlGV+yUAwcOyIYNG+Txxx/XJ8RqXFRiqU6K7VOz7O/ViaaaxnfttdfqcTh06JCeQqZOctVjNK0onDp1SlcKVWVEVS5iY2P1z0QlFl9++eVZ63AKCgr0WDzzzDNypVPqvvrqK/3/7NGjx2U9hkqaVGL79NNP64rTxWRkZOhEfdq0aTopXLJkiU4OVGJqfx6ptXj2JFRVxNQYqKmGVzrtr7nfAWdr7mdqTyzVxYInnnhCv1c/SzV+5eXl8pe//KXFx3377beloqJCV9LUOP3P//yP/PSnP5WjR4+2WKVSv78ffvih/rmpiwHquXv77bdLbm6u/j1Vdu3aJTfccIPEx8fr569K6NT0X/VzBuDlrADgIZYuXapKQNZvvvnGWlxcbM3Ly7O+//771ujoaGtgYKD+2O6aa66x9uvXz1pTU+O4zWKxWEeNGmXt3r2747bZs2dbY2NjHR8/8cQT1nHjxlljYmKs8+fP17edOnXK6uPjY/3HP/7huF91dfV58T3//PP6fjk5OY7bHnzwQR3zk08+edZ9d+/erW+fNWvWWbffe++9+vZnnnnmomORlZWl79fSmxqnprEkJyc7Pn788cetERER1oaGhgt+n/fee08/zqpVq866vaioyBoQEGCdPHmy1Ww2O26fO3euvv+SJUsct40fP17ftmDBgrMeQ31dYmKi9a677jrr9hdeeEGP49GjRy86Bur/ExoaesHP79q1S3/fX/7yl47bLjS2alzU4537XBszZsx542P/nPoZNP16ddvatWvPGiP1vPzVr37luO3nP/+5/r+p2OzU8ysqKuq8x2yOil3d79ChQ/pnm52drcc6ODhY/x5UVVWd9/+6+eabrVdC/Y6cezpxoZ/phX43Hn30UWtISMhZv4/nPh/tz+lOnTpZS0pKHLd/8skn+vZ//etf541DU+pj9ZzMyMhw3LZnzx59+4svvui47ZZbbtGx5OfnO247cuSI1c/P77zHBOBdmKoHwOOoCoe6Oqymx6kr/OqqvZqCZ59KpapF6iq3Wu+krlyriox6U1fIVeXnyJEjji58qopUWFioqyX2ypKqtKjb1bH9KrY6L2tacVLTrOxUJUI9vqoaqfupK9rnUhWZppYvX67f2ytddr/4xS9aNRYzZszQFZVz31S1pSWq4qZiV/dvrW+++Ubq6up0vGpqo51aU6Sm/H3++edn3V9VVKZOnXrWberr1LRF9bNTPye7t956S4+lagByJVS1Q2n62K2l/j+Xup6pT58+Zz1H1HNUTTdUlRI7NW1s5MiRMnDgQMdtaiqpffrmpVKPqx5fTXV7+OGHpVu3bvLFF1+0ybqxS9Xcz/Tc3w37758aF1UFVdNnW3LXXXedVTmzj2nTcbzYa4OaemfXv39//Xy0f62qLqnnrqo8q6mEdmr8VPUMgHcjcQLgcebNm6dP9t9//329bkidmDWd6qSmTKkE5g9/+IM+uWz6Zp/+VVRUdNZJmUqSVBKhkh51m0qe7ImTeq9OvgYMGOD4Hmrqj5qGpU561Qm6emy1bkUpKys7K161iN2e1Nmp7m8qcWh6kqe0dl1P9+7d9cniuW9qfU1L1HQmNYVNnTCq+NQJeHPrQZqj4m8u3oCAAP297Z+3U1Mm1efO9cADD+h1YR999JH+WCWwat3MpSR+LamsrNTv1ZSty9Wa5E2tdzuXSgBOnz7t+FiNizpJP1dzt12Mmm6qfgfUtLYRI0bo53PThKU9XOhnqqbJqqmwkZGR+vdG/W7YG0uc+7txKeNoT6KajuOlfq396+1fq8ZJPd/a4mcAwPOwxgmAx1GL7+1d9dSVY7V2SK0TUifdKomxtzj+9a9/
rStMzbGfJKmrzurkWK1zUlfvVcKlKgLqZE+t81EnuipxUhUQe2VFXbW+7rrrdGXr3/7t36RXr1666qWqWCqZOrfFskrqmlZlXIVqeLB79269xkhVK9SbalKgkpnXXnutTb/XhU7qVZVGrQFSa73U91Xv1cl4W3RHVO3pL/WE+NzGBXatSUYuVJlq2mSkrajE3t5V75ZbbtFNPVTVSiWd7fVca25sVGMQdQFBJUxq3ZC6MKAaMKhW6ep35VLaj1/JOLbnzwCA5yFxAuDR1InS888/rxfcz507VzeCsFdb1EJyVX1piaowqcRJJVBqCpWqUKjqkrpiriow6qRPLSK327t3r+7UppILdbJv15opb8nJyfokUnVBa1q1sU8ZbC8qSVEn3upNxaOqUKrDmKrWqYTjQl3GVPz2eJtWt9T0vaysrEsadzs1hqqRwIkTJ3QFRTXIaIsmB2rfIRW/SnLt1OOe2/VPxay+d3tQ46Yqoudq7rZLpS4WqEqqmjanGqCoZg1GUd3q1JRY1aBBJXd26jnhKhcLVCLX1j8DAJ7B9S5xAoATWiOrKtTf//53vSmuOjlSt6kEoLkT4uLi4vMSJ9WVTO2DY5+6p67aqyrTCy+8oNtlN127Yr+q3fQqtjq2t/G+FPb1FE1boSvq/9Be1AluU+r/rNaE2FulK/b9ps5NNlRipJIuFX/TcVi8eLGejtWa7oCqs59KcFSFT61FaWm/oEuh9nFSHfXUehk1ndFOVUBUktyUan19oYpTW1MV0E2bNulKn52qXKp1XVdCVZvUdEt7d0SjNPe7oRJT1bbeFaj41HNXtSw/fvz4WUmTqrgC8G5UnAB4hd/85jd6zyHVCvmxxx7T66DUFD41hUkt8FdVEdUEQp20qpbhe/bscXytPSlS1ZM//elPjtvVFXN1MmXfS8ZOTc1TJ+BqKqCanqemJak1J5eyBsNOVbZUwqBOKFWioZK0b7/9tl2veqt26OqkfdKkSfqkW01LVC3VVWz2luPqWJ1sqhNyFacaC3V/lZyqdtqqEqdaO6s9qNT4qf+PGqvWJD9qWqR6DNXGXTWsaE3SpVq9q+l9ikqa1f9BNZtQeyWpKqRKis79P6vnh2pRrSpR6nmgpirap705229/+1sdr/reP//5zx3tyNXaHPWzuNx9hFR1VSWe6vdAVUnVeNqp55TaK+pcgwYNanX7+5ao57Gq6ql27Krxifr/qMqfK02VU230VVKttitQTVtU0qyq1WprgqYJLQDvQ+IEwCuofV5UMqP2SVKJklo7s337dn1ir5IpVV1RJ/vqZFHtKdOUmiqnPqcWjqtk69yESlWzmjafUCep//rXv/SJoZomqKb+qMXwavPNpg0kWqL2+VFJg6o2qCvgKiFR3ehUt8D2oJIblVioZEdVlNQGv6pCo04s7etk1G0LFizQ/0+1P5E6yVy1apUeL3U/Fb866VQb+apGGarLn0o+W9pvp7npep999ple29SaPY1UZczeSEJ1lFNxqTVT6mesfibnrvdRzw01bUxVxlSCoX7Gaoql2suqPaifrRo/9dxR46TGT20arBIodZt6Ll0uNfYqQVLVtqaJk0po1dTLc6mfZ1snTmqvJPVz/NWvfiW///3vdRKlnmdqfC+03rC9qeeHuiCiLnyocVE/E7UeS+1hdild/wB4Lh/Vk9zoIAAAuJhPPvlEN/pQ0+iaTov0Fqqtu5paqjoBXmr7c7Qt9fxTHQHVdgUAvBNrnAAALm/RokV6OmXTip+nUu2wm1LVUDWdTf3fSZqM+RmoZEntrabWRgLwXkzVAwC4rGXLlun1SGqKomqucblrfNyJanevTtDVOjK17k5NGywvL292Oh2cQyXpausA+55j8+fP181O1Bo0AN6LqXoAAJelEiXVTlutrVJrqdRmwZ7ud7/7nd68WTUpUf//wYMH63birWnhjiujWrertWYFBQV6TZ1KZtWaM/WzAOC9SJwAAAAAoAWscQIAAACAFpA4
AQAAAEALPH+y+DksFoveDTw8PNwrFhkDAAAAaJ5atVRRUSFdunQ5b28/8fbESSVN7bV5JAAAAADXl5eXJ4mJiRe9j9clTqrSZB+ciIgIo8OR+vp6+eqrr2Ty5Mni7+9vdDgeh/F1LsbXuRhf52J8nYvxdS7G17kYX+8Z3/Lycl1UsecIF+N1iZN9ep5KmlwlcQoJCdGxGP3E8USMr3Mxvs7F+DoX4+tcjK9zMb7Oxfh63/j6XMISHppDAAAAAEALSJwAAAAAoAUkTgAAAADQAhInAAAAAGgBiRMAAAAAtIDECQAAAABaQOIEAAAAAC0gcQIAAACAFpA4AQAAAEALSJwAAAAAoAUkTgAAAADQAhInAAAAAGgBiRMAAAAAtIDECQAAAABaQOIEAAAAAC0gcQIAAACAFpA4AQAAAGgXFTX1si37tGwq9BF342d0AAAAAAA8i9VqlaKKWtl/vEy+P14u+4+Xy/cnyiXnVHXjPXzlieo6iYn0F3dB4gQAAADgspktVsk6WaUTI3uipN5OVdU1e//4yCDpZKqWytoGiRH3QeIEAAAA4JLU1JvlYEFFYxWpTCdLB09UyJl683n3NfmIpEeHyVVdIqRPlwi5qkuk9I6PkPAAH1m+fLkkdQwRd0LiBAAAAOA8p6vqzqoi7T9eLpnFlWKxnn/fYH9f6RUfbkuS4iN1otQrLlyC/H3Pu299fb24IxInAAAAwMvXIx07fcaxDun7xkTpeFlNs/fvFBqgEyN7FalPfISkdg4VX1Vi8mAkTgAAAICXqDdbJKOo0pYkNZluV1HT0Oz9kzuF6MToqiaJUkx4oPj4eHaS1BwSJwAAAMBDW3+fux7pcEGl1Jkt593X39dHesSGN0mS1HqkcAkPcp+ud85G4gQAAAC4+VS7Yt36u/ysNUnZjtbfZwsP9JPeunqk1iPZqkjdYsIkwI8tXi+GxAkAAABwo9bf2aeqztobSa1JOll54dbf5061S+wY7JVT7a4UiRMAAADgoq2/D6mpdk2qSGrqXXVd862/0+ytvxurSGqqXaewQENi90QkTgAAAIDBSqvrzqoiqUQps7hKV5jOFeRvkl5xZ1eResaGS3DA+a2/0XZInAAAAIB2bv1tS45sne0OnCiX/NIzzd4/KjTAUUWyJUmq9XeYx7f+dkUkTgAAAICTWn+rDWP355/dtKH8Aq2/u0aF/DDVLsG2kWxshHe2/nZFJE4AAADAFaqsbZCDTapIKlE6VFghdQ3Nt/7uHhPuqCCpREl1uYug9bdLI3ECAAAAWqGoouaHBKkxSVKd7qzWC7f+btrZTiVNtP52PyROAAAAQDMsFqscLa48az2Sen+ysrbZ+8dFBJ1VRbK3/jaxHskjkDgBAADA66nW34cLK3RytPdYqWw84CtP7Vh50dbfTRs2qGNaf3s2EicAAAB4X+tvvXHsD1WkjOLKc1p/qyqRWQL9TNLLPs2u8X3PuHAJCeA02tvwEwcAAIDHtv5Wbb6b7o+kji/U+rtjiH/jnkihUld4VO65caz0iIsUP1/WI4HECQAAAB6gQbf+rnK0/LYnSmVn6pu9f1JUsFwVH/nDVLsuEXqNkmr9XV9fL8uXZ0r3mDCSJjiQOAEAAMCtVKnW3wXlZyVIBwuab/3tZ/KR7rHhZ3W16x0fIZHBtP5G65A4AQAAwGUVV9TaqkiNne0OHC+XrAu0/g4L9HM0bNBv8RHSPTZMAv18jQgdHobECQAAAC7R+junpLqxivRDoqQSp+bERgQ6Wn7bp9sldQyh9TechsQJAAAA7aq2wSyHC9T+SGWO/ZEOnCiXqmZaf/uo1t+dQ6VPl0hHZzuVKHWm9TfaGYkTAAAAnKasur6xemSrIqkkKaOoUhrOav1to1t/x4XrJMleRVIf0/obroBnIQAAANqk9ffxspofpto1Nm04drr51t8ddOvvxql2jVUkVVmiix1cFYkTAAAAWt36++jJ81t/l1Y33/o7sWNw4zS7xul2XSIk
PtLW+htwFyROAAAAuKDqugY5cKJCvm8y1U61/q69QOvvbjFhjoYN9koSrb/hCQxPnObNmyd/+ctfpKCgQAYMGCAvvviiDBs27IL3//vf/y7z58+X3Nxc6dy5s9xxxx3y/PPPS1BQULvGDQAA4GlOVqrW3/b9kWyJUtbJ5lt/hwb4OpIje6JE6294MkMTp3fffVeeeOIJWbBggQwfPlwnRddff70cOnRIYmJizrv/22+/LU8++aQsWbJERo0aJYcPH5aHHnpIl3lfeOEFQ/4PAAAA7tj6O1e1/rY3bWicbld0gdbfMeGBjmYN9ul2XaNo/Q3vYmjipJKd6dOny9SpU/XHKoH6/PPPdWKkEqRzbdy4UUaPHi333nuv/jglJUXuuece2bJlS7vHDgAA4C6tv48UVp5VRVJT7yprG867r1pylKpafzepIqnj6HBafwOGJU51dXWyY8cOeeqppxy3mUwmufbaa2XTpk3Nfo2qMr355puydetWPZ3v6NGjsnz5cpkyZcoFv09tba1+sysvL9fv6+vr9ZvR7DG4QiyeiPF1LsbXuRhf52J8nYvxNWZ8y8/Uy4GCCvn+RIV+f0C1/i6uarb1d4CfSXrGhkmf+HDprVqAx0dIj9gwCQ08//TQ236OPH+9Z3zrWxGDj1X1jjTA8ePHJSEhQVeRRo4c6bj9t7/9raxZs+aCVaT/+7//k1//+te65WVDQ4M89thjes3Thfzxj3+UZ599ttlpfyEhIW30vwEAAGg/6uyttE4kv8pHjlWJ5Ff76ONTtc1PnQvxtUpCqFUSQ6XxvVVigkV8mWkHL1ddXa1ns5WVlUlERIRrN4dojdWrV8uf/vQneemll/SaqIyMDHn88cflueeekz/84Q/Nfo2qaKl1VE0rTklJSTJ58uQWB6e9styvv/5arrvuOvH3p+NMW2N8nYvxdS7G17kYX+difJ2nqrZB7ly4RY4UVTX7+YQOQbp6ZKsihUvv+HBaf7cSz1/vGd/yxtlol8KwxEl1xPP19ZXCwsKzblcfx8XFNfs1KjlS0/IeeeQR/XG/fv2kqqpKZsyYIf/+7/+up/qdKzAwUL+dS/2QjP5BuXI8nobxdS7G17kYX+difJ2L8W17H245ppMmk1ile2y4XJVg20DWvpFsZAjj3VZ4/nr++Pq34vsbljgFBATIkCFD5Ntvv5XbbrtN32axWPTHc+bMafZrVCnt3ORIJV+KQTMOAQAA2nXj2cXrs/TxHWkW+a+poww/8QS8haFT9dQUugcffFCGDh2qmz2oduSqgmTvsvfAAw/odVBqnybllltu0Z34Bg0a5Jiqp6pQ6nZ7AgUAAOCpPt97QvJLz0hUqL9c3fn8rngAPDRxuuuuu6S4uFiefvppvQHuwIEDZcWKFRIbG6s/rza5bVph+v3vf6/n56r3+fn5Eh0drZOm//qv/zLwfwEAAOB8anbNonVH9fGU4V0l4Mwho0MCvIrhzSHUtLwLTc1TzSCa8vPzk2eeeUa/AQAAeJNNmadkX365BPmb5N5hSbJ5DYkT0J7O76YAAAAAl7Owsdr0s6FJEhUaYHQ4gNchcQIAAHBxhwoqZPWhYjH5iEwbk2p0OIBXInECAABwcQvX2qpNN/SNk+ROoUaHA3glEicAAAAXVlBWI5/uydfH08emGR0O4LVInAAAAFzY0o1ZUm+2yrCUKBnUtaPR4QBei8QJAADARVXU1Mvbm3P18YxxVJsAI5E4AQAAuKh3t+VJRW2DpEeHyqReMUaHA3g1EicAAAAXVG+2yJL1WY61TSbVUg+AYUicAAAAXNDn352Q42U10jksUG4blGB0OIDXI3ECAABwMVarVV5ubEH+0KhkCfL3NTokwOuROAEAALiY9Rkn5cCJcgkJ8JX7RyQbHQ4AEicAAADX3fD2Z0OTpENIgNHhACBxAgAAcC3fHy+XdUdOiuoFMW1MqtHhAGhE4gQAAOBCXllnqzbd1C9ekqJCjA4HQCMSJwAAABdx
vPSMfLrnuD5mw1vAtZA4Abgs27JLpP9/fCNfHWNfEQBoK0s3ZEmDxSoj0qKkf2IHo8MB0ASJE4DLapP75y8Oypl6i3yVb5JTlbVGhwQAbq+8pl7e2Zqnjx8dl250OADOQeIEoNU2Hy2RHTmn9XG9xUeWbsw1OiQAcHvvbMmVytoG6R4TJuN7RBsdDoBzkDgBaLV5qzL0+16xYfr9m1tzpay63uCoAMB91TVYZOmGbH08fVyamFRLPQAuhcQJQKvszivVGzP6mXxk/n2DJD7EKlW1Znltk+0PPgCg9f6157gUlNdITHig/HhgF6PDAdAMEicArTJ3pa3adNugBEnsGCyTEyz64yUbsqSqtsHg6ADAPdeNLmpsQf7Q6BQJ9PM1OiQAzSBxAnDJDpwol28OFIqPj8jMCbaFywM7WSWlU4iUVtfLW1tyjA4RANzO2iMn5WBBhYQG+Mp9w5ONDgfABZA4AWj12qab+8VLerRtfZOahv/oONvO9ovWZUlNvdnQGAHA3Sxcm6nf33V1V4kM9jc6HAAXQOIE4JJkFlfK53tP6OPZE7ud9bkfD4iXhA7BUlxRK+9tt7XSBQC0bF9+mWzIOCW+Jh95eEyK0eEAuAgSJwCXZP7qTLFaRa7tHSO94yPO+py/r0keHW/b4X7BmqNSb7atewIAXJx9bdOP+sdLYscQo8MBcBEkTgBadOx0tXy8K7/ZapPdz4YmSXR4oOSXnpGPGu8LALj4a+tn39kq+dPH2i4+AXBdJE4AWvTymqPSYLHKmG6dZVDXjs3eJ8jfV6aPTXVUp8wWaztHCQDuZcn6bP1aObpbJ+mbEGl0OABaQOIE4KKKymvk3cZ1SxeqNtmpblAdQvwl62SVLG9cDwUAOJ/aNHzZtlx9TLUJcA8kTgBanH+vdrQfktxRRqRFXfS+oYF+MnVUqqMDn4WqEwA0662tOVJdZ5ZeceEyvke00eEAuAQkTgAu6HRVnby1xXZFdM6kbuKjNnBqwUOjUiQs0E/vSfLtwaJ2iBIA3Ettg1le3ZDtqDZdymsrAOOROAG4oKUbsvQV0b4JETLhEq+IRob4y5SRtg0c567KEKtqxQcAcPhk93EpqqiVuIgguWVAF6PDAXCJSJwANKu8pl6WbrRdEZ094dKqTXbTxqRKkL9J9uSVyvqMk06MEgDci5rCvGitrQX51NEpEuDHqRjgLvhtBdCsNzblSEVNg3SLCZPrr4pr1dd2DguUe4Z11cdzV2Y4KUIAcD9rDhfLkaJKPaX5nuG210kA7oHECcB5ztSZZcn6LH08e2K6mEytn38/Y1ya+Pv6yJasEtmeXeKEKAHA/by8NlO/v2dYkkQE+RsdDoBWIHECcJ53tubKqao66RoVIrf0v7z59/GRwXLHkETHWicA8HbfHSuVzUdLxM/kI1NH2zqQAnAfJE4Azuv2ZL8i+tj4dPHzvfyXCfX1qli1+lCx7D1W1oZRAoD7Wdi4tkk1hOjSIdjocAC0EokTgLN8sCNfCstt3Z5uH5JwRY+V3ClUfjwwwbGvEwB4q7ySasfG4Gx4C7gnEicADg1mi8xfk+FYoxTo53vFjzlrQrp+v2J/gRwurLjixwMAd7R4fZaoPcHHdu8sfbpEGB0OgMtA4gTA4dM9xyWv5Ix0Cg1wdMW7Ut1jw+WGxq58L1F1AuCFSqvr5N1teY6LUgDcE4kTAMfeIi+ttq1tmjY2VYIDrrzaZDdnUjdHYpZzqqrNHhcA3MGbm3PkTL1ZesdHyJhunY0OB8BlInECoH25v0AyiiolIshPpoxIbtPH7psQKRN6RutpKgvW2JIzAPAGNfVmeXVjjj6eMS61VZuJA3AtJE4AxGq1OlqGPzQqRcKdsLfInIm2qtP7O47JibIzbf74AOCKPt6VLycra6VLZJD86DK3dwDgGkicAOh24fuPl0tIgK/T9hYZmhIlw1OjpN5slZfX2FryAoCnT4FetM72evfwmFTxv4LtHQAYj99gwMs1rTbd
PyJZOoYGOO17/XxSd/1+2bZcfQUWADzZyoNFkllcJeGBfnLX1UlGhwPgCpE4AV5O7WK/I+e0BPiZ5JExzt3JfnS3TjIgqYPU1Ft0a14A8IYNb+8d0dUpU6ABtC8SJ8DLzV11RL+/a2iSxEQEOfV7qUXR9rVOb2zKkbLqeqd+PwAwyq7c07I1u0T8fX1k6ijnXpQC0D5InAAv/8O+IeOU+Jl85NHx7bO3yDW9YqRXXLhU1jbIqxuz2+V7AkB7s69tunVAgsRFOveiFID2QeIEeLF5jWubfjIoQRI7hrTL9zSZfGR2Y9Vp6cYsqaptaJfvCwDtRe1Xt2JfgT5mw1vAc5A4AV7q++Pl8s2BIjH5iMyckN6u3/umfvGS1jlUSqvr5a0ttv1NAMBTqDWcat+68T2ipWdcuNHhAGgjJE6Al5q3OuOHJCY6rF2/t6/JRx5rTNYWrs3SG0QCgCcoqaqTf27P08ePUm0CPAqJE+CFMosrZfneE/rYPm2uvanpgQkdgnVbcvtJBgC4uzc35+jOoX0TImRkeiejwwHQhkicAC80f3WmWK0i1/aOld7xEYbEoDaCfKyxIYXaELeuwWJIHADQVlT1/LXGpjfTx6bpTqIAPAeJE+Bl8kqq5aNd+fp4ziRjqk12dw5NkujwQMkvPSMfN8YEAO7qg53H5FRVna6m39wv3uhwALQxEifAy7y8NlPMFquM7d5ZBiZ1MDSWIH9fmTHWVnWav8YWFwC4I4vFKq+ss23s/fCYVPHz5RQL8DT8VgNepLC8Rv65/Ziha5vOde/wrtIhxF+yTlbJ543rrgDA3Xx9oFC/jkUE+cndVycZHQ4AJyBxArzIorW2tURDkzvK8NQocQWhgX7y8OhUfTxvZYa+agsA7vj6qtw/Ilm/rgHwPCROgBe1yH1rS64+nj2pm0stWn5wVIqEB/rJocIK+eZAodHhAECr7Mg5LdtzTkuAr0keGpVidDgAnITECfASSzdkyZl6s26RO6FHtLiSyGB/mTIyWR/PW5UhVtXyDwDcxMK1mfr9bYO6SExEkNHhAHASEifAC5TX1MurjS1y50x0rWqT3bQxqRLkb5I9x8pkfcZJo8MBgEui1jV99X2howU5AM9F4gR4gTc25UhFTYN0jwmTyX3ixBV1CguUe4Z11ccvrswwOhwAuCSvrDuq98Wb1CtGuseGGx0OACcicQI8XHVdgyxen+XopGcyuV61yW7GuDS9RmBrVolsyy4xOhwAuKhTlbXy/o5jjtcvAJ6NxAnwcO9szdONIbpGhciP+rv2hozxkcFy+5BEfTyXqhMAF/f6phypbbBI/8RIl+lUCsB5SJwAD1bbYHYsWp45Id0tNmScOT5dfE0+suZwsXx3rNTocACgWWfqzPL6pmxHtckV144CaFuufxYF4LKpKSSF5bUSFxEkPx2cIO6ga6cQ+fGALo4OewDgit7feUxOV9dLUlSw3HCVa64dBdC2SJwAD9VgtsiCNbZq06Pj0yTQz1fcxayJ6aIu3n65v1AOF1YYHQ4AnMVsseqmEMq00aluUc0HcOX4TQc81Kd7jkteyRnpFBogd19t61bnLrrFhDuu4L5E1QmAi/lqf4HknKrWe9D97Ooko8MB0E5InAAPZLFYHdPcpo1NleAA96k22akOgPYEMPtkldHhAICmNuh+ea2t2jRlRLKEBPgZHRKAdkLiBHigFfsLJLO4SiKC/PQfdnfUNyFSJvaMFotVHFMOAcBo23NOy+68UgnwM8mDo1KMDgdAOyJxAjzwaqi9lfdDo1MlPMhf3NWcSbaq0wc7j8nx0jNGhwMAsrCx2nT74ASJDg80OhwA7YjECfAwqw4VyfcnyiUkwFemuvnV0CHJUTIiLUrqzVbHyQoAGCWzuFK+OVCoj6eNYcNbwNuQOAEeWm26f0SydAwNEHf380nd9ft3tuZKcUWt0eEA8GKqk57VKnJt71jpFhNmdDgA2hmJE+BBNh09JTtzbXPvHxmbKp5g
VHonGZjUQWobLLJ4fZbR4QDwUurCzQc78x0b3gLwPiROgAexd9K7++okiQkPEk/g4+Mjcxo77L2xKVtKq+uMDgmAF3p9U7bUNVj0hZyrUzoaHQ4AA5A4AR5iZ+5p2ZBxSvxMPvLo+HTxJNf0jpFeceFSVWeWVzdmGx0OAC9TXdcgb2zO0cePjkvTF3QAeB8SJ8BDzGtc2/TTwQmS0CFYPImuOjV22Fu6IVsqaxuMDgmAF3lv+zEpra6X5E4hMrlxc24A3ofECfAA+4+XybcHi8TkIzJzgi3B8DQ39o2XtOhQKTtTL281XvkFAGdrMFvklfW2rp6PjEkVX/VCC8ArkTgBHuClVbYNYm/u30VSO4eKJ1InKzMbpyAuWpclNfVmo0MC4AW+3F8oeSVnpGOIv9wxJMnocAAYiMQJcHMZRZWyfN8JfTx7ometbTrXbYNs0xBPVtbKu9vyjA4HgBds8bBwre3C1JSRKRIc4Gt0SAAMROIEuLn5qzP1viLX9YmVXnER4sn8fU3y2ARbcvjymkzd4QoAnGVLVonsOVYmgX4meWBkstHhADAYiRPgxvJKquXj3bZ9Rewtuz3dnUMSJSY8UI6X1cjHu2z/dwBwhkVrbWubbh+SKJ3DAo0OB4DBSJwAN7ZgTaaYLVYZ272zDEjqIN4gyN9Xpo+1bT750uoMvXAbANrakcIK3XRHdR63v+YA8G4kToCbKiyv0S1yvanaZHfv8K56oXb2qWr5fK9tfRcAtKVX1mXp95P7xHps0x0ArUPiBLjxFJI6s0XvYD88rZN4k9BAP3l4dKqjo6DFYjU6JAAepKi8Rj5qnAo8YxzVJgA2JE6AGyqpqpO3tuTq49leVm2ye2BUioQH+smhwgr5+kCh0eEA8CCvbszWF6aGJHeUIclRRocDwEWQOAFuaMn6LDlTb5Z+CZEyvke0eKPIYH95YJSty9W8VRm6bTAAXKmq2gZ5s3GTbdY2AWiKxAlwM2Vn6uW1jdmOapOPWrnspdR0vWB/X/nuWJmsO3LS6HAAeAC1R1x5TYNe16S2eQAAOxInwM28sSlbKmobpEdsmF607M06hQXKPcO66uO5qzKMDgeAm1NdOhevtzWFeGRsqviavPfCFAAXTJzmzZsnKSkpEhQUJMOHD5etW7de9P6lpaUye/ZsiY+Pl8DAQOnRo4csX7683eIFjFRd1+D4oz5rQjcx8UddL9wO8DXJ1qwS/QYAl2v5vgLJLz0jnUID5PbBiUaHA8DFGJo4vfvuu/LEE0/IM888Izt37pQBAwbI9ddfL0VFRc3ev66uTq677jrJzs6W999/Xw4dOiSLFi2ShISEdo8dMMLbW3LldHW9JHcKkR/1jzc6HJcQFxkkdwy1neBQdQJwudQ6yYVrM/XxAyNT9J5xANCUnxjohRdekOnTp8vUqVP1xwsWLJDPP/9clixZIk8++eR591e3l5SUyMaNG8Xf31/fpqpVF1NbW6vf7MrLy/X7+vp6/WY0ewyuEIsn8qTxra03O3axnzEmRawWs9RbzIbG5Crj+8jornpdwtrDxbIz+6RumuEJXGV8PRXj61zuNr6bj5bIvvxyCfI3yd1Du7h83O42vu6G8fWe8a1vRQw+VoNaUanqUUhIiK4c3XbbbY7bH3zwQT0d75NPPjnva2666SaJiorSX6c+Hx0dLffee6/827/9m/j6Nn9l6I9//KM8++yz593+9ttv68cB3MX6Ah95L8tXOgRY5Q+DzOJn+ERb1/LmEZNsO2mSfh0t8kgvi9HhAHAzCw6Y5ECpScbEWuTONF5DAG9RXV2t84mysjKJiIhwzYrTyZMnxWw2S2zs2Yvb1ccHDx5s9muOHj0qK1eulPvuu0+va8rIyJBZs2bpTFFN92vOU089pacDNq04JSUlyeTJk1scnPagYv/666/1FER7FQ1tx1PGt95skb/8fb2I1Mica3vJrSNtbbiN5krj26OoUm6au1H2njZJtyGjpUdsuLg7Vxpf
T8T4Opc7je/hwgo5sGmTqCalf7x3nCRHuf6FVXcaX3fE+HrP+JY3zkZz+al6rWWxWCQmJkYWLlyoK0xDhgyR/Px8+ctf/nLBxEk1kFBv51I/JKN/UK4cj6dx9/H99Ltjcqy0RjqHBch9I1LF38Xm3rvC+PZO6Cg39o2T5XsLZOH6HPnH3YPEU7jC+Hoyxte53GF8l27K0+9vuCpOusW611Rfdxhfd8b4ev74+rfi+xs22adz5846+SksLDzrdvVxXFxcs1+jOumpLnpNp+X17t1bCgoK9NQ/wBOZLVaZt9rW9GDamDQJDnCtpMmVqE6Dyr/2HJfsk1VGhwPADRSU1cgnu/MdXToBwOUSp4CAAF0x+vbbb8+qKKmPR44c2ezXjB49Wk/PU/ezO3z4sE6o1OMBnmjFvgI5WlwlEUF+cv8I255FaF7fhEiZ2DNaLFaR+att3bEA4GJe3Zgt9WarDEuJkkFdOxodDgAXZujycrX2SLUTf+211+TAgQMyc+ZMqaqqcnTZe+CBB/QaJTv1edVV7/HHH9cJk+rA96c//Unv6wR4ItW7xd5ie+roVAkPYrpAS+ZM6q7ff7jrmBwvPWN0OABcWGVtg7y1JUcfT6faBMCV1zjdddddUlxcLE8//bSebjdw4EBZsWKFo2FEbm6umEw/5HaqqcOXX34pv/zlL6V///56/yaVRKmueoAnWnWoSA6cKJfQAF+ZOvrirfdhMyS5o4xM6ySbjp6ShWuPyh9vvcrokAC4qGVbc6WipkHSokPlml4xRocDwMUZ3hxizpw5+q05q1evPu82NY1v8+bN7RAZYHy16cWVtmrT/SOSpUMI01Ev1ZxJ3XTi9M7WXJk9sZtEh5/fIAaAd1PdSpesz9LH08emicnkY3RIAFwcO8EALmpT5inZlVsqAX4mmTY21ehw3Mqo9E4yqGsHqW2wyCvrbZsGA0BTn393Qo6XqW6lgfKTQQlGhwPADZA4AS7KvrbpnquTJCY8yOhw3IqPj4/MmWjrsPfmphwprabrJoCzK/ovr7VdVHloVLIEudgWDwBcE4kT4IJ25JyWjZmnxM/kIzPGpxsdjlua1CtGesdHSFWdWXfNAgC7DRmn9PrRYH9fuW+4a2woDsD1kTgBLmheY7Xpp4MTJKFDsNHhuG3VafZEW9K5dEO27p4FAMrLa23bFdx1dZJ0DGX9KIBLQ+IEuJj9x8tk5cEiUeuUZzZu6IrLc2PfeN0tq+xMvby52dZyGIB3U5WmdUdO6tfYaWNYPwrg0pE4AS7mpVW2K6E/6t9FUjuHGh2OW/M1+cisxuTzlXVHpabebHRIAAy2qHFt04394iUpKsTocAC4ERInwIVkFFXI8n0n9LFqo40r9+OBXSSxY7CcrKzTe7YA8F5qU+xP9xzXx4+y4S2AViJxAlzIS6szxWoVua5PrPSMCzc6HI/g72uSxxobbKguWnUNFqNDAmAQ1SimwWKV4alR0j+xg9HhAHAzJE6Ai8grqZZPdtuuhNpbaaNt3DEkUWLCA+VEWY18tOuY0eEAMEB5Tb28vcVWdX50PNUmAK1H4gS4iPlrMsVsscrY7p1lQBJXQtuS2qNlRuO0nPmrM6XBTNUJ8DZqqq7qrtk9Jkwm9IgxOhwAbojECXABBWU18v52WyWEapNz3Du8q3QM8ZfsU9Xy+V7bOjIA3kFN0V2y3raf2/SxaWJSLfUAoJVInAAXsGjdUakzW2RYSpQMT+tkdDgeKSTAz9F6WO2TZbFYjQ4JQDv5157jUlBeI9HhgfLjQV2MDgeAmyJxAgx2qrLWMe9+9iSqTc40ZWSKhAf6yeHCSvn6QKHR4QBoB1arVV+cUh4alSKBfr5GhwTATZE4AQZbsiFLztSbpV9CpIzr3tnocDxaZLC/PDAqWR/PXZmhT6gAeLa1R07KwYIKCQnwlfuH237/AeBykDgBBio7Uy+vb8xx7Nvk48O8e2d7eHSqBPv7yt78Mn1CBcCzLVxr21T87qu7
SmSIv9HhAHBjJE6Agd7YlC0VtQ3SIzZMJveJNTocr9ApLFA3ilDmrcwwOhwATrQvv0w2ZJwSX5OPPDwmxehwALg5EifAIFW1DbJ4fZaj2kSXp/ajWpMH+Jpka3aJbDl6yuhwADiJfW3Tzf3iJbFjiNHhAHBzJE6AQd7Zmiunq+sluVOI/qOO9hMbESR3DE3Ux3NXUXUCPFF+6Rn57Dvb1gP2fdwA4EqQOAEGqKk3y8K1tiuhsyaki58vv4rtbeb4dD19Z92Rk7Inr9TocAC0sSXrs/Sm4qPSO0nfhEijwwHgAThbAwzw/o5jUlRRK10ig+Qng2yVD7SvpKgQ+fHALo59nQB4VuOdZVtt2zxQbQLQVkicgHZWb7bI/NWZP6y18ePX0CizJqhOhiJffV8oBwvKjQ4HQBtRe+NV1ZmlZ2y4jO8RbXQ4ADwEZ2xAO/tk93E9975zWIDcPczW3Q3G6BYTJjf1ta0ve2mVLZkF4N5qG8yydIOt8c70cWls8wCgzZA4Ae1Izbd/abVtWtgjY9MkyJ8d7I02a2K6fv/Zd8cl62SV0eEAuEKf7j6up0LHRgTKrQNs03EBoC2QOAHt6It9J+RocZVEBvvL/SPYwd4VXNUlUib1ihGLVWR+Y1ILwD1ZrVZHC/Kpo1OZCg2gTfGKArTjH/R5jdPBHhqVImGBfkaHhEZqHy3lw535eholAPe0+lCxHC6s1K+v9o2uAaCtkDgB7WTlwSI5cKJcQgN8ZepodrB3JUOSO+qWxQ0Wqyxcw1onwF3Zt3m4++okiQjyNzocAB6GxAlop2rTiytt08DuH5ksHUICjA4J55jTWHVati1PiipqjA4HQCvtPVYmm46eEj+Tjzw8JtXocAB4IBInoB1szDwlu/NKJdDPJI+MYU8RVzQyvZMM6tpBahsssnidrSMXAPfx8lpbtfiWAV2kS4dgo8MB4IFInIB2MLex2qSmj0SHBxodDpqhWhb/fJKt6vTm5hwpra4zOiQAlyivpFqW7z2hj6eP5eIUAOcgcQKcbEdOiZ4+4u/rIzPG21pfwzVN7BkjfeIj9MaZSzdkGx0OgEu0eH2W7ow5tntn6dMlwuhwAHgoEiegnapNPx2UKAlMH3H5qpO9w57aQLOipt7okAC0QFWH/7k9Tx/PGEe1CYDzkDgBTrQvv0xWHSoWk4/IzAlUm9zBDX3jJC06VMprGuTNzblGhwOgBW9tyZXqOrP0jo+QMd06Gx0OAA9G4gQ40UuNG6qqxcopnUONDgeXwNfkI7Mn2KpOi9cflZp6s9EhAbgA9ftpn1Y7Y1yqrhoDgLOQOAFOklFUIV/sK9DHsxpPxOEebh3YRRI7BsvJyjpZtpWqE+CqPtmdLycrayU+Mkh+1L+L0eEA8HAkToCTvLQqU6xWkcl9YqVnXLjR4aAV/H1N8lhjI4+X1x6VugaL0SEBOIdFbVjduOHtw6NT9e8tADgTrzKAE+SeqpZP9hzXx3MaW1zDvdwxJFFiIwLlRFmNfLjzmNHhADjHqkNFkllcJeGBfnL3sCSjwwHgBUicACdYsDZTzBarjOsRLf0TOxgdDi5DkL+vYz+Y+WsypcFM1QlwJaoarNw7vKuEB/kbHQ4AL0DiBLSxgrIaeX+7rUIxp7G1NdyTOiGLCg2QnFPV8tl3ts01ARhvd16pbM0qET+Tj0wdnWp0OAC8BIkT0MbUnPs6s0WGpUTJsNQoo8PBFQgJ8JOHR6fo43mrMvSaCgDGW9RYbVKNXOIig4wOB4CXIHEC2tCpylp5e2uOPmZtk2d4YFSKhAf5yZGiSvnq+0KjwwG8nlpD+sU+WwWYDW8BtCcSJ6ANLV6fJTX1FumfGClju7MRoyeICPKXB0f+UHWyqlaJAAzzyvqjooq/43tES6+4CKPDAeBFSJyANlJ2pl7e2GSrNs2e2I2NGD3Iw2NSJdjfV/bml8maw8VGhwN4rdNVdfLP7Xn6mGoTgPZG4gS0kdc3ZktFbYP0iA2T63rH
Gh0O2pBqEHHf8K6OqhMAY7yxOUdX9a/qEiGj0jsZHQ4AL0PiBLSBqtoGWbIhSx+rapPJRLXJ00wflyYBvibZln1athw9ZXQ4gNepqTfLaxuzHdUmqvoA2huJE9AG3t6SK6er6yWlU4j8qH8Xo8OBE8RGBMmdQxP18VyqTkC7+3BnvpyqqpOEDsFyU794o8MB4IVInIA2uAq6cJ2tNe7MCeniS7XJYz023vbzXXfkpOzJKzU6HMBrqK0AXml8nVVrDv19OX0B0P545QGu0Hs7jklxRa10iQySnwyyVSTgmZKiQuS2gQn6mKoT0H6+OVAoR09W6a0B7ro6yehwAHgpEifgCtSbLbJgdaY+fnR8ugT48Svl6WZNTBe1tOLr7wvlYEG50eEAXrOxuHL/iGQJC/QzOhwAXoqzPOAKfLwrX/JLz0jnsECugnqJ9OgwuamvbX3FvFW2pBmA8+zIOS3bc06Lv6+PTB1l21MNAIxA4gRcJrPFKvMbq02PjE2VIH9fo0NCO1GdE5XPvzsuWSerjA4H8GiLGqtNappsTESQ0eEA8GIkTsBl+mLfCT3nPjLYX08fgffo0yVCrukVIxaryPzVrHUCnEVdmPjy+wLHlgAAYCQSJ+AyWK1WmbvSdsI8dXQKc+690OxJ3Rwtko+drjY6HMAjLV5/VKxWkYk9o6VHbLjR4QDwcpd8tvfEE09c8oO+8MILlxsP4Ba+PVAkBwsqJDTAVx5izr1XGty1o4xK7yQbM0/phev/8eO+RocEeJRTlbXy3vZj+njGuHSjwwGAS0+cdu3addbHO3fulIaGBunZs6f++PDhw+Lr6ytDhgxp+ygBV6s2NbainjIyRTqEBBgdEgwyZ1I3nTgt25anj2PCWX8BtJXXN+VIbYNF+idGyoi0KKPDAYBLT5xWrVp1VkUpPDxcXnvtNenYsaO+7fTp0zJ16lQZO3ascyIFXMSGjFOyO69UAv1MMm1MqtHhwEAj0zrJ4K4dZGduqSxelyVP3dTb6JAAj3CmzixvbM7Rx9PHpomP2gMAANxxjdPf/vY3ef755x1Jk6KO//M//1N/DvBkc1cd0e/vGdZVosMDjQ4HBlInc6rSpKiTvNNVdUaHBHiE93cek5KqOknsGCw39o0zOhwAuPzEqby8XIqLi8+7Xd1WUVFxOQ8JuIUdOSWy+WiJ3k9kBh2eIGrReoz0iY+Q6jqzLN2YbXQ4gEds9bB4na0F+SNjUsXPlz5WAFzDZb0a/eQnP9HT8j788EM5duyYfvvggw9k2rRp8tOf/rTtowRchL2T3u2DE6VLh2Cjw4GLVZ1e3ZAlFTX1RocEuLWvvy+Q7FPVequHO4eysTgAN0+cFixYIDfeeKPce++9kpycrN/U8Q033CAvvfRS20cJuIB9+WWy6lCxmHxEHhtPhyf84Iar4iQ9OlTKaxoc6zIAXF7znZcbN7ydMiJZQtnqAYA7J05ms1m2b98u//Vf/yWnTp3S3fbUW0lJiU6aQkNDnRMpYLB5jZ30bhnQRVI68zzHD0wmH5k90VZ1Uk0i1MJ2AK23I+e07MotlQBfkzwwio3FAbh54qRajk+ePFlKS0t1ktS/f3/9RsIET3aksEJW7LftXm8/QQaaunVAF0mKCpZTVXWybFuu0eEAbslebfrp4ATa+wPwjKl6ffv2laNHbS9ugDd4aXWm3r3++qti2b0ezVIL2O1TONWGuLUNVJ2A1sgsrpRvDhTq40fG0nwHgIckTqrt+K9//Wv57LPP5MSJE7rLXtM3wJPknqqWT/cc18dzJnY3Ohy4sDuGJEpsRKCcKKuRD3fmGx0O4FZeWZelL1Bd2ztGusWEGR0OAJznslZd3nTTTfr9rbfeetamdGpRp/pYrYMCPMX8NZm6Pe64HtHSLzHS6HDgwgL9fGXGuHR57rPvZf7qTLlzSCKtlIFLUFxRKx/sPKaP1e8QAHhM4rRq1aq2jwRwQSfKzsgHO2x/zH/e2HIauJh7hiXpRiK5
JdXy2Xcn5LZBCUaHBLi8NzZlS12DRQYkdZCrUzoaHQ4AtF3iNH78+Mv5MsDtqLUqdWaLDEuNkqtToowOB24gJMBPpo1Jlb98eUgnUKpphOq6B6B51XUN8npjG/9Hx6WdNZMFAFzJFW2QUF1dLbm5uVJXV3fW7arLHuDuTlbWyjtbbd3R5tBJD60wZWSyLFiTKUeKKuWr7wvkhr7xRocEuKz3th+T0up66RoVItdfFWd0OADQtolTcXGxTJ06Vb744otmP88aJ3iCJeuzpKbeIgMSI2Vs985GhwM3EhHkLw+NSpEXV2bI3FUZ+mSQq+jA+dT60VfW27r0PjI2VXypzgJwYZe1avkXv/iF3sdpy5YtEhwcLCtWrJDXXntNunfvLp9++mnbRwm0s7Lqenl9U45j3yZOetFaU0enSrC/r+zLL5c1h4uNDgdwSSv2FUheyRnpGOIvdw5JMjocAGj7xGnlypXywgsvyNChQ8VkMklycrLcf//98j//8z/y/PPPX85DAi7ltU3ZUlnbID1jw+Xa3rFGhwM3FBUaIPcN76qP567M0F1HAfxA/U4sXJupj6eMSJbgAF+jQwKAtk+cqqqqJCYmRh937NhRT91T+vXrJzt37rychwRcRlVtgyzZkKWPZ01MZ2E/Ltv0cWkS4GeS7TmnZUtWidHhAC5la1aJ7DlWJoF+JnlgVIrR4QCAcxKnnj17yqFDh/TxgAED5OWXX5b8/HxZsGCBxMezCBru7e0tuXqhcmrnUPlR/y5GhwM3FhsRJD8bmqiPVYc9AGd3LVVuH5IoncMCjQ4HAJyTOD3++ONy4sQJffzMM8/oJhFdu3aV//u//5M//elPl/OQgEuoqTfLwnW2P+Yzx6ezUBlX7NFxtufRuiMnZXdeqdHhAC4ho6hCvj1YJGr56CNjUo0OBwCc11VPrWeyGzJkiOTk5MjBgwd18tS5M93H4L7e256nd7DvEhnExqVoE0lRIfKTQQny/o5jeq3TKw8ONTokwHCL1tqmQ1/XO1bSosOMDgcAnFdxOnrUdkXeLiQkRAYPHkzSBLdWb7bIgjW25/ZjE9L12hSgLcyckK6vrH9zoFAOFpQbHQ5gqKLyGvloV74+fnR8mtHhAMAlu6wzw27duunq0pQpU2Tx4sWSkcHcfbg/9Yc8v/SMnmv/s6G0xUXbSY8Ok5v62dZ/zltl6yIGeHPX0jqzRQZ37SBDkqOMDgcAnJs45eXl6bbjag8n1YK8R48ekpiYKPfdd5+88sorl/OQgOGbMM5fbTuhnT42VYL8aYuLtjV7Qjf9/rPvjsvR4kqjwwEM61r65uZcfTxjXLrR4QCA8xOnhIQEnSQtXLhQd9dTb9dee63885//lEcfffRyHhIw1PK9JyTrZJVEBvvLfSOSjQ4HHqhPlwi5tneMqO2c7Ek64G3+uT1Pys7US0qnELmuD3vkAfCCxKm6ulq++uor+d3vfiejRo2S/v37y549e2TOnDny4Ycftn2UgBNZLFZHq+iHR6dKWOBl9UwBWjR7YjfHtNBjp6uNDgdoVw1miyxeb2sK8cjYNLqWAnA7l3WG2KFDB73xrao6PfnkkzJ27Fj9MeCOVEvcgwUVOmF6iE0Y4USDunaU0d06yYaMU/LymqPy3G19jQ4JaDfL9xXIsdNnJCo0QO4YYtvfDAA8vuJ00003idlslmXLlum39957Tw4fPtz20QFOZrVaZW5jten+EckSGeJvdEjwcHMmdtfv392ep7uLAd7yWrtwrW2K6gMjk1lHCsB7EqePP/5YTp48KStWrJCRI0fqaXuq6mRf+wS4C3Xlf09eqQT5m+SRsWzCCOcbkRYlQ5I7Sl2DRV5pnLYEeLpNR0/JvvxyCfQzyQMjqewDcE9XtFFNv379ZPTo0Tp5uvrqq6WoqEjefffdtosOcLIXVx7R7+++uqtuQw44m4+Pj8xpXOv05uYcOV1VZ3RIgNMtWmvbI+/OoYl6qh4AeE3i9MILL8itt94q
nTp1kuHDh8s777yjW5J/8MEHUlxc3PZRAk6wPbtEtmSViL+vD5swol1N6BktV3WJkOo6syzdQNUJnu1IYaWsOlSsN4F+ZAyvtQC8rDmESpTGjx8vM2bM0FP0IiMj2z4ywMnsa5tuH5wo8ZHBRocDL6w6zXxrp7y6MVumj0uT8CDW18EzvbIhW7+/4ao4SekcanQ4ANC+idO2bdsu/zsCLmDvsTJZfahYVDfcmRPYhBHt7/qr4qRbTJhkFFXKG5tzZFbjBrmAJymrE/nXdyf0sbpAAABeucZp3bp1cv/99+v1Tfn5+fq2N954Q9avX9/qx5o3b56kpKRIUFCQnvq3devWS/o61dFPXbm97bbbWv094d3s+zbdOqCLJHfiCijan8nkI7Mak/bF67LkTJ3Z6JCANrfmhEnqzVa5OqWjDO7KtiUAvDBxUmuZrr/+egkODpZdu3ZJbW2tvr2srEz+9Kc/teqxVDOJJ554Qp555hnZuXOnDBgwQD+2ajRxMdnZ2fLrX/9aTxUEWuNIYYWs2F+gj2c1LtIHjKAS96SoYDlVVSfvbM01OhygTVXWNsjGQtsmtzPGUdkH4KWJ03/+53/KggULZNGiReLv/8O8fNVhTyU/rW00MX36dJk6dar06dNHP25ISIgsWbLkgl+j9pBSbc+fffZZSUuj9I/WeWl1pmO+fY/YcKPDgRfz8zXJzPG25H3h2qNS20DVCZ7jvR35csbsI2mdQ+SaXjFGhwMAxqxxOnTokIwbN+6821WTiNLS0kt+nLq6OtmxY4c89dRTjttMJpNce+21smnTpgt+3X/8x39ITEyMTJs2TU8ZvBhVDbNXxJTy8nL9vr6+Xr8ZzR6DK8Tiic4d35ySavlkt21q6aNjUxj3K8Tz98rd2j9W/vHtYSkor5H3tuXKXUMTHZ9jfJ2L8XWeerNFljQ2hXhwRJKYzQ1i5rpAm+L561yMr/eMb30rYrisxCkuLk4yMjL0uqSm1Pqm1lSA1Ca6qnoUGxt71u3q44MHDzb7Nep7LF68WHbv3n1J3+P555/XlalzqU17VWXLVXz99ddGh+DR7OO7LNMkFqtJenewSO6e9ZK7x+jIPAPP3yszKspHPir3lf9dsV9CCr8TX9vsJgfG17kY37a3vdhHCsp9JczfKqHF+2X58v1Gh+SxeP46F+Pr+eNbXV3t3MRJTa17/PHH9XQ61Zzh+PHjukL0q1/9Sp5++mlxloqKCpkyZYqeIti5c+dL+hpVzVJrqJpWnJKSkmTy5MkSEREhrpDlqifNddddd9a0R7T9+J6sNsuvt6oKpVWevmO4DE1mofKV4vnbNibUNciaF9bJqap6sSQMlFsGdtG3M77Oxfg6h9VqlQUvbVZ/tWVcnEVuup7xdQaev87F+HrP+JY3zkZzWuL05JNPisVikWuuuUZnaWraXmBgoPzmN7+RRx555JIfRyU/vr6+UlhYeNbt6mNV1TpXZmambgpxyy23OG5Tcej/iJ+fnkKYnn72AlQVl3o7l/ohGf2DcuV4PI0a2yUbs3R3p+GpUTKyG/Pt2xLP3ysT6e8v08akyV++PCQL1mXLT4d01V337Bhf52J829b6IyflQEGFBPubZExsA+PrZIyvczG+nj++/q34/pfVHEJVmf793/9dSkpKZN++fbJ582YpLi7Wa5xSU1Mv+XECAgJkyJAh8u23356VCKmPVZvzc/Xq1Uv27t2rp+nZ32699VaZOHGiPlaVJKA5pyprZdk2W9eyOZPopAfXM2VksoQH+el9nb5s7PoIuKOF647q93cMTpBQzjcBeJBWJU6qyYKa+jZ06FDdQW/58uW6E97+/fulZ8+e8o9//EN++ctftioANY1OTb177bXX5MCBAzJz5kypqqrSXfaUBx54wNE8Qu3z1Ldv37PeOnToIOHh4fpYJWJAc5ZuzJWaeosMSIyUMd0ubZon0J4igvxl6ijbutG5qzL0dCfA3Rw4US5rD9s2F39oVLLR4QBA
m2rVVD21funll1/WXe82btwod955p05wVMXpb3/7m/5YTb1rjbvuuktXq9RjFxQUyMCBA2XFihWOhhG5ubm60x5wuaobRN7caa82ddcVU8AVTR2dKq+sz5L9x8tl9eFiGZPGOjy4l0WN1aYb+8ZL16gQ2Wd0QABgVOL03nvvyeuvv66nx6kpev3795eGhgbZs2fPFZ2MzpkzR781Z/Xq1Rf92ldfffWyvy+8w9oTPlJVa5ZeceHsJQKX1jE0QO4b3lUWrcuSuSszZHTqUKNDAi7ZibIz8unu4/p4xjj2WATgeVpVyjl27Jhek6SoqXGq6YKamscVfLiqqtoGWVNge5rPmtjtrAX3gCuaPjZNAvxMsiPntGzNPm10OMAlW7ohWxostgY8A5I6GB0OABibOKk9l5quI1Kd7MLCwto+KqCNvLPtmFQ3+EhKpxC5uV+80eEALYqJCJK7htoa3by0xjbtCXB15TX18vYW25Roqk0APFWrpuqpxcoPPfSQo713TU2NPPbYYxIaGnrW/T788MO2jRK4DDX1ZlncuHP9o+NSxZdqE9zEo+PT5J2tubIxs0SGBxsdDdCyZVtzpbK2QbrFhMnEnkyJBuCZWpU4Pfjgg2d9fP/997d1PECb+ef2PDlZWScdA6zy4wFUm+A+EjuGyG2DEuT9Hcfk63yTzDI6IOAi6hossmS97SLVjLFpTIkG4LFalTgtXbrUeZEAbajebJGXG6c5XZNgEX9fOjPCvcyakC4f7Dwm+06b5GBBhfRLijI6JKBZn313XArKayQ6PFB+PKiL0eEAgNNwNgmP9NGufMkvPSPRYQEyPJr9cOB+0qLD5Kar4vTxgjVZRocDXHAK/8K1totUD41KkUC/1m1JAgDuhMQJHsdsscr81Zn6+OHRKRLA33G4qcfGp+r3y/cXSGZxpdHhAOdZd+SkroiGBPjK/cPZ8BaAZyNxgsf5fO8JyTpZJR1C/OWeqxONDge4bGrvsb4dLWK1iuNiAOBK7NWmu65OksgQf6PDAQCnInGCR7FYrDJvZYY+njoqVUIDW7WMD3A51yVY9PuPd+XLsdPVRocDOOzLL5P1GSd1x9KHR9uqowDgyUic4FG+OVAohworJCzQT8+3B9xdSrjIqPQovbGoveEJ4ApeWWd7Pt7UL16SokKMDgcAnI7ECR61SHneKlu1acrIZKaNwGPMGm/bUPTd7XlSVF5jdDiAbr7zr+9OOFqQA4A3IHGCx1BTRvYcK5Mgf5NMG8O0EXiOYSkdZWhyR71fzqLGq/yAkZauz9KNeEamdZJ+iZFGhwMA7YLECR5jbuPapnuGdZXOYYFGhwO0GR8fH5k9qZs+fmtLrpRU1RkdErxY2Zl6eWdrrj6e0VgNBQBvQOIEj7Atu0S2ZJWIv6+PzBjHH3J4ngk9oqVvQoRU15ll6Qb2dYJx3t6SK1V1ZukZG66flwDgLUic4FHVpjuGJEp8ZLDR4QDOqTpNsFWdXt2YLeU19UaHBC+kpovaE/dHxqbq5yUAeAsSJ7i9vcfKZM3hYjH5qA1D040OB3Ca66+Kk24xYVJR0yBvbMoxOhx4oU9250tRRa3ERgTKjwcmGB0OALQrEie4vbmrjuj36o94cqdQo8MBnMZk8pHZE20XBxavz5LqugajQ4KXdS61NyeZOjpVAvw4hQDgXXjVg1s7XFghX+4v1MezJlBtgue7pX8X6RoVohtEvLM1z+hw4EVWHy6Ww4WVEhrgq5vwAIC3IXGCW3upcd+mG66Kk+6x4UaHAzidn69JZjZeJFi4NlNqG8xGhwQvsbBxA2aVNEUGs08eAO9D4gS3lXOqSj7dc1wfz2ls1Qx4g58OTpC4iCApLK+VD3bkGx0OvGQt6aajp8TX5CNT2ScPgJcicYLbmr86UyxWkQk9VZtmNmCE9wj083W03Z+/JkMazBajQ4KHW9i4tumW/vGS0IHOpQC8E4kT3NLx0jPywc5j+njORKpN8D5qulSn0ADJKznj
qLwCzpBXUi3L957Qx9PZJw+AFyNxgltauPao1JutMjw1SoamRBkdDtDuggN8ZdpY25Spl1T1VZVfASdYsiFLzBarjOnWWa7qQnUfgPcicYLbKa6olXe25urjn0/qbnQ4gGGmjEiWiCA/ySiqlC/3FxgdDjxQWXW9vLvN1r3RPj0UALwViRPcjtq/prbBIgOSOsjobp2MDgcwTHiQvzw0KkUfz12VoffZAdrSm1typLrOLL3iwmVs985GhwMAhiJxgttd/Xxzc45jbZOPj4/RIQGGUhuRhgT4yv7j5bL6ULHR4cCDqFb3r27MdlSbeL0F4O1InOBW1B/xytoGffXzml4xRocDGK5jaIDcPyJZH7+48ghVJ7SZj3fl66nRqvX9LQO6GB0OABiOxAluQyVMapGyMntiNzGZuPoJKI+MSZUAP5PszC3Ve+0AV0o1G1m0zvZ6+/CYFPH35XQBAHglhNt4a3OOlJ2pl7TOoXJTv3ijwwFcRkxEkNw1NEkfz1uVYXQ48ACrDhXppiPhgX669T0AgMQJbqKm3uy4+vnYhHS9ez2AHzw6Pk38TD6yIeOU7Mo9bXQ4cHMvr7VteHvv8K66CQkAgMQJbkK1wz1ZWat3rP/JoASjwwFcTmLHEMfvBlUnXIndeaWyNatEJ+IPjbZ1bQQAkDjBDdQ1WOTlNZn6+LHxacy1By5g5oR0UcXYbw4UyffHy40OB25qUWO16daBXSQ+MtjocADAZXAGCrfo7HS8rEaiwwPlzsZ1HADOlxYdJjf3t3U/m7eaqhNaL/dUtXyx74Q+nj6WDW8BoCkSJ7i0BrNFXmo8AZwxNk2C/H2NDglwabMnpuv3y/eekMziSqPDgZtZvP6oWKwi43pES+/4CKPDAQCXQuIEl/b53hOSfapaOoT460XKAC6uV1yEXNs7VtR2Ti+tsk1xBS7F6ao6+ef2Y/r40XFUmwDgXCROcOl9ROwnfg+PTpXQQD+jQwLcwpxJ3fT7j3fnS15JtdHhwE28uTlHztSbpU98hIxK72R0OADgckic4LK+OVAohworJCzQTx4cSWcn4FINTOogY7t3FrPFKi+vpeqES9vy4bVN2Y7W9j4+bPkAAOcicYJLslqtMrexpfIDI5MlMoR9RIDWmD3RVnVSU68Ky2uMDgcu7sOd+XKysk66RAaxwTgAXACJE1zSuiMn5btjZRLkb5JpY1KNDgdwO8NTo2Rockfdzt/eXhq40LToV9bZniMPj0llywcAuABeHeGS7NWme4Z1lU5hgUaHA7gdNdXKvtbprS25UlJVZ3RIcOFp0UdPVkl4kJ/cPYwmPABwISROcDlqx3r1FuBrkhl0dgIu2/ge0dIvIVIv+F+6IcvocOCiFjVWm+4bnqzXlAIAmkfiBJetNt0+JJFd64ErrDrZ93V6dWO2lNfUGx0SXMzO3NOyLfu0+Pv6yNTRNOEBgIshcYJL+e5Yqaw9XCy+Jh+ZOd52wgfg8k3uEyfdY8KkoqZB3tiUY3Q4cDEL19iqTbcNTJDYiCCjwwEAl0biBJcyr7HadOuALtK1U4jR4QBuz2RSVSfbWqfF67Okuq7B6JDgIrJPVsmX3xfo4+lMiwaAFpE4wWUcKqiQL/cXito+ZNYEqk1AW/lR/3jpGhWiG0S8szXP6HDgIl5Zf1SsVpGJPaOlR2y40eEAgMsjcYLLeGm1rdp0w1Vx0p0/4kCb8fM1yczGixEL12ZKbYPZ6JBgsFOVtfLe9mP6mGoTAFwaEie4zJSRf+05ro/t04oAtJ2fDk6Q+MggKSyvlfd32E6Y4b3e2JwjtQ0W3XVxZFono8MBALdA4gSXMH91plgap4z0TYg0OhzA4wT6+Tra+6vft3qzxeiQYJAzdWZ5vbFRiHpOqO6LAICWkTjBcMdLz8iHu2xXwO0bdgJoe3df3VU6hQbIsdNn5NPdtgovvM8HO4/p9W6JHYPlxr5xRocDAG6DxAmGW7j2qNSbrTIiLUqGJEcZHQ7gsYID
fGXa2FTHmkKLKvPCq5gtVnmlccPbaWNS9fo3AMCl4RUThiquqJV3tubq4zkTuxsdDuDxpoxIloggP8ksrpIV+22tqOE9vv6+QLJPVUtksL/8bGiS0eEAgFshcYLh7XDVAuWBSR1kdDcWKAPOFh7kLw+NtlWd5q7MEKvqRw2vqvAr94/oKqGBfkaHAwBuhcQJhimtrpM3Gxcoz5nYjQXKQDuZOipFQgJ85fsT5bLqUJHR4aCdbM8ukZ25pRLga5IHR6UYHQ4AuB0SJxjm1Y3ZUlVnll5x4XJN7xijwwG8RsfQAD1lT6Hq5H3Vpp8MSpCY8CCjwwEAt0PiBENU1jbI0g3Zjk56VJuA9qWaRAT4mXQFYtPRU0aHAyc7WlwpXx8o1MfTx9mmagIAWofECYZ4c3OOlJ2pl7ToULmxb7zR4QBeR1Uc7r46yVF1gmdbtC5LVGHx2t4x0i0m3OhwAMAtkTih3dXUm+WVdVn6eOb4dPE1UW0CjPDo+HTxM/nIxsxTsjP3tNHhwElOVtbqvZuU6WNtmyADAFqPxAnt7t1tefoPeUKHYLltUILR4QBeS/0O/nSw7XdwHlUnj/X6xmypa7DIgKQOMiyVvfIA4HKROKFdqT/eC9Zk6uPHJqSLP5svAoaaOaGbqKLvtweLZP/xMqPDQRs7U2eW1zfbupfOGJvGelIAuAKctaJdfbTrmJwoq5GY8EC5c0ii0eEAXi+1c6jc3L+LPn5ple2iBjzHezvypLS6XrpGhcgNfeOMDgcA3BqJE9pNg9ki81dnOubZB/n7Gh0SABGZPTFdv1++74RkFFUaHQ7aiNlidawnfWRsKutJAeAKkTih3Xy+94Rkn6qWjiH+cu/wrkaHA6BRr7gIua5PrO66Zr+4Aff35f4CyS2plg4h/nIHFX4AuGIkTmgXFotV5q2yLT5/eHSqhAb6GR0SgCbmTOym33+8O1/ySqqNDgdXSG1q/HLjhrcPjEiWkABecwHgSpE4oV2ojRcPF1ZKeKCfPDAqxehwAJxDdVwb272znt5lb+AC97U1q0T25JXqTY6njOQ1FwDaAokT2uXKp73aNGVkskQG+xsdEoCLVJ3e235MCstrjA4HV2DROlu16fbBiRIdHmh0OADgEUic4HRrj5yU746VSZC/SaaNSTU6HAAXMDytk1yd0lHqzBZZ2DjNC+4no6hCvjlQJKrzuGoKAQBoGyROcDr7xpr3DkuWTmFc+QRc2ezGqtPbW3KlpKrO6HBwGeyd9K7tHSvp0WFGhwMAHoPECU615egp2ZpdIgG+JpkxLs3ocAC0YHyPaOmXECln6s2yZL3tBBzuo6iiRj7cma+PH+U1FwDaFIkTnGpu49qmO4YmSlxkkNHhAGiBj4+Po+r02sZsKTtTb3RIaAX1M1NTLQd37SBDU6KMDgcAPAqJE5xGdXRad+Sk3nRx5njbBpsAXN/kPrHSIzZMKmob5I1N2UaHg0tUVdsgb27O1cdU+AGg7ZE4wWnsnfR+PKCLJEWFGB0OgEtkMv1QdVq8Pkuq6xqMDgmX4J/b83SFMKVTiFzXJ87ocADA45A4wSkOFVTIV98X6q5OsyZSbQLczc394iW5U4icrq7XjSLg2hrMFp3kKtPGpulKPwCgbZE4wanVphv7xkm3mHCjwwHQSn6+JscUW9WavKbebHRIuIgv9hXIsdNnJCo0QO4ckmh0OADgkUic0OayT1bJZ98d18ezJtim+wBwPz8dnCjxkUFSVFEr7+84ZnQ4uMgm4/Z9tx4YmSxB/r5GhwQAHonECW1u/upMsVhFJvaMlr4JkUaHA+AyBfiZHC2tF6zJlHqzxeiQ0IzNR0tkb36ZBPqZZMqIZKPDAQCPReKENpVfekY+2Gm7Mj1nUnejwwFwhe4e1lU6hwXoaWCf7rZVkuFaFq7N1O/vHJrIJuMA4EQkTmhTC9dkSoPFKiPTOsmQ5I5GhwPgCqlpX9PG2KpO81ZniFmVk+EyDhdWyKpDxboRj/3n
BABwDhIntOmO9cu25enjOZNY2wR4ivtHdJXIYH85WlwlK/YVGB0OmljUuLbp+j5xkto51OhwAMCjkTihzSxelyW1DRYZmNRBRqV3MjocAG0kPMhfHhqVoo/nrsrQzQhgvMLyGvl4d74+njGeahMAOBuJE9pEaXWdvLk5Rx//fFI38VHzRgB4jKmjUyQ0wFcOnCiXlQeLjA4HIvLqxmypN1tlaHJHGdyVqdEA4GwkTmgTSzdkS1WdWXrHR8ikXjFGhwOgjXUICZD7Gzu2UXUyXmVtg+Ni1YzGzocAAOciccIVq6ip11c+ldkT06k2AR5q2thU3fJ6V26pbMo8ZXQ4Xm3Z1lypqGmQtM6hcm3vWKPDAQCvQOKEK/bm5lwpO1MvadGhcmPfeKPDAeAkMeFBcvfVSY6qE4yh9tNSVX7lkbFpYjJxsQoA2gOJE65ITb1ZFq+3dXWaNaGb+PIHHPBoM8ani5/JRzZmnpIdOaeNDscrLd97Qu+Zp/bX+ungBKPDAQCvQeKEK54ucrKyThI7BsuPB3YxOhwATpbQIVhuH5yoj+dRdWp3am3ZwsYW5A+MTNH7bAEA2geJEy5bXYNFXm78A/7o+HTx9+XpBHiDmRPSRRWXVXe9ffllRofjVVSlb//xcgn295Upjc06AADtwyXOdOfNmycpKSkSFBQkw4cPl61bt17wvosWLZKxY8dKx44d9du111570fvDeT7ceUxOlNVITHig3DnEdgUagOdL6RwqP+pvqzC/tJqqU3uyX6z62dBE6RgaYHQ4AOBVDE+c3n33XXniiSfkmWeekZ07d8qAAQPk+uuvl6Ki5vcJWb16tdxzzz2yatUq2bRpkyQlJcnkyZMlP9+2CSDaR4PZIvPXZDpa4TJdBPAusyd20++/2FcgGUUVRofjFdQeWmsPF+tq37QxtCAHAK9LnF544QWZPn26TJ06Vfr06SMLFiyQkJAQWbJkSbP3f+utt2TWrFkycOBA6dWrl7zyyitisVjk22+/bffYvdnne09Izqlq6RjiL/cO72p0OADaWc+4cJncJ1bUdk4vrbZdRIFzLVpnqzap7qVdO4UYHQ4AeB0/I795XV2d7NixQ5566inHbSaTSU+/U9WkS1FdXS319fUSFRXV7Odra2v1m115ebl+r75GvRnNHoMrxHKpLBarvPjtEX380Mhk8fexumz87ji+7oTx9e7xfWxcinz1faF8svu4zJmQKkkd3etk3tXHtyk1LfrT3cf18dRRXd0iZncaX3fE+DoX4+s941vfihh8rAZu/378+HFJSEiQjRs3ysiRIx23//a3v5U1a9bIli1bWnwMVX368ssvZf/+/XqN1Ln++Mc/yrPPPnve7W+//baubKH19pzykSWHfSXI1yrPDDZLiKHpNwAjzf/eJAfLTDIq1iJ3pVmMDsdjfZJjkpXHTZIebpX/19dsdDgA4DFUEebee++VsrIyiYiIuOh93fqU989//rMsW7ZMr3tqLmlSVDVLraFqWnGyr4tqaXDaK8v9+uuv5brrrhN/f39xdSrPXrRAJbTlMnV0mtxxXXdxZe42vu6G8XUudxjf6D6n5d7F22TbSV/57wcmSFxE86/FrsgdxlepqGmQf//rWrW6VH7748EyqWe0uAN3GV93xfg6F+PrPeNb3jgb7VIYmjh17txZfH19pbCw8Kzb1cdxcXEX/dq//vWvOnH65ptvpH///he8X2BgoH47l/ohGf2DcuV4LmT1oSLZ19gK95Fx6W4RszuNr7tifL13fEd1j5FhKVGyNbtEXt2UJ3/4UR9xN648vsoHm/KksrZB0qND5bo+8WJys43GXX183R3j61yMr+ePr38rvr+hzSECAgJkyJAhZzV2sDd6aDp171z/8z//I88995ysWLFChg4d2k7RoumGl6ohRKew8xNSAN5n9iRbh723tuTIqcof1pTiytWbLbJkQ5ajg6m7JU0A4EkM76qnptGpvZlee+01OXDggMycOVOq
qqp0lz3lgQceOKt5xH//93/LH/7wB911T+39VFBQoN8qKysN/F94hy1HT8m27NMS4GvSf8ABQBnXvbP0S4iUmvofTvLRNv6157huDNE5LFBuG5RgdDgA4NUMT5zuuusuPe3u6aef1i3Gd+/erStJsbGx+vO5ubly4sQJx/3nz5+vu/HdcccdEh8f73hTjwHnmttYbbpjaKLEutE6BgDO5ePjI3Maq06vb8yRsjPGd0nyBGpN6cLGDW+njk6RQD/2ywMAI7lEc4g5c+bot+aoxg9NZWdnt1NUaGpPXqmsO3JSfE0+MnN8utHhAHAx1/WOlR6xYXK4sFLe2JQtcya5duMYd6Becw8WVEhIgK/cx355AGA4wytOcK9q048HdpGkKNq4AzibWnsze6Kt6rR4fZZU1TYYHZLbs1ebfjY0STqEBBgdDgB4PRIntOhgQbl8/X2h+PiIzJpgOzECgHPd3C9eUjqFyOnqenlna67R4bi1/cfLZH2Grco/bUyq0eEAAEiccCnmrcrU72/sGyfdYsKMDgeAi/LzNcnMCemOaklNPRu1Xq5FjdWmm/rFU+UHABdB4oSLyjpZJZ9/d1wf26fhAMCF/GRQonSJDJKiilp5b8cxo8NxS8dLz8i/vrM1RZoxlg6mAOAqSJxwUfNXZ4jFKjKpV4xc1SXS6HAAuLgAvx+2K1iwOlPvQ4TWWbI+S8wWq4xM6yT9EnndBQBXQeKEC8ovPSMf7szXx1SbAFyqu4d1lc5hAfo15JPdtoo1Lo1q5W5fH8Z+eQDgWkiccEEvr8mUBotVRqV3kiHJHY0OB4CbCPL3lUcap5i9tDpDV09waVTSVFVn1q3dJ/SMNjocAEATJE5oVlFFjSzblqeP51BtAtBK949IlshgfzlaXCVf7PthE3NcWF2DRZZuyNLH08em6Y2FAQCug8QJzVq8Lkv/ER/UtYOMTO9kdDgA3ExYoJ9MHZ3i6MxptVJ1asmne45LYXmtxIQHyq0DuxgdDgDgHCROOM/pqjp5Y3OOo9rEVU8Al+OhUSkSGuArB06Uy8qDRUaH49JUYmlvQT51dKoE+vkaHRIA4BwkTjjP0o3ZUl1nlt7xEbqbHgBcjg4hAXL/yGR9/OLKDKpOF7H6cLEcKqzQiea9w7saHQ4AoBkkTjhLRU29vNo4x55qE4Ar9ciYNAn0M8nuvFLZmHnK6HBclr3apDoSqrVhAADXQ+KEs7y5OVfKaxokLTpUbugbZ3Q4ANxcdHig3DPMVkGZuzLD6HBc0r78Mp1U+pp85OExqUaHAwC4ABInOJypM8sr62xXPWdP6Kb/iAPAlVL7Efn7+simo6dkR06J0eG4nJcbq00/6h8vCR2CjQ4HAHABJE5wWLYtV05V1Ulix2A6OgFoM106BMtPByXqY6pOZ8srqZble23t2tnwFgBcG4kTtNoGsyxsvOr52Ph08fflqQGg7cyckC6qiL3qULGemgabJRuy9AbBY7p1lqu6RBodDgDgIjg7hvbhznw5UVaj9w+5Y4jtyjAAtJWUzqFyywBbJful1VSdlLLqenm3caPx6VSbAMDlkThBGswWmb860zFVJMif/UMAtL1ZE7rp91/sK5CMogrxdm9uydFbP/SKC5dx3TsbHQ4AoAUkTpDPvjshuSXVEhUawP4hAJymZ1y4TO4TK2o7p5dW2S7WePP06Fc3Zuvj6WPT2PoBANwAiZOXs1isMm+VbdrMw6NTJCTAz+iQAHiwOZNsVadP9hyX3FPV4q0+2XVciitqJS4iyDGFEQDg2kicvNxX3xfIkaJKCQ/ykwdGpRgdDgAP1z+xg4zrEa0bIixYm+m1F6wWNm798PCYFAnw408xALgDXq29mNVqlbmN1aYHR6ZIRBC71QNwvjkTbVWn97cfk4KyGvE2qw8XSUZRpYQF+sndjZsDAwBcH4mTF1tzWLUFLpdgf192qwfQboalRum3OrPFsQ2CN3l5je3/rNaUcsEKANwHiZM3V5saN6JU
f7xVYwgAaO+q09tbc+RUZa3R4bSbPXmlsiWrRPxMPjJ1NNOjAcCdkDh5KfWHe3vOaQnwNbFbPYB2N7Z7Z+mfGCk19RZZvD5LvIV9bdOtA7pIfGSw0eEAAFqBxMlL2Tvp3Tk0UWIjgowOB4CXUe23ZzdWnd7YlCNlZ+rF06kugl/sPaGP2fAWANwPiZMX2p1XKuuOnBRfk488Nj7d6HAAeKnresdKz9hwqahtkNcb9zTyZIvXHxWL1VZt6x0fYXQ4AIBWInHyQva1TbcNTJCkqBCjwwHgpUwmH5k10XbxZsmGLKmqbRBPdbqqTv65/Zg+fnQcF6wAwB2ROHmZgwXl8s2BQlGb1NtPWADAKD/q30VSOoXI6ep6eXtLrniqNzfnyJl6s/SJj5DR3ToZHQ4A4DKQOHmZeatsG07e1Dde0qPDjA4HgJdTU4ZnTejmaJxQU28WT6P+T69tsk1FVM141PouAID7IXHyIkeLK+Wz747rY6pNAFzFbYMSpEtkkBRX1Mp7O2zT2TzJR7vy5WRlnf4/3tw/3uhwAACXicTJi8xfnSlWq8g1vWLkqi6RRocDAFqAn0kebWxUs2B1ptSbLeIpLBarLGpsQa42Gvf35c8uALgrXsG9xLHT1fqqpzJ7km1aDAC4iruuTpLOYYGSX3pGPm58rfIE3x4skqPFVRIe5Cd3D+tqdDgAgCtA4uQlXl5zVBosVhmV3kkGd+1odDgAcJYgf1+ZPjbVUR03q77dHmDhWtu60vuGJ0tYoJ/R4QAArgCJkxcoKq+Rd7fn6eM5VJsAuKj7RiRLZLC/HD1ZJV/ss20U68525p6Wbdmnxd/XR6aOTjE6HADAFSJx8gKvrM+SugaLDO7aQUam0QYXgGtSFRl7gqH2m7OqRZlubNFa29qmHw9MkNiIIKPDAQBcIRInD6c2XVT7h9irTbTBBeDKHhqVohOogwUV8u2BInFX2SerZMX+AkcLcgCA+yNx8nBLN2RJdZ1t08WJPWOMDgcALqpDSIDcPyJZH89d5b5Vp8Xrs3QX0wk9o6VHbLjR4QAA2gCJkwerqKmXVzfaNl2k2gTAXUwbkyqBfibZnVcqGzJOibspqaqT93bY1pVSbQIAz0Hi5MHe2Jwj5TUNkh4dKjdcFWd0OABwSaLDA+Wextbdc1cdEXfz+qZsqam3SN+ECNaVAoAHIXHyUGfqzLJ4XZY+njWhm5hMVJsAuA9VqVHd6DYfLZHt2SXiLmrqzfL6Jtu60hnj0qn0A4AHIXHyUO9szZVTVXWSFBUstw7sYnQ4ANAqXToEy+2DEx1rndzF+zuO6al6CR2C5aa+VPoBwJOQOHmg2gazLGxsg/vY+HTx9+XHDMD9qNcvVSxffahY9uWXiatTm/aqphD2dVp+vPYCgEfhVd0DfbgzXwrKayQ2IlDuGGK7YgsA7ialc6jcOsBWMZ/nBlWnr78vlKyTVRIR5Cd3XZ1kdDgAgDZG4uRhGswWmb86Ux9PH5smgX6+RocEAJdt1sRu+r3aE+lIYYW4soVrba+9qp16aKCf0eEAANoYiZOH+dd3xyW3pFqiQgPk3uG2rlQA4K7UHkjXXxWr90R6qfGikCvakVMiO3NLJcDXpDfxBQB4HhInD2KxWGXeqkzH/PqQAK54AnB/cyZ21+8/3XNcck9Viyt6eY1tXelPBiVITESQ0eEAAJyAxMmDfLm/QDKKKiU8yE+mjEw2OhwAaBP9EiNlfI9o3Xxh/hrXqzodLa6Urw8U6uPp41KNDgcA4CQkTh7CarU6WvaqaSIRQf5GhwQAbWbOJNtap/d35MmJsjPiSl5Zn6WnEl7TK0a6xYQbHQ4AwElInDzE6sPFsv94uQT7+8rU0VzxBOBZrk6JkmGpUVJvtjq2W3AFJytr9d5N9k17AQCei8TJU6pNK23VpvuGd9WNIQDA0/y8seqkNvhWCYsreH1TjtQ1WGRAYqRO7AAAnovEyQNsPloiO3JO625O07niCcBDjenWWSco
NfUWWdK40ayRztSZ5Y1N2fp4xrh08fHxMTokAIATkTh5APvGkD+7OlFi6eYEwEOpxGR2475OqtJTVl1vaDzv7ciT09X1khQVrFumAwA8G4mTm9uVe1rWZ5wUX5OPPDou3ehwAMCpru0dKz1jw6WytkFea6z2GEF1+Htlna3q9ciYNPHz5c8pAHg6Xuk9pNp028AESYoKMTocAHAqk8lHZjeudVqyIUuqahsM2/5BbTbeIcRf7hyaaEgMAID2ReLkxg6cKJdvDhSJmlY/ayLVJgDe4eZ+8ZLaOVRKq+vlrS05hjTkebmxs9+UEclsNg4AXoLEyQOqTTf1i5f06DCjwwGAdqGmJs8cb7tYtGhdltTUm9v1+2/LPi178kolwM8kD4xMadfvDQAwDomTm8osrpTP957Qx7Mn2KatAIC3uG1QgiR0CJbiilp5b3teu37vhWsz9fvbBydIdHhgu35vAIBxSJzc1PzVmY6d6vt0iTA6HABoV6ra8+h42/YLC9YclXqzpV2+b0ZRpWOK9CNj2f4BALwJiZMbOna6Wj7ela+P7YukAcDb/GxoknQOC5T80jPyUeNrorO9su6oo7sfU6QBwLuQOLmhl9cclQaLVUZ36ySDu3Y0OhwAMESQv6/MGJfqqMKrFuHOVFRRIx/utCVoM9hsHAC8DomTmykqr5F3G+fz2zeCBABvdd/wZN0SPOtklSxvXPfpLK9vzJE6s0UGde0gQ5O5aAUA3obEyc0sWndU6hosMiS5o4xM62R0OABgqNBAP5k6KtXRadTipKqT2i/qjc221uePjksTH7XICQDgVUic3Mjpqjp5a0uuPp4zsRt/uAFARB4alSJhgX5ysKBCvj1Y5JTvoTr3lZ2pl5ROIXJdnzinfA8AgGsjcXIjSzdkSXWdWa7qEiETekYbHQ4AuITIEH+ZMjJZH89dlaE3qG1LDWaLvLI+Sx9PG5um95ECAHgfEic3UV5TL0s3ZjvWNlFtAoAfTBuTKkH+Jr0x7YaMU2362F/sK5Bjp89IVGiA3DE4sU0fGwDgPkic3MQbm3KkoqZBusWEyQ1XMU0EAJpSbcnvvrqrPn5x5ZE2e1xVvVq41taCfMqIZAkO8G2zxwYAuBcSJzdQXdcgixunicyakC4mpokAwHnUhrj+vj6yJatEtmeXtMljbj5aInvzyyTQzyQPNE4HBAB4JxInN/DO1jwpqaqTpKhguXVAF6PDAQCXFB8ZLHcMSXSsdWqrTqaKetxOYYFt8pgAAPdE4uTiahvMsnBtpj6eOb6b+PnyIwOAC3lsfLqoovzqQ8Wy91jZFT3WkcIKWXmwSNSS0kfGsuEtAHg7zsJd3Ac78qWwvFbiIoLk9iEJRocDAC4tuVOoozKv9nW6Eva1TZP7xEpq59A2iQ8A4L5InFyYaoE7f43tD//0cWkS6MeiZABoieo8qqzYX6CrRpejqLxGPt6dr49njEtv0/gAAO6JxMmFfbrnuOSV2Frg3jMsyehwAMAtdI8Nd3QffWm1bapza6ntH+rNVhma3FGGJHds4wgBAO6IxMlFWSxWxx98tT9JSICf0SEBgNtVnT7ZnS85p6pa9bWVtQ3y5uYcR7UfAACFxMlFfbm/QDKKKiU8yE+m0AIXAFqlX2KkTOgZLRaryII1ras6vbstT++bl9Y5VK7rHeu0GAEA7oXEyQWpDRftrXQfGpUiEUH+RocEAG5nTmPV6f0dx+RE2ZlL+pp6s0WWNO6bpzrpsW8eAMCOxMkFqTa6+4+XS0iAr0wdnWp0OADgloamRMnw1Ci9VunlNbYOeS1ZvveE5JeekU6hAfLTwXQyBQD8gMTJBatNL648oo/vG95VN4YAAFyeOZNsVadl23LlZGVti6+/9hbkD45KkSB/OpkCAH5A4uRiNh09JTtzSyXAzyTT2XARAK7ImG6dZUBSB6mpt8jixil4F7Ix85Su9gf5m+T+EawtBQCcjcTJxdg3bPzZ0ESJiQgyOhwAcGs+Pj6O
tU5vbMqRsur6C97XXm362dAkqv0AgPOQOLmQXbmnZUPGKfEz+cijbLgIAG3iml4x0isuXLcZf3VjdrP3OVhQLmsOF4vqBaG2gAAA4FwkTi5YbbptUIIkRYUYHQ4AeATVGc++r9PSjVlSVdtw3n0WrbVN47uhb5wkdwpt9xgBAK6PxMlFHDhRId8cKBIfH5GZE6g2AUBbuqlfvN6XqbS6Xt7aYtvc1q6gvEY+3ZOvj1lbCgC4EBInF7GgcW79zf3iJT06zOhwAMCj+Jp85LHGi1IL12ZJTb3Z8bnXNuXqluXDUqJkUNeOBkYJAHBlJE4uoPCMyBf7C/WxfToJAKBt/WRQgiR0CNZtyf+5PU/fVtOgWpUf08czxlFtAgBcGImTC/gm3yRWq8i1vWOkd3yE0eEAgEfy9zXJY+NtyZHaELfebJGNRT66aUR6dKhM6hVjdIgAABfmEonTvHnzJCUlRYKCgmT48OGydevWi97/vffek169eun79+vXT5YvXy7u6tjpM7K92EcfU20CAOe6c2iSRIcHSn7pGflg53FZc8LkWNukmkgAAOCyidO7774rTzzxhDzzzDOyc+dOGTBggFx//fVSVFTU7P03btwo99xzj0ybNk127dolt912m37bt2+fuKNF67PEIj4yKp259QDgbEH+vjJ9rK3d+HPLD0ppnY90DgvQ3UwBAHDpxOmFF16Q6dOny9SpU6VPnz6yYMECCQkJkSVLljR7/3/84x9yww03yG9+8xvp3bu3PPfcczJ48GCZO3euuJvC8hp5f+dxfTyrcfoIAMC57hueLB1C/KWuwaI/fmBEV51QAQBwMX5ioLq6OtmxY4c89dRTjttMJpNce+21smnTpma/Rt2uKlRNqQrVxx9/3Oz9a2tr9ZtdeXm5fl9fX6/fjPTy6gz9hzs13CqDEsIMj8cT2ceUsXUOxte5GF/nCDCJPDiiq/xjZaYEmKxyx6A4xtgJeP46F+PrXIyv94xvfStiMDRxOnnypJjNZomNjT3rdvXxwYMHm/2agoKCZu+vbm/O888/L88+++x5t3/11Ve6smWkumIf6RRokskJFvnmm28MjcXTff3110aH4NEYX+difNteF7PIsGiT9Ii0yrb1q40Ox6Px/HUuxte5GF/PH9/q6mr3SJzag6pmNa1QqYpTUlKSTJ48WSIijO1gd5OI/La2VlZ+861cd9114u/vb2g8nkhdRVC/lIyvczC+zsX4OtctjK9T8fx1LsbXuRhf7xnf8sbZaC6fOHXu3Fl8fX2lsNC2h5Gd+jguLq7Zr1G3t+b+gYGB+u1c6odk9A/KzsfHteLxRIyvczG+zsX4Ohfj61yMr3Mxvs7F+Hr++Pq34vsb2hwiICBAhgwZIt9++63jNovFoj8eOXJks1+jbm96f0VlrBe6PwAAAABcKcOn6qlpdA8++KAMHTpUhg0bJn//+9+lqqpKd9lTHnjgAUlISNBrlZTHH39cxo8fL3/729/k5ptvlmXLlsn27dtl4cKFBv9PAAAAAHgqwxOnu+66S4qLi+Xpp5/WDR4GDhwoK1ascDSAyM3N1Z327EaNGiVvv/22/P73v5ff/e530r17d91Rr2/fvgb+LwAAAAB4MsMTJ2XOnDn6rTmrV5/f7ejOO+/UbwAAAADgFRvgAgAAAICrI3ECAAAAgBaQOAEAAABAC0icAAAAAKAFJE4AAAAA0AISJwAAAABoAYkTAAAAALSAxAkAAAAAWkDiBAAAAAAtIHECAAAAgBaQOAEAAABAC0icAAAAAKAFJE4AAAAA0AI/8TJWq1W/Ly8vF1dQX18v1dXVOh5/f3+jw/E4jK9zMb7Oxfg6F+PrXIyvczG+zsX4es/4ljfmBPYc4WK8LnGqqKjQ75OSkowOBQAAAICL5AiRkZEXvY+P9VLSKw9isVjk+PHjEh4eLj4+PkaHo7NclcTl5eVJRESE0eF4HMbXuRhf52J8nYvxdS7G17kYX+difL1nfK1Wq06a
unTpIibTxVcxeV3FSQ1IYmKiuBr1pDH6iePJGF/nYnydi/F1LsbXuRhf52J8nYvx9Y7xjWyh0mRHcwgAAAAAaAGJEwAAAAC0gMTJYIGBgfLMM8/o92h7jK9zMb7Oxfg6F+PrXIyvczG+zsX4Olegm46v1zWHAAAAAIDWouIEAAAAAC0gcQIAAACAFpA4AQAAAEALSJwAAAAAoAUkTk42b948SUlJkaCgIBk+fLhs3br1ovd/7733pFevXvr+/fr1k+XLl7dbrN4wxq+++qr4+Pic9aa+Dudbu3at3HLLLXonbTVOH3/8cYtfs3r1ahk8eLDuktOtWzc93mib8VVje+5zV70VFBS0W8zu5Pnnn5err75awsPDJSYmRm677TY5dOhQi1/Ha7DzxpfX30s3f/586d+/v2Nz0JEjR8oXX3xx0a/hueu88eW5e2X+/Oc/6zH7xS9+4fbPYRInJ3r33XfliSee0O0Wd+7cKQMGDJDrr79eioqKmr3/xo0b5Z577pFp06bJrl279B8i9bZv3752j91Tx1hRL5InTpxwvOXk5LRrzO6iqqpKj6dKTC9FVlaW3HzzzTJx4kTZvXu3foF85JFH5Msvv3R6rN4wvnbq5LTp81edtOJ8a9askdmzZ8vmzZvl66+/lvr6epk8ebIe9wvhNdi546vw+ntpEhMT9cnmjh07ZPv27TJp0iT58Y9/LPv372/2/jx3nTu+Cs/dy7Nt2zZ5+eWXdaJ6MW7zHFbtyOEcw4YNs86ePdvxsdlstnbp0sX6/PPPN3v/n/3sZ9abb775rNuGDx9uffTRR50eq7eM8dKlS62RkZHtGKFnUC8VH3300UXv89vf/tZ61VVXnXXbXXfdZb3++uudHJ13jO+qVav0/U6fPt1ucXmSoqIiPX5r1qy54H14DXbu+PL6e2U6duxofeWVV5r9HM9d544vz93LU1FRYe3evbv166+/to4fP976+OOPX/C+7vIcpuLkJHV1dfpKxrXXXuu4zWQy6Y83bdrU7Neo25veX1HVkwvd39tdzhgrlZWVkpycLElJSS1eYcKl4/nbPgYOHCjx8fFy3XXXyYYNG4wOx22UlZXp91FRURe8D89h546vwutv65nNZlm2bJmu5qkpZc3huevc8VV47raeqkqrmSjnPjfd+TlM4uQkJ0+e1L+MsbGxZ92uPr7QmgR1e2vu7+0uZ4x79uwpS5YskU8++UTefPNNsVgsMmrUKDl27Fg7Re25LvT8LS8vlzNnzhgWl6dQydKCBQvkgw8+0G/qj/eECRP0FFVcnPo9V1NHR48eLX379r3g/XgNdu748vrbOnv37pWwsDC9ZvSxxx6Tjz76SPr06dPsfXnuOnd8ee623rJly/TfJ7Ue8lK4y3PYz+gAgPakriY1vaKkXvh69+6t598+99xzhsYGXIz6w63emj53MzMz5X//93/ljTfeMDQ2d7jqqebJr1+/3uhQvHp8ef1tHfX7rtaLqmre+++/Lw8++KBeW3ahk3s4b3x57rZOXl6ePP7443r9o6c10SBxcpLOnTuLr6+vFBYWnnW7+jguLq7Zr1G3t+b+3u5yxvhc/v7+MmjQIMnIyHBSlN7jQs9ftaA2ODjYsLg82bBhw0gGWjBnzhz57LPPdBdDtSD8YngNdu74novX34sLCAjQ3UmVIUOG6EX2//jHP/TJ+rl47jp3fM/Fc/fi1DIK1aRLddm1UzOE1OvE3Llzpba2Vp+/ueNzmKl6TvyFVL+I3377reM2VdpVH19oDq26ven9FZWtX2zOrTe7nDE+l/pFVuV6NQ0KV4bnb/tTV0t57jZP9dxQJ/Vq+s3KlSslNTW1xa/hOezc8T0Xr7+to/6+qRPO5vDcde74novn7sVdc801enzU3yj729ChQ+W+++7Tx+cmTW71HDa6O4UnW7ZsmTUwMND66quvWr///nvrjBkzrB06dLAWFBToz0+ZMsX65JNPOu6/YcMGq5+fn/Wvf/2r9cCB
A9ZnnnnG6u/vb927d6+B/wvPGuNnn33W+uWXX1ozMzOtO3bssN59993WoKAg6/79+w38X7huN5xdu3bpN/VS8cILL+jjnJwc/Xk1rmp87Y4ePWoNCQmx/uY3v9HP33nz5ll9fX2tK1asMPB/4Tnj+7//+7/Wjz/+2HrkyBH9mqC6E5lMJus333xj4P/Cdc2cOVN3wVq9erX1xIkTjrfq6mrHfXgNbt/x5fX30qlxUx0Ks7KyrN99953+2MfHx/rVV1/pz/Pcbd/x5bl75caf01XPXZ/DJE5O9uKLL1q7du1qDQgI0K2zN2/efNaT6MEHHzzr/v/85z+tPXr00PdXrZ0///xzA6L23DH+xS9+4bhvbGys9aabbrLu3LnToMhdm7399blv9vFU79X4nvs1AwcO1OOblpamW7iibcb3v//7v63p6en6j3VUVJR1woQJ1pUrVxr4P3BtzY2temv6nOQ1uH3Hl9ffS/fwww9bk5OT9VhFR0dbr7nmGsdJvcJzt33Hl+du2ydO4930Oeyj/jG66gUAAAAArow1TgAAAADQAhInAAAAAGgBiRMAAAAAtIDECQAAAABaQOIEAAAAAC0gcQIAAACAFpA4AQAAAEALSJwAAAAAoAUkTgAAj5OdnS0+Pj6ye/dup32Phx56SG677TanPT4AwLWQOAEAXI5KSlTic+7bDTfccElfn5SUJCdOnJC+ffs6PVYAgHfwMzoAAACao5KkpUuXnnVbYGDgJX2tr6+vxMXFOSkyAIA3ouIEAHBJKklSyU/Tt44dO+rPqerT/Pnz5cYbb5Tg4GBJS0uT999//4JT9U6fPi333XefREdH6/t37979rKRs7969MmnSJP25Tp06yYwZM6SystLxebPZLE888YR06NBBf/63v/2tWK3Ws+K1WCzy/PPPS2pqqn6cAQMGnBUTAMC9kTgBANzSH/7wB7n99ttlz549Oim6++675cCBAxe87/fffy9ffPGFvo9Kujp37qw/V1VVJddff71OyrZt2ybvvfeefPPNNzJnzhzH1//tb3+TV199VZYsWSLr16+XkpIS+eijj876Hippev3112XBggWyf/9++eUvfyn333+/rFmzxskjAQBoDz7Wcy+ZAQDgAmuc3nzzTQkKCjrr9t/97nf6TVWTHnvsMZ0A2Y0YMUIGDx4sL730kq44qcrPrl27ZODAgXLrrbfqREklPudatGiR/Nu//Zvk5eVJaGiovm358uVyyy23yPHjxyU2Nla6dOmiE6Hf/OY3+vMNDQ368YcMGSIff/yx1NbWSlRUlE64Ro4c6XjsRx55RKqrq+Xtt9924mgBANoDa5wAAC5p4sSJZyVGikpO7JomKPaPL9RFb+bMmbo6tXPnTpk8ebLuhjdq1Cj9OVWBUtPq7EmTMnr0aD317tChQzp5U40mhg8f7vi8n5+fDB061DFdLyMjQydI11133Vnft66uTgYNGnRF4wAAcA0kTgAAl6QSmW7durXJY6m1UDk5ObqS9PXXX8s111wjs2fPlr/+9a9t8vj29VCff/65JCQkXFZDCwCAa2ONEwDALW3evPm8j3v37n3B+6vGEA8++KCeAvj3v/9dFi5cqG9XX6PWSam1TnYbNmwQk8kkPXv2lMjISImPj5ctW7Y4Pq+m6u3YscPxcZ8+fXSClJubq5O9pm+qNToAwP1RcQIAuCS1bqigoOCs29QUOXtTB9XEQU2XGzNmjLz11luydetWWbx4cbOP9fTTT+v1SFdddZV+3M8++8yRZKnGEs8884xOqv74xz9KcXGx/PznP5cpU6bo9U3K448/Ln/+8591N75evXrJCy+8IKWlpY7HDw8Pl1//+td6HZSa4qdiKisr0wlYRESEfmwAgHsjcQIAuKQVK1boSk9TqgJ08OBBffzss8/KsmXLZNasWfp+77zzjq78NCcgIECeeuop3TRCtQofO3as/lolJCREvvzyS50cXX311fpjtR5KJUd2v/rVr/Q6
J5UAqUrUww8/LD/5yU90cmT33HPP6aqW6q539OhR3bpcNatQzSwAAO6PrnoAALejuuqpduCqyQMAAO2BNU4AAAAA0AISJwAAAABoAWucAABuh1nmAID2RsUJAAAAAFpA4gQAAAAALSBxAgAAAIAWkDgBAAAAQAtInAAAAACgBSROAAAAANACEicAAAAAaAGJEwAAAADIxf1/ca9w/ggdBLkAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Compare the performance of the simple RAG pipeline and the RL-enhanced RAG pipeline\n", + "# using the sample query and its expected answer.\n", + "# The function returns:\n", + "# - simple_response: The response generated by the simple RAG pipeline.\n", + "# - rl_response: The best response generated by the RL-enhanced RAG pipeline.\n", + "# - simple_sim: The similarity score of the simple RAG response to the ground truth.\n", + "# - rl_sim: The similarity score of the RL-enhanced RAG response to the ground truth.\n", + "simple_response, rl_response, simple_sim, rl_sim = compare_rag_approaches(sample_query, expected_answer)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can clearly see that the response generated by the RL-enhanced RAG model is more accurate and relevant compared to the simple RAG pipeline. The improvement in similarity to the ground truth is evident, indicating that the RL-enhanced model has learned to generate better responses through training." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Saving the Comparison Results\n", + "\n", + "After implementing the RL algorithm, we can save the comparison results to check the performance of the RL implementation later." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Results saved to rl_rag_results.json\n" + ] + } + ], + "source": [ + "# Save the results for later comparison\n", + "results = {\n", + " \"query\": query_text, # The input query text\n", + " \"ground_truth\": expected_answer, # The expected correct answer for the query\n", + " \"simple_rag\": {\n", + " \"response\": simple_response, # The response generated by the simple RAG pipeline\n", + " \"similarity\": float(simple_sim) # The similarity score of the simple RAG response to the ground truth\n", + " },\n", + " \"rl_rag\": {\n", + " \"response\": rl_response, # The response generated by the RL-enhanced RAG pipeline\n", + " \"similarity\": float(rl_sim) # The similarity score of the RL-enhanced RAG response to the ground truth\n", + " },\n", + " \"improvement\": float(rl_sim - simple_sim) # The improvement in similarity score achieved by RL-enhanced RAG\n", + "}\n", + "\n", + "# Save the results to a JSON file for future reference\n", + "with open('rl_rag_results.json', 'w') as f:\n", + " json.dump(results, f, indent=2) # Write the results dictionary to the file with indentation for readability\n", + "\n", + "# Print a confirmation message to indicate that the results have been saved\n", + "print(\"\\nResults saved to rl_rag_results.json\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What can we conclude?\n", + "\n", + "- The performance of the simple RAG is lower compared to the RL-enhanced RAG on factual queries.\n", + "- The RL-enhanced RAG achieved a 19.5% improvement in the similarity score within 5 episodes.\n", + "- Further improvements can be achieved by:\n", + " - Training for more episodes.\n", + " - Tuning hyperparameters.\n", + "- Time is a key constraint for training.\n", + "- Parallel implementation of the RL algorithm can help reduce training time." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".vene-rag-rl", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/22_Big_data_with_KG.ipynb b/22_Big_data_with_KG.ipynb new file mode 100644 index 0000000..17cd636 --- /dev/null +++ b/22_Big_data_with_KG.ipynb @@ -0,0 +1,3257 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro-0", + "metadata": {}, + "source": [ + "## End-to-End Pipeline: Big Data with Knowledge Graph (Book-Referenced)\n", + "\n", + "### Goal:\n", + "Transform news articles about technology company acquisitions into a structured Knowledge Graph, using modern techniques for extraction, refinement, and reasoning — guided by foundational principles outlined in a conceptual book.\n", + "\n", + "### Dataset: CNN/DailyMail\n", + "\n", + "### Approach Overview:\n", + "This notebook walks through a multi-phase process:\n", + "1. **Data Acquisition & Preparation:** Sourcing and cleaning raw news text.\n", + "2. **Information Extraction:** Identifying key entities (organizations, people, money, dates) and the relationships between them (e.g., 'acquire', 'invested_in').\n", + "3. **Knowledge Graph Construction:** Structuring the extracted information into RDF triples, forming the nodes and edges of our KG.\n", + "4. **KG Refinement (Conceptual):** Using embeddings to represent KG components and conceptually exploring link prediction.\n", + "5. 
**Persistence & Utilization:** Storing, querying (SPARQL), and visualizing the KG.\n", + "\n", + "We will leverage Large Language Models (LLMs) for complex NLP tasks like nuanced entity and relationship extraction, while also using traditional libraries like spaCy for initial exploration and `rdflib` for KG management." + ] + }, + { + "cell_type": "markdown", + "id": "b94b29f8", + "metadata": {}, + "source": [ + "# Table of Contents\n", + "\n", + "- [End-to-End Pipeline: Big Data with Knowledge Graph (Book-Referenced)](#intro-0)\n", + " - [Initial Setup: Imports and Configuration](#intro-setup)\n", + " - [Initialize LLM Client and spaCy Model](#llm-spacy-init-desc)\n", + " - [Define RDF Namespaces](#namespace-init-desc)\n", + "- [Phase 1: Data Acquisition and Preparation](#phase1)\n", + " - [Step 1.1: Data Acquisition](#step1-1-desc)\n", + " - [Execute Data Acquisition](#data-acquisition-exec-desc)\n", + " - [Step 1.2: Data Cleaning & Preprocessing](#step1-2-desc)\n", + " - [Execute Data Cleaning](#data-cleaning-exec-desc)\n", + "- [Phase 2: Information Extraction](#phase2)\n", + " - [Step 2.1: Entity Extraction (Named Entity Recognition - NER)](#step2-1-desc)\n", + " - [2.1.1: Entity Exploration with spaCy - Function Definition](#step2-1-1-spacy-desc)\n", + " - [2.1.1: Entity Exploration with spaCy - Plotting Function Definition](#plot_entity_distribution_func_def_desc)\n", + " - [2.1.1: Entity Exploration with spaCy - Execution](#spacy-ner-exec-desc)\n", + " - [Generic LLM Call Function Definition](#llm-call-func-def-desc)\n", + " - [2.1.2: Entity Type Selection using LLM - Execution](#step2-1-2-llm-type-selection-desc)\n", + " - [LLM JSON Output Parsing Function Definition](#parse_llm_json_func_def_desc)\n", + " - [2.1.3: Targeted Entity Extraction using LLM - Execution](#step2-1-3-llm-ner-exec-desc)\n", + " - [Step 2.2: Relationship Extraction](#step2-2-desc)\n", + "- [Phase 3: Knowledge Graph Construction](#phase3)\n", + " - [Step 3.1: Entity Disambiguation & 
Linking (Simplified) - Normalization Function](#step3-1-normalize-entity-text-func-def-desc)\n", + " - [Execute Entity Normalization and URI Generation](#entity-normalization-exec-desc)\n", + " - [Step 3.2: Schema/Ontology Alignment - RDF Type Mapping Function](#step3-2-rdf-type-func-def-desc)\n", + " - [Schema/Ontology Alignment - RDF Predicate Mapping Function](#step3-2-rdf-predicate-func-def-desc)\n", + " - [Schema/Ontology Alignment - Examples](#schema-alignment-example-desc)\n", + " - [Step 3.3: Triple Generation](#step3-3-triple-generation-exec-desc)\n", + "- [Phase 4: Knowledge Graph Refinement Using Embeddings](#phase4)\n", + " - [Step 4.1: Generate KG Embeddings - Embedding Function Definition](#step4-1-embedding-func-def-desc)\n", + " - [Generate KG Embeddings - Execution](#kg-embedding-exec-desc)\n", + " - [Step 4.2: Link Prediction (Knowledge Discovery - Conceptual) - Cosine Similarity Function](#step4-2-cosine-sim-func-def-desc)\n", + " - [Link Prediction (Conceptual) - Similarity Calculation Example](#link-prediction-exec-desc)\n", + " - [Step 4.3: Add Predicted Links (Optional & Conceptual) - Function Definition](#step4-3-add-inferred-func-def-desc)\n", + " - [Add Predicted Links (Conceptual) - Execution Example](#add-predicted-links-exec-desc)\n", + "- [Phase 5: Persistence and Utilization](#phase5)\n", + " - [Step 5.1: Knowledge Graph Storage - Save Function Definition](#step5-1-save-graph-func-def-desc)\n", + " - [Knowledge Graph Storage - Execution](#kg-storage-exec-desc)\n", + " - [Step 5.2: Querying and Analysis - SPARQL Execution Function](#step5-2-sparql-func-def-desc)\n", + " - [SPARQL Querying and Analysis - Execution Examples](#sparql-querying-exec-desc)\n", + " - [Step 5.3: Visualization (Optional) - Visualization Function Definition](#step5-3-viz-func-def-desc)\n", + " - [KG Visualization - Execution](#visualization-exec-desc)\n", + "- [Conclusion and Future Work](#conclusion)" + ] + }, + { + "cell_type": "markdown", + "id": 
"intro-setup", + "metadata": {}, + "source": [ + "### Initial Setup: Imports and Configuration\n", + "\n", + "**Theory:**\n", + "Before any data processing or analysis can begin, we need to set up our environment. This involves:\n", + "* **Importing Libraries:** Bringing in the necessary Python packages. These include `datasets` for data loading, `openai` for interacting with LLMs, `spacy` for foundational NLP, `rdflib` for Knowledge Graph manipulation, `re` for text processing with regular expressions, `json` for handling LLM outputs, `matplotlib` and `pyvis` for visualization, and standard libraries like `os`, `collections`, and `tqdm`.\n", + "* **API Configuration:** Setting up credentials and endpoints for external services, specifically the Nebius LLM API. **Security Note:** In a production environment, API keys should never be hardcoded. Use environment variables or secure secret management systems.\n", + "* **Model Initialization:** Loading pre-trained models like spaCy's `en_core_web_sm` for basic NLP tasks and configuring the LLM client to use specific models deployed on Nebius for generation and embeddings.\n", + "* **Namespace Definitions:** For RDF-based Knowledge Graphs, namespaces (like `EX` for our custom terms, `SCHEMA` for schema.org) are crucial for creating unique and resolvable URIs for entities and properties. This aligns with the Linked Data principles." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "setup-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Libraries imported.\n" + ] + } + ], + "source": [ + "# Import necessary libraries\n", + "import os\n", + "import re\n", + "import json\n", + "from collections import Counter\n", + "import matplotlib.pyplot as plt\n", + "from tqdm.auto import tqdm\n", + "import pandas as pd\n", + "import time\n", + "\n", + "# NLP and KG libraries\n", + "import spacy\n", + "from rdflib import Graph, Literal, Namespace, URIRef\n", + "from rdflib.namespace import RDF, RDFS, XSD, SKOS # Added SKOS for altLabel\n", + "\n", + "# OpenAI client for LLM\n", + "from openai import OpenAI\n", + "\n", + "# Visualization\n", + "from pyvis.network import Network\n", + "\n", + "# Hugging Face datasets library\n", + "from datasets import load_dataset\n", + "\n", + "# For embedding similarity\n", + "import numpy as np\n", + "from sklearn.metrics.pairwise import cosine_similarity\n", + "\n", + "# --- API Configuration (IMPORTANT: Replace with your actual credentials and model names) ---\n", + "NEBIUS_API_KEY = os.getenv(\"NEBIUS_API_KEY\", \"your_nebius_api_key_here\") # Replace with your actual API key\n", + "NEBIUS_BASE_URL = \"https://api.studio.nebius.com/v1/\"\n", + "\n", + "# --- Model Names (IMPORTANT: Replace with your deployed model names) ---\n", + "TEXT_GEN_MODEL_NAME = \"deepseek-ai/DeepSeek-V3\" # e.g., phi-4, deepseek or any other model\n", + "EMBEDDING_MODEL_NAME = \"BAAI/bge-multilingual-gemma2\" # e.g., text-embedding-ada-002, BAAI/bge-multilingual-gemma2 or any other model\n", + "\n", + "print(\"Libraries imported.\")" + ] + }, + { + "cell_type": "markdown", + "id": "setup-code-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block simply confirms that the necessary libraries have been imported without error." 
+ ] + }, + { + "cell_type": "markdown", + "id": "llm-spacy-init-desc", + "metadata": {}, + "source": [ + "#### Initialize LLM Client and spaCy Model\n", + "\n", + "**Theory:**\n", + "Here, we instantiate the clients for our primary NLP tools:\n", + "* **OpenAI Client:** Configured to point to the Nebius API. This client will be used to send requests to the deployed LLM for tasks like entity extraction, relation extraction, and generating embeddings. A basic check is performed to see if the configuration parameters are set.\n", + "* **spaCy Model:** We load `en_core_web_sm`, a small English model from spaCy. This model provides efficient capabilities for tokenization, part-of-speech tagging, lemmatization, and basic Named Entity Recognition (NER). It's useful for initial text exploration and can complement LLM-based approaches." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "llm-spacy-init-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI client initialized for base_url: https://api.studio.nebius.com/v1/ using model: deepseek-ai/DeepSeek-V3\n", + "spaCy model 'en_core_web_sm' loaded.\n" + ] + } + ], + "source": [ + "client = None # Initialize client to None\n", + "if NEBIUS_API_KEY != \"YOUR_NEBIUS_API_KEY\" and NEBIUS_BASE_URL != \"YOUR_NEBIUS_BASE_URL\" and TEXT_GEN_MODEL_NAME != \"YOUR_TEXT_GENERATION_MODEL_NAME\":\n", + " try:\n", + " client = OpenAI(\n", + " base_url=NEBIUS_BASE_URL,\n", + " api_key=NEBIUS_API_KEY \n", + " )\n", + " print(f\"OpenAI client initialized for base_url: {NEBIUS_BASE_URL} using model: {TEXT_GEN_MODEL_NAME}\")\n", + " except Exception as e:\n", + " print(f\"Error initializing OpenAI client: {e}\")\n", + " client = None # Ensure client is None if initialization fails\n", + "else:\n", + " print(\"Warning: OpenAI client not fully configured. LLM features will be disabled. 
Please set NEBIUS_API_KEY, NEBIUS_BASE_URL, and TEXT_GEN_MODEL_NAME.\")\n", + "\n", + "nlp_spacy = None # Initialize nlp_spacy to None\n", + "try:\n", + " nlp_spacy = spacy.load(\"en_core_web_sm\")\n", + " print(\"spaCy model 'en_core_web_sm' loaded.\")\n", + "except OSError:\n", + " print(\"spaCy model 'en_core_web_sm' not found. Downloading... (This might take a moment)\")\n", + " try:\n", + " spacy.cli.download(\"en_core_web_sm\")\n", + " nlp_spacy = spacy.load(\"en_core_web_sm\")\n", + " print(\"spaCy model 'en_core_web_sm' downloaded and loaded successfully.\")\n", + " except Exception as e:\n", + " print(f\"Failed to download or load spaCy model: {e}\")\n", + " print(\"Please try: python -m spacy download en_core_web_sm in your terminal and restart the kernel.\")\n", + " nlp_spacy = None # Ensure nlp_spacy is None if loading fails" + ] + }, + { + "cell_type": "markdown", + "id": "llm-spacy-init-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block prints messages indicating the status of the OpenAI client and spaCy model initialization. Warnings are shown if configurations are missing or models can't be loaded." + ] + }, + { + "cell_type": "markdown", + "id": "namespace-init-desc", + "metadata": {}, + "source": [ + "#### Define RDF Namespaces\n", + "\n", + "**Theory:**\n", + "In RDF, namespaces are used to avoid naming conflicts and to provide context for terms (URIs). \n", + "* `EX`: A custom namespace for terms specific to our project (e.g., our entities and relationships if not mapped to standard ontologies).\n", + "* `SCHEMA`: Refers to Schema.org, a widely used vocabulary for structured data on the internet. 
We'll try to map some of our extracted types to Schema.org terms for better interoperability.\n", + "* `RDFS`: RDF Schema, provides basic vocabulary for describing RDF vocabularies (e.g., `rdfs:label`, `rdfs:Class`).\n", + "* `RDF`: The core RDF vocabulary (e.g., `rdf:type`).\n", + "* `XSD`: XML Schema Datatypes, used for specifying literal data types (e.g., `xsd:string`, `xsd:date`).\n", + "* `SKOS`: Simple Knowledge Organization System, useful for thesauri, taxonomies, and controlled vocabularies (e.g., `skos:altLabel` for alternative names)." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "namespace-init-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Custom namespace EX defined as: http://example.org/kg/\n", + "Schema.org namespace SCHEMA defined as: http://schema.org/\n" + ] + } + ], + "source": [ + "EX = Namespace(\"http://example.org/kg/\")\n", + "SCHEMA = Namespace(\"http://schema.org/\")\n", + "\n", + "print(f\"Custom namespace EX defined as: {EX}\")\n", + "print(f\"Schema.org namespace SCHEMA defined as: {SCHEMA}\")" + ] + }, + { + "cell_type": "markdown", + "id": "namespace-init-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This confirms the definition of our primary custom namespace (`EX`) and the `SCHEMA` namespace from Schema.org." + ] + }, + { + "cell_type": "markdown", + "id": "phase1", + "metadata": {}, + "source": [ + "## Phase 1: Data Acquisition and Preparation\n", + "**(Ref: Ch. 1 – Big Data Ecosystem; Ch. 3 – Value Chain of Big Data Processing)**\n", + "\n", + "**Theory (Phase Overview):**\n", + "This initial phase is critical in any data-driven project. It corresponds to the early stages of the Big Data value chain: \"Data Acquisition\" and parts of \"Data Preparation/Preprocessing\". The goal is to obtain the raw data and transform it into a state suitable for further processing and information extraction. 
Poor quality input data (the \"Garbage In, Garbage Out\" principle) will inevitably lead to a poor quality Knowledge Graph." + ] + }, + { + "cell_type": "markdown", + "id": "step1-1-desc", + "metadata": {}, + "source": [ + "### Step 1.1: Data Acquisition\n", + "**Task:** Gather a collection of news articles.\n", + "\n", + "**Book Concept:** (Ch. 1, Figures 1 & 2; Ch. 3 - Data Acquisition stage)\n", + "This step represents the \"Data Sources\" and \"Ingestion\" components of a Big Data ecosystem. We're tapping into an existing dataset (CNN/DailyMail via Hugging Face `datasets`) rather than scraping live news, but the principle is the same: bringing external data into our processing pipeline.\n", + "\n", + "**Methodology:**\n", + "We will define a function `acquire_articles` to load the CNN/DailyMail dataset. To manage processing time and costs for this demonstration, and to focus on potentially relevant articles, this function will:\n", + "1. Load a specified split (e.g., 'train') of the dataset.\n", + "2. Optionally filter articles based on a list of keywords. For our goal of technology company acquisitions, keywords like \"acquire\", \"merger\", \"technology\", \"startup\" would be relevant. This is a simple heuristic; more advanced topic modeling or classification could be used for better filtering on larger datasets.\n", + "3. Take a small sample of the (filtered) articles.\n", + "\n", + "**Output:** A list of raw article data structures (typically dictionaries containing 'id', 'article' text, etc.)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "acquire_articles_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'acquire_articles' defined.\n" + ] + } + ], + "source": [ + "def acquire_articles(dataset_name=\"cnn_dailymail\", version=\"3.0.0\", split='train', sample_size=1000, keyword_filter=None):\n", + " \"\"\"Loads articles from the specified Hugging Face dataset, optionally filters them, and takes a sample.\"\"\"\n", + " print(f\"Attempting to load dataset: {dataset_name} (version: {version}, split: '{split}')...\")\n", + " try:\n", + " full_dataset = load_dataset(dataset_name, version, split=split, streaming=False) # Use streaming=False for easier slicing on smaller datasets\n", + " print(f\"Successfully loaded dataset. Total records in split: {len(full_dataset)}\")\n", + " except Exception as e:\n", + " print(f\"Error loading dataset {dataset_name}: {e}\")\n", + " print(\"Please ensure the dataset is available or you have internet connectivity.\")\n", + " return [] # Return empty list on failure\n", + " \n", + " raw_articles_list = []\n", + " if keyword_filter:\n", + " print(f\"Filtering articles containing any of keywords: {keyword_filter}...\")\n", + " # This is a simple keyword search. 
For very large datasets, this can be slow.\n", + " # Consider .filter() method of Hugging Face datasets for more efficiency if not streaming.\n", + " count = 0\n", + " # To avoid iterating the whole dataset if it's huge and we only need a small sample after filtering:\n", + " # We'll iterate up to a certain point or until we have enough filtered articles.\n", + " # This is a heuristic for balancing filtering with performance on potentially large datasets.\n", + " iteration_limit = min(len(full_dataset), sample_size * 20) # Look through at most 20x sample_size articles\n", + " for i in tqdm(range(iteration_limit), desc=\"Filtering articles\"):\n", + " record = full_dataset[i]\n", + " if any(keyword.lower() in record['article'].lower() for keyword in keyword_filter):\n", + " raw_articles_list.append(record)\n", + " count += 1\n", + " if count >= sample_size:\n", + " print(f\"Found {sample_size} articles matching filter criteria within {i+1} records checked.\")\n", + " break\n", + " if not raw_articles_list:\n", + " print(f\"Warning: No articles found with keywords {keyword_filter} within the first {iteration_limit} records. 
Returning an empty list.\")\n", + " return []\n", + " # If we found articles but less than sample_size, we take what we found.\n", + " # If we found more, we still only take sample_size.\n", + " raw_articles_list = raw_articles_list[:sample_size]\n", + " else:\n", + " print(f\"Taking the first {sample_size} articles without keyword filtering.\")\n", + " # Ensure sample_size does not exceed dataset length\n", + " actual_sample_size = min(sample_size, len(full_dataset))\n", + " raw_articles_list = list(full_dataset.select(range(actual_sample_size)))\n", + " \n", + " print(f\"Acquired {len(raw_articles_list)} articles.\")\n", + " return raw_articles_list\n", + "\n", + "print(\"Function 'acquire_articles' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "acquire_articles_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This cell defines the `acquire_articles` function. It will print a confirmation once the function is defined in the Python interpreter's memory." + ] + }, + { + "cell_type": "markdown", + "id": "data-acquisition-exec-desc", + "metadata": {}, + "source": [ + "#### Execute Data Acquisition\n", + "\n", + "**Theory:**\n", + "Now we call the `acquire_articles` function. We define keywords relevant to our goal (technology company acquisitions) to guide the filtering process. A `SAMPLE_SIZE` is set to keep the amount of data manageable for this demonstration. Smaller samples allow for faster iteration, especially when using LLMs which can have associated costs and latency." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "data-acquisition-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Attempting to load dataset: cnn_dailymail (version: 3.0.0, split: 'train')...\n", + "Successfully loaded dataset. 
Total records in split: 287113\n", + "Filtering articles containing any of keywords: ['acquire', 'acquisition', 'merger', 'buyout', 'purchased by', 'acquired by', 'takeover']...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a09b9f3b82bc4649af79f8216c3b5034", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Filtering articles: 0%| | 0/200 [00:00', '', text)\n", + " # Remove email addresses\n", + " text = re.sub(r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b', '[EMAIL]', text)\n", + " # Normalize whitespace: replace newlines, tabs with a single space, then multiple spaces with a single space\n", + " text = text.replace('\\n', ' ').replace('\\r', ' ').replace('\\t', ' ')\n", + " text = re.sub(r'\\s+', ' ', text).strip()\n", + " # Optional: escape quotes if LLM has issues, but usually not needed for good models\n", + " # text = text.replace('\"', \"\\\\\\\"\").replace(\"'\", \"\\\\'\") \n", + " return text\n", + "\n", + "print(\"Function 'clean_article_text' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "clean_article_text_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms that the `clean_article_text` function, which will be used to preprocess article content, has been defined." + ] + }, + { + "cell_type": "markdown", + "id": "data-cleaning-exec-desc", + "metadata": {}, + "source": [ + "#### Execute Data Cleaning\n", + "\n", + "**Theory:**\n", + "This block iterates through the `raw_data_sample` (acquired in the previous step). For each article, it calls the `clean_article_text` function. The cleaned text, along with the original article ID and potentially other useful fields like 'summary' (if available from the dataset as 'highlights'), is stored in a new list called `cleaned_articles`. This new list will be the primary input for the subsequent Information Extraction phase." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "data-cleaning-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cleaning 3 acquired articles...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "9aeade12fca241dfbf636992cddfdc44", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Cleaning articles: 0%| | 0/3 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "spacy_entity_counts = Counter() # Initialize an empty counter\n", + "\n", + "if cleaned_articles and nlp_spacy:\n", + " # Use a small, fixed sample size for spaCy analysis to keep it quick\n", + " spacy_analysis_sample_size = min(len(cleaned_articles), 20) \n", + " print(f\"Running spaCy NER on a sample of {spacy_analysis_sample_size} cleaned articles...\")\n", + " spacy_entity_counts = get_spacy_entity_counts(cleaned_articles, sample_size_spacy=spacy_analysis_sample_size)\n", + " \n", + " if spacy_entity_counts:\n", + " print(\"\\nspaCy Entity Counts (from sample):\")\n", + " for label, count in spacy_entity_counts.most_common():\n", + " print(f\" {label}: {count}\")\n", + " plot_entity_distribution(spacy_entity_counts)\n", + " else:\n", + " print(\"spaCy NER did not return any entity counts from the sample.\")\n", + "else:\n", + " print(\"Skipping spaCy entity analysis: No cleaned articles available or spaCy model not loaded.\")" + ] + }, + { + "cell_type": "markdown", + "id": "spacy-ner-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will print:\n", + "* The frequency of different entity types found by spaCy in the sample.\n", + "* A bar chart visualizing this distribution.\n", + "If prerequisites are not met, it will print a message indicating why this step was skipped." 
+ ] + }, + { + "cell_type": "markdown", + "id": "llm-call-func-def-desc", + "metadata": {}, + "source": [ + "#### Generic LLM Call Function Definition\n", + "\n", + "**Theory:**\n", + "To interact with the LLM for various tasks (entity type selection, NER, relation extraction), we define a reusable helper function `call_llm_for_response`. This function encapsulates the logic for:\n", + "* Taking a system prompt (instructions for the LLM) and a user prompt (the specific input/query).\n", + "* Making the API call to the configured LLM endpoint.\n", + "* Extracting the textual content from the LLM's response.\n", + "* Basic error handling if the LLM client is not initialized or if the API call fails.\n", + "Using a helper function promotes code reusability and makes the main logic cleaner." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "llm-call-func-def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'call_llm_for_response' defined.\n" + ] + } + ], + "source": [ + "def call_llm_for_response(system_prompt, user_prompt, model_to_use=TEXT_GEN_MODEL_NAME, temperature=0.2):\n", + " \"\"\"Generic function to call the LLM and get a response, with basic error handling.\"\"\"\n", + " if not client:\n", + " print(\"LLM client not initialized. 
Skipping LLM call.\")\n", + " return \"LLM_CLIENT_NOT_INITIALIZED\"\n", + " try:\n", + " print(f\"\\nCalling LLM (model: {model_to_use}, temperature: {temperature})...\")\n", + " # For debugging, uncomment to see prompts (can be very long)\n", + " # print(f\"System Prompt (first 200 chars): {system_prompt[:200]}...\")\n", + " # print(f\"User Prompt (first 200 chars): {user_prompt[:200]}...\")\n", + " \n", + " response = client.chat.completions.create(\n", + " model=model_to_use,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " temperature=temperature # Lower temperature for more focused/deterministic output\n", + " )\n", + " content = response.choices[0].message.content.strip()\n", + " print(\"LLM response received.\")\n", + " return content\n", + " except Exception as e:\n", + " print(f\"Error calling LLM: {e}\")\n", + " return f\"LLM_ERROR: {str(e)}\"\n", + "\n", + "print(\"Function 'call_llm_for_response' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "llm-call-func-def-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `call_llm_for_response` helper function." + ] + }, + { + "cell_type": "markdown", + "id": "step2-1-2-llm-type-selection-desc", + "metadata": {}, + "source": [ + "#### 2.1.2: Entity Type Selection using LLM - Execution\n", + "\n", + "**Theory:**\n", + "While spaCy provides a general set of entity types, not all may be relevant for our specific goal of building a KG about technology company acquisitions. For instance, `WORK_OF_ART` might be less important than `ORG` (organization) or `MONEY`. \n", + "In this step, we leverage the LLM's understanding to refine this list. \n", + "1. **System Prompt:** We craft a detailed system prompt instructing the LLM to act as an expert in KG construction for technology news. 
It's asked to select the *most relevant* entity labels from the spaCy-derived list, focusing on our domain, and to provide an explanation for each chosen type.\n", + "2. **User Prompt:** The user prompt contains the actual list of entity labels and their frequencies obtained from spaCy.\n", + "3. **LLM Call:** We use our `call_llm_for_response` function.\n", + "The LLM's output should be a comma-separated string of chosen entity types with their descriptions (e.g., `ORG (Organizations involved in acquisitions, e.g., Google, Microsoft)`). This curated list forms a more targeted schema for our subsequent LLM-based NER." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "llm-entity-type-selection-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Calling LLM (model: deepseek-ai/DeepSeek-V3, temperature: 0.2)...\n", + "LLM response received.\n", + "\n", + "LLM Suggested Entity Types for Tech Acquisitions KG:\n", + "\n", + "Final list of Entity Types to be used for NER: ORG (Company involved in acquisition, e.g., Google, Microsoft), PERSON (Key individuals like CEOs or founders, e.g., Satya Nadella), DATE (Date of acquisition announcement or closing, e.g., July 26, 2023), MONEY (Transaction value or investment, e.g., $1 billion), PRODUCT (Technology products or services involved, e.g., cloud computing platform), GPE (Geopolitical entities related to the acquisition, e.g., United States, California), CARDINAL (Numerical values relevant to the deal, e.g., number of employees transferred)\n" + ] + } + ], + "source": [ + "ENTITY_TYPE_SELECTION_SYSTEM_PROMPT = (\n", + " \"You are an expert assistant specializing in Knowledge Graph construction for technology news analysis. \"\n", + " \"You will be provided with a list of named entity labels and their frequencies, derived from news articles. 
\"\n", + " \"Your task is to select and return a comma-separated list of the MOST RELEVANT entity labels for building a Knowledge Graph focused on **technology company acquisitions**. \"\n", + " \"Prioritize labels like organizations (acquirer, acquired), financial amounts (deal value), dates (announcement/closing), key persons (CEOs, founders), and relevant technology products/services or sectors. \"\n", + " \"For EACH entity label you include in your output list, provide a concise parenthetical explanation or a clear, illustrative example. \"\n", + " \"Example: ORG (Company involved in acquisition, e.g., Google, Microsoft), MONEY (Transaction value or investment, e.g., $1 billion), DATE (Date of acquisition announcement or closing, e.g., July 26, 2023). \"\n", + " \"The output MUST be ONLY the comma-separated list of labels and their parenthetical explanations. \"\n", + " \"Do not include any introductory phrases, greetings, summaries, or any other text whatsoever outside of this formatted list.\"\n", + ")\n", + "\n", + "llm_selected_entity_types_str = \"\" # Initialize\n", + "DEFAULT_ENTITY_TYPES_STR = \"ORG (Acquiring or acquired company, e.g., TechCorp), PERSON (Key executives, e.g., CEO), MONEY (Acquisition price, e.g., $500 million), DATE (Date of acquisition announcement), PRODUCT (Key product/service involved), GPE (Location of companies, e.g., Silicon Valley)\"\n", + "\n", + "if spacy_entity_counts and client: # Proceed if we have spaCy counts and LLM client is available\n", + " # Create a string from spaCy entity counts for the prompt\n", + " spacy_labels_for_prompt = \", \".join([f\"{label} (frequency: {count})\" for label, count in spacy_entity_counts.most_common()])\n", + " user_prompt_for_types = f\"From the following entity labels and their frequencies found in news articles: [{spacy_labels_for_prompt}]. 
Please select and format the most relevant entity types for a knowledge graph about technology company acquisitions, as per the instructions.\"\n", + " \n", + " llm_selected_entity_types_str = call_llm_for_response(ENTITY_TYPE_SELECTION_SYSTEM_PROMPT, user_prompt_for_types)\n", + " \n", + " if \"LLM_CLIENT_NOT_INITIALIZED\" in llm_selected_entity_types_str or \"LLM_ERROR\" in llm_selected_entity_types_str or not llm_selected_entity_types_str.strip():\n", + " print(\"\\nLLM entity type selection failed or returned empty. Using default entity types.\")\n", + " llm_selected_entity_types_str = DEFAULT_ENTITY_TYPES_STR\n", + " else:\n", + " print(\"\\nLLM Suggested Entity Types for Tech Acquisitions KG:\")\n", + " # Post-process to ensure it's a clean list if LLM adds extra verbiage despite instructions\n", + " # This is a simple heuristic, more robust parsing might be needed for less compliant LLMs\n", + " if not re.match(r\"^([A-Z_]+ \\(.*?\\))(, [A-Z_]+ \\(.*?\\))*$\", llm_selected_entity_types_str.strip()):\n", + " print(f\"Warning: LLM output for entity types might not be in the expected strict format. Raw: '{llm_selected_entity_types_str}'\")\n", + " # Attempt a simple cleanup: take the longest line that looks like a list of terms\n", + " lines = llm_selected_entity_types_str.strip().split('\\n')\n", + " best_line = \"\"\n", + " for line in lines:\n", + " if '(' in line and ')' in line and len(line) > len(best_line):\n", + " best_line = line\n", + " if best_line:\n", + " llm_selected_entity_types_str = best_line\n", + " print(f\"Attempted cleanup: '{llm_selected_entity_types_str}'\")\n", + " else:\n", + " print(\"Cleanup failed, falling back to default entity types.\")\n", + " llm_selected_entity_types_str = DEFAULT_ENTITY_TYPES_STR\n", + "else:\n", + " print(\"\\nSkipping LLM entity type selection (spaCy counts unavailable or LLM client not initialized). 
Using default entity types.\")\n", + " llm_selected_entity_types_str = DEFAULT_ENTITY_TYPES_STR\n", + "\n", + "print(f\"\\nFinal list of Entity Types to be used for NER: {llm_selected_entity_types_str}\")" + ] + }, + { + "cell_type": "markdown", + "id": "llm-entity-type-selection-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will print:\n", + "* The comma-separated list of entity types and their descriptions as suggested by the LLM (or the default list if the LLM call fails/is skipped).\n", + "* This list will guide the next step: targeted Named Entity Recognition." + ] + }, + { + "cell_type": "markdown", + "id": "parse_llm_json_func_def_desc", + "metadata": {}, + "source": [ + "#### LLM JSON Output Parsing Function Definition\n", + "\n", + "**Theory:**\n", + "LLMs, even when prompted for specific formats like JSON, can sometimes produce output that includes extra text, markdown formatting (like ` ```json ... ``` `), or slight deviations from perfect JSON. The `parse_llm_json_output` function is a utility to robustly parse the LLM's string output into a Python list of dictionaries (representing entities or relations).\n", + "It attempts to:\n", + "1. Handle common markdown code block syntax.\n", + "2. Use `json.loads()` for parsing.\n", + "3. Include error handling for `JSONDecodeError` and provide fallback mechanisms like regex-based extraction if simple parsing fails.\n", + "This function is crucial for reliably converting LLM responses into usable structured data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "parse_llm_json_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'parse_llm_json_output' defined.\n" + ] + } + ], + "source": [ + "def parse_llm_json_output(llm_output_str):\n", + " \"\"\"Parses JSON output from LLM, handling potential markdown code blocks and common issues.\"\"\"\n", + " if not llm_output_str or \"LLM_CLIENT_NOT_INITIALIZED\" in llm_output_str or \"LLM_ERROR\" in llm_output_str:\n", + " print(\"Cannot parse LLM output: LLM did not run, errored, or output was empty.\")\n", + " return [] # Return empty list\n", + "\n", + " # Attempt to extract JSON from within markdown code blocks\n", + " match = re.search(r'```json\\s*([\\s\\S]*?)\\s*```', llm_output_str, re.IGNORECASE)\n", + " if match:\n", + " json_str = match.group(1).strip()\n", + " else:\n", + " # If no markdown block, assume the whole string is the JSON (or needs cleaning)\n", + " # LLMs sometimes add introductory text before the JSON list. Try to find the start of the list.\n", + " list_start_index = llm_output_str.find('[')\n", + " list_end_index = llm_output_str.rfind(']')\n", + " if list_start_index != -1 and list_end_index != -1 and list_start_index < list_end_index:\n", + " json_str = llm_output_str[list_start_index : list_end_index+1].strip()\n", + " else:\n", + " json_str = llm_output_str.strip() # Fallback to the whole string\n", + "\n", + " try:\n", + " parsed_data = json.loads(json_str)\n", + " if isinstance(parsed_data, list):\n", + " return parsed_data\n", + " else:\n", + " print(f\"Warning: LLM output was valid JSON but not a list (type: {type(parsed_data)}). 
Returning empty list.\")\n", + " print(f\"Problematic JSON string (or part of it): {json_str[:200]}...\")\n", + " return []\n", + " except json.JSONDecodeError as e:\n", + " print(f\"Error decoding JSON from LLM output: {e}\")\n", + " print(f\"Problematic JSON string (or part of it): {json_str[:500]}...\")\n", + " # Optional: A more aggressive regex fallback if standard parsing fails badly\n", + " # This is risky as it might grab partial JSONs. Use with caution.\n", + " # entities_found = []\n", + " # for match_obj in re.finditer(r'\\{\\s*\"text\":\\s*\".*?\",\\s*\"type\":\\s*\".*?\"\\s*\\}', json_str):\n", + " # try:\n", + " # entities_found.append(json.loads(match_obj.group(0)))\n", + " # except json.JSONDecodeError: continue # Skip malformed individual objects\n", + " # if entities_found:\n", + " # print(f\"Warning: Recovered {len(entities_found)} entities using aggressive regex due to JSON error.\")\n", + " # return entities_found\n", + " return []\n", + " except Exception as e:\n", + " print(f\"An unexpected error occurred during LLM JSON output parsing: {e}\")\n", + " return []\n", + "\n", + "print(\"Function 'parse_llm_json_output' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "parse_llm_json_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `parse_llm_json_output` utility function." + ] + }, + { + "cell_type": "markdown", + "id": "step2-1-3-llm-ner-exec-desc", + "metadata": {}, + "source": [ + "#### 2.1.3: Targeted Entity Extraction using LLM - Execution\n", + "\n", + "**Theory:**\n", + "Now, equipped with our curated list of entity types (`llm_selected_entity_types_str`), we instruct the LLM to perform NER on each (cleaned) article. \n", + "1. **System Prompt:** The system prompt for NER is carefully constructed. 
It tells the LLM:\n", + " * Its role (expert NER system for tech acquisitions).\n", + " * The specific entity types to focus on (from `llm_selected_entity_types_str`).\n", + " * The required output format: a JSON list of objects, where each object has `\"text\"` (the exact extracted entity span) and `\"type\"` (one of the specified entity types).\n", + " * An example of the desired JSON output.\n", + " * To output an empty JSON list `[]` if no relevant entities are found.\n", + "2. **User Prompt:** For each article, the user prompt is simply its `cleaned_text`.\n", + "3. **Processing Loop:** We iterate through a small sample of `cleaned_articles` (defined by `MAX_ARTICLES_FOR_LLM_NER` to manage time/cost). For each:\n", + " * The article text is (optionally truncated if too long for LLM context window).\n", + " * `call_llm_for_response` is invoked.\n", + " * `parse_llm_json_output` processes the LLM's response.\n", + " * The extracted entities are stored alongside the article data in a new list, `articles_with_entities`.\n", + "A small delay (`time.sleep`) is added between API calls to be polite to the API endpoint and avoid potential rate limiting." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "llm-ner-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting LLM NER for 3 articles...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "84d4cca6263341858b79d2b43219c17a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "LLM NER Processing: 0%| | 0/3 [00:00 3000 words for some models)\n", + " # Character count is a rough proxy for token count. Adjust as needed based on model limits.\n", + " max_text_chars = 12000 # Approx 3000 words. 
Should be safe for many models.\n", + " article_text_for_llm = article_dict['cleaned_text'][:max_text_chars]\n", + " if len(article_dict['cleaned_text']) > max_text_chars:\n", + " print(f\" Warning: Article text truncated from {len(article_dict['cleaned_text'])} to {max_text_chars} characters for LLM NER.\")\n", + "\n", + " llm_ner_raw_output = call_llm_for_response(ner_system_prompt, article_text_for_llm)\n", + " extracted_entities_list = parse_llm_json_output(llm_ner_raw_output)\n", + " \n", + " # Store results\n", + " current_article_data = article_dict.copy() # Make a copy to avoid modifying original list items directly\n", + " current_article_data['llm_entities'] = extracted_entities_list\n", + " articles_with_entities.append(current_article_data)\n", + " \n", + " print(f\" Extracted {len(extracted_entities_list)} entities for article ID {article_dict['id']}.\")\n", + " if extracted_entities_list:\n", + " # Print a sample of entities, max 3\n", + " print(f\" Sample entities: {json.dumps(extracted_entities_list[:min(3, len(extracted_entities_list))], indent=2)}\")\n", + " \n", + " if i < num_articles_to_process_ner - 1: # Avoid sleeping after the last article\n", + " time.sleep(1) # Small delay to be polite to API\n", + " \n", + " if articles_with_entities:\n", + " print(f\"\\nFinished LLM NER. 
Processed {len(articles_with_entities)} articles and stored entities.\")\n", + "else:\n", + " print(\"Skipping LLM NER: Prerequisites (cleaned articles, LLM client, or valid entity types string) not met.\")\n", + " # If NER is skipped, ensure articles_with_entities is populated with empty entity lists for subsequent steps\n", + " if cleaned_articles: # only if we have cleaned articles to begin with\n", + " num_articles_to_fallback = min(len(cleaned_articles), MAX_ARTICLES_FOR_LLM_NER)\n", + " for article_dict_fallback in cleaned_articles[:num_articles_to_fallback]:\n", + " fallback_data = article_dict_fallback.copy()\n", + " fallback_data['llm_entities'] = []\n", + " articles_with_entities.append(fallback_data)\n", + " print(f\"Populated 'articles_with_entities' with {len(articles_with_entities)} entries having empty 'llm_entities' lists.\")\n", + "\n", + "# Ensure articles_with_entities is defined\n", + "if 'articles_with_entities' not in globals():\n", + " articles_with_entities = []\n", + " print(\"Initialized 'articles_with_entities' as an empty list.\")" + ] + }, + { + "cell_type": "markdown", + "id": "llm-ner-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will show the progress of LLM-based NER:\n", + "* For each processed article: its ID, a message about truncation (if any), the number of entities extracted, and a sample of the extracted entities in JSON format.\n", + "* A final message indicating completion or why the step was skipped.\n", + "The `articles_with_entities` list now contains the original article data plus a new key `llm_entities` holding the list of entities extracted by the LLM for that article." + ] + }, + { + "cell_type": "markdown", + "id": "step2-2-desc", + "metadata": {}, + "source": [ + "### Step 2.2: Relationship Extraction\n", + "**Task:** Identify semantic relationships between extracted entities, such as acquisition events or affiliations.\n", + "\n", + "**Book Concept:** (Ch. 
2 - Relationships as edges)\n", + "Relationships define how entities are connected, forming the *edges* in our Knowledge Graph. Extracting these relationships is crucial for capturing the actual knowledge (e.g., \"Company A *acquired* Company B\", \"Acquisition *has_price* $X Million\").\n", + "\n", + "**Methodology:**\n", + "Similar to NER, we'll use the LLM for Relationship Extraction (RE). For each article:\n", + "1. **System Prompt:** We design a system prompt that instructs the LLM to act as a relationship extraction expert for technology acquisitions. It specifies:\n", + " * The desired relationship types (predicates) like `ACQUIRED`, `HAS_PRICE`, `ANNOUNCED_ON`, `ACQUIRER_IS`, `ACQUIRED_COMPANY_IS`, `INVOLVES_PRODUCT`. These are chosen to capture key aspects of an acquisition event.\n", + " * The requirement that the subject and object of a relationship must be exact text spans from the list of entities provided for that article.\n", + " * The desired output format: a JSON list of objects, each with `subject_text`, `subject_type`, `predicate`, `object_text`, and `object_type`.\n", + " * An example of the output format.\n", + "2. **User Prompt:** The user prompt for each article will contain:\n", + " * The article's `cleaned_text`.\n", + " * The list of `llm_entities` extracted in the previous step for that specific article (serialized as a JSON string within the prompt).\n", + "3. **Processing Loop:** Iterate through `articles_with_entities`. If an article has entities:\n", + " * Construct the user prompt.\n", + " * Call the LLM.\n", + " * Parse the JSON output.\n", + " * Optionally, validate that the subject/object texts in the extracted relations indeed come from the provided entity list to maintain consistency.\n", + " * Store the extracted relations in a new list, `articles_with_relations` (each item will be the article data augmented with `llm_relations`)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "relationship-extraction-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting LLM Relationship Extraction for 3 articles (that have entities)...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4a06030691d84109b131e4b83754c510", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "LLM Relationship Extraction: 0%| | 0/3 [00:00 max_text_chars_re:\n", + " print(f\" Warning: Article text truncated from {len(article_text_content)} to {max_text_chars_re} characters for LLM RE.\")\n", + " \n", + " # Serialize entities list to a JSON string for the prompt\n", + " # Ensure quotes within entity text are escaped for valid JSON string in prompt\n", + " entities_for_prompt_str = json.dumps([{'text': e['text'], 'type': e['type']} for e in entities_for_article])\n", + " \n", + " user_prompt_for_relations = (\n", + " f\"Article Text:\\n```\\n{article_text_for_llm_re}\\n```\\n\\n\" \n", + " f\"Extracted Entities (use these exact texts for subjects/objects):\\n```json\\n{entities_for_prompt_str}\\n```\\n\\n\" \n", + " f\"Identify and extract relationships between these entities based on the system instructions.\"\n", + " )\n", + " \n", + " llm_relations_raw_output = call_llm_for_response(relation_system_prompt, user_prompt_for_relations)\n", + " extracted_relations_list = parse_llm_json_output(llm_relations_raw_output)\n", + " \n", + " # Optional: Validate that subject/object texts in relations are from the provided entities\n", + " valid_relations_list = []\n", + " entity_texts_in_article = {e['text'] for e in entities_for_article}\n", + " for rel in extracted_relations_list:\n", + " # Check structure and presence of texts\n", + " if isinstance(rel, dict) and rel.get('subject_text') in entity_texts_in_article and rel.get('object_text') in entity_texts_in_article:\n", + " 
valid_relations_list.append(rel)\n", + " else:\n", + " print(f\" Warning: Discarding relation due to missing fields or mismatched entity text: {str(rel)[:100]}...\")\n", + " \n", + " current_article_output = article_data_with_ents.copy()\n", + " current_article_output['llm_relations'] = valid_relations_list\n", + " articles_with_relations.append(current_article_output)\n", + " \n", + " print(f\" Extracted {len(valid_relations_list)} valid relationships for article ID {article_data_with_ents['id']}.\")\n", + " if valid_relations_list:\n", + " print(f\" Sample relations: {json.dumps(valid_relations_list[:min(2, len(valid_relations_list))], indent=2)}\")\n", + " \n", + " if i < len(articles_with_entities) - 1:\n", + " time.sleep(1) # Small delay\n", + " \n", + " if articles_with_relations:\n", + " print(f\"\\nFinished LLM Relationship Extraction. Processed {len(articles_with_relations)} articles and stored relations.\")\n", + "else:\n", + " print(\"Skipping LLM Relationship Extraction: Prerequisites (articles with entities, LLM client) not met.\")\n", + " # If RE is skipped, populate articles_with_relations with empty relation lists\n", + " if articles_with_entities: # only if we have articles from NER step\n", + " for article_data_fallback_re in articles_with_entities:\n", + " fallback_data_re = article_data_fallback_re.copy()\n", + " fallback_data_re['llm_relations'] = []\n", + " articles_with_relations.append(fallback_data_re)\n", + " print(f\"Populated 'articles_with_relations' with {len(articles_with_relations)} entries having empty 'llm_relations' lists.\")\n", + "\n", + "# Ensure articles_with_relations is defined\n", + "if 'articles_with_relations' not in globals():\n", + " articles_with_relations = []\n", + " print(\"Initialized 'articles_with_relations' as an empty list.\")" + ] + }, + { + "cell_type": "markdown", + "id": "relationship-extraction-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will show the progress 
of LLM-based Relationship Extraction:\n", + "* For each processed article: its ID, number of relationships extracted, and a sample of these relations in JSON format.\n", + "* Warnings if any extracted relations are discarded due to validation failures.\n", + "* A final message indicating completion or why the step was skipped.\n", + "The `articles_with_relations` list now contains items that have `llm_entities` and `llm_relations`." + ] + }, + { + "cell_type": "markdown", + "id": "phase3", + "metadata": {}, + "source": [ + "## Phase 3: Knowledge Graph Construction\n", + "**(Ref: Ch. 2 – KG Layers; Ch. 4 – Mapping and Materialization)**\n", + "\n", + "**Theory (Phase Overview):**\n", + "Having extracted entities and relationships, this phase focuses on formally constructing the Knowledge Graph. This involves several key sub-tasks:\n", + "* **Entity Disambiguation & Linking:** Ensuring that different textual mentions of the same real-world entity are resolved to a single, canonical identifier (URI). This is crucial for graph coherence and data integration (Ch. 6, Ch. 8 concepts like COMET).\n", + "* **Schema/Ontology Alignment:** Mapping the extracted entity types and relationship predicates to a predefined schema or ontology (Ch. 2 - Ontology Layer; Ch. 4 - R2RML-like mapping). This provides semantic structure and enables interoperability and reasoning.\n", + "* **Triple Generation:** Converting the structured entity and relation data into Subject-Predicate-Object (S-P-O) triples, the fundamental data model of RDF-based KGs (Ch. 2, Ch. 4 - RML output).\n", + "The output of this phase is a populated `rdflib.Graph` object." + ] + }, + { + "cell_type": "markdown", + "id": "step3-1-normalize-entity-text-func-def-desc", + "metadata": {}, + "source": [ + "### Step 3.1: Entity Disambiguation & Linking (Simplified) - Normalization Function\n", + "**Task:** Resolve different mentions of the same real-world entity.\n", + "\n", + "**Book Concept:** (Ch. 
6 - Entity Resolution; Ch. 8 - Context-aware linking)\n", + "True entity disambiguation and linking (EDL) is a complex NLP task, often involving linking entities to large external KGs like Wikidata or DBpedia, or using sophisticated clustering and coreference resolution. \n", + "\n", + "**Methodology (Simplified):**\n", + "For this demonstration, we'll perform a simplified version: **text normalization**. The `normalize_entity_text` function will:\n", + "* Trim whitespace.\n", + "* For `ORG` entities, attempt to remove common corporate suffixes (e.g., \"Inc.\", \"Ltd.\", \"Corp.\") to group variations like \"Example Corp\" and \"Example Corporation\" under a common normalized form like \"Example\".\n", + "* (Optionally, one might consider lowercasing, but this can sometimes lose important distinctions, e.g., between \"IT\" the pronoun and \"IT\" the sector).\n", + "This normalized text will then be used to create a unique URI for each distinct entity." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "normalize_entity_text_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'normalize_entity_text' defined.\n" + ] + } + ], + "source": [ + "def normalize_entity_text(text_to_normalize, entity_type_str):\n", + " \"\"\"Normalizes entity text for better linking (simplified version).\"\"\"\n", + " normalized_t = text_to_normalize.strip()\n", + " \n", + " if entity_type_str == 'ORG':\n", + " # Common suffixes to remove for ORG names. 
Order can matter for nested suffixes.\n", + " # More comprehensive list would be needed for production.\n", + " suffixes = [\n", + " 'Inc.', 'Incorporated', 'Ltd.', 'Limited', 'LLC', 'L.L.C.', \n", + " 'Corp.', 'Corporation', 'PLC', 'Public Limited Company', \n", + " 'GmbH', 'AG', 'S.A.', 'S.A.S.', 'B.V.', 'Pty Ltd', 'Co.', 'Company',\n", + " 'Solutions', 'Technologies', 'Systems', 'Group', 'Holdings'\n", + " ]\n", + " # Sort suffixes by length descending to remove longer matches first (e.g., 'Pty Ltd' before 'Ltd')\n", + " suffixes.sort(key=len, reverse=True)\n", + " for suffix in suffixes:\n", + " # Case-insensitive suffix removal from the end of the string\n", + " if normalized_t.lower().endswith(suffix.lower()):\n", + " # Find the actual start of the suffix to preserve casing of the main name\n", + " suffix_start_index = normalized_t.lower().rfind(suffix.lower())\n", + " normalized_t = normalized_t[:suffix_start_index].strip()\n", + " break # Remove one suffix type, then re-evaluate if more complex logic is needed\n", + " \n", + " # Remove trailing commas or periods that might be left after suffix removal\n", + " normalized_t = re.sub(r'[-,.]*$', '', normalized_t).strip()\n", + " \n", + " # General cleaning: remove possessives like 's sometimes caught by NER\n", + " if normalized_t.endswith(\"'s\") or normalized_t.endswith(\"s'\"):\n", + " normalized_t = normalized_t[:-2].strip()\n", + " \n", + " # Consider lowercasing carefully. 
For ORGs it might be okay, for PERSONs less so.\n", + " # For this demo, we'll keep original casing for the most part after suffix stripping.\n", + " # normalized_t = normalized_t.lower() # Uncomment if aggressive normalization is desired\n", + " \n", + " return normalized_t.strip() if normalized_t else text_to_normalize # Return original if normalization results in empty\n", + "\n", + "print(\"Function 'normalize_entity_text' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "normalize_entity_text_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `normalize_entity_text` function." + ] + }, + { + "cell_type": "markdown", + "id": "entity-normalization-exec-desc", + "metadata": {}, + "source": [ + "#### Execute Entity Normalization and URI Generation\n", + "\n", + "**Theory:**\n", + "This block processes the `articles_with_relations` list. For each entity extracted by the LLM:\n", + "1. Its text is normalized using `normalize_entity_text`.\n", + "2. A unique URI (Uniform Resource Identifier) is generated for each distinct normalized entity. We use a simple scheme: `EX:_`. The `EX` is our custom namespace. This creates a canonical identifier for each unique real-world concept (as per our normalization). A `unique_entities_map` dictionary stores the mapping from `(normalized_text, entity_type)` to its URI, ensuring that the same normalized entity always gets the same URI across all articles.\n", + "3. The normalized text and the URI are added to the entity's dictionary.\n", + "The results are stored in `articles_with_normalized_entities`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "entity-normalization-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Normalizing entities and preparing for triple generation...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "7e3112e4d60f4ffea53ceea5e7230a73", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Normalizing Entities & URI Gen: 0%| | 0/3 [00:00 URI, to ensure URI consistency\n", + "\n", + "if articles_with_relations: # Process only if we have articles with relations (which implies entities)\n", + " print(\"Normalizing entities and preparing for triple generation...\")\n", + " for article_data_rel in tqdm(articles_with_relations, desc=\"Normalizing Entities & URI Gen\"):\n", + " current_article_normalized_ents = []\n", + " # Ensure 'llm_entities' key exists and is a list\n", + " if 'llm_entities' in article_data_rel and isinstance(article_data_rel['llm_entities'], list):\n", + " for entity_dict in article_data_rel['llm_entities']:\n", + " # Ensure entity_dict is a dictionary with 'text' and 'type'\n", + " if not (isinstance(entity_dict, dict) and 'text' in entity_dict and 'type' in entity_dict):\n", + " print(f\" Skipping malformed entity object: {str(entity_dict)[:100]} in article {article_data_rel['id']}\")\n", + " continue\n", + "\n", + " original_entity_text = entity_dict['text']\n", + " entity_type_val = entity_dict['type'] \n", + " # LLM might return type with description, e.g. \"ORG (Company)\". 
We need just \"ORG\".\n", + " # We'll take the first word as the type for simplicity, assuming it's the label like ORG, PERSON.\n", + " simple_entity_type = entity_type_val.split(' ')[0].upper()\n", + " entity_dict['simple_type'] = simple_entity_type # Store the simplified type\n", + " \n", + " normalized_entity_text = normalize_entity_text(original_entity_text, simple_entity_type)\n", + " if not normalized_entity_text: # if normalization resulted in empty string, use original\n", + " normalized_entity_text = original_entity_text \n", + " \n", + " # Create a unique key for the map using normalized text and simple type\n", + " entity_map_key = (normalized_entity_text, simple_entity_type)\n", + " \n", + " if entity_map_key not in unique_entities_map:\n", + " # Sanitize text for URI: replace spaces and special characters not allowed in URIs\n", + " # A more robust IRI->URI conversion might be needed for non-ASCII characters.\n", + " safe_uri_text_part = re.sub(r'[^a-zA-Z0-9_\\-]', '_', normalized_entity_text.replace(' ', '_'))\n", + " # Prevent extremely long URIs from very long entity texts (rare but possible)\n", + " safe_uri_text_part = safe_uri_text_part[:80] \n", + " if not safe_uri_text_part: # If sanitization results in empty string, use a hash or generic id\n", + " import hashlib\n", + " safe_uri_text_part = f\"entity_{hashlib.md5(normalized_entity_text.encode()).hexdigest()[:8]}\"\n", + " unique_entities_map[entity_map_key] = EX[f\"{safe_uri_text_part}_{simple_entity_type}\"]\n", + " \n", + " # Update the entity dictionary\n", + " entity_dict_copy = entity_dict.copy()\n", + " entity_dict_copy['normalized_text'] = normalized_entity_text\n", + " entity_dict_copy['uri'] = unique_entities_map[entity_map_key]\n", + " current_article_normalized_ents.append(entity_dict_copy)\n", + " \n", + " # Store results for the current article\n", + " article_data_output_norm = article_data_rel.copy()\n", + " article_data_output_norm['normalized_entities'] = 
current_article_normalized_ents\n", + " articles_with_normalized_entities.append(article_data_output_norm)\n", + " \n", + " if articles_with_normalized_entities and articles_with_normalized_entities[0].get('normalized_entities'):\n", + " print(\"\\nExample of first article's normalized entities (first 3):\")\n", + " for ent_example in articles_with_normalized_entities[0]['normalized_entities'][:3]:\n", + " print(f\" Original: '{ent_example['text']}', Type: {ent_example['type']} (Simple: {ent_example['simple_type']}), Normalized: '{ent_example['normalized_text']}', URI: <{ent_example['uri']}>\")\n", + " print(f\"\\nProcessed {len(articles_with_normalized_entities)} articles for entity normalization and URI generation.\")\n", + " print(f\"Total unique canonical entity URIs created: {len(unique_entities_map)}\")\n", + "else:\n", + " print(\"Skipping entity normalization and URI generation: No articles with relations available.\")\n", + "\n", + "# Ensure articles_with_normalized_entities is defined\n", + "if 'articles_with_normalized_entities' not in globals():\n", + " articles_with_normalized_entities = []\n", + " print(\"Initialized 'articles_with_normalized_entities' as an empty list.\")" + ] + }, + { + "cell_type": "markdown", + "id": "entity-normalization-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will show:\n", + "* Progress of the normalization and URI generation process.\n", + "* Examples of original entity text vs. their normalized versions and the generated URIs for the first few entities in the first processed article.\n", + "* The total count of unique entity URIs created across all processed articles." 
+ ] + }, + { + "cell_type": "markdown", + "id": "step3-2-rdf-type-func-def-desc", + "metadata": {}, + "source": [ + "### Step 3.2: Schema/Ontology Alignment - RDF Type Mapping Function\n", + "**Task:** Map extracted entities and relationships to a consistent schema or ontology.\n", + "\n", + "**Book Concept:** (Ch. 2 - Ontology Layer; Ch. 4 - Mapping)\n", + "Schema/Ontology alignment involves mapping our locally defined entity types (e.g., \"ORG\", \"PERSON\" from the LLM NER step) and relationship predicates to standard vocabularies (like Schema.org) or custom-defined RDF classes and properties. This adds semantic rigor and enables interoperability.\n", + "\n", + "**Methodology:**\n", + "The `get_rdf_type_for_entity` function takes our simple entity type string (e.g., \"ORG\") and maps it to an RDF Class. \n", + "* It uses a predefined dictionary (`type_mapping`) to link common types to `SCHEMA` (Schema.org) classes (e.g., `ORG` -> `SCHEMA.Organization`).\n", + "* If a type is not in the map, it defaults to creating a class within our custom `EX` namespace (e.g., `EX.CUSTOM_TYPE`).\n", + "This function ensures that each entity in our KG will be assigned a formal RDF type." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "get_rdf_type_for_entity_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'get_rdf_type_for_entity' defined.\n" + ] + } + ], + "source": [ + "def get_rdf_type_for_entity(simple_entity_type_str):\n", + " \"\"\"Maps our simple entity type string (e.g., 'ORG') to an RDF Class.\"\"\"\n", + " type_mapping = {\n", + " 'ORG': SCHEMA.Organization,\n", + " 'PERSON': SCHEMA.Person,\n", + " 'MONEY': SCHEMA.PriceSpecification, # Or a custom EX.MonetaryValue\n", + " 'DATE': SCHEMA.Date, # Note: schema.org/Date is a datatype, consider schema.org/Event for events with dates\n", + " 'PRODUCT': SCHEMA.Product,\n", + " 'GPE': SCHEMA.Place, # Geopolitical Entity\n", + " 'LOC': SCHEMA.Place, # General Location\n", + " 'EVENT': SCHEMA.Event,\n", + " # Add other mappings as derived from llm_selected_entity_types_str if needed\n", + " 'CARDINAL': RDF.Statement, # Or more specific if context known, often just a literal value\n", + " 'FAC': SCHEMA.Place # Facility\n", + " }\n", + " return type_mapping.get(simple_entity_type_str.upper(), EX[simple_entity_type_str.upper()]) # Fallback to custom type\n", + "\n", + "print(\"Function 'get_rdf_type_for_entity' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "get_rdf_type_for_entity_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `get_rdf_type_for_entity` mapping function." + ] + }, + { + "cell_type": "markdown", + "id": "step3-2-rdf-predicate-func-def-desc", + "metadata": {}, + "source": [ + "#### Schema/Ontology Alignment - RDF Predicate Mapping Function\n", + "\n", + "**Theory:**\n", + "The `get_rdf_predicate` function maps our string-based relationship predicates (e.g., \"ACQUIRED\", \"HAS_PRICE\" from the LLM RE step) to RDF Properties. 
For simplicity and custom control, these are typically mapped to properties within our `EX` namespace. The function ensures that predicate strings are converted into valid URI components (e.g., by replacing spaces with underscores)." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "get_rdf_predicate_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'get_rdf_predicate' defined.\n" + ] + } + ], + "source": [ + "def get_rdf_predicate(predicate_str_from_llm):\n", + " \"\"\"Maps our predicate string (from LLM relation extraction) to an RDF Property in our EX namespace.\"\"\"\n", + " # Predicates are already somewhat like properties (e.g., 'ACQUIRED')\n", + " # We'll ensure they are valid URI components for our custom namespace EX\n", + " # Replace spaces with underscores and ensure it's a simple, clean string\n", + " sanitized_predicate = predicate_str_from_llm.strip().replace(\" \", \"_\").upper()\n", + " return EX[sanitized_predicate]\n", + "\n", + "print(\"Function 'get_rdf_predicate' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "get_rdf_predicate_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `get_rdf_predicate` mapping function." + ] + }, + { + "cell_type": "markdown", + "id": "schema-alignment-example-desc", + "metadata": {}, + "source": [ + "#### Schema/Ontology Alignment - Examples\n", + "\n", + "**Theory:**\n", + "This block simply prints out a few examples of how our entity types and relationship predicates would be mapped to RDF terms using the functions defined above. This serves as a quick check and illustration of the mapping logic." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "schema-alignment-example-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Schema alignment functions ready. 
Example mappings:\n", + " Entity Type 'ORG' maps to RDF Class: \n", + " Predicate 'ACQUIRED' maps to RDF Property: \n", + " Entity Type 'MONEY' maps to RDF Class: \n", + " Predicate 'HAS_PRICE' maps to RDF Property: \n" + ] + } + ], + "source": [ + "print(\"Schema alignment functions ready. Example mappings:\")\n", + "example_entity_type = 'ORG'\n", + "example_predicate_str = 'ACQUIRED'\n", + "print(f\" Entity Type '{example_entity_type}' maps to RDF Class: <{get_rdf_type_for_entity(example_entity_type)}>\")\n", + "print(f\" Predicate '{example_predicate_str}' maps to RDF Property: <{get_rdf_predicate(example_predicate_str)}>\")\n", + "\n", + "example_entity_type_2 = 'MONEY'\n", + "example_predicate_str_2 = 'HAS_PRICE'\n", + "print(f\" Entity Type '{example_entity_type_2}' maps to RDF Class: <{get_rdf_type_for_entity(example_entity_type_2)}>\")\n", + "print(f\" Predicate '{example_predicate_str_2}' maps to RDF Property: <{get_rdf_predicate(example_predicate_str_2)}>\")" + ] + }, + { + "cell_type": "markdown", + "id": "schema-alignment-example-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Shows example RDF URIs that would be generated for sample entity types and predicate strings, illustrating the mapping functions." + ] + }, + { + "cell_type": "markdown", + "id": "step3-3-triple-generation-exec-desc", + "metadata": {}, + "source": [ + "### Step 3.3: Triple Generation\n", + "**Task:** Convert the structured entity and relation data into subject–predicate–object triples.\n", + "\n", + "**Book Concept:** (Ch. 2 - KG structure; Ch. 4 - RML output)\n", + "This is where the Knowledge Graph materializes. We iterate through our processed articles (`articles_with_normalized_entities`) and convert the extracted information into RDF triples using `rdflib`.\n", + "\n", + "**Methodology:**\n", + "1. **Initialize Graph:** An `rdflib.Graph` object (`kg`) is created.\n", + "2. **Bind Namespaces:** Namespaces (EX, SCHEMA, RDFS, etc.) 
are bound to prefixes for cleaner serialization of the RDF (e.g., `ex:AcmeCorp` instead of the full URI).\n", + "3. **Iterate Articles:** For each article:\n", + " * An RDF resource is created for the article itself (e.g., `ex:article_123`), typed as `schema:Article`.\n", + " * Its summary (or ID) can be added as a `schema:headline` or `rdfs:label`.\n", + "4. **Iterate Entities:** For each `normalized_entity` within an article:\n", + " * The entity's URI (from `entity['uri']`) is used as the subject.\n", + " * A triple `(entity_uri, rdf:type, rdf_entity_type)` is added, where `rdf_entity_type` comes from `get_rdf_type_for_entity()`.\n", + " * A triple `(entity_uri, rdfs:label, Literal(normalized_text))` is added to provide a human-readable label.\n", + " * If the original text differs from the normalized text, `(entity_uri, skos:altLabel, Literal(original_text))` can be added for the original mention.\n", + " * A triple `(article_uri, schema:mentions, entity_uri)` links the article to the entities it mentions.\n", + " * A local map (`entity_uri_map_for_article`) is built for the current article, mapping original entity texts to their canonical URIs. This is crucial for resolving relations in the next step, as relations were extracted based on original text spans.\n", + "5. **Iterate Relations:** For each `llm_relation` within an article:\n", + " * The URIs for the subject and object entities are looked up from `entity_uri_map_for_article` using their original text spans.\n", + " * The predicate string is converted to an RDF property using `get_rdf_predicate()`.\n", + " * If both subject and object URIs are found, the triple `(subject_uri, predicate_rdf, object_uri)` is added to the graph.\n", + "\n", + "**Output:** A populated `rdflib.Graph` (`kg`) containing all the extracted knowledge as RDF triples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "triple-generation-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating RDF triples for 3 processed articles...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "818e44fbf2b24002a0c9ff572e0266ed", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Generating Triples: 0%| | 0/3 [00:00 canonical URI\n", + " # This is important because relations were extracted based on original text spans.\n", + " entity_text_to_uri_map_current_article = {}\n", + "\n", + " # Add entity triples\n", + " for entity_obj in article_data_final.get('normalized_entities', []):\n", + " entity_uri_val = entity_obj['uri'] # This is the canonical URI from unique_entities_map\n", + " rdf_entity_type_val = get_rdf_type_for_entity(entity_obj['simple_type'])\n", + " normalized_label = entity_obj['normalized_text']\n", + " original_label = entity_obj['text']\n", + " \n", + " kg.add((entity_uri_val, RDF.type, rdf_entity_type_val))\n", + " kg.add((entity_uri_val, RDFS.label, Literal(normalized_label, lang='en')))\n", + " triples_generated_count += 2\n", + " if normalized_label != original_label:\n", + " kg.add((entity_uri_val, SKOS.altLabel, Literal(original_label, lang='en')))\n", + " triples_generated_count += 1\n", + " \n", + " # Link article to mentioned entities\n", + " kg.add((article_uri, SCHEMA.mentions, entity_uri_val))\n", + " triples_generated_count += 1\n", + " \n", + " # Populate the local map for resolving relations within this article\n", + " entity_text_to_uri_map_current_article[original_label] = entity_uri_val\n", + "\n", + " # Add relation triples\n", + " for relation_obj in article_data_final.get('llm_relations', []):\n", + " subject_orig_text = relation_obj.get('subject_text')\n", + " object_orig_text = relation_obj.get('object_text')\n", + " predicate_str = 
relation_obj.get('predicate')\n", + " \n", + " # Resolve subject and object texts to their canonical URIs using the article-specific map\n", + " subject_resolved_uri = entity_text_to_uri_map_current_article.get(subject_orig_text)\n", + " object_resolved_uri = entity_text_to_uri_map_current_article.get(object_orig_text)\n", + " \n", + " if subject_resolved_uri and object_resolved_uri and predicate_str:\n", + " predicate_rdf_prop = get_rdf_predicate(predicate_str)\n", + " kg.add((subject_resolved_uri, predicate_rdf_prop, object_resolved_uri))\n", + " triples_generated_count += 1\n", + " else:\n", + " if not subject_resolved_uri:\n", + " print(f\" Warning: Could not find URI for subject '{subject_orig_text}' in article {article_data_final['id']}. Relation skipped: {relation_obj}\")\n", + " if not object_resolved_uri:\n", + " print(f\" Warning: Could not find URI for object '{object_orig_text}' in article {article_data_final['id']}. Relation skipped: {relation_obj}\")\n", + "\n", + " print(f\"\\nFinished generating triples. 
Approximately {triples_generated_count} triples were candidates for addition.\")\n", + " print(f\"Total actual triples in the graph: {len(kg)}\")\n", + " if len(kg) > 0:\n", + " print(\"\\nSample of first 5 triples in N3 format:\")\n", + " for i, (s, p, o) in enumerate(kg):\n", + " # Use n3() for a readable representation respecting prefixes\n", + " print(f\" {s.n3(kg.namespace_manager)} {p.n3(kg.namespace_manager)} {o.n3(kg.namespace_manager)}.\")\n", + " if i >= 4: # Print first 5\n", + " break\n", + "else:\n", + " print(\"Skipping triple generation: No processed articles with normalized entities available.\")\n", + "\n", + "# Ensure kg is defined\n", + "if 'kg' not in globals():\n", + " kg = Graph()\n", + " print(\"Initialized 'kg' as an empty rdflib.Graph object.\")" + ] + }, + { + "cell_type": "markdown", + "id": "triple-generation-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will show:\n", + "* Progress of triple generation.\n", + "* The approximate number of triples considered for addition and the final total number of triples in the `kg` graph.\n", + "* Warnings if subject/object entities for a relation couldn't be resolved to URIs.\n", + "* A sample of the first few triples in N3 (Notation3) format, which is a human-readable RDF serialization." + ] + }, + { + "cell_type": "markdown", + "id": "phase4", + "metadata": {}, + "source": [ + "## Phase 4: Knowledge Graph Refinement Using Embeddings\n", + "**(Ref: Ch. 6 – Embedding-Based Reasoning; Ch. 7 – ML on KGs with SANSA)**\n", + "\n", + "**Theory (Phase Overview):**\n", + "Knowledge Graph embeddings (KGEs) learn low-dimensional vector representations for entities and relations in a KG. These embeddings capture the semantic properties of KG components and their interactions. This phase explores using such embeddings for KG refinement, a concept aligned with embedding-based reasoning (Ch. 
6).\n", + "Key tasks include:\n", + "* **Generating Embeddings:** Creating vector representations for nodes (entities).\n", + "* **Link Prediction (Knowledge Discovery):** Using embeddings to infer missing connections or predict new potential relationships (Ch. 6). This is a powerful way to discover implicit knowledge and enrich the KG.\n", + "While full KGE model training (like TransE, ComplEx, DistMult mentioned in Ch. 6) is beyond this demo's scope, we'll use pre-trained text embeddings for entity names as a proxy to demonstrate semantic similarity, a foundational concept for some link prediction approaches." + ] + }, + { + "cell_type": "markdown", + "id": "step4-1-embedding-func-def-desc", + "metadata": {}, + "source": [ + "### Step 4.1: Generate KG Embeddings - Embedding Function Definition\n", + "**Task:** Create vector representations for nodes (entities) in the graph.\n", + "\n", + "**Book Concept:** (Ch. 6 - Embeddings bridging symbolic & sub-symbolic)\n", + "Embeddings transform symbolic entities (represented by URIs and labels) into numerical vectors in a continuous vector space. This allows us to apply machine learning techniques and measure semantic similarity.\n", + "\n", + "**Methodology:**\n", + "The `get_embeddings_for_texts` function will:\n", + "* Take a list of unique entity texts (e.g., their normalized labels).\n", + "* Use the configured OpenAI/Nebius embedding API (with `EMBEDDING_MODEL_NAME`) to fetch pre-trained embeddings for these texts.\n", + "* Handle batching or individual requests as appropriate for the API.\n", + "* Return a dictionary mapping each input text to its embedding vector.\n", + "These embeddings represent the semantic meaning of the entity names." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "get_embeddings_for_texts_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'get_embeddings_for_texts' defined.\n" + ] + } + ], + "source": [ + "def get_embeddings_for_texts(texts_list, embedding_model_name=EMBEDDING_MODEL_NAME):\n", + " \"\"\"Gets embeddings for a list of texts using the specified model via the LLM client.\"\"\"\n", + " if not client:\n", + " print(\"LLM client not initialized. Skipping embedding generation.\")\n", + " return {text: [] for text in texts_list} # Return dict with empty embeddings\n", + " if not texts_list:\n", + " print(\"No texts provided for embedding generation.\")\n", + " return {}\n", + " \n", + " embeddings_map_dict = {}\n", + " print(f\"Fetching embeddings for {len(texts_list)} unique texts using model '{embedding_model_name}'...\")\n", + " \n", + " # Process texts in batches to be efficient and respect potential API limits\n", + " # Some APIs handle list inputs directly, others might require batching.\n", + " # The OpenAI client's `embeddings.create` can often take a list of inputs.\n", + " # If your specific endpoint requires single inputs or smaller batches, adjust this loop.\n", + " # For this example, assuming the client can handle a list, but will iterate if not or for safety.\n", + "\n", + " # Check if the input texts_list itself is a list of strings\n", + " if not all(isinstance(text, str) for text in texts_list):\n", + " print(\"Error: Input 'texts_list' must be a list of strings.\")\n", + " return {text: [] for text in texts_list if isinstance(text, str)} # Try to salvage what we can\n", + " \n", + " # Remove empty strings to avoid API errors\n", + " valid_texts_list = [text for text in texts_list if text.strip()]\n", + " if not valid_texts_list:\n", + " print(\"No valid (non-empty) texts to embed.\")\n", + " return {}\n", + "\n", + " try:\n", + " # Assuming the 
client.embeddings.create can take a list of inputs.\n", + " # If it can only take one string at a time, you'd loop here.\n", + " response = client.embeddings.create(\n", + " model=embedding_model_name,\n", + " input=valid_texts_list # Pass the list of valid texts\n", + " )\n", + " # The response.data should be a list of embedding objects, in the same order as input\n", + " for i, data_item in enumerate(response.data):\n", + " embeddings_map_dict[valid_texts_list[i]] = data_item.embedding\n", + " \n", + " print(f\"Embeddings received for {len(embeddings_map_dict)} texts.\")\n", + " # For texts that were empty or failed, add them with empty embeddings if needed by caller\n", + " for text in texts_list:\n", + " if text not in embeddings_map_dict:\n", + " embeddings_map_dict[text] = []\n", + " return embeddings_map_dict\n", + " \n", + " except Exception as e:\n", + " print(f\"Error getting embeddings (batch attempt): {e}\")\n", + " print(\"Falling back to individual embedding requests if batch failed...\")\n", + " # Fallback to individual requests if batching failed or is not supported by the specific client/endpoint setup\n", + " embeddings_map_dict_fallback = {}\n", + " for text_input_item in tqdm(valid_texts_list, desc=\"Generating Embeddings (Fallback Mode)\"):\n", + " try:\n", + " response_item = client.embeddings.create(\n", + " model=embedding_model_name,\n", + " input=text_input_item\n", + " )\n", + " embeddings_map_dict_fallback[text_input_item] = response_item.data[0].embedding\n", + " if len(valid_texts_list) > 10: # Only sleep if processing many items\n", + " time.sleep(0.1) # Small delay per request in fallback\n", + " except Exception as e_item:\n", + " print(f\" Error getting embedding for text '{text_input_item[:50]}...': {e_item}\")\n", + " embeddings_map_dict_fallback[text_input_item] = [] # Store empty list on error for this item\n", + " \n", + " # For texts that were empty or failed, add them with empty embeddings if needed by caller\n", + " for 
text in texts_list:\n", + " if text not in embeddings_map_dict_fallback:\n", + " embeddings_map_dict_fallback[text] = []\n", + " return embeddings_map_dict_fallback\n", + "\n", + "print(\"Function 'get_embeddings_for_texts' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "get_embeddings_for_texts_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `get_embeddings_for_texts` function." + ] + }, + { + "cell_type": "markdown", + "id": "kg-embedding-exec-desc", + "metadata": {}, + "source": [ + "#### Generate KG Embeddings - Execution\n", + "\n", + "**Theory:**\n", + "This block orchestrates the generation of embeddings for our KG entities:\n", + "1. It extracts the set of unique, normalized entity texts from our `unique_entities_map` (which maps `(normalized_text, type)` to URIs). We are interested in embedding the textual representation of entities.\n", + "2. It calls `get_embeddings_for_texts` with this list of unique texts.\n", + "3. The returned embeddings (which are mapped to texts) are then re-mapped to our canonical entity URIs, creating the `entity_embeddings` dictionary: `{entity_uri: embedding_vector}`.\n", + "This dictionary will store the vector representation for each unique entity in our graph." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "kg-embedding-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Preparing to fetch embeddings for 23 unique normalized entity texts.\n", + "Fetching embeddings for 23 unique texts using model 'BAAI/bge-multilingual-gemma2'...\n", + "Embeddings received for 23 texts.\n", + "\n", + "Successfully generated and mapped embeddings for 23 entity URIs.\n", + " Example embedding for URI (Label: 'United Nations'):\n", + " Vector (first 5 dims): [-0.00027251243591308594, 0.0006837844848632812, -0.01216888427734375, -0.01558685302734375, 0.0038166046142578125]...\n", + " Vector dimension: 3584\n" + ] + } + ], + "source": [ + "entity_embeddings = {} # Initialize: maps entity URI -> embedding vector\n", + "\n", + "if unique_entities_map and client: # Proceed if we have unique entities and LLM client\n", + " # Extract unique normalized entity texts for which we need embeddings.\n", + " # unique_entities_map maps (normalized_text, type) -> URI\n", + " # We need just the normalized_text part for embedding.\n", + " entity_normalized_texts_to_embed = list(set([key[0] for key in unique_entities_map.keys() if key[0].strip()]))\n", + " \n", + " if entity_normalized_texts_to_embed:\n", + " print(f\"Preparing to fetch embeddings for {len(entity_normalized_texts_to_embed)} unique normalized entity texts.\")\n", + " \n", + " # Get embeddings for these unique texts\n", + " text_to_embedding_map = get_embeddings_for_texts(entity_normalized_texts_to_embed)\n", + " \n", + " # Map these embeddings back to our entity URIs\n", + " for (normalized_text_key, entity_type_key), entity_uri_val_emb in unique_entities_map.items():\n", + " if normalized_text_key in text_to_embedding_map and text_to_embedding_map[normalized_text_key]:\n", + " entity_embeddings[entity_uri_val_emb] = text_to_embedding_map[normalized_text_key]\n", + " # else: # This case should be handled by 
get_embeddings_for_texts returning empty list for failed/empty texts\n", + " # print(f\" Warning: No embedding found or generated for text '{normalized_text_key}' (URI: {entity_uri_val_emb})\")\n", + " \n", + " if entity_embeddings:\n", + " print(f\"\\nSuccessfully generated and mapped embeddings for {len(entity_embeddings)} entity URIs.\")\n", + " # Show an example\n", + " first_uri_with_embedding = next(iter(entity_embeddings.keys()), None)\n", + " if first_uri_with_embedding:\n", + " emb_example = entity_embeddings[first_uri_with_embedding]\n", + " # Get label for this URI from KG\n", + " label_for_uri = kg.value(subject=first_uri_with_embedding, predicate=RDFS.label, default=str(first_uri_with_embedding))\n", + " print(f\" Example embedding for URI <{first_uri_with_embedding}> (Label: '{label_for_uri}'):\")\n", + " print(f\" Vector (first 5 dims): {str(emb_example[:5])}...\")\n", + " print(f\" Vector dimension: {len(emb_example)}\")\n", + " else:\n", + " print(\"No embeddings were successfully mapped to entity URIs.\")\n", + " else:\n", + " print(\"No unique entity texts found to generate embeddings for.\")\n", + "else:\n", + " print(\"Skipping embedding generation: No unique entities identified, or LLM client not available.\")\n", + "\n", + "# Ensure entity_embeddings is defined\n", + "if 'entity_embeddings' not in globals():\n", + " entity_embeddings = {}\n", + " print(\"Initialized 'entity_embeddings' as an empty dictionary.\")" + ] + }, + { + "cell_type": "markdown", + "id": "kg-embedding-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will show:\n", + "* Progress of fetching embeddings.\n", + "* The number of unique entity texts for which embeddings are requested.\n", + "* The number of entity URIs for which embeddings were successfully generated and mapped.\n", + "* An example of an embedding vector (first few dimensions and total length) for one of the entities, along with its URI and label." 
+ ] + }, + { + "cell_type": "markdown", + "id": "step4-2-cosine-sim-func-def-desc", + "metadata": {}, + "source": [ + "### Step 4.2: Link Prediction (Knowledge Discovery - Conceptual) - Cosine Similarity Function\n", + "**Task:** Use embeddings to infer new or missing connections.\n", + "\n", + "**Book Concept:** (Ch. 6 - Link prediction as reasoning)\n", + "Link prediction aims to identify missing edges (triples) in a KG. KGE models are trained to score potential triples (s, p, o), and high-scoring triples not already in the KG are candidate new links. \n", + "\n", + "**Methodology (Simplified):**\n", + "A full link prediction model is complex. Here, we'll demonstrate a simpler, related concept: **semantic similarity** between entities based on their name embeddings. The `get_cosine_similarity` function calculates the cosine similarity between two embedding vectors. High cosine similarity (close to 1) between entity name embeddings suggests that the entities are semantically related in terms of their textual description. This *could* hint at potential relationships (e.g., two similarly named software products might be competitors or complementary), but it's not direct link prediction for specific predicates." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "get_cosine_similarity_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'get_cosine_similarity' defined.\n" + ] + } + ], + "source": [ + "def get_cosine_similarity(embedding1, embedding2):\n", + " \"\"\"Calculates cosine similarity between two embedding vectors using sklearn.\"\"\"\n", + " if not isinstance(embedding1, (list, np.ndarray)) or not isinstance(embedding2, (list, np.ndarray)):\n", + " # print(\"Warning: One or both embeddings are not lists/arrays.\")\n", + " return 0.0\n", + " if not embedding1 or not embedding2:\n", + " # print(\"Warning: One or both embeddings are empty.\")\n", + " return 0.0\n", + " \n", + " # Ensure they are numpy arrays and 2D for cosine_similarity function\n", + " vec1 = np.array(embedding1).reshape(1, -1)\n", + " vec2 = np.array(embedding2).reshape(1, -1)\n", + " \n", + " # Check if dimensions match, though reshape should handle 1D to 2D conversion.\n", + " # Cosine similarity doesn't strictly require same length for this usage, but it's implied for valid comparison.\n", + " if vec1.shape[1] != vec2.shape[1]:\n", + " print(f\"Warning: Embedding dimensions do not match for cosine similarity: {vec1.shape[1]} vs {vec2.shape[1]}\")\n", + " return 0.0 # Or handle as an error\n", + " \n", + " return cosine_similarity(vec1, vec2)[0][0]\n", + "\n", + "print(\"Function 'get_cosine_similarity' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "get_cosine_similarity_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `get_cosine_similarity` function." + ] + }, + { + "cell_type": "markdown", + "id": "link-prediction-exec-desc", + "metadata": {}, + "source": [ + "#### Link Prediction (Conceptual) - Similarity Calculation Example\n", + "\n", + "**Theory:**\n", + "This block demonstrates the use of `get_cosine_similarity`. 
It selects a couple of entities (preferably organizations, if available with embeddings) from our `entity_embeddings` map and calculates the similarity between their name embeddings. A high similarity might suggest they operate in similar domains or have related roles, which could be a starting point for investigating potential (unobserved) connections if we had a more sophisticated link prediction model." + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "link-prediction-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Conceptual Link Prediction: Calculating semantic similarity between a sample of entities using their name embeddings.\n", + "Found at least two ORG entities with embeddings for similarity comparison.\n", + "\n", + " Similarity between 'United Nations' (<http://example.org/kg/United_Nations_ORG>) and 'CNN' (<http://example.org/kg/CNN_ORG>): 0.7679\n", + " Interpretation: These entities show moderate similarity based on their name embeddings.\n", + "\n", + "Note: This is a conceptual demonstration of semantic similarity. 
True link prediction involves training specialized KGE models (e.g., TransE, ComplEx) on existing graph triples to predict missing (subject, predicate, object) facts with specific predicates, not just general entity similarity.\n" + ] + } + ], + "source": [ + "if len(entity_embeddings) >= 2:\n", + " print(\"\\nConceptual Link Prediction: Calculating semantic similarity between a sample of entities using their name embeddings.\")\n", + " \n", + " # Get all URIs that have embeddings\n", + " uris_with_embeddings = [uri for uri, emb in entity_embeddings.items() if emb] # Check if emb is not empty\n", + " \n", + " # Try to find two ORG entities for a more meaningful comparison\n", + " org_entity_uris_with_embeddings = []\n", + " for uri_cand in uris_with_embeddings:\n", + " # Check the type from the KG\n", + " rdf_types_for_uri = list(kg.objects(subject=uri_cand, predicate=RDF.type))\n", + " if SCHEMA.Organization in rdf_types_for_uri or EX.ORG in rdf_types_for_uri:\n", + " org_entity_uris_with_embeddings.append(uri_cand)\n", + " \n", + " entity1_uri_sim = None\n", + " entity2_uri_sim = None\n", + "\n", + " if len(org_entity_uris_with_embeddings) >= 2:\n", + " entity1_uri_sim = org_entity_uris_with_embeddings[0]\n", + " entity2_uri_sim = org_entity_uris_with_embeddings[1]\n", + " print(f\"Found at least two ORG entities with embeddings for similarity comparison.\")\n", + " elif len(uris_with_embeddings) >= 2: # Fallback to any two entities if not enough ORGs\n", + " entity1_uri_sim = uris_with_embeddings[0]\n", + " entity2_uri_sim = uris_with_embeddings[1]\n", + " print(f\"Could not find two ORGs with embeddings. 
Using two generic entities for similarity comparison.\")\n", + " else:\n", + " print(\"Not enough entities (less than 2) with valid embeddings to demonstrate similarity.\")\n", + "\n", + " if entity1_uri_sim and entity2_uri_sim:\n", + " embedding1_val = entity_embeddings.get(entity1_uri_sim)\n", + " embedding2_val = entity_embeddings.get(entity2_uri_sim)\n", + " \n", + " # Retrieve labels for these URIs from the graph for context\n", + " label1_val = kg.value(subject=entity1_uri_sim, predicate=RDFS.label, default=str(entity1_uri_sim))\n", + " label2_val = kg.value(subject=entity2_uri_sim, predicate=RDFS.label, default=str(entity2_uri_sim))\n", + "\n", + " calculated_similarity = get_cosine_similarity(embedding1_val, embedding2_val)\n", + " print(f\"\\n Similarity between '{label1_val}' (<{entity1_uri_sim}>) and '{label2_val}' (<{entity2_uri_sim}>): {calculated_similarity:.4f}\")\n", + " \n", + " # Interpret similarity (example thresholds)\n", + " if calculated_similarity > 0.8:\n", + " print(f\" Interpretation: These entities are highly similar based on their name embeddings.\")\n", + " elif calculated_similarity > 0.6:\n", + " print(f\" Interpretation: These entities show moderate similarity based on their name embeddings.\")\n", + " else:\n", + " print(f\" Interpretation: These entities show low similarity based on their name embeddings.\")\n", + " \n", + " print(\"\\nNote: This is a conceptual demonstration of semantic similarity. 
True link prediction involves training specialized KGE models (e.g., TransE, ComplEx) on existing graph triples to predict missing (subject, predicate, object) facts with specific predicates, not just general entity similarity.\")\n", + "else:\n", + " print(\"Skipping conceptual link prediction: Not enough entity embeddings available (need at least 2).\")" + ] + }, + { + "cell_type": "markdown", + "id": "link-prediction-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will:\n", + "* Select two entities that have embeddings.\n", + " * Calculate and print the cosine similarity score between their embeddings.\n", + " * Provide a simple interpretation of the similarity score.\n", + " * Include a disclaimer that this is a simplified concept, not full link prediction." + ] + }, + { + "cell_type": "markdown", + "id": "step4-3-add-inferred-func-def-desc", + "metadata": {}, + "source": [ + "### Step 4.3: Add Predicted Links (Optional & Conceptual) - Function Definition\n", + "**Task:** Integrate high-confidence predicted links into the main graph.\n", + "\n", + "**Book Concept:** (Ch. 6 - KG enrichment and lifecycle)\n", + "If a link prediction model were to identify new, high-confidence relationships, these could be added to the KG, enriching it with inferred knowledge. This is part of the KG lifecycle, where the graph evolves and grows.\n", + "\n", + "**Methodology:**\n", + "The function `add_inferred_triples_to_graph` is a placeholder to illustrate this. It would take a list of (subject_uri, predicate_uri, object_uri) triples (presumably from a link prediction model) and add them to our main `rdflib.Graph`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "add_inferred_triples_to_graph_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'add_inferred_triples_to_graph' defined.\n" + ] + } + ], + "source": [ + "def add_inferred_triples_to_graph(target_graph, list_of_inferred_triples):\n", + " \"\"\"Adds a list of inferred (subject_uri, predicate_uri, object_uri) triples to the graph.\"\"\"\n", + " if not list_of_inferred_triples:\n", + " print(\"No inferred triples provided to add.\")\n", + " return target_graph, 0\n", + " \n", + " added_count = 0\n", + " for s_uri, p_uri, o_uri in list_of_inferred_triples:\n", + " # Basic validation: ensure they are URIRefs or Literals as appropriate\n", + " if isinstance(s_uri, URIRef) and isinstance(p_uri, URIRef) and (isinstance(o_uri, URIRef) or isinstance(o_uri, Literal)):\n", + " target_graph.add((s_uri, p_uri, o_uri))\n", + " added_count +=1\n", + " else:\n", + " print(f\" Warning: Skipping malformed conceptual inferred triple: ({s_uri}, {p_uri}, {o_uri})\")\n", + " \n", + " print(f\"Added {added_count} conceptually inferred triples to the graph.\")\n", + " return target_graph, added_count\n", + "\n", + "print(\"Function 'add_inferred_triples_to_graph' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "add_inferred_triples_to_graph_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `add_inferred_triples_to_graph` function." + ] + }, + { + "cell_type": "markdown", + "id": "add-predicted-links-exec-desc", + "metadata": {}, + "source": [ + "#### Add Predicted Links (Conceptual) - Execution Example\n", + "\n", + "**Theory:**\n", + "This block provides a conceptual example. Since we haven't trained a full link prediction model, we create a dummy `conceptual_inferred_triples` list. 
If this list contained actual high-confidence predictions (e.g., from a TransE model scoring `(CompanyX, ex:potentialAcquirerOf, CompanyY)` highly), the `add_inferred_triples_to_graph` function would integrate them into our `kg`. In this demo, it will likely state that no triples were added unless you manually populate the dummy list." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "add-predicted-links-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "No conceptual inferred triples generated to add in this demonstration.\n" + ] + } + ], + "source": [ + "# Conceptual: Assume we have some high-confidence predicted links from a separate model.\n", + "# For example, if entity1_uri_sim and entity2_uri_sim from the similarity check showed very high similarity,\n", + "# and we had a predicate like ex:isSemanticallySimilarTo, we might add it.\n", + "conceptual_inferred_triples_list = [] \n", + "\n", + "# Example: If we had variables entity1_uri_sim, entity2_uri_sim, and calculated_similarity from the previous step\n", + "# This is just to illustrate, these variables might not be in scope if that cell wasn't run or had no valid entities\n", + "SIMILARITY_THRESHOLD_FOR_INFERENCE = 0.85 # Example threshold\n", + "if 'entity1_uri_sim' in locals() and 'entity2_uri_sim' in locals() and 'calculated_similarity' in locals():\n", + " if entity1_uri_sim and entity2_uri_sim and calculated_similarity > SIMILARITY_THRESHOLD_FOR_INFERENCE:\n", + " print(f\"Conceptual inference: Entities '{kg.label(entity1_uri_sim)}' and '{kg.label(entity2_uri_sim)}' are highly similar ({calculated_similarity:.2f}).\")\n", + " # Let's define a conceptual predicate for this\n", + " EX.isHighlySimilarTo = EX[\"isHighlySimilarTo\"] # Define if not already\n", + " conceptual_inferred_triples_list.append((entity1_uri_sim, EX.isHighlySimilarTo, entity2_uri_sim))\n", + " # Symmetrical relationship (optional, depends on 
predicate definition)\n", + " # conceptual_inferred_triples_list.append((entity2_uri_sim, EX.isHighlySimilarTo, entity1_uri_sim))\n", + "\n", + "if conceptual_inferred_triples_list:\n", + " print(f\"\\nAttempting to add {len(conceptual_inferred_triples_list)} conceptual inferred triples...\")\n", + " kg, num_added = add_inferred_triples_to_graph(kg, conceptual_inferred_triples_list)\n", + " if num_added > 0:\n", + " print(f\"Total triples in graph after adding conceptual inferences: {len(kg)}\")\n", + "else:\n", + " print(\"\\nNo conceptual inferred triples generated to add in this demonstration.\")" + ] + }, + { + "cell_type": "markdown", + "id": "add-predicted-links-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will indicate if any conceptual inferred triples were added to the graph. If the dummy example for high similarity was triggered, it will show that these triples were added and the new total triple count." + ] + }, + { + "cell_type": "markdown", + "id": "phase5", + "metadata": {}, + "source": [ + "## Phase 5: Persistence and Utilization\n", + "**(Ref: Ch. 3 – Data Storage; Ch. 5 – Querying and Access)**\n", + "\n", + "**Theory (Phase Overview):**\n", + "Once the Knowledge Graph is constructed (and potentially refined), it needs to be stored for long-term access and utilized to derive insights. This phase covers:\n", + "* **KG Storage:** Persisting the graph. Options include RDF serialization formats (like Turtle, RDF/XML), native triple stores (e.g., Fuseki, GraphDB), or graph databases (e.g., Neo4j, if modeled appropriately) (Ch. 3).\n", + "* **Querying and Analysis:** Using query languages like SPARQL (for RDF KGs) to retrieve specific information, answer complex questions, and perform analytical tasks (Ch. 5).\n", + "* **Visualization:** Presenting parts of the KG or query results graphically for better human interpretation and understanding (Ch. 1 & 3 - Value of Visualization in Big Data)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "step5-1-save-graph-func-def-desc", + "metadata": {}, + "source": [ + "### Step 5.1: Knowledge Graph Storage - Save Function Definition\n", + "**Task:** Persist the KG in a suitable format (e.g., RDF/Turtle).\n", + "\n", + "**Book Concept:** (Ch. 3 - Data Storage options)\n", + "Serializing the KG to a file allows for persistence, sharing, and loading into other RDF-compliant tools or triple stores.\n", + "\n", + "**Methodology:**\n", + "The `save_graph_to_turtle` function uses `rdflib.Graph.serialize()` method to save the `kg` object into a file. Turtle (`.ttl`) is chosen as it's a human-readable and common RDF serialization format." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "save_graph_to_turtle_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'save_graph_to_turtle' defined.\n" + ] + } + ], + "source": [ + "def save_graph_to_turtle(graph_to_save, output_filepath=\"knowledge_graph.ttl\"):\n", + " \"\"\"Saves the RDF graph to a Turtle file.\"\"\"\n", + " if not len(graph_to_save):\n", + " print(\"Graph is empty. Nothing to save.\")\n", + " return False\n", + " try:\n", + " # Ensure the format string is correct, e.g., 'turtle', 'xml', 'n3', 'nt'\n", + " graph_to_save.serialize(destination=output_filepath, format='turtle')\n", + " print(f\"Knowledge Graph with {len(graph_to_save)} triples successfully saved to: {output_filepath}\")\n", + " return True\n", + " except Exception as e:\n", + " print(f\"Error saving graph to {output_filepath}: {e}\")\n", + " return False\n", + "\n", + "print(\"Function 'save_graph_to_turtle' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "save_graph_to_turtle_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `save_graph_to_turtle` function." 
+ ] + }, + { + "cell_type": "markdown", + "id": "kg-storage-exec-desc", + "metadata": {}, + "source": [ + "#### Knowledge Graph Storage - Execution\n", + "\n", + "**Theory:**\n", + "This block calls the `save_graph_to_turtle` function to persist our constructed `kg` to a file named `tech_acquisitions_kg.ttl`. If the graph contains triples, it will be saved; otherwise, a message indicating an empty graph will be shown." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "kg-storage-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Attempting to save the graph with 75 triples...\n", + "Knowledge Graph with 75 triples successfully saved to: tech_acquisitions_kg.ttl\n" + ] + } + ], + "source": [ + "KG_OUTPUT_FILENAME = \"tech_acquisitions_kg.ttl\"\n", + "if len(kg) > 0:\n", + " print(f\"Attempting to save the graph with {len(kg)} triples...\")\n", + " save_graph_to_turtle(kg, KG_OUTPUT_FILENAME)\n", + "else:\n", + " print(f\"Knowledge Graph ('kg') is empty. Skipping save to '{KG_OUTPUT_FILENAME}'.\")" + ] + }, + { + "cell_type": "markdown", + "id": "kg-storage-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will print a confirmation message if the graph is successfully saved, including the file path and the number of triples saved. If the graph is empty, it will state that." + ] + }, + { + "cell_type": "markdown", + "id": "step5-2-sparql-func-def-desc", + "metadata": {}, + "source": [ + "### Step 5.2: Querying and Analysis - SPARQL Execution Function\n", + "**Task:** Execute SPARQL queries to extract insights.\n", + "\n", + "**Book Concept:** (Ch. 5 - Querying and Access, SPARQL)\n", + "SPARQL (SPARQL Protocol and RDF Query Language) is the standard query language for RDF Knowledge Graphs. 
It allows for pattern matching against the graph structure to retrieve data, infer new information (through more complex queries), and answer analytical questions.\n", + "\n", + "**Methodology:**\n", + "The `execute_sparql_query` function takes our `rdflib.Graph` and a SPARQL query string. It uses `graph.query()` to execute the query and then iterates through the results, printing them in a readable format. This function will be used to run several example queries." + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "execute_sparql_query_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'execute_sparql_query' defined.\n" + ] + } + ], + "source": [ + "def execute_sparql_query(graph_to_query, query_string_sparql):\n", + " \"\"\"Executes a SPARQL query on the graph and prints results, returning them as a list of dicts.\"\"\"\n", + " if not len(graph_to_query):\n", + " print(\"Cannot execute SPARQL query: The graph is empty.\")\n", + " return []\n", + " \n", + " print(f\"\\nExecuting SPARQL Query:\\n{query_string_sparql}\")\n", + " try:\n", + " query_results = graph_to_query.query(query_string_sparql)\n", + " except Exception as e:\n", + " print(f\"Error executing SPARQL query: {e}\")\n", + " return []\n", + "\n", + " if not query_results:\n", + " print(\"Query executed successfully but returned no results.\")\n", + " return []\n", + " \n", + " results_list_of_dicts = []\n", + " print(f\"Query Results ({len(query_results)} found): \")\n", + " for row_idx, row_data in enumerate(query_results):\n", + " # Convert row to a dictionary for easier access and printing\n", + " # row_data is a ResultRow object, access by variable name from SELECT clause\n", + " result_item_dict = {}\n", + " if hasattr(row_data, 'labels'): # rdflib 6.x+ provides .labels and .asdict()\n", + " result_item_dict = {str(label): str(value) for label, value in row_data.asdict().items()}\n", + " else: # Fallback for 
older rdflib or if .asdict() is not available\n", + " # This part might need adjustment based on the actual structure of row_data in older versions\n", + " # For now, we'll assume it's an iterable of values corresponding to SELECT variables\n", + " # This requires knowing the SELECT variables' order, which is less robust\n", + " # For simplicity, if .labels is not present, we just make a list of string values\n", + " result_item_dict = {f\"col_{j}\": str(item_val) for j, item_val in enumerate(row_data)}\n", + " \n", + " results_list_of_dicts.append(result_item_dict)\n", + " \n", + " # Print a sample of results\n", + " if row_idx < 10: # Print up to 10 results\n", + " print(f\" Row {row_idx+1}: {result_item_dict}\")\n", + " elif row_idx == 10:\n", + " print(f\" ... (and {len(query_results) - 10} more results)\")\n", + " \n", + " return results_list_of_dicts\n", + "\n", + "print(\"Function 'execute_sparql_query' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "execute_sparql_query_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `execute_sparql_query` function." + ] + }, + { + "cell_type": "markdown", + "id": "sparql-querying-exec-desc", + "metadata": {}, + "source": [ + "#### SPARQL Querying and Analysis - Execution Examples\n", + "\n", + "**Theory:**\n", + "This block demonstrates the power of SPARQL by executing several example queries against our constructed KG (`kg`). Each query targets different aspects of the acquisition data:\n", + "* **Query 1: List Organizations:** Retrieves all entities explicitly typed as `schema:Organization` (or `ex:ORG`) and their labels. 
This is a basic check to see what organizations are in our KG.\n", + "* **Query 2: Find Acquisition Relationships:** Identifies pairs of companies where one acquired the other, based on our `ex:ACQUIRED` predicate. This directly extracts acquisition events.\n", + "* **Query 3: Find Acquisitions with Price:** Retrieves companies (or acquisition events) that have an associated `ex:HAS_PRICE` relationship pointing to a monetary value (`schema:PriceSpecification` or `ex:MONEY`).\n", + "The results of each query are printed, showcasing how structured queries can extract specific insights from the graph." + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "sparql-querying-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Executing Sample SPARQL Queries ---\n", + "\n", + "Executing SPARQL Query:\n", + "\n", + " PREFIX ex: \n", + " PREFIX schema: \n", + " PREFIX rdfs: \n", + " \n", + " SELECT DISTINCT ?org_uri ?org_label\n", + " WHERE {\n", + " ?org_uri a schema:Organization ;\n", + " rdfs:label ?org_label .\n", + " }\n", + " ORDER BY ?org_label\n", + " LIMIT 10\n", + " \n", + "Query Results (7 found): \n", + " Row 1: {'org_uri': 'http://example.org/kg/Algeria_Press_Agency_ORG', 'org_label': 'Algeria Press Agency'}\n", + " Row 2: {'org_uri': 'http://example.org/kg/CNN_ORG', 'org_label': 'CNN'}\n", + " Row 3: {'org_uri': 'http://example.org/kg/GSPC_ORG', 'org_label': 'GSPC'}\n", + " Row 4: {'org_uri': 'http://example.org/kg/Salafist_Group_for_Preaching_and_Combat_ORG', 'org_label': 'Salafist Group for Preaching and Combat'}\n", + " Row 5: {'org_uri': 'http://example.org/kg/UN_High_Commissioner_for_Refugees_ORG', 'org_label': 'UN High Commissioner for Refugees'}\n", + " Row 6: {'org_uri': 'http://example.org/kg/United_Nations_ORG', 'org_label': 'United Nations'}\n", + " Row 7: {'org_uri': 'http://example.org/kg/al_Qaeda_ORG', 'org_label': 'al Qaeda'}\n", + "\n", + "Executing SPARQL 
Query:\n", + "\n", + " PREFIX ex: \n", + " PREFIX rdfs: \n", + " PREFIX schema: \n", + " \n", + " SELECT ?acquiredCompanyLabel ?acquiringCompanyLabel\n", + " WHERE {\n", + " ?acquiredCompany ex:ACQUIRED ?acquiringCompany .\n", + " ?acquiredCompany rdfs:label ?acquiredCompanyLabel .\n", + " ?acquiringCompany rdfs:label ?acquiringCompanyLabel .\n", + " # Ensure both are organizations\n", + " ?acquiredCompany a schema:Organization .\n", + " ?acquiringCompany a schema:Organization .\n", + " }\n", + " LIMIT 10\n", + " \n", + "Query executed successfully but returned no results.\n", + "\n", + "Executing SPARQL Query:\n", + "\n", + " PREFIX ex: \n", + " PREFIX rdfs: \n", + " PREFIX schema: \n", + " \n", + " SELECT ?companyLabel ?priceLabel ?dateLabel\n", + " WHERE {\n", + " ?company ex:HAS_PRICE ?priceEntity .\n", + " ?company rdfs:label ?companyLabel .\n", + " ?priceEntity rdfs:label ?priceLabel .\n", + " # Ensure ?company is an ORG and ?priceEntity is a PriceSpecification (MONEY)\n", + " ?company a schema:Organization .\n", + " ?priceEntity a schema:PriceSpecification .\n", + " \n", + " # Optionally, try to find a date associated with this acquisition event/company\n", + " OPTIONAL { \n", + " ?company ex:ANNOUNCED_ON ?dateEntity .\n", + " ?dateEntity rdfs:label ?dateLabelRaw .\n", + " # If dateEntity is schema:Date, its label might be the date string directly\n", + " # If dateEntity is an Event, it might have a schema:startDate or similar\n", + " BIND(COALESCE(?dateLabelRaw, STR(?dateEntity)) As ?dateLabel) \n", + " }\n", + " }\n", + " LIMIT 10\n", + " \n", + "Query executed successfully but returned no results.\n", + "\n", + "Executing SPARQL Query:\n", + "\n", + " PREFIX ex: \n", + " PREFIX rdfs: \n", + " PREFIX schema: \n", + " \n", + " SELECT ?acquiringCompanyLabel (COUNT(?acquiredCompany) AS ?numberOfAcquisitions)\n", + " WHERE {\n", + " ?acquiredCompany ex:ACQUIRED ?acquiringCompany .\n", + " ?acquiringCompany rdfs:label ?acquiringCompanyLabel .\n", + " 
?acquiringCompany a schema:Organization .\n", + " ?acquiredCompany a schema:Organization .\n", + " }\n", + " GROUP BY ?acquiringCompanyLabel\n", + " ORDER BY DESC(?numberOfAcquisitions)\n", + " LIMIT 10\n", + " \n", + "Query Results (1 found): \n" + ] + } + ], + "source": [ + "if len(kg) > 0:\n", + " print(\"\\n--- Executing Sample SPARQL Queries ---\")\n", + " # Query 1: Find all organizations mentioned in the KG and their labels\n", + " sparql_query_1 = \"\"\"\n", + " PREFIX ex: <http://example.org/kg/>\n", + " PREFIX schema: <http://schema.org/>\n", + " PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n", + " \n", + " SELECT DISTINCT ?org_uri ?org_label\n", + " WHERE {\n", + " ?org_uri a schema:Organization ;\n", + " rdfs:label ?org_label .\n", + " }\n", + " ORDER BY ?org_label\n", + " LIMIT 10\n", + " \"\"\"\n", + " query1_results = execute_sparql_query(kg, sparql_query_1)\n", + "\n", + " # Query 2: Find companies that acquired another company, and the acquired company\n", + " # Assumes: ?acquiredCompany ex:ACQUIRED ?acquiringCompany.\n", + " sparql_query_2 = \"\"\"\n", + " PREFIX ex: <http://example.org/kg/>\n", + " PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n", + " PREFIX schema: <http://schema.org/>\n", + " \n", + " SELECT ?acquiredCompanyLabel ?acquiringCompanyLabel\n", + " WHERE {\n", + " ?acquiredCompany ex:ACQUIRED ?acquiringCompany .\n", + " ?acquiredCompany rdfs:label ?acquiredCompanyLabel .\n", + " ?acquiringCompany rdfs:label ?acquiringCompanyLabel .\n", + " # Ensure both are organizations\n", + " ?acquiredCompany a schema:Organization .\n", + " ?acquiringCompany a schema:Organization .\n", + " }\n", + " LIMIT 10\n", + " \"\"\"\n", + " query2_results = execute_sparql_query(kg, sparql_query_2)\n", + "\n", + " # Query 3: Find acquisitions (represented by a company involved) with a price mentioned\n", + " sparql_query_3 = \"\"\"\n", + " PREFIX ex: <http://example.org/kg/>\n", + " PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n", + " PREFIX schema: <http://schema.org/>\n", + " \n", + " SELECT ?companyLabel ?priceLabel ?dateLabel\n", + " WHERE {\n", + " ?company ex:HAS_PRICE ?priceEntity .\n", + " ?company rdfs:label ?companyLabel .\n", + " ?priceEntity rdfs:label ?priceLabel 
.\n", + " # Ensure ?company is an ORG and ?priceEntity is a PriceSpecification (MONEY)\n", + " ?company a schema:Organization .\n", + " ?priceEntity a schema:PriceSpecification .\n", + " \n", + " # Optionally, try to find a date associated with this acquisition event/company\n", + " OPTIONAL { \n", + " ?company ex:ANNOUNCED_ON ?dateEntity .\n", + " ?dateEntity rdfs:label ?dateLabelRaw .\n", + " # If dateEntity is schema:Date, its label might be the date string directly\n", + " # If dateEntity is an Event, it might have a schema:startDate or similar\n", + " BIND(COALESCE(?dateLabelRaw, STR(?dateEntity)) As ?dateLabel) \n", + " }\n", + " }\n", + " LIMIT 10\n", + " \"\"\"\n", + " query3_results = execute_sparql_query(kg, sparql_query_3)\n", + " \n", + " # Query 4: Count number of acquisitions per acquiring company\n", + " sparql_query_4 = \"\"\"\n", + " PREFIX ex: \n", + " PREFIX rdfs: \n", + " PREFIX schema: \n", + " \n", + " SELECT ?acquiringCompanyLabel (COUNT(?acquiredCompany) AS ?numberOfAcquisitions)\n", + " WHERE {\n", + " ?acquiredCompany ex:ACQUIRED ?acquiringCompany .\n", + " ?acquiringCompany rdfs:label ?acquiringCompanyLabel .\n", + " ?acquiringCompany a schema:Organization .\n", + " ?acquiredCompany a schema:Organization .\n", + " }\n", + " GROUP BY ?acquiringCompanyLabel\n", + " ORDER BY DESC(?numberOfAcquisitions)\n", + " LIMIT 10\n", + " \"\"\"\n", + " query4_results = execute_sparql_query(kg, sparql_query_4)\n", + "\n", + "else:\n", + " print(\"Knowledge Graph ('kg') is empty. Skipping SPARQL query execution.\")" + ] + }, + { + "cell_type": "markdown", + "id": "sparql-querying-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will print:\n", + "* Each SPARQL query string.\n", + "* The results (up to a limit) for each query, typically as a list of dictionaries where keys are the `SELECT` variables.\n", + "If the KG is empty, it will indicate that queries are skipped." 
+ ] + }, + { + "cell_type": "markdown", + "id": "step5-3-viz-func-def-desc", + "metadata": {}, + "source": [ + "### Step 5.3: Visualization (Optional) - Visualization Function Definition\n", + "**Task:** Visualize parts of the KG or results from queries for better interpretability.\n", + "\n", + "**Book Concept:** (Ch. 1 & 3 - Visualization in Big Data)\n", + "Visualizing graph structures can make complex relationships much easier to understand for humans. Interactive visualizations allow for exploration and discovery.\n", + "\n", + "**Methodology:**\n", + "The `visualize_subgraph_pyvis` function uses the `pyvis` library to create an interactive HTML-based network visualization. It:\n", + "* Takes the `rdflib.Graph` and an optional filename.\n", + " * (A more advanced version could take a central node URI and depth to explore from that node).\n", + "* For simplicity in this demo, it visualizes a sample of triples from the graph.\n", + "* Adds nodes and edges to a `pyvis.Network` object.\n", + "* Nodes are labeled with their `rdfs:label` (or a part of their URI if no label).\n", + "* Edges are labeled with the predicate name.\n", + "* Saves the visualization to an HTML file and attempts to display it inline if in a Jupyter environment." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "visualize_subgraph_pyvis_func_def", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Function 'visualize_subgraph_pyvis' defined.\n" + ] + } + ], + "source": [ + "def visualize_subgraph_pyvis(graph_to_viz, output_filename=\"kg_visualization.html\", sample_size_triples=75):\n", + " \"\"\"Visualizes a sample subgraph using pyvis and saves to HTML.\"\"\"\n", + " if not len(graph_to_viz):\n", + " print(\"Graph is empty, nothing to visualize.\")\n", + " return None\n", + "\n", + " net = Network(notebook=True, height=\"800px\", width=\"100%\", cdn_resources='remote', directed=True)\n", + " net.repulsion(node_distance=150, spring_length=200)\n", + " # net.show_buttons(filter_=['physics', 'nodes', 'edges', 'interaction'])\n", + " net.set_options(\"\"\"\n", + " var options = {\n", + " \"physics\": {\n", + " \"forceAtlas2Based\": {\n", + " \"gravitationalConstant\": -50,\n", + " \"centralGravity\": 0.01,\n", + " \"springLength\": 100,\n", + " \"springConstant\": 0.08,\n", + " \"damping\": 0.4,\n", + " \"avoidOverlap\": 0.5\n", + " },\n", + " \"maxVelocity\": 50,\n", + " \"minVelocity\": 0.1,\n", + " \"solver\": \"forceAtlas2Based\",\n", + " \"timestep\": 0.5,\n", + " \"stabilization\": {\"iterations\": 150}\n", + " }\n", + " }\n", + " \"\"\")\n", + "\n", + " added_nodes_set = set()\n", + " \n", + " # For a more meaningful visualization, focus on triples where subject and object are resources (URIs)\n", + " # And try to get a sample that includes some structure, not just attribute assignments to single nodes.\n", + " # Here, we'll take a sample of all triples for simplicity.\n", + " triples_for_visualization = list(graph_to_viz)[:min(sample_size_triples, len(graph_to_viz))]\n", + " \n", + " if not triples_for_visualization:\n", + " print(\"No triples selected from the sample for visualization.\")\n", + " return None\n", + " \n", + " print(f\"Preparing 
visualization for {len(triples_for_visualization)} sample triples...\")\n", + "\n", + " for s_uri, p_uri, o_val in tqdm(triples_for_visualization, desc=\"Building Pyvis Visualization\"):\n", + " # Get labels or use URI parts\n", + " s_label_str = str(graph_to_viz.value(subject=s_uri, predicate=RDFS.label, default=s_uri.split('/')[-1].split('#')[-1]))\n", + " p_label_str = str(p_uri.split('/')[-1].split('#')[-1])\n", + " \n", + " s_node_id = str(s_uri)\n", + " s_node_title = f\"{s_label_str}\\nURI: {s_uri}\"\n", + " s_node_group_uri = graph_to_viz.value(s_uri, RDF.type)\n", + " s_node_group = str(s_node_group_uri.split('/')[-1].split('#')[-1]) if s_node_group_uri else \"UnknownType\"\n", + "\n", + "\n", + " if s_uri not in added_nodes_set:\n", + " net.add_node(s_node_id, label=s_label_str, title=s_node_title, group=s_node_group)\n", + " added_nodes_set.add(s_uri)\n", + " \n", + " if isinstance(o_val, URIRef): # If object is a resource, add it as a node and draw an edge\n", + " o_label_str = str(graph_to_viz.value(subject=o_val, predicate=RDFS.label, default=o_val.split('/')[-1].split('#')[-1]))\n", + " o_node_id = str(o_val)\n", + " o_node_title = f\"{o_label_str}\\nURI: {o_val}\"\n", + " o_node_group_uri = graph_to_viz.value(o_val, RDF.type)\n", + " o_node_group = str(o_node_group_uri.split('/')[-1].split('#')[-1]) if o_node_group_uri else \"UnknownType\"\n", + " \n", + " if o_val not in added_nodes_set:\n", + " net.add_node(o_node_id, label=o_label_str, title=o_node_title, group=o_node_group)\n", + " added_nodes_set.add(o_val)\n", + " net.add_edge(s_node_id, o_node_id, title=p_label_str, label=p_label_str)\n", + " else: # If object is a literal, add it as a property to the subject node's title (tooltip)\n", + " # This avoids cluttering the graph with many literal nodes.\n", + " # Update subject node's title if it's already added\n", + " for node_obj in net.nodes:\n", + " if node_obj['id'] == s_node_id:\n", + " node_obj['title'] += f\"\\n{p_label_str}: 
{str(o_val)}\"\n", + " break\n", + " \n", + " try:\n", + " net.save_graph(output_filename)\n", + " print(f\"Interactive KG visualization saved to HTML file: {output_filename}\")\n", + " # In Jupyter Lab/Notebook, the graph should render inline if notebook=True was set and environment supports it.\n", + " # Sometimes, an explicit display is needed, or opening the HTML file manually.\n", + " except Exception as e:\n", + " print(f\"Error saving or attempting to show graph visualization: {e}\")\n", + " return net # Return the network object\n", + "\n", + "print(\"Function 'visualize_subgraph_pyvis' defined.\")" + ] + }, + { + "cell_type": "markdown", + "id": "visualize_subgraph_pyvis_func_def_output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "Confirms the definition of the `visualize_subgraph_pyvis` function." + ] + }, + { + "cell_type": "markdown", + "id": "visualization-exec-desc", + "metadata": {}, + "source": [ + "#### KG Visualization - Execution\n", + "\n", + "**Theory:**\n", + "This block calls `visualize_subgraph_pyvis` with our `kg`. It will generate an HTML file (e.g., `tech_acquisitions_kg_viz.html`) containing the interactive graph. If running in a compatible Jupyter environment, the visualization might also render directly in the notebook output. This allows for a visual exploration of the connections and entities within a sample of our KG." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "visualization-exec-code", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Attempting to visualize a sample of the graph with 75 triples...\n", + "Preparing visualization for 75 sample triples...\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "372adb1f2db1432cbc080375a0760437", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Building Pyvis Visualization: 0%| | 0/75 [00:00 0:\n", + " print(f\"Attempting to visualize a sample of the graph with {len(kg)} triples...\")\n", + " # Visualize a sample of up to 75 triples from the graph\n", + " pyvis_network_object = visualize_subgraph_pyvis(kg, output_filename=VIZ_OUTPUT_FILENAME, sample_size_triples=75)\n", + "else:\n", + " print(f\"Knowledge Graph ('kg') is empty. Skipping visualization.\")\n", + "\n", + "# Attempt to display inline in Jupyter (might require trusting the notebook or specific Jupyter setup)\n", + "if pyvis_network_object is not None:\n", + " try:\n", + " # This should trigger inline display in classic notebook or if properly configured in Lab\n", + " from IPython.display import HTML, display\n", + " # display(HTML(VIZ_OUTPUT_FILENAME)) # This loads from file, pyvis might render directly\n", + " # pyvis_network_object.show(VIZ_OUTPUT_FILENAME) # Alternative: opens in new tab/tries inline\n", + " print(f\"\\nTo view the visualization, open the file '{VIZ_OUTPUT_FILENAME}' in a web browser.\")\n", + " print(\"If in a Jupyter Notebook/Lab, the graph might also be rendered above this message.\")\n", + " # If pyvis_network_object is returned from a cell and notebook=True, it often renders automatically.\n", + " except Exception as e_display:\n", + " print(f\"Could not automatically display visualization inline ({e_display}). 
Please open '{VIZ_OUTPUT_FILENAME}' manually.\")\n", + " \n", + "# This cell will return the pyvis_network_object. If it's the last statement and notebook=True, \n", + "# Jupyter will attempt to render it.\n", + "if pyvis_network_object:\n", + " pyvis_network_object # This line is crucial for auto-display in some Jupyter environments" + ] + }, + { + "cell_type": "markdown", + "id": "visualization-exec-output", + "metadata": {}, + "source": [ + "**Output Explanation:**\n", + "This block will:\n", + "* Generate an HTML file (e.g., `tech_acquisitions_kg_interactive_viz.html`) with the interactive graph visualization.\n", + "* Print a message confirming the save and provide the filename.\n", + "* If in a compatible Jupyter environment, it might also render the graph directly below the cell. Otherwise, you'll need to open the HTML file manually in a browser." + ] + }, + { + "cell_type": "markdown", + "id": "conclusion", + "metadata": {}, + "source": [ + "## Conclusion and Future Work\n", + "\n", + "This notebook has demonstrated a comprehensive, albeit simplified, end-to-end pipeline for constructing a Knowledge Graph from unstructured news articles, with a focus on technology company acquisitions. We navigated through critical phases, referencing conceptual underpinnings from Big Data and Knowledge Graph literature:\n", + "\n", + "1. **Data Acquisition and Preparation:** We loaded articles from the CNN/DailyMail dataset and performed essential cleaning to prepare the text for analysis. This underscored the importance of data quality as a foundation (Ch. 1, Ch. 3).\n", + "2. **Information Extraction:** \n", + " * Named Entity Recognition (NER) was performed first exploratively with spaCy, then more targetedly using an LLM guided by a refined entity schema. This created the *nodes* of our graph (Ch. 2).\n", + " * Relationship Extraction (RE) using an LLM identified semantic connections between these entities, forming the *edges* (Ch. 2).\n", + "3. 
**Knowledge Graph Construction:** \n", + " * Entities were normalized for consistency, and unique URIs were generated, aiding in entity resolution (Ch. 6, Ch. 8).\n", + " * Extracted information was mapped to a schema (mixing custom `EX:` terms and `schema.org` terms) and materialized into RDF triples using `rdflib` (Ch. 2, Ch. 4).\n", + "4. **Knowledge Graph Refinement (Conceptual):** \n", + " * We generated text embeddings for entity names, bridging symbolic and sub-symbolic representations (Ch. 6).\n", + " * The concept of link prediction via semantic similarity was introduced, hinting at KG enrichment capabilities (Ch. 6).\n", + "5. **Persistence and Utilization:** \n", + " * The KG was persisted by serializing it to a Turtle file (Ch. 3).\n", + " * SPARQL queries were executed to retrieve structured insights, demonstrating the analytical power of KGs (Ch. 5).\n", + " * A sample subgraph was visualized, highlighting the importance of making KGs accessible (Ch. 1, Ch. 3).\n", + "\n", + "### Potential Future Enhancements:\n", + "* **Advanced Entity Disambiguation & Linking (EDL):** Implement robust EDL to link extracted entities to canonical entries in external KGs like Wikidata or DBpedia. This would greatly improve graph integration and consistency.\n", + "* **Richer Ontology/Schema:** Develop a more detailed custom ontology for technology acquisitions or align more comprehensively with existing financial or business ontologies (e.g., FIBO).\n", + "* **Sophisticated Relationship Extraction:** Explore more advanced RE techniques, including classifying a wider range of relation types, handling n-ary relations, and event extraction (modeling acquisitions as complex events with multiple participants and attributes).\n", + "* **Knowledge Graph Embedding Models:** Train dedicated KGE models (e.g., TransE, ComplEx, RotatE from Ch. 
6) on the generated triples for more accurate link prediction and KG completion.\n", + "* **Reasoning and Inference:** Implement ontological reasoning (e.g., using RDFS/OWL reasoners) to infer new facts based on the schema and asserted triples.\n", + "* **Scalability and Performance:** For larger datasets, utilize distributed processing frameworks (like Apache Spark, conceptually linked to SANSA in Ch. 7 for ML on KGs) and deploy the KG in a scalable graph database or triple store.\n", + "* **LLM Fine-tuning:** Fine-tune smaller, open-source LLMs specifically on NER and RE tasks for the technology/financial domain to potentially achieve better performance and cost-efficiency than general-purpose models for these specific tasks.\n", + "* **Temporal Dynamics:** Incorporate the temporal aspect of news data more explicitly, tracking how information and relationships evolve over time.\n", + "* **User Interface:** Develop a user-friendly interface for exploring, querying, and visualizing the KG beyond programmatic access.\n", + "\n", + "This project serves as a foundational example, illustrating how modern NLP techniques, particularly LLMs, can be integrated with traditional KG methodologies to extract and structure valuable knowledge from vast amounts of unstructured text." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-big-data", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4f1b584 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Fareed Khan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md index 85e96d6..e60a7d0 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,151 @@ -# all-rag-techniques -Applied all RAG techniques in a simpler way +# All RAG Techniques: A Simpler, Hands-On Approach ✨ + +[![Python 3.7+](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/release/python-370/) [![Nebius AI](https://img.shields.io/badge/Nebius%20AI-API-brightgreen)](https://cloud.nebius.ai/services/llm-embedding) [![OpenAI](https://img.shields.io/badge/OpenAI-API-lightgrey)](https://openai.com/) [![Medium](https://img.shields.io/badge/Medium-Blog-black?logo=medium)](https://medium.com/@fareedkhandev/testing-every-rag-technique-to-find-the-best-094d166af27f) + +This repository takes a clear, hands-on approach to **Retrieval-Augmented Generation (RAG)**, breaking down advanced techniques into straightforward, understandable implementations. Instead of relying on frameworks like `LangChain` or `FAISS`, everything here is built using familiar Python libraries `openai`, `numpy`, `matplotlib`, and a few others. + +The goal is simple: provide code that is readable, modifiable, and educational. By focusing on the fundamentals, this project helps demystify RAG and makes it easier to understand how it really works. + +## Update: 📢 + +- (12-May-2025) Added a new notebook on how to handle big data using Knowledge Graphs. +- (27-April-2025) Added a new notebook which finds best RAG technique for a given query (Simple RAG + Reranker + Query Rewrite). +- (20-Mar-2025) Added a new notebook on RAG with Reinforcement Learning. +- (07-Mar-2025) Added 20 RAG techniques to the repository. + +## 🚀 What's Inside? + +This repository contains a collection of Jupyter Notebooks, each focusing on a specific RAG technique. Each notebook provides: + +- A concise explanation of the technique. +- A step-by-step implementation from scratch. +- Clear code examples with inline comments. 
+- Evaluations and comparisons to demonstrate the technique's effectiveness. +- Visualization to visualize the results. + +Here's a glimpse of the techniques covered: + +| Notebook | Description | +| :-------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [1. Simple RAG](01_simple_rag.ipynb) | A basic RAG implementation. A great starting point! | +| [2. Semantic Chunking](02_semantic_chunking.ipynb) | Splits text based on semantic similarity for more meaningful chunks. | +| [3. Chunk Size Selector](03_chunk_size_selector.ipynb) | Explores the impact of different chunk sizes on retrieval performance. | +| [4. Context Enriched RAG](04_context_enriched_rag.ipynb) | Retrieves neighboring chunks to provide more context. | +| [5. Contextual Chunk Headers](05_contextual_chunk_headers_rag.ipynb) | Prepends descriptive headers to each chunk before embedding. | +| [6. Document Augmentation RAG](06_doc_augmentation_rag.ipynb) | Generates questions from text chunks to augment the retrieval process. | +| [7. Query Transform](07_query_transform.ipynb) | Rewrites, expands, or decomposes queries to improve retrieval. Includes **Step-back Prompting** and **Sub-query Decomposition**. | +| [8. Reranker](08_reranker.ipynb) | Re-ranks initially retrieved results using an LLM for better relevance. | +| [9. RSE](09_rse.ipynb) | Relevant Segment Extraction: Identifies and reconstructs continuous segments of text, preserving context. | +| [10. Contextual Compression](10_contextual_compression.ipynb) | Implements contextual compression to filter and compress retrieved chunks, maximizing relevant information. | +| [11. Feedback Loop RAG](11_feedback_loop_rag.ipynb) | Incorporates user feedback to learn and improve RAG system over time. | +| [12. 
Adaptive RAG](12_adaptive_rag.ipynb) | Dynamically selects the best retrieval strategy based on query type. | +| [13. Self RAG](13_self_rag.ipynb) | Implements Self-RAG, dynamically decides when and how to retrieve, evaluates relevance, and assesses support and utility. | +| [14. Proposition Chunking](14_proposition_chunking.ipynb) | Breaks down documents into atomic, factual statements for precise retrieval. | +| [15. Multimodel RAG](15_multimodel_rag.ipynb) | Combines text and images for retrieval, generating captions for images using LLaVA. | +| [16. Fusion RAG](16_fusion_rag.ipynb) | Combines vector search with keyword-based (BM25) retrieval for improved results. | +| [17. Graph RAG](17_graph_rag.ipynb) | Organizes knowledge as a graph, enabling traversal of related concepts. | +| [18. Hierarchy RAG](18_hierarchy_rag.ipynb) | Builds hierarchical indices (summaries + detailed chunks) for efficient retrieval. | +| [19. HyDE RAG](19_HyDE_rag.ipynb) | Uses Hypothetical Document Embeddings to improve semantic matching. | +| [20. CRAG](20_crag.ipynb) | Corrective RAG: Dynamically evaluates retrieval quality and uses web search as a fallback. | +| [21. Rag with RL](21_rag_with_rl.ipynb) | Maximize the reward of the RAG model using Reinforcement Learning. | +| [Best RAG Finder](best_rag_finder.ipynb) | Finds the best RAG technique for a given query using Simple RAG + Reranker + Query Rewrite. | +| [22. Big Data with Knowledge Graphs](22_Big_data_with_KG.ipynb) | Handles large datasets using Knowledge Graphs. | + +## 🗂️ Repository Structure + +``` +fareedkhan-dev-all-rag-techniques/ +├── README.md <- You are here! 
+├── 01_simple_rag.ipynb +├── 02_semantic_chunking.ipynb +├── 03_chunk_size_selector.ipynb +├── 04_context_enriched_rag.ipynb +├── 05_contextual_chunk_headers_rag.ipynb +├── 06_doc_augmentation_rag.ipynb +├── 07_query_transform.ipynb +├── 08_reranker.ipynb +├── 09_rse.ipynb +├── 10_contextual_compression.ipynb +├── 11_feedback_loop_rag.ipynb +├── 12_adaptive_rag.ipynb +├── 13_self_rag.ipynb +├── 14_proposition_chunking.ipynb +├── 15_multimodel_rag.ipynb +├── 16_fusion_rag.ipynb +├── 17_graph_rag.ipynb +├── 18_hierarchy_rag.ipynb +├── 19_HyDE_rag.ipynb +├── 20_crag.ipynb +├── 21_rag_with_rl.ipynb +├── 22_big_data_with_KG.ipynb +├── best_rag_finder.ipynb +├── requirements.txt <- Python dependencies +└── data/ + └── val.json <- Sample validation data (queries and answers) + └── AI_Information.pdf <- A sample PDF document for testing. + └── attention_is_all_you_need.pdf <- A sample PDF document for testing (for Multi-Modal RAG). +``` + +## 🛠️ Getting Started + +1. **Clone the repository:** + + ```bash + git clone https://github.com/FareedKhan-dev/all-rag-techniques.git + cd all-rag-techniques + ``` + +2. **Install dependencies:** + + ```bash + pip install -r requirements.txt + ``` + +3. **Set up your OpenAI API key:** + + - Obtain an API key from [Nebius AI](https://studio.nebius.com/). + - Set the API key as an environment variable: + + ```bash + export OPENAI_API_KEY='YOUR_NEBIUS_AI_API_KEY' + ``` + + or + + ```bash + setx OPENAI_API_KEY "YOUR_NEBIUS_AI_API_KEY" # On Windows + ``` + + or, within your Python script/notebook: + + ```python + import os + os.environ["OPENAI_API_KEY"] = "YOUR_NEBIUS_AI_API_KEY" + ``` + +4. **Run the notebooks:** + + Open any of the Jupyter Notebooks (`.ipynb` files) using Jupyter Notebook or JupyterLab. Each notebook is self-contained and can be run independently. The notebooks are designed to be executed sequentially within each file. + + **Note:** The `data/AI_Information.pdf` file provides a sample document for testing. 
You can replace it with your own PDF. The `data/val.json` file contains sample queries and ideal answers for evaluation. + The 'attention_is_all_you_need.pdf' is for testing Multi-Modal RAG Notebook. + +## 💡 Core Concepts + +- **Embeddings:** Numerical representations of text that capture semantic meaning. We use Nebius AI's embedding API and, in many notebooks, also the `BAAI/bge-en-icl` embedding model. + +- **Vector Store:** A simple database to store and search embeddings. We create our own `SimpleVectorStore` class using NumPy for efficient similarity calculations. + +- **Cosine Similarity:** A measure of similarity between two vectors. Higher values indicate greater similarity. + +- **Chunking:** Dividing text into smaller, manageable pieces. We explore various chunking strategies. + +- **Retrieval:** The process of finding the most relevant text chunks for a given query. + +- **Generation:** Using a Large Language Model (LLM) to create a response based on the retrieved context and the user's query. We use the `meta-llama/Llama-3.2-3B-Instruct` model via Nebius AI's API. + +- **Evaluation:** Assessing the quality of the RAG system's responses, often by comparing them to a reference answer or using an LLM to score relevance. + +## 🤝 Contributing + +Contributions are welcome! diff --git a/best_rag_finder.ipynb b/best_rag_finder.ipynb new file mode 100644 index 0000000..f860d72 --- /dev/null +++ b/best_rag_finder.ipynb @@ -0,0 +1,1415 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro-header-v5", + "metadata": {}, + "source": [ + "# Learning RAG: Testing Configurations Step-by-Step\n", + "## An Educational End-to-End Pipeline with Enhanced Evaluation\n", + "\n", + "This notebook is designed as a learning project to understand how different settings impact Retrieval-Augmented Generation (RAG) systems. 
We'll build and test a pipeline step-by-step using the **Nebius AI API**.\n", + "\n", + "**What we'll learn:**\n", + "* How text chunking (`chunk_size`, `chunk_overlap`) affects what the RAG system retrieves.\n", + "* How the number of retrieved documents (`top_k`) influences the context provided to the LLM.\n", + "* The difference between three common RAG strategies (Simple, Query Rewrite, Rerank).\n", + "* How to use an LLM (like Nebius AI) to automatically evaluate the quality of generated answers using multiple metrics: **Faithfulness**, **Relevancy**, and **Semantic Similarity** to a ground truth answer.\n", + "* How to combine these metrics into an average score for easier comparison.\n", + "\n", + "We'll focus on understanding *why* we perform each step and observing the outcomes clearly, with detailed explanations and commented code." + ] + }, + { + "cell_type": "markdown", + "id": "toc-v5", + "metadata": {}, + "source": [ + "### Table of Contents\n", + "1. **Setup: Installing Libraries**: Get the necessary tools.\n", + "2. **Setup: Importing Libraries**: Bring the tools into our workspace.\n", + "3. **Configuration: Setting Up Our Experiment**: Define API details, models, evaluation prompts, and parameters to test.\n", + "4. **Input Data: The Knowledge Source & Our Question**: Define the documents the RAG system will learn from and the question we'll ask.\n", + "5. **Core Component: Text Chunking Function**: Create a function to break documents into smaller pieces.\n", + "6. **Core Component: Connecting to Nebius AI**: Establish the connection to use Nebius models.\n", + "7. **Core Component: Cosine Similarity Function**: Create a function to measure semantic similarity between texts.\n", + "8. 
**The Experiment: Iterating Through Configurations**: The main loop where we test different settings.\n", + " * 8.1 Processing a Chunking Configuration (Chunk, Embed, Index)\n", + " * 8.2 Testing RAG Strategies for a `top_k` Value\n", + " * 8.3 Running & Evaluating a Single RAG Strategy (including Similarity)\n", + "9. **Analysis: Reviewing the Results**: Use Pandas to organize and display the results.\n", + "10. **Conclusion: What Did We Learn?**: Reflect on the findings and potential next steps." + ] + }, + { + "cell_type": "markdown", + "id": "setup-install-v5", + "metadata": {}, + "source": [ + "### 1. Setup: Installing Libraries\n", + "\n", + "First, we need to install the Python packages required for this notebook. \n", + "- `openai`: Interacts with the Nebius API (which uses an OpenAI-compatible interface).\n", + "- `pandas`: For creating and managing data tables (DataFrames).\n", + "- `numpy`: For numerical operations, especially with vectors (embeddings).\n", + "- `faiss-cpu`: For efficient similarity search on vectors (the retrieval part).\n", + "- `ipywidgets`, `tqdm`: For displaying progress bars in Jupyter.\n", + "- `scikit-learn`: For calculating cosine similarity." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "install-libs-v5", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Install libraries (run this cell only once if needed)\n", + "# !pip install openai pandas numpy faiss-cpu ipywidgets tqdm scikit-learn" + ] + }, + { + "cell_type": "markdown", + "id": "install-note-v5", + "metadata": {}, + "source": [ + "**Remember!** After the installation finishes, you might need to **Restart the Kernel** (or Runtime) for Jupyter/Colab to recognize the newly installed packages. Look for this option in the menu (e.g., 'Kernel' -> 'Restart Kernel...' or 'Runtime' -> 'Restart Runtime')." + ] + }, + { + "cell_type": "markdown", + "id": "setup-import-v5", + "metadata": {}, + "source": [ + "### 2. 
Setup: Importing Libraries\n", + "\n", + "With the libraries installed, we import them into our Python environment to make their functions available." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "import-code-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Libraries imported successfully!\n" + ] + } + ], + "source": [ + "import os # For accessing environment variables (like API keys)\n", + "import time # For timing operations\n", + "import re # For regular expressions (text cleaning)\n", + "import warnings # For controlling warning messages\n", + "import itertools # For creating parameter combinations easily\n", + "import getpass # For securely prompting for API keys if not set\n", + "\n", + "import numpy as np # Numerical library for vector operations\n", + "import pandas as pd # Data manipulation library for tables (DataFrames)\n", + "import faiss # Library for fast vector similarity search\n", + "from openai import OpenAI # Client library for Nebius API interaction\n", + "from tqdm.notebook import tqdm # Library for displaying progress bars\n", + "from sklearn.metrics.pairwise import cosine_similarity # For calculating similarity score\n", + "\n", + "# Configure display options for Pandas DataFrames for better readability\n", + "pd.set_option('display.max_colwidth', 150) # Show more text content in table cells\n", + "pd.set_option('display.max_rows', 100) # Display more rows in tables\n", + "warnings.filterwarnings('ignore', category=FutureWarning) # Suppress specific non-critical warnings\n", + "\n", + "print(\"Libraries imported successfully!\")" + ] + }, + { + "cell_type": "markdown", + "id": "config-params-v5", + "metadata": {}, + "source": [ + "### 3. Configuration: Setting Up Our Experiment\n", + "\n", + "Here, we define all the settings and parameters for our experiment directly as Python variables. 
This makes it easy to see and modify the configuration in one place.\n", + "\n", + "**Key Configuration Areas:**\n", + "* **Nebius API Details:** Credentials and model identifiers for connecting to Nebius AI.\n", + "* **LLM Settings:** Parameters controlling the behavior of the language model during answer generation (e.g., `temperature` for creativity).\n", + "* **Evaluation Prompts:** The specific instructions (prompts) given to the LLM when it acts as an evaluator for Faithfulness and Relevancy.\n", + "* **Tuning Parameters:** The different values for chunk size, overlap, and retrieval `top_k` that we want to systematically test.\n", + "* **Reranking Setting:** Configuration for the simulated reranking strategy." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "config-setup-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Configuration Check --- \n", + "Attempting to load Nebius API Key from environment variable 'NEBIUS_API_KEY'...\n", + "Nebius API Key loaded successfully from environment variable.\n", + "Models: Embed='BAAI/bge-multilingual-gemma2', Gen='deepseek-ai/DeepSeek-V3', Eval='deepseek-ai/DeepSeek-V3'\n", + "Chunk Sizes to Test: [150, 250]\n", + "Overlaps to Test: [30, 50]\n", + "Top-K Values to Test: [3, 5]\n", + "Generation Temp: 0.1, Max Tokens: 400\n", + "Configuration ready.\n", + "-------------------------\n" + ] + } + ], + "source": [ + "# --- NebiusAI API Configuration ---\n", + "# It's best practice to store API keys as environment variables rather than hardcoding them.\n", + "# Provide your actual key here or set it as an environment variable\n", + "NEBIUS_API_KEY = os.getenv('NEBIUS_API_KEY', None) # Load API key from environment variable\n", + "if NEBIUS_API_KEY is None:\n", + " print(\"Warning: NEBIUS_API_KEY not set. 
Please set it in your environment variables or provide it directly in the code.\") \n", + "NEBIUS_BASE_URL = \"https://api.studio.nebius.com/v1/\" \n", + "NEBIUS_EMBEDDING_MODEL = \"BAAI/bge-multilingual-gemma2\" # Model for converting text to vector embeddings\n", + "NEBIUS_GENERATION_MODEL = \"deepseek-ai/DeepSeek-V3\" # LLM for generating the final answers\n", + "NEBIUS_EVALUATION_MODEL = \"deepseek-ai/DeepSeek-V3\" # LLM used for evaluating the generated answers\n", + "\n", + "# --- Text Generation Parameters (for RAG answer generation) ---\n", + "GENERATION_TEMPERATURE = 0.1 # Lower values (e.g., 0.1-0.3) make output more focused and deterministic, good for fact-based answers.\n", + "GENERATION_MAX_TOKENS = 400 # Maximum number of tokens (roughly words/sub-words) in the generated answer.\n", + "GENERATION_TOP_P = 0.9 # Nucleus sampling parameter (alternative to temperature, usually fine at default).\n", + "\n", + "# --- Evaluation Prompts (Instructions for the Evaluator LLM) ---\n", + "# Faithfulness: Does the answer stay true to the provided context?\n", + "FAITHFULNESS_PROMPT = \"\"\"\n", + "System: You are an objective evaluator. 
Evaluate the faithfulness of the AI Response compared to the True Answer, considering only the information present in the True Answer as the ground truth.\n", + "Faithfulness measures how accurately the AI response reflects the information in the True Answer, without adding unsupported facts or contradicting it.\n", + "Score STRICTLY using a float between 0.0 and 1.0, based on this scale:\n", + "- 0.0: Completely unfaithful, contradicts or fabricates information.\n", + "- 0.1-0.4: Low faithfulness with significant inaccuracies or unsupported claims.\n", + "- 0.5-0.6: Partially faithful but with noticeable inaccuracies or omissions.\n", + "- 0.7-0.8: Mostly faithful with only minor inaccuracies or phrasing differences.\n", + "- 0.9: Very faithful, slight wording differences but semantically aligned.\n", + "- 1.0: Completely faithful, accurately reflects the True Answer.\n", + "Respond ONLY with the numerical score.\n", + "\n", + "User:\n", + "Query: {question}\n", + "AI Response: {response}\n", + "True Answer: {true_answer}\n", + "Score:\"\"\"\n", + "\n", + "# Relevancy: Does the answer directly address the user's query?\n", + "RELEVANCY_PROMPT = \"\"\"\n", + "System: You are an objective evaluator. 
Evaluate the relevance of the AI Response to the specific User Query.\n", + "Relevancy measures how well the response directly answers the user's question, avoiding unnecessary or off-topic information.\n", + "Score STRICTLY using a float between 0.0 and 1.0, based on this scale:\n", + "- 0.0: Not relevant at all.\n", + "- 0.1-0.4: Low relevance, addresses a different topic or misses the core question.\n", + "- 0.5-0.6: Partially relevant, answers only a part of the query or is tangentially related.\n", + "- 0.7-0.8: Mostly relevant, addresses the main aspects of the query but might include minor irrelevant details.\n", + "- 0.9: Highly relevant, directly answers the query with minimal extra information.\n", + "- 1.0: Completely relevant, directly and fully answers the exact question asked.\n", + "Respond ONLY with the numerical score.\n", + "\n", + "User:\n", + "Query: {question}\n", + "AI Response: {response}\n", + "Score:\"\"\"\n", + "\n", + "# --- Parameters to Tune (The experimental variables) ---\n", + "CHUNK_SIZES_TO_TEST = [150, 250] # List of chunk sizes (in words) to experiment with.\n", + "CHUNK_OVERLAPS_TO_TEST = [30, 50] # List of chunk overlaps (in words) to experiment with.\n", + "RETRIEVAL_TOP_K_TO_TEST = [3, 5] # List of 'k' values (number of chunks to retrieve) to test.\n", + "\n", + "# --- Reranking Configuration (Only used for the Rerank strategy) ---\n", + "RERANK_RETRIEVAL_MULTIPLIER = 3 # For simulated reranking: retrieve K * multiplier chunks initially.\n", + "\n", + "# --- Validate API Key --- \n", + "print(\"--- Configuration Check --- \")\n", + "print(f\"Attempting to load Nebius API Key from environment variable 'NEBIUS_API_KEY'...\")\n", + "if not NEBIUS_API_KEY:\n", + " print(\"Nebius API Key not found in environment variables.\")\n", + " # Prompt the user securely if the key is not found.\n", + " NEBIUS_API_KEY = getpass.getpass(\"Please enter your Nebius API Key: \")\n", + "else:\n", + " print(\"Nebius API Key loaded successfully 
from environment variable.\")\n", + "\n", + "# Print a summary of key settings for verification\n", + "print(f\"Models: Embed='{NEBIUS_EMBEDDING_MODEL}', Gen='{NEBIUS_GENERATION_MODEL}', Eval='{NEBIUS_EVALUATION_MODEL}'\")\n", + "print(f\"Chunk Sizes to Test: {CHUNK_SIZES_TO_TEST}\")\n", + "print(f\"Overlaps to Test: {CHUNK_OVERLAPS_TO_TEST}\")\n", + "print(f\"Top-K Values to Test: {RETRIEVAL_TOP_K_TO_TEST}\")\n", + "print(f\"Generation Temp: {GENERATION_TEMPERATURE}, Max Tokens: {GENERATION_MAX_TOKENS}\")\n", + "print(\"Configuration ready.\")\n", + "print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "input-data-v5", + "metadata": {}, + "source": [ + "### 4. Input Data: The Knowledge Source & Our Question\n", + "\n", + "Every RAG system needs a knowledge base to draw information from. Here, we define:\n", + "* `corpus_texts`: A list of strings, where each string is a document containing information (in this case, about renewable energy sources).\n", + "* `test_query`: The specific question we want the RAG system to answer using the `corpus_texts`.\n", + "* `true_answer_for_query`: A carefully crafted 'ground truth' answer based *only* on the information available in `corpus_texts`. This is essential for evaluating Faithfulness and Semantic Similarity accurately." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "corpus-def-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded 5 documents into our corpus.\n", + "Test Query: 'Compare the consistency and environmental impact of solar power versus hydropower.'\n", + "Reference (True) Answer for evaluation: 'Solar power's consistency varies with weather and time of day, requiring storage like batteries. 
Hydropower is generally reliable, but large dams have significant environmental impacts on ecosystems and communities, unlike solar power's primary impact being land use for panels.'\n", + "Input data is ready.\n", + "-------------------------\n" + ] + } + ], + "source": [ + "# Our knowledge base: A list of text documents about renewable energy\n", + "corpus_texts = [\n", + " \"Solar power uses PV panels or CSP systems. PV converts sunlight directly to electricity. CSP uses mirrors to heat fluid driving a turbine. It's clean but varies with weather/time. Storage (batteries) is key for consistency.\", # Doc 0\n", + " \"Wind energy uses turbines in wind farms. It's sustainable with low operating costs. Wind speed varies, siting can be challenging (visual/noise). Offshore wind is stronger and more consistent.\", # Doc 1\n", + " \"Hydropower uses moving water, often via dams spinning turbines. Reliable, large-scale power with flood control/water storage benefits. Big dams harm ecosystems and displace communities. Run-of-river is smaller, less disruptive.\", # Doc 2\n", + " \"Geothermal energy uses Earth's heat via steam/hot water for turbines. Consistent 24/7 power, small footprint. High initial drilling costs, sites are geographically limited.\", # Doc 3\n", + " \"Biomass energy from organic matter (wood, crops, waste). Burned directly or converted to biofuels. Uses waste, provides dispatchable power. Requires sustainable sourcing. Combustion releases emissions (carbon-neutral if balanced by regrowth).\" # Doc 4\n", + "]\n", + "\n", + "# The question we will ask the RAG system\n", + "test_query = \"Compare the consistency and environmental impact of solar power versus hydropower.\"\n", + "\n", + "# !!! CRITICAL: The 'True Answer' MUST be derivable ONLY from the corpus_texts above !!!\n", + "# This is our ground truth for evaluation.\n", + "true_answer_for_query = \"Solar power's consistency varies with weather and time of day, requiring storage like batteries. 
Hydropower is generally reliable, but large dams have significant environmental impacts on ecosystems and communities, unlike solar power's primary impact being land use for panels.\"\n", + "\n", + "print(f\"Loaded {len(corpus_texts)} documents into our corpus.\")\n", + "print(f\"Test Query: '{test_query}'\")\n", + "print(f\"Reference (True) Answer for evaluation: '{true_answer_for_query}'\")\n", + "print(\"Input data is ready.\")\n", + "print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "chunking-func-md-v5", + "metadata": {}, + "source": [ + "### 5. Core Component: Text Chunking Function\n", + "\n", + "LLMs and embedding models have limits on the amount of text they can process at once. Furthermore, retrieval works best when searching over smaller, focused pieces of text rather than entire large documents. \n", + "\n", + "**Chunking** is the process of splitting large documents into smaller, potentially overlapping, segments.\n", + "\n", + "- **`chunk_size`**: Determines the approximate size (here, in words) of each chunk.\n", + "- **`chunk_overlap`**: Specifies how many words from the end of one chunk should also be included at the beginning of the next chunk. This helps prevent relevant information from being lost if it spans across the boundary between two chunks.\n", + "\n", + "We define a function `chunk_text` to perform this splitting based on word counts." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "chunking-func-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Defining the 'chunk_text' function.\n", + "Test chunking on first doc (size=150 words, overlap=30 words): Created 1 chunks.\n", + "First sample chunk:\n", + "'Solar power uses PV panels or CSP systems. PV converts sunlight directly to electricity. CSP uses mirrors to heat fluid driving a turbine. It's clean but varies with weather/time. 
Storage (batteries) is key for consistency.'\n", + "-------------------------\n" + ] + } + ], + "source": [ + "def chunk_text(text, chunk_size, chunk_overlap):\n", + " \"\"\"Splits a single text document into overlapping chunks based on word count.\n", + "\n", + " Args:\n", + " text (str): The input text to be chunked.\n", + " chunk_size (int): The target number of words per chunk.\n", + " chunk_overlap (int): The number of words to overlap between consecutive chunks.\n", + "\n", + " Returns:\n", + " list[str]: A list of text chunks.\n", + " \"\"\"\n", + " words = text.split() # Split the text into a list of individual words\n", + " total_words = len(words) # Calculate the total number of words in the text\n", + " chunks = [] # Initialize an empty list to store the generated chunks\n", + " start_index = 0 # Initialize the starting word index for the first chunk\n", + "\n", + " # --- Input Validation ---\n", + " # Ensure chunk_size is a positive integer.\n", + " if not isinstance(chunk_size, int) or chunk_size <= 0:\n", + " print(f\" Warning: Invalid chunk_size ({chunk_size}). Must be a positive integer. Returning the whole text as one chunk.\")\n", + " return [text]\n", + " # Ensure chunk_overlap is a non-negative integer smaller than chunk_size.\n", + " if not isinstance(chunk_overlap, int) or chunk_overlap < 0:\n", + " print(f\" Warning: Invalid chunk_overlap ({chunk_overlap}). Must be a non-negative integer. Setting overlap to 0.\")\n", + " chunk_overlap = 0\n", + " if chunk_overlap >= chunk_size:\n", + " # If overlap is too large, adjust it to a reasonable fraction (e.g., 1/3) of chunk_size\n", + " # This prevents infinite loops or nonsensical chunking.\n", + " adjusted_overlap = chunk_size // 3\n", + " print(f\" Warning: chunk_overlap ({chunk_overlap}) >= chunk_size ({chunk_size}). 
Adjusting overlap to {adjusted_overlap}.\")\n", + " chunk_overlap = adjusted_overlap\n", + "\n", + " # --- Chunking Loop ---\n", + " # Continue chunking as long as the start_index is within the bounds of the text\n", + " while start_index < total_words:\n", + " # Determine the end index for the current chunk.\n", + " # It's the minimum of (start + chunk_size) and the total number of words.\n", + " end_index = min(start_index + chunk_size, total_words)\n", + " \n", + " # Extract the words for the current chunk and join them back into a single string.\n", + " current_chunk_text = \" \".join(words[start_index:end_index])\n", + " chunks.append(current_chunk_text) # Add the generated chunk to the list\n", + " \n", + " # Calculate the starting index for the *next* chunk.\n", + " # Move forward by (chunk_size - chunk_overlap) words.\n", + " next_start_index = start_index + chunk_size - chunk_overlap\n", + " \n", + " # --- Safety Checks ---\n", + " # Check 1: Prevent infinite loops if overlap causes no progress.\n", + " # This can happen if chunk_size is very small or overlap is very large relative to chunk_size.\n", + " if next_start_index <= start_index:\n", + " if end_index == total_words: # If we are already at the end, we can safely break.\n", + " break\n", + " else: \n", + " # Force progress by moving forward by at least one word.\n", + " print(f\" Warning: Chunking logic stuck (start={start_index}, next_start={next_start_index}). 
Forcing progress.\")\n", + " next_start_index = start_index + 1 \n", + " \n", + " # Check 2: If the calculated next start index is already at or beyond the total number of words, we are done.\n", + " if next_start_index >= total_words:\n", + " break\n", + " \n", + " # Move the start_index to the calculated position for the next iteration.\n", + " start_index = next_start_index\n", + " \n", + " return chunks # Return the complete list of text chunks\n", + "\n", + "# --- Quick Test ---\n", + "# Test the function with the first document and sample parameters.\n", + "print(\"Defining the 'chunk_text' function.\")\n", + "sample_chunk_size = 150\n", + "sample_overlap = 30\n", + "sample_chunks = chunk_text(corpus_texts[0], sample_chunk_size, sample_overlap) \n", + "print(f\"Test chunking on first doc (size={sample_chunk_size} words, overlap={sample_overlap} words): Created {len(sample_chunks)} chunks.\")\n", + "if sample_chunks: # Only print if chunks were created\n", + " print(f\"First sample chunk:\\n'{sample_chunks[0]}'\")\n", + "print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "client-setup-md-v5", + "metadata": {}, + "source": [ + "### 6. Core Component: Connecting to Nebius AI\n", + "\n", + "To use the Nebius AI models (for embedding, generation, evaluation), we need to establish a connection to their API. We use the `openai` Python library, which provides a convenient way to interact with OpenAI-compatible APIs like Nebius.\n", + "\n", + "We instantiate an `OpenAI` client object, providing our API key and the specific Nebius API endpoint URL." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "client-setup-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Attempting to initialize the Nebius AI client...\n", + "Nebius AI client initialized successfully. 
Ready to make API calls.\n", + "Client setup step complete.\n", + "-------------------------\n" + ] + } + ], + "source": [ + "client = None # Initialize client variable to None globally\n", + "\n", + "print(\"Attempting to initialize the Nebius AI client...\")\n", + "try:\n", + " # Check if the API key is actually available before creating the client\n", + " if not NEBIUS_API_KEY:\n", + " raise ValueError(\"Nebius API Key is missing. Cannot initialize client.\")\n", + " \n", + " # Create the OpenAI client object, configured for the Nebius API.\n", + " client = OpenAI(\n", + " api_key=NEBIUS_API_KEY, # Pass the API key loaded earlier\n", + " base_url=NEBIUS_BASE_URL # Specify the Nebius API endpoint\n", + " )\n", + " \n", + " # Optional: Add a quick test call to verify the client connection,\n", + " # e.g., listing models (if supported and desired). This might incur costs.\n", + " # try:\n", + " # client.models.list() \n", + " # print(\"Client connection verified by listing models.\")\n", + " # except Exception as test_e:\n", + " # print(f\"Warning: Could not verify client connection with test call: {test_e}\")\n", + " \n", + " print(\"Nebius AI client initialized successfully. Ready to make API calls.\")\n", + " \n", + "except Exception as e:\n", + " # Catch any errors during client initialization (e.g., invalid key, network issues)\n", + " print(f\"Error initializing Nebius AI client: {e}\")\n", + " print(\"!!! Execution cannot proceed without a valid client. Please check your API key and network connection. !!!\")\n", + " # Setting client back to None to prevent further attempts if initialization failed\n", + " client = None \n", + "\n", + "print(\"Client setup step complete.\")\n", + "print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "similarity-func-md-v5", + "metadata": {}, + "source": [ + "### 7. 
Core Component: Cosine Similarity Function\n", + "\n", + "To evaluate how semantically similar the generated answer is to our ground truth answer, we use **Cosine Similarity**. This metric measures the cosine of the angle between two vectors (in our case, the embedding vectors of the two answers).\n", + "\n", + "- A score of **1** means the vectors point in the same direction (maximum similarity).\n", + "- A score of **0** means the vectors are orthogonal (no similarity).\n", + "- A score of **-1** means the vectors point in opposite directions (maximum dissimilarity).\n", + "\n", + "For text embeddings, scores typically range from 0 to 1, where higher values indicate greater semantic similarity.\n", + "\n", + "We define a function `calculate_cosine_similarity` that takes two text strings, generates their embeddings using the Nebius client, and returns their cosine similarity score." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "similarity-func-v5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Defining the 'calculate_cosine_similarity' function.\n", + "Testing similarity function: Similarity between 'apple' and 'orange' = 0.77\n", + "-------------------------\n" + ] + } + ], + "source": [ + "def calculate_cosine_similarity(text1, text2, client, embedding_model):\n", + " \"\"\"Calculates cosine similarity between the embeddings of two texts.\n", + "\n", + " Args:\n", + " text1 (str): The first text string.\n", + " text2 (str): The second text string.\n", + " client (OpenAI): The initialized Nebius AI client.\n", + " embedding_model (str): The name of the embedding model to use.\n", + "\n", + " Returns:\n", + " float: The cosine similarity score (between 0.0 and 1.0), or 0.0 if an error occurs.\n", + " \"\"\"\n", + " if not client:\n", + " print(\" Error: Nebius client not available for similarity calculation.\")\n", + " return 0.0\n", + " if not text1 or not text2:\n", + " # Handle cases 
where one or both texts might be empty or None\n", + " return 0.0\n", + " \n", + " try:\n", + " # Generate embeddings for both texts in a single API call if possible\n", + " response = client.embeddings.create(model=embedding_model, input=[text1, text2])\n", + " \n", + " # Extract the embedding vectors\n", + " embedding1 = np.array(response.data[0].embedding)\n", + " embedding2 = np.array(response.data[1].embedding)\n", + " \n", + " # Reshape vectors to be 2D arrays as expected by cosine_similarity\n", + " embedding1 = embedding1.reshape(1, -1)\n", + " embedding2 = embedding2.reshape(1, -1)\n", + " \n", + " # Calculate cosine similarity using scikit-learn\n", + " # cosine_similarity returns a 2D array, e.g., [[similarity]], so we extract the value.\n", + " similarity_score = cosine_similarity(embedding1, embedding2)[0][0]\n", + " \n", + " # Clamp the score between 0.0 and 1.0 for safety/consistency\n", + " return max(0.0, min(1.0, similarity_score))\n", + " \n", + " except Exception as e:\n", + " print(f\" Error calculating cosine similarity: {e}\")\n", + " return 0.0 # Return 0.0 in case of any API or calculation errors\n", + "\n", + "# --- Quick Test ---\n", + "print(\"Defining the 'calculate_cosine_similarity' function.\")\n", + "if client: # Only run test if client is initialized\n", + " test_sim = calculate_cosine_similarity(\"apple\", \"orange\", client, NEBIUS_EMBEDDING_MODEL)\n", + " print(f\"Testing similarity function: Similarity between 'apple' and 'orange' = {test_sim:.2f}\")\n", + "else:\n", + " print(\"Skipping similarity function test as Nebius client is not initialized.\")\n", + "print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "main-loop-md-v5", + "metadata": {}, + "source": [ + "### 8. The Experiment: Iterating Through Configurations\n", + "\n", + "This section contains the main experimental loop. 
We will systematically iterate through all combinations of the tuning parameters we defined earlier (`CHUNK_SIZES_TO_TEST`, `CHUNK_OVERLAPS_TO_TEST`, `RETRIEVAL_TOP_K_TO_TEST`).\n", + "\n", + "**Workflow for Each Parameter Combination:**\n", + "\n", + "1. **Prepare Data (Chunking/Embedding/Indexing - Step 8.1):**\n", + " * **Check if Re-computation Needed:** If the `chunk_size` or `chunk_overlap` has changed from the previous iteration, we need to re-process the corpus.\n", + " * **Chunking:** Split all documents in `corpus_texts` using the current `chunk_size` and `chunk_overlap` via the `chunk_text` function.\n", + " * **Embedding:** Convert each text chunk into a numerical vector (embedding) using the specified Nebius embedding model (`NEBIUS_EMBEDDING_MODEL`). We do this in batches for efficiency.\n", + " * **Indexing:** Build a FAISS index (`IndexFlatL2`) from the generated embeddings. FAISS allows for very fast searching to find the chunks whose embeddings are most similar to the query embedding.\n", + " * *Optimization:* If chunk settings haven't changed, we reuse the existing chunks, embeddings, and index from the previous iteration to save time and API calls.\n", + "\n", + "2. **Test RAG Strategies (Step 8.2):**\n", + " * For the current `top_k` value, run each of the defined RAG strategies:\n", + " * **Simple RAG:** Retrieve `top_k` chunks based on similarity to the original query.\n", + " * **Query Rewrite RAG:** First, ask the LLM to rewrite the original query to be potentially better for vector search. Then, retrieve `top_k` chunks based on similarity to the *rewritten* query.\n", + " * **Rerank RAG (Simulated):** Retrieve more chunks initially (`top_k * RERANK_RETRIEVAL_MULTIPLIER`). Then, *simulate* reranking by simply taking the top `top_k` results from this larger initial set. (A real implementation would use a more sophisticated reranking model).\n", + "\n", + "3. 
**Evaluate & Store Results (Step 8.3 within `run_and_evaluate`):**\n", + " * For each strategy run:\n", + " * **Retrieve:** Find the relevant chunk indices using the FAISS index.\n", + " * **Generate:** Construct a prompt containing the retrieved chunk(s) as context and the *original* `test_query`. Send this to the Nebius generation model (`NEBIUS_GENERATION_MODEL`) to get the final answer.\n", + " * **Evaluate (Faithfulness):** Use the LLM evaluator (`NEBIUS_EVALUATION_MODEL`) with the `FAITHFULNESS_PROMPT` to score how well the generated answer aligns with the `true_answer_for_query`.\n", + " * **Evaluate (Relevancy):** Use the LLM evaluator with the `RELEVANCY_PROMPT` to score how well the generated answer addresses the `test_query`.\n", + " * **Evaluate (Similarity):** Use our `calculate_cosine_similarity` function to get the semantic similarity score between the generated answer and the `true_answer_for_query`.\n", + " * **Calculate Average Score:** Compute the average of Faithfulness, Relevancy, and Similarity scores.\n", + " * **Record:** Store all parameters (`chunk_size`, `overlap`, `top_k`, `strategy`), the retrieved indices, the rewritten query (if applicable), the generated answer, the individual scores, the average score, and the execution time for this specific run.\n", + "\n", + "We use `tqdm` to display a progress bar for the outer loop iterating through parameter combinations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "main-loop-exec-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Starting RAG Experiment Loop ===\n", + "\n", + "Total parameter combinations to test: 8\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "29485f798215461b910fc4eb4c8546d7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Testing Configurations: 0%| | 0/8 [00:00 ntotal)\n", + " retrieved_indices_all = indices[0]\n", + " valid_indices = retrieved_indices_all[retrieved_indices_all != -1].tolist()\n", + " \n", + " # 4. Apply simulated reranking (if applicable)\n", + " # In this simulation, we just take the top 'k_retrieve' results from the initially retrieved set.\n", + " # A real reranker would re-score these 'k_for_search' candidates based on relevance to the query.\n", + " if use_simulated_rerank:\n", + " final_indices = valid_indices[:k_retrieve]\n", + " # print(f\" Rerank: Selected top {len(final_indices)} indices after simulated rerank.\") # Uncomment for verbose logging\n", + " else:\n", + " final_indices = valid_indices # Use all valid retrieved indices up to k_retrieve\n", + " \n", + " result['retrieved_indices'] = final_indices\n", + " \n", + " # 5. Get the actual text chunks corresponding to the final indices\n", + " retrieved_chunks = [current_chunks[i] for i in final_indices]\n", + " \n", + " # Handle case where no chunks were retrieved (should be rare with valid indices)\n", + " if not retrieved_chunks:\n", + " print(f\" Warning: No relevant chunks found for {strategy_name} (C={chunk_size}, O={chunk_overlap}, K={k_retrieve}). 
Setting answer to indicate this.\")\n", + " result['answer'] = \"No relevant context found in the documents based on the query.\"\n", + " # Keep scores at 0.0 as no answer was generated from context\n", + " else:\n", + " # --- Generation Step --- \n", + " # Combine the retrieved chunks into a single context string\n", + " context_str = \"\\n\\n\".join(retrieved_chunks)\n", + " \n", + " # Define the system prompt for the generation LLM\n", + " sys_prompt_gen = \"You are a helpful AI assistant. Answer the user's query based strictly on the provided context. If the context doesn't contain the answer, state that clearly. Be concise.\"\n", + " \n", + " # Construct the user prompt including the context and the *original* query\n", + " # It's important to use the original query here for generating the final answer, even if a rewritten query was used for retrieval.\n", + " user_prompt_gen = f\"Context:\\n------\\n{context_str}\\n------\\n\\nQuery: {test_query}\\n\\nAnswer:\"\n", + " \n", + " # Make the API call to the Nebius generation model\n", + " gen_response = client.chat.completions.create(\n", + " model=NEBIUS_GENERATION_MODEL, \n", + " messages=[\n", + " {\"role\": \"system\", \"content\": sys_prompt_gen},\n", + " {\"role\": \"user\", \"content\": user_prompt_gen}\n", + " ],\n", + " temperature=GENERATION_TEMPERATURE,\n", + " max_tokens=GENERATION_MAX_TOKENS,\n", + " top_p=GENERATION_TOP_P\n", + " )\n", + " # Extract the generated text answer\n", + " generated_answer = gen_response.choices[0].message.content.strip()\n", + " result['answer'] = generated_answer\n", + " # Optional: print a snippet of the generated answer\n", + " # print(f\" Generated Answer: {generated_answer[:100].replace('\\n', ' ')}...\") \n", + "\n", + " # --- Evaluation Step --- \n", + " # Evaluate the generated answer using Faithfulness, Relevancy, and Similarity\n", + " # print(f\" Evaluating answer... 
(Faithfulness, Relevancy, Similarity)\") # Uncomment for verbose logging\n", + " \n", + " # Prepare parameters for evaluation calls (use low temperature for deterministic scoring)\n", + " eval_params = {'model': NEBIUS_EVALUATION_MODEL, 'temperature': 0.0, 'max_tokens': 10}\n", + " \n", + " # 1. Faithfulness Evaluation Call\n", + " prompt_f = FAITHFULNESS_PROMPT.format(question=test_query, response=generated_answer, true_answer=true_answer_for_query)\n", + " try:\n", + " resp_f = client.chat.completions.create(messages=[{\"role\": \"user\", \"content\": prompt_f}], **eval_params)\n", + " # Attempt to parse the score, clamp between 0.0 and 1.0\n", + " result['faithfulness'] = max(0.0, min(1.0, float(resp_f.choices[0].message.content.strip())))\n", + " except Exception as eval_e:\n", + " print(f\" Warning: Faithfulness score parsing error for {strategy_name} - {eval_e}. Score set to 0.0\")\n", + " result['faithfulness'] = 0.0\n", + "\n", + " # 2. Relevancy Evaluation Call\n", + " prompt_r = RELEVANCY_PROMPT.format(question=test_query, response=generated_answer)\n", + " try:\n", + " resp_r = client.chat.completions.create(messages=[{\"role\": \"user\", \"content\": prompt_r}], **eval_params)\n", + " # Attempt to parse the score, clamp between 0.0 and 1.0\n", + " result['relevancy'] = max(0.0, min(1.0, float(resp_r.choices[0].message.content.strip())))\n", + " except Exception as eval_e:\n", + " print(f\" Warning: Relevancy score parsing error for {strategy_name} - {eval_e}. Score set to 0.0\")\n", + " result['relevancy'] = 0.0\n", + " \n", + " # 3. Similarity Score Calculation\n", + " result['similarity_score'] = calculate_cosine_similarity(\n", + " generated_answer, \n", + " true_answer_for_query, \n", + " client, \n", + " NEBIUS_EMBEDDING_MODEL\n", + " )\n", + " \n", + " # 4. 
Calculate Average Score (Faithfulness, Relevancy, Similarity)\n", + " result['avg_score'] = (result['faithfulness'] + result['relevancy'] + result['similarity_score']) / 3.0\n", + " \n", + " except Exception as e:\n", + " # Catch any unexpected errors during the retrieve/generate/evaluate process\n", + " error_message = f\"ERROR during {strategy_name} (C={chunk_size}, O={chunk_overlap}, K={k_retrieve}): {str(e)[:200]}...\"\n", + " print(f\" {error_message}\")\n", + " result['answer'] = error_message # Store the error in the answer field\n", + " # Ensure scores remain at their default error state (0.0)\n", + " result['faithfulness'] = 0.0\n", + " result['relevancy'] = 0.0\n", + " result['similarity_score'] = 0.0\n", + " result['avg_score'] = 0.0\n", + " \n", + " # Record the total time taken for this run\n", + " run_end_time = time.time()\n", + " result['time_sec'] = run_end_time - run_start_time\n", + " \n", + " # Print a summary line for this run (useful for monitoring progress)\n", + " print(f\" Finished: {strategy_name} (C={chunk_size}, O={chunk_overlap}, K={k_retrieve}). AvgScore={result['avg_score']:.2f}, Time={result['time_sec']:.2f}s\")\n", + " return result\n", + " # --- End of run_and_evaluate nested function ---\n", + "\n", + " # --- Execute the RAG Strategies using the run_and_evaluate function --- \n", + " \n", + " # Strategy 1: Simple RAG (Use original query for retrieval)\n", + " result_simple = run_and_evaluate(\"Simple RAG\", test_query, top_k)\n", + " all_results.append(result_simple)\n", + "\n", + " # Strategy 2: Query Rewrite RAG \n", + " rewritten_q = test_query # Default to original query if rewrite fails\n", + " try:\n", + " # print(\" Attempting query rewrite for Rewrite RAG...\") # Uncomment for verbose logging\n", + " # Define prompts for the query rewriting task\n", + " sys_prompt_rw = \"You are an expert query optimizer. Rewrite the user's query to be ideal for vector database retrieval. Focus on key entities, concepts, and relationships. 
Remove conversational fluff. Output ONLY the rewritten query text.\"\n", + " user_prompt_rw = f\"Original Query: {test_query}\\n\\nRewritten Query:\"\n", + " \n", + " # Call the LLM to rewrite the query\n", + " resp_rw = client.chat.completions.create(\n", + " model=NEBIUS_GENERATION_MODEL, # Can use the generation model for this task too\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": sys_prompt_rw},\n", + " {\"role\": \"user\", \"content\": user_prompt_rw}\n", + " ],\n", + " temperature=0.1, # Low temp for focused rewrite\n", + " max_tokens=100, \n", + " top_p=0.9\n", + " )\n", + " # Clean up the LLM's response to get just the query text\n", + " candidate_q = resp_rw.choices[0].message.content.strip()\n", + " # Remove potential prefixes like \"Rewritten Query:\" or \"Query:\"\n", + " candidate_q = re.sub(r'^(rewritten query:|query:)\\s*', '', candidate_q, flags=re.IGNORECASE).strip('\"')\n", + " \n", + " # Use the rewritten query only if it's reasonably different and not too short\n", + " if candidate_q and len(candidate_q) > 5 and candidate_q.lower() != test_query.lower(): \n", + " rewritten_q = candidate_q\n", + " # print(f\" Using rewritten query: '{rewritten_q}'\") # Uncomment for verbose logging\n", + " # else: \n", + " # print(\" Rewrite failed, too short, or same as original. Using original query.\") # Uncomment for verbose logging\n", + " except Exception as e:\n", + " print(f\" Warning: Error during query rewrite: {e}. 
Using original query.\")\n", + " rewritten_q = test_query # Fallback to original query on error\n", + " \n", + " # Run evaluation using the (potentially) rewritten query for retrieval\n", + " result_rewrite = run_and_evaluate(\"Query Rewrite RAG\", rewritten_q, top_k)\n", + " all_results.append(result_rewrite)\n", + "\n", + " # Strategy 3: Rerank RAG (Simulated)\n", + " # Use original query for retrieval, but simulate the reranking process\n", + " result_rerank = run_and_evaluate(\"Rerank RAG (Simulated)\", test_query, top_k, use_simulated_rerank=True)\n", + " all_results.append(result_rerank)\n", + "\n", + " print(\"\\n=== RAG Experiment Loop Finished ===\")\n", + " print(\"-\" * 25)" + ] + }, + { + "cell_type": "markdown", + "id": "analysis-v5", + "metadata": {}, + "source": [ + "### 9. Analysis: Reviewing the Results\n", + "\n", + "Now that the experiment loop has completed and `all_results` contains the data from each run, we'll use the Pandas library to analyze the findings.\n", + "\n", + "1. **Create DataFrame:** Convert the list of result dictionaries (`all_results`) into a Pandas DataFrame for easy manipulation and viewing.\n", + "2. **Sort Results:** Sort the DataFrame by the `avg_score` (the average of Faithfulness, Relevancy, and Similarity) in descending order, so the best-performing configurations appear first.\n", + "3. **Display Top Configurations:** Show the top N rows of the sorted DataFrame, including key parameters, scores, and the generated answer, to quickly identify promising settings.\n", + "4. **Summarize Best Run:** Print a clear summary of the single best-performing configuration based on the average score, showing its parameters, individual scores, time taken, and the full answer it generated." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "compare-results-v5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- Analyzing Experiment Results ---\n", + "Total results collected: 24\n", + "\n", + "--- Top 10 Performing Configurations (Sorted by Average Score) ---\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
chunk_sizeoverlaptop_kstrategyavg_scorefaithfulnessrelevancysimilarity_scoretime_secanswer
0250503Simple RAG0.8994170.91.00.7982518.975824Solar power and hydropower differ significantly in consistency and environmental impact:\\n\\n- **Consistency**: \\n - **Solar Power**: Inconsisten...
1250503Query Rewrite RAG0.8968590.91.00.7905786.550637**Consistency:**\\n- **Hydropower** is highly reliable and provides consistent, large-scale power, as it is not dependent on weather conditions onc...
2150303Rerank RAG (Simulated)0.8941250.91.00.7823749.526656**Consistency:** \\n- **Hydropower** is highly reliable and consistent, providing large-scale power 24/7, as it is not dependent on weather or tim...
3150503Query Rewrite RAG0.8938230.91.00.7814689.675948**Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, as it is not dependent on weather conditions o...
4150303Query Rewrite RAG0.8936660.91.00.78099710.357061**Consistency:**\\n- **Hydropower** is highly reliable and consistent, providing large-scale power 24/7, as it relies on the continuous flow of wat...
5150503Simple RAG0.8927740.91.00.7783219.777294**Consistency:** \\n- **Hydropower** is highly consistent and reliable, providing large-scale power 24/7, especially with dams that store water fo...
6250503Rerank RAG (Simulated)0.8915700.91.00.77470941.228211**Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, especially with dams that can store water and ...
7250303Query Rewrite RAG0.8908780.91.00.7726358.359087**Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, especially with dams that can store water and ...
8250305Simple RAG0.8908670.91.00.7726018.767287**Consistency:** \\n- **Solar Power:** Inconsistent due to dependence on weather and daylight. Requires storage solutions (e.g., batteries) for re...
9150505Simple RAG0.8906560.91.00.7719679.743746**Consistency:** \\n- **Solar Power:** Inconsistent due to dependence on weather and daylight. Requires storage solutions (e.g., batteries) for re...
\n", + "
" + ], + "text/plain": [ + " chunk_size overlap top_k strategy avg_score \\\n", + "0 250 50 3 Simple RAG 0.899417 \n", + "1 250 50 3 Query Rewrite RAG 0.896859 \n", + "2 150 30 3 Rerank RAG (Simulated) 0.894125 \n", + "3 150 50 3 Query Rewrite RAG 0.893823 \n", + "4 150 30 3 Query Rewrite RAG 0.893666 \n", + "5 150 50 3 Simple RAG 0.892774 \n", + "6 250 50 3 Rerank RAG (Simulated) 0.891570 \n", + "7 250 30 3 Query Rewrite RAG 0.890878 \n", + "8 250 30 5 Simple RAG 0.890867 \n", + "9 150 50 5 Simple RAG 0.890656 \n", + "\n", + " faithfulness relevancy similarity_score time_sec \\\n", + "0 0.9 1.0 0.798251 8.975824 \n", + "1 0.9 1.0 0.790578 6.550637 \n", + "2 0.9 1.0 0.782374 9.526656 \n", + "3 0.9 1.0 0.781468 9.675948 \n", + "4 0.9 1.0 0.780997 10.357061 \n", + "5 0.9 1.0 0.778321 9.777294 \n", + "6 0.9 1.0 0.774709 41.228211 \n", + "7 0.9 1.0 0.772635 8.359087 \n", + "8 0.9 1.0 0.772601 8.767287 \n", + "9 0.9 1.0 0.771967 9.743746 \n", + "\n", + " answer \n", + "0 Solar power and hydropower differ significantly in consistency and environmental impact:\\n\\n- **Consistency**: \\n - **Solar Power**: Inconsisten... \n", + "1 **Consistency:**\\n- **Hydropower** is highly reliable and provides consistent, large-scale power, as it is not dependent on weather conditions onc... \n", + "2 **Consistency:** \\n- **Hydropower** is highly reliable and consistent, providing large-scale power 24/7, as it is not dependent on weather or tim... \n", + "3 **Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, as it is not dependent on weather conditions o... \n", + "4 **Consistency:**\\n- **Hydropower** is highly reliable and consistent, providing large-scale power 24/7, as it relies on the continuous flow of wat... \n", + "5 **Consistency:** \\n- **Hydropower** is highly consistent and reliable, providing large-scale power 24/7, especially with dams that store water fo... 
\n", + "6 **Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, especially with dams that can store water and ... \n", + "7 **Consistency:** \\n- **Hydropower** is highly reliable and provides consistent, large-scale power, especially with dams that can store water and ... \n", + "8 **Consistency:** \\n- **Solar Power:** Inconsistent due to dependence on weather and daylight. Requires storage solutions (e.g., batteries) for re... \n", + "9 **Consistency:** \\n- **Solar Power:** Inconsistent due to dependence on weather and daylight. Requires storage solutions (e.g., batteries) for re... " + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Best Configuration Summary ---\n", + "Chunk Size: 250 words\n", + "Overlap: 50 words\n", + "Top-K Retrieved: 3 chunks\n", + "Strategy: Simple RAG\n", + "---> Average Score (Faith+Rel+Sim): 0.899\n", + " (Faithfulness: 0.900, Relevancy: 1.000, Similarity: 0.798)\n", + "Time Taken: 8.98 seconds\n", + "\n", + "Best Answer Generated:\n", + "Solar power and hydropower differ significantly in consistency and environmental impact:\n", + "\n", + "- **Consistency**: \n", + " - **Solar Power**: Inconsistent, as it depends on weather conditions and time of day. Requires storage solutions (like batteries) for reliable supply. \n", + " - **Hydropower**: Highly consistent, providing large-scale, reliable power 24/7, especially with dams. \n", + "\n", + "- **Environmental Impact**: \n", + " - **Solar Power**: Clean with minimal emissions during operation, but manufacturing panels and disposal can have environmental impacts. \n", + " - **Hydropower**: Large dams can severely harm ecosystems, disrupt fish migration, and displace communities. Run-of-river systems are less disruptive but still impact local environments. 
\n", + "\n", + "In summary, hydropower is more consistent but has greater environmental risks, while solar power is cleaner but less reliable without storage.\n", + "\n", + "--- Analysis Complete --- \n" + ] + } + ], + "source": [ + "print(\"--- Analyzing Experiment Results ---\")\n", + "\n", + "# First, check if any results were actually collected\n", + "if not all_results:\n", + " print(\"No results were generated during the experiment. Cannot perform analysis.\")\n", + "else:\n", + " # Convert the list of result dictionaries into a Pandas DataFrame\n", + " results_df = pd.DataFrame(all_results)\n", + " print(f\"Total results collected: {len(results_df)}\")\n", + " \n", + " # Sort the DataFrame based on the 'avg_score' column in descending order (best first)\n", + " # Use reset_index(drop=True) to get a clean 0-based index after sorting.\n", + " results_df_sorted = results_df.sort_values(by='avg_score', ascending=False).reset_index(drop=True)\n", + " \n", + " print(\"\\n--- Top 10 Performing Configurations (Sorted by Average Score) ---\")\n", + " # Define the columns we want to display in the summary table\n", + " display_cols = [\n", + " 'chunk_size', 'overlap', 'top_k', 'strategy', \n", + " 'avg_score', 'faithfulness', 'relevancy', 'similarity_score', # Added similarity\n", + " 'time_sec', \n", + " 'answer' # Including the answer helps qualitatively assess the best runs\n", + " ]\n", + " # Filter out any columns that might not exist (e.g., if an error occurred before population)\n", + " display_cols = [col for col in display_cols if col in results_df_sorted.columns]\n", + " \n", + " # Display the head (top 10 rows) of the sorted DataFrame using the selected columns\n", + " # The display() function provides richer output in Jupyter environments.\n", + " display(results_df_sorted[display_cols].head(10))\n", + " \n", + " # --- Summary of the Single Best Run --- \n", + " print(\"\\n--- Best Configuration Summary ---\")\n", + " # Check if the sorted DataFrame is not 
empty before accessing the first row\n", + " if not results_df_sorted.empty:\n", + " # Get the first row (index 0), which corresponds to the best score after sorting\n", + " best_run = results_df_sorted.iloc[0]\n", + " \n", + " # Print the parameters and results of the best configuration\n", + " print(f\"Chunk Size: {best_run.get('chunk_size', 'N/A')} words\")\n", + " print(f\"Overlap: {best_run.get('overlap', 'N/A')} words\")\n", + " print(f\"Top-K Retrieved: {best_run.get('top_k', 'N/A')} chunks\")\n", + " print(f\"Strategy: {best_run.get('strategy', 'N/A')}\")\n", + " # Use .get(col, default) for robustness in case a column is missing\n", + " avg_score = best_run.get('avg_score', 0.0)\n", + " faithfulness = best_run.get('faithfulness', 0.0)\n", + " relevancy = best_run.get('relevancy', 0.0)\n", + " similarity = best_run.get('similarity_score', 0.0)\n", + " time_sec = best_run.get('time_sec', 0.0)\n", + " best_answer = best_run.get('answer', 'N/A')\n", + " \n", + " print(f\"---> Average Score (Faith+Rel+Sim): {avg_score:.3f}\")\n", + " print(f\" (Faithfulness: {faithfulness:.3f}, Relevancy: {relevancy:.3f}, Similarity: {similarity:.3f})\")\n", + " print(f\"Time Taken: {time_sec:.2f} seconds\")\n", + " print(f\"\\nBest Answer Generated:\")\n", + " # Print the full answer generated by the best configuration\n", + " print(best_answer)\n", + " else:\n", + " # Handle the case where no results were successfully processed\n", + " print(\"Could not determine the best configuration (no valid results found).\")\n", + " \n", + "print(\"\\n--- Analysis Complete --- \")" + ] + }, + { + "cell_type": "markdown", + "id": "conclusion-v5", + "metadata": {}, + "source": [ + "### 10. 
Conclusion: What Did We Learn?\n", + "\n", + "We have successfully constructed and executed an end-to-end pipeline to experiment with various RAG configurations and evaluate their performance using multiple metrics on the Nebius AI platform.\n", + "\n", + "By examining the results table and the best configuration summary above, we can gain insights specific to *our chosen corpus, query, and models*.\n", + "\n", + "**Reflection Points:**\n", + "\n", + "* **Chunking Impact:** Did a specific `chunk_size` or `overlap` tend to produce better average scores? Consider why smaller chunks might capture specific facts better, while larger chunks might provide more context. How did overlap seem to influence the results?\n", + "* **Retrieval Quantity (`top_k`):** How did increasing `top_k` affect the scores? Did retrieving more chunks always lead to better answers, or did it sometimes introduce noise or irrelevant information, potentially lowering faithfulness or similarity?\n", + "* **Strategy Comparison:** Did the 'Query Rewrite' or 'Rerank (Simulated)' strategies offer a consistent advantage over 'Simple RAG' in terms of the average score? Was the potential improvement significant enough to justify the extra steps (e.g., additional LLM call for rewrite, larger initial retrieval for rerank)?\n", + "* **Evaluation Metrics:** \n", + " * Look at the 'Best Answer' and compare it to the `true_answer_for_query`. Do the individual scores (Faithfulness, Relevancy, Similarity) seem to reflect the quality you perceive?\n", + " * Did high similarity always correlate with high faithfulness? Could an answer be similar but unfaithful, or faithful but dissimilar? \n", + " * How reliable do you feel the automated LLM evaluation (Faithfulness, Relevancy) is compared to the more objective Cosine Similarity? 
What are the potential limitations of LLM-based evaluation (e.g., sensitivity to prompt wording, model biases)?\n", + "* **Overall Performance:** Did any configuration achieve a near-perfect average score? What might be preventing a perfect score (e.g., limitations of the source documents, inherent ambiguity in language, imperfect retrieval)?\n", + "\n", + "**Key Takeaway:** Optimizing a RAG system is an iterative process. The best configuration often depends heavily on the specific dataset, the nature of the user queries, the chosen embedding and LLM models, and the evaluation criteria. Systematic experimentation, like the process followed in this notebook, is crucial for finding settings that perform well for a particular use case.\n", + "\n", + "**Potential Next Steps & Further Exploration:**\n", + "\n", + "* **Expand Test Parameters:** Try a wider range of `chunk_size`, `overlap`, and `top_k` values.\n", + "* **Different Queries:** Test the same configurations with different types of queries (e.g., fact-based, comparison, summarization) to see how performance varies.\n", + "* **Larger/Different Corpus:** Use a more extensive or domain-specific knowledge base.\n", + "* **Implement True Reranking:** Replace the simulated reranking with a dedicated cross-encoder reranking model (e.g., from Hugging Face Transformers or Cohere Rerank) to re-score the initially retrieved documents based on relevance.\n", + "* **Alternative Models:** Experiment with different Nebius AI models for embedding, generation, or evaluation to see their impact.\n", + "* **Advanced Chunking:** Explore more sophisticated chunking strategies (e.g., recursive character splitting, semantic chunking).\n", + "* **Human Evaluation:** Complement the automated metrics with human judgment for a more nuanced understanding of answer quality." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-best-rag-finder", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/data/attention_is_all_you_need.pdf b/data/attention_is_all_you_need.pdf new file mode 100644 index 0000000..97d7c51 Binary files /dev/null and b/data/attention_is_all_you_need.pdf differ diff --git a/data/quantum.txt b/data/quantum.txt new file mode 100644 index 0000000..b708cd2 --- /dev/null +++ b/data/quantum.txt @@ -0,0 +1,89 @@ +Quantum Computing: Principles, Progress, and Possibilities + +Introduction + +Quantum computing represents one of the most significant paradigm shifts in computational theory since the advent of electronic computers. Unlike classical computers that process information in binary digits (bits), quantum computers leverage quantum mechanical phenomena to process information in quantum bits, or qubits. This fundamental difference enables quantum computers to potentially solve certain problems exponentially faster than classical computers, challenging our current understanding of computational complexity and creating new possibilities across various domains including cryptography, material science, optimization, and artificial intelligence. + +The field traces its conceptual origins to the early 1980s when physicist Richard Feynman proposed the idea of quantum computers. Feynman observed that simulating quantum systems on classical computers was exponentially complex and suggested that computers based on quantum principles might be more efficient for such simulations. This insight laid the foundation for quantum computing as we understand it today. 
+ +Quantum Mechanical Foundations + +Superposition + +At the heart of quantum computing lies the principle of superposition. While a classical bit must be either 0 or 1, a qubit can exist in a state that is effectively both 0 and 1 simultaneously until measured. Mathematically, we represent this as: + +|ψ⟩ = α|0⟩ + β|1⟩ + +Where α and β are complex numbers satisfying |α|² + |β|² = 1, representing the probability amplitudes for measuring the qubit in state |0⟩ or |1⟩ respectively. This property allows quantum computers to process multiple possibilities simultaneously, contributing to their computational advantage. + +Entanglement + +Quantum entanglement represents another cornerstone of quantum computing. When qubits become entangled, the state of one qubit becomes intrinsically linked to the state of another, regardless of the physical distance separating them. This phenomenon, which Einstein famously referred to as "spooky action at a distance," enables quantum computers to establish correlations that have no classical analog. + +A simple representation of an entangled state is the Bell state: + +|Φ⁺⟩ = (|00⟩ + |11⟩)/√2 + +In this state, measuring one qubit immediately determines the state of the other, regardless of separation distance. Entanglement allows quantum algorithms to perform operations with an efficiency unattainable in classical computing. + +Quantum Interference + +Quantum interference occurs when the probability amplitudes of quantum states interact. Through careful manipulation, constructive interference can enhance desired computational outcomes while destructive interference can suppress undesired ones. Quantum algorithms are specifically designed to harness interference patterns to increase the probability of measuring the correct answer. + +Quantum Gates and Circuits + +Quantum computers process information through quantum gates, analogous to logic gates in classical computing but operating according to quantum mechanical principles. 
Common quantum gates include: + +- Pauli-X Gate: The quantum equivalent of a classical NOT gate, flipping |0⟩ to |1⟩ and vice versa. +- Hadamard Gate (H): Creates superposition by transforming |0⟩ to (|0⟩ + |1⟩)/√2 and |1⟩ to (|0⟩ - |1⟩)/√2. +- CNOT Gate: A two-qubit gate that flips the second qubit's state if the first qubit is |1⟩, critical for creating entanglement. +- Phase Gates (S, T): Introduce phase shifts in quantum states, essential for quantum algorithms. + +Quantum circuits combine these gates in specific sequences to implement algorithms. Unlike classical circuits, quantum circuits are inherently reversible until measurement occurs, a consequence of the unitary nature of quantum gate operations; maintaining this behavior in practice requires preserving quantum coherence. + +Major Quantum Algorithms + +Shor's Algorithm + +Developed by Peter Shor in 1994, Shor's algorithm represents a watershed moment in quantum computing. It provides an efficient method for finding the prime factors of large integers, a problem believed to be computationally difficult for classical computers. The security of widely-used RSA encryption relies precisely on this difficulty. Shor's algorithm can theoretically factor large numbers in polynomial time, posing a significant threat to current cryptographic systems once sufficiently powerful quantum computers become available. + +The algorithm works by reducing the factoring problem to the task of finding the period of a function, which can be solved efficiently using the quantum Fourier transform. The time complexity is O((log N)³), exponentially faster than the best-known classical algorithms. + +Grover's Algorithm + +Developed by Lov Grover in 1996, Grover's algorithm provides a quadratic speedup for unstructured search problems. While a classical computer requires O(N) operations to search an unsorted database of N items, Grover's algorithm accomplishes this in O(√N) operations. 
+ +Quantum Error Correction + +One of the greatest challenges in quantum computing is maintaining quantum coherence in the presence of environmental noise. Quantum error correction (QEC) addresses this challenge by encoding logical qubits across multiple physical qubits, enabling the detection and correction of errors without directly measuring the quantum state (which would collapse superposition). + +Current Quantum Hardware Approaches + +Superconducting Qubits + +Currently the most advanced quantum computing platform, superconducting qubits operate using Josephson junctions—two superconductors separated by a thin insulating barrier. + +Trapped Ions + +Trapped ion quantum computers use electromagnetic fields to suspend individual ions (charged atoms) in vacuum. + +Quantum Advantage and Practical Applications + +Quantum Chemistry and Materials Science + +Quantum computers are naturally suited to simulating quantum systems, potentially revolutionizing our ability to design new materials and chemical compounds. + +Optimization Problems + +Many industrial processes involve complex optimization challenges that quantum computers may address more efficiently than classical methods. + +Future Outlook + +The path to practical quantum computing will likely proceed through distinct stages: +1. NISQ Era (current) +2. Error-Corrected Systems +3. Fault-Tolerant Quantum Computing + +Conclusion + +Quantum computing represents both a revolutionary computational paradigm and one of the most ambitious engineering projects of our time. While current quantum computers remain limited in capability, the field is advancing rapidly through global investment from governments, corporations, and academic institutions. 
\ No newline at end of file diff --git a/data/reward_plot.png b/data/reward_plot.png new file mode 100644 index 0000000..4a6c754 Binary files /dev/null and b/data/reward_plot.png differ diff --git a/data/val_rl.json b/data/val_rl.json new file mode 100644 index 0000000..00b0451 --- /dev/null +++ b/data/val_rl.json @@ -0,0 +1,68 @@ +{ + "basic_factual_questions": [ + { + "question": "What is the mathematical representation of a qubit in superposition?", + "answer": "|ψ⟩ = α|0⟩ + β|1⟩, where α and β are complex numbers satisfying |α|² + |β|² = 1, representing the probability amplitudes for measuring the qubit in state |0⟩ or |1⟩ respectively." + }, + { + "question": "Who proposed the original concept of quantum computers in the 1980s?", + "answer": "Physicist Richard Feynman proposed the idea of quantum computers in the early 1980s, observing that simulating quantum systems on classical computers was exponentially complex and suggesting that computers based on quantum principles might be more efficient for such simulations." + }, + { + "question": "What temperature do superconducting qubits typically operate at?", + "answer": "Superconducting qubits operate when cooled to near absolute zero, typically 10-15 millikelvin." + }, + { + "question": "What is the time complexity of Shor's algorithm for factoring large numbers?", + "answer": "The time complexity of Shor's algorithm is O((log N)³), which is exponentially faster than the best-known classical algorithms for factoring large numbers." + } + ], + "complex_synthesis_questions": [ + { + "question": "How do quantum error correction codes protect information without violating the no-cloning theorem?", + "answer": "Quantum error correction codes protect information by encoding a single logical qubit across multiple physical qubits, creating an entangled state that distributes quantum information redundantly across a larger system. 
This approach doesn't violate the no-cloning theorem because it doesn't make perfect copies of unknown quantum states. Instead, it uses syndrome measurements that detect error patterns without directly measuring the encoded quantum information. These measurements collapse only the error information while preserving the quantum state in the code space. The surface code mentioned in the document is a prominent example that can detect and correct both bit-flip and phase-flip errors without measuring the logical qubit state itself." + }, + { + "question": "Compare and contrast the hardware approaches used by Google and IonQ for their quantum computers, including their relative advantages and disadvantages.", + "answer": "Google uses superconducting qubits, which operate using Josephson junctions cooled to near absolute zero (10-15 millikelvin). Advantages include faster gate operations and a more straightforward path to scalability using established semiconductor fabrication techniques. Disadvantages include extremely low operating temperatures, shorter coherence times, and higher error rates. IonQ uses trapped ion technology, which suspends individual ions in electromagnetic fields with quantum information stored in their electronic states. Advantages include significantly longer coherence times, extremely high fidelity operations (lower error rates), and the ability to have all-to-all connectivity between qubits. Disadvantages include slower gate operations compared to superconducting qubits and greater challenges in scaling to very large numbers of qubits. Google demonstrated 'quantum supremacy' with its 53-qubit Sycamore processor in 2019, while IonQ focuses on achieving higher-quality qubits even at lower counts." 
+ }, + { + "question": "Explain how Grover's algorithm achieves quadratic speedup and why it cannot achieve exponential speedup for unstructured search problems.", + "answer": "Grover's algorithm achieves quadratic speedup through amplitude amplification, which iteratively increases the probability amplitude of the target state in a superposition. Starting with an equal superposition of all possible states, it applies a series of operations (oracle consultation followed by diffusion) approximately π√N/4 times to gradually amplify the amplitude of the solution state while diminishing others. It cannot achieve exponential speedup because of fundamental limits in quantum mechanics. Mathematically, each iteration can only increase the probability amplitude of the correct answer by O(1/√N). Therefore, O(√N) iterations are necessary to reach a high probability of measuring the correct answer. This quadratic limit was proven by Bennett, Bernstein, Brassard, and Vazirani to be the theoretical maximum possible speedup for any quantum algorithm solving unstructured search problems." + } + ], + "nuanced_evaluation_questions": [ + { + "question": "What are the most significant technical barriers to achieving fault-tolerant quantum computing, and which ones might be overcome first?", + "answer": "The most significant technical barriers include: 1) Quantum Decoherence: Current quantum systems lose quantum properties (coherence) in microseconds to milliseconds due to environmental interactions. 2) High Error Rates: Current quantum gates have error rates between 0.1% and 1% per operation, several orders of magnitude too high for complex algorithms. 3) Scalability Issues: Building larger systems introduces challenges like cross-talk between qubits, increased error rates with system size, and complex control electronics. 
4) Qubit Quality and Quantity: Creating enough high-quality qubits (estimated 1,000 physical qubits per logical qubit for error correction) with sufficient coherence times. 5) Quantum-Classical Interface: Efficiently transferring information between classical and quantum systems. Error reduction in individual physical qubits will likely be overcome first, as incremental improvements continue through better materials and control techniques. The development of small-scale error correction demonstrations will follow. The most challenging barriers are likely the massive scale-up required for fully fault-tolerant systems and the engineering complexities of maintaining quantum coherence across thousands of interacting qubits." + }, + { + "question": "How might quantum computing impact modern cryptographic systems, and what timeline concerns should cybersecurity experts consider?", + "answer": "Quantum computing primarily threatens public key cryptography systems that rely on mathematical problems like integer factorization (RSA) and discrete logarithm (ECC, DSA). Shor's algorithm can theoretically solve these problems exponentially faster than classical algorithms, potentially breaking most current internet security. Cybersecurity experts should consider: Near-term (5-10 years): While fully fault-tolerant quantum computers aren't expected during this period, organizations should begin inventorying cryptographic dependencies and planning transitions. Medium-term (10-20 years): The emergence of error-corrected quantum computers with hundreds of logical qubits could begin to threaten 1024 or 2048-bit RSA keys. Long-term considerations: Data with long-term sensitivity is already at risk from 'harvest now, decrypt later' attacks where encrypted data is stored until quantum computers can break it. Timeline uncertainties are substantial, with expert estimates for fault-tolerant systems ranging from 5 to 30+ years. 
Organizations should implement quantum-resistant cryptographic algorithms (post-quantum cryptography) well before quantum computers can break current systems, with NIST currently standardizing such algorithms." + }, + { + "question": "For what specific types of machine learning tasks might quantum computers offer the most significant advantages over classical approaches?", + "answer": "Quantum computers may offer the most significant advantages for: 1) Dimensionality Reduction: Quantum principal component analysis could exponentially speed up the processing of high-dimensional data, benefiting image and speech recognition. 2) Kernel Methods: Quantum computers can implement kernel functions that would be computationally prohibitive classically, potentially improving support vector machines and other kernel-based algorithms. 3) Sampling from Complex Distributions: Quantum computers could more efficiently sample from complex probability distributions, beneficial for generative models and Boltzmann machines. 4) Optimization Problems: Finding optimal parameters in deep learning models could be accelerated using quantum optimization algorithms for certain classes of problems. 5) Recommendation Systems: Quantum recommendation systems could process large user-item matrices more efficiently for personalized recommendations. The advantages would be most pronounced for problems involving large feature spaces, complex correlation structures, or where quantum states can naturally represent the problem structure." + } + ], + "application_questions": [ + { + "question": "How could quantum computing potentially revolutionize drug discovery processes?", + "answer": "Quantum computing could revolutionize drug discovery by: 1) Molecular Modeling: Accurately simulating molecular structures and interactions at the quantum level, allowing precise modeling of drug candidates and their interactions with biological targets. 
2) Protein Folding: Better predicting three-dimensional protein structures from amino acid sequences, critical for understanding disease mechanisms and designing targeted therapies. 3) Binding Affinity Calculations: More accurately calculating how strongly potential drug molecules bind to their targets, improving lead compound selection. 4) Metabolic Pathway Simulation: Modeling complex biochemical pathways to predict drug metabolism and potential side effects. 5) Combinatorial Optimization: Efficiently searching vast chemical spaces to identify novel drug candidates with desired properties. These capabilities could dramatically reduce the time and cost of bringing new drugs to market by minimizing failed clinical trials due to unforeseen side effects or efficacy issues. Companies like Zapata Computing are already developing quantum algorithms specifically for chemical simulations applicable to pharmaceutical research." + }, + { + "question": "What industries might see the earliest practical benefits from quantum computing, and why?", + "answer": "Industries likely to see earliest benefits include: 1) Materials Science & Chemical Industries: Problems in these fields directly map to quantum mechanical simulations. Even modest quantum advantage could immediately impact catalyst design, battery development, and novel materials discovery. 2) Financial Services: Portfolio optimization, risk assessment, and fraud detection could benefit from quantum algorithms for optimization and machine learning, with direct profit incentives driving adoption. 3) Logistics & Supply Chain: Combinatorial optimization problems like routing, scheduling, and resource allocation could see improvements from quantum approaches to optimization, with immediate cost-saving applications. 4) Pharmaceuticals: Drug discovery processes could be accelerated through better molecular simulations, with high-value outcomes justifying investment in early quantum capabilities. 
5) Energy Sector: Grid optimization, material design for solar cells, and more efficient carbon capture technologies could benefit from quantum computation. These industries share characteristics of high-value problems that map well to quantum algorithms, the ability to benefit from even partial quantum advantage, and sufficient profit potential to justify investment in early-stage quantum technologies." + } + ], + "conceptual_understanding_questions": [ + { + "question": "Explain the relationship between quantum entanglement and the potential computational advantage of quantum computers.", + "answer": "Quantum entanglement establishes correlations between qubits that have no classical equivalent, creating a computational resource essential to quantum advantage. When qubits become entangled, their states cannot be described independently—the quantum state of the entire system must be considered as a whole. This enables: 1) Exponential State Space: An n-qubit system can represent 2^n states simultaneously through entanglement, allowing quantum computers to process multiple possibilities in parallel. 2) Non-local Correlations: Entanglement enables information to be distributed across multiple qubits in ways that classical bits cannot, creating computational patterns inaccessible to classical systems. 3) Algorithm Speedups: Entanglement is crucial for quantum algorithms like Shor's and Grover's. Without entanglement, quantum computation can be efficiently simulated on classical computers, eliminating any potential speed advantage. 4) Multi-qubit Operations: Entangling operations like the CNOT gate are fundamental building blocks for quantum circuits, establishing the correlations necessary for complex quantum algorithms. Simply put, entanglement is the resource that allows quantum computers to perform calculations in ways fundamentally different from classical computers, accessing computational paths that scale exponentially better for certain problems." 
+ }, + { + "question": "How does the principle of quantum interference contribute to the functioning of quantum algorithms?", + "answer": "Quantum interference is a fundamental mechanism that enables quantum algorithms to outperform classical counterparts: 1) Probability Amplitude Manipulation: Quantum interference occurs when probability amplitudes (not just probabilities) of quantum states interact. Through careful algorithm design, constructive interference can enhance desired computational outcomes while destructive interference cancels out undesired ones. 2) Path Selection: In algorithms like Deutsch-Jozsa or Shor's, quantum interference effectively allows the system to evaluate multiple computational paths simultaneously and have the correct answers 'interfere constructively' while incorrect answers 'interfere destructively.' 3) Amplitude Amplification: Grover's algorithm specifically exploits interference to gradually increase the amplitude of target states through repeated application of operations that cause interference patterns. 4) Phase Information Processing: Quantum algorithms encode information in the phases of quantum states. Interference allows these phases to interact, performing computational work that has no classical counterpart. Quantum interference, combined with superposition and entanglement, forms the computational basis that allows quantum algorithms to solve certain problems more efficiently than any known classical algorithm. Without interference, the advantages of quantum computing would largely disappear, as the system would behave more like probabilistic classical computing." 
+ } + ] +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..15c2000 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,19 @@ +PyMuPDF +numpy +openai +requests +rank-bm25 +scikit-learn +networkx +matplotlib +tqdm +Pillow +faiss-cpu +pandas +ipywidgets +rdflib +spacy +pyvis +datasets +transformers +accelerate \ No newline at end of file