From 876cd27fb8ef8badbcfa86fa1ea3ab0a6638c313 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Tue, 26 Aug 2025 14:47:54 +0200 Subject: [PATCH 01/15] Integrate ragfactchecker --- lettucedetect/__init__.py | 27 ++ lettucedetect/detectors/__init__.py | 2 + lettucedetect/detectors/factory.py | 12 +- lettucedetect/detectors/llm.py | 18 +- lettucedetect/detectors/rag_fact_checker.py | 215 ++++++++++++++++ lettucedetect/detectors/transformer.py | 5 + lettucedetect/models/generation.py | 71 ++++++ lettucedetect/ragfactchecker.py | 260 ++++++++++++++++++++ pyproject.toml | 3 +- 9 files changed, 603 insertions(+), 10 deletions(-) create mode 100644 lettucedetect/detectors/rag_fact_checker.py create mode 100644 lettucedetect/models/generation.py create mode 100644 lettucedetect/ragfactchecker.py diff --git a/lettucedetect/__init__.py b/lettucedetect/__init__.py index e69de29..928b8f6 100644 --- a/lettucedetect/__init__.py +++ b/lettucedetect/__init__.py @@ -0,0 +1,27 @@ +"""LettuceDetect: Hallucination detection and generation for RAG systems.""" + +# Main detection interface +# Core data structures +from lettucedetect.datasets.hallucination_dataset import ( + HallucinationData, + HallucinationDataset, + HallucinationSample, +) + +# Generation interface +from lettucedetect.models.generation import HallucinationGenerator +from lettucedetect.models.inference import HallucinationDetector + +# Direct RAGFactChecker access for advanced users +from lettucedetect.ragfactchecker import RAGFactChecker + +__version__ = "0.1.7" + +__all__ = [ + "HallucinationData", + "HallucinationDataset", + "HallucinationDetector", + "HallucinationGenerator", + "HallucinationSample", + "RAGFactChecker", # Direct access to triplet functionality +] diff --git a/lettucedetect/detectors/__init__.py b/lettucedetect/detectors/__init__.py index 4b0df18..86e1189 100644 --- a/lettucedetect/detectors/__init__.py +++ b/lettucedetect/detectors/__init__.py @@ -3,11 +3,13 @@ from 
lettucedetect.detectors.base import BaseDetector from lettucedetect.detectors.factory import make_detector as _make_detector from lettucedetect.detectors.llm import LLMDetector +from lettucedetect.detectors.rag_fact_checker import RAGFactCheckerDetector from lettucedetect.detectors.transformer import TransformerDetector __all__ = [ "BaseDetector", "LLMDetector", + "RAGFactCheckerDetector", "TransformerDetector", "_make_detector", ] diff --git a/lettucedetect/detectors/factory.py b/lettucedetect/detectors/factory.py index 430e616..fc13219 100644 --- a/lettucedetect/detectors/factory.py +++ b/lettucedetect/detectors/factory.py @@ -10,10 +10,10 @@ def make_detector(method: str, **kwargs) -> BaseDetector: """Create a detector of the requested type with the given parameters. - :param method: One of "transformer" or "llm". + :param method: One of "transformer", "llm", or "rag_fact_checker". :param kwargs: Passed to the concrete detector constructor. :return: A concrete detector instance. - :raises ValueError: If method is not one of "transformer" or "llm". + :raises ValueError: If method is not supported. """ if method == "transformer": from lettucedetect.detectors.transformer import TransformerDetector @@ -23,5 +23,11 @@ def make_detector(method: str, **kwargs) -> BaseDetector: from lettucedetect.detectors.llm import LLMDetector return LLMDetector(**kwargs) + elif method == "rag_fact_checker": + from lettucedetect.detectors.rag_fact_checker import RAGFactCheckerDetector + + return RAGFactCheckerDetector(**kwargs) else: - raise ValueError(f"Unknown detector method: {method}. Use one of: transformer, llm") + raise ValueError( + f"Unknown detector method: {method}. 
Use one of: transformer, llm, rag_fact_checker" + ) diff --git a/lettucedetect/detectors/llm.py b/lettucedetect/detectors/llm.py index c24208a..9bd5baf 100644 --- a/lettucedetect/detectors/llm.py +++ b/lettucedetect/detectors/llm.py @@ -204,8 +204,10 @@ def predict( :param output_format: ``"spans"`` for character spans. :returns: List of spans. """ - if output_format != "spans": - raise ValueError("LLMDetector only supports 'spans' output_format.") + if output_format not in ["tokens", "spans"]: + raise ValueError( + f"LLMDetector doesn't support '{output_format}' format. Use 'tokens' or 'spans'" + ) # Use PromptUtils to format the context and question full_prompt = PromptUtils.format_context(context, question, self.lang) return self._predict(full_prompt, answer) @@ -218,8 +220,10 @@ def predict_prompt(self, prompt: str, answer: str, output_format: str = "spans") :param output_format: ``"spans"`` for character spans. :returns: List of spans. """ - if output_format != "spans": - raise ValueError("LLMDetector only supports 'spans' output_format.") + if output_format not in ["tokens", "spans"]: + raise ValueError( + f"LLMDetector doesn't support '{output_format}' format. Use 'tokens' or 'spans'" + ) return self._predict(prompt, answer) def predict_prompt_batch( @@ -232,8 +236,10 @@ def predict_prompt_batch( :param output_format: ``"spans"`` for character spans. :returns: List of spans. """ - if output_format != "spans": - raise ValueError("LLMDetector only supports 'spans' output_format.") + if output_format not in ["tokens", "spans"]: + raise ValueError( + f"LLMDetector doesn't support '{output_format}' format. 
Use 'tokens' or 'spans'" + ) with ThreadPoolExecutor(max_workers=30) as pool: futs = [pool.submit(self._predict, p, a) for p, a in zip(prompts, answers)] diff --git a/lettucedetect/detectors/rag_fact_checker.py b/lettucedetect/detectors/rag_fact_checker.py new file mode 100644 index 0000000..e0ed6fa --- /dev/null +++ b/lettucedetect/detectors/rag_fact_checker.py @@ -0,0 +1,215 @@ +"""Simple RAGFactChecker detector wrapper for lettuceDetect factory pattern.""" + +from typing import Any, Dict, List + +from lettucedetect.detectors.base import BaseDetector + + +class RAGFactCheckerDetector(BaseDetector): + """Simple wrapper around RAGFactChecker for lettuceDetect's factory pattern. + + This provides a minimal adapter between lettuceDetect's detector interface + and our clean RAGFactChecker wrapper. + """ + + def __init__( + self, openai_api_key: str = None, model: str = "gpt-4o", base_url: str = None, **kwargs + ): + """Initialize the RAGFactChecker detector. + + :param openai_api_key: OpenAI API key + :param model: OpenAI model to use (default: "gpt-4o") + :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers) + :param kwargs: Additional arguments (ignored for simplicity) + :return: RAGFactChecker instance + """ + from lettucedetect.ragfactchecker import RAGFactChecker + + # Use our simple, clean wrapper internally + self.rag = RAGFactChecker(openai_api_key=openai_api_key, model=model, base_url=base_url) + + def predict( + self, + context: List[str], + answer: str, + question: str = None, + output_format: str = "tokens", + **kwargs, + ) -> List[Dict[str, Any]] | Dict[str, Any]: + """Predict hallucinations using RAGFactChecker. 
+ + :param context: List of context documents + :param answer: Answer text to check for hallucinations + :param question: Question (optional) + :param output_format: "tokens", "spans", or "detailed" + :param kwargs: Additional arguments + + :return: List of predictions in lettuceDetect format, or dict for detailed format + """ + if output_format not in ["tokens", "spans", "detailed"]: + raise ValueError( + f"Invalid output format '{output_format}'. " + "RAGFactChecker supports 'tokens', 'spans', or 'detailed'" + ) + + # Use our simple wrapper's detection method + result = self.rag.detect_hallucinations(context, answer, question) + + # Convert to lettuceDetect's expected format + if output_format == "detailed": + return { + "spans": self._convert_to_spans(answer, result), + "triplets": { + "answer": result.get("answer_triplets", []), + "context": result.get("context_triplets", []), + "hallucinated": result.get("hallucinated_triplets", []), + }, + "fact_check_results": result.get("fact_check_results", {}), + } + elif output_format == "spans": + return self._convert_to_spans(answer, result) + else: # tokens + return self._convert_to_tokens(answer, result) + + def predict_prompt( + self, prompt: str, answer: str, output_format: str = "tokens" + ) -> List[Dict[str, Any]]: + """Predict using a single prompt string as context.""" + return self.predict([prompt], answer, output_format=output_format) + + def predict_prompt_batch( + self, prompts: List[str], answers: List[str], output_format: str = "tokens" + ) -> List[List[Dict[str, Any]]]: + """Batch prediction using RAGFactChecker's batch processing.""" + if len(prompts) != len(answers): + raise ValueError("Number of prompts must match number of answers") + + contexts = [[prompt] for prompt in prompts] # Convert prompts to context lists + rag_results = self.rag.detect_hallucinations_batch(contexts, answers) + + # Convert each result to lettuceDetect format + converted_results = [] + for i, (answer, rag_result) in 
enumerate(zip(answers, rag_results)): + if output_format == "tokens": + converted = self._convert_to_tokens(answer, rag_result) + elif output_format == "spans": + converted = self._convert_to_spans(answer, rag_result) + else: + raise ValueError(f"Unknown output format: {output_format}") + converted_results.append(converted) + + return converted_results + + def _convert_to_tokens(self, answer: str, rag_result: Dict[str, Any]) -> List[Dict[str, Any]]: + """Convert RAGFactChecker result to token format.""" + tokens = answer.split() + hallucinated_triplets = rag_result.get("hallucinated_triplets", []) + + token_predictions = [] + for i, token in enumerate(tokens): + # Simple check if token appears in any hallucinated triplet + is_hallucinated = any( + token.lower() in " ".join(triplet).lower() for triplet in hallucinated_triplets + ) + + token_predictions.append( + { + "token": token, + "pred": 1 if is_hallucinated else 0, + "prob": 0.9 if is_hallucinated else 0.1, + } + ) + + return token_predictions + + def _convert_to_spans(self, answer: str, rag_result: Dict[str, Any]) -> List[Dict[str, Any]]: + """Convert RAGFactChecker result to span format with improved triplet matching.""" + spans = [] + hallucinated_triplets = rag_result.get("hallucinated_triplets", []) + + for triplet in hallucinated_triplets: + if len(triplet) < 3: + continue + + # Try different patterns to find triplet elements in text + patterns = [ + f"{triplet[0]} {triplet[1]} {triplet[2]}", # Full triplet phrase + f"{triplet[0]} {triplet[2]}", # Subject + object + triplet[2], # Object (often contains the hallucination) + triplet[0], # Subject + triplet[1], # Predicate + ] + + found_span = False + for pattern in patterns: + if not pattern or not pattern.strip(): + continue + + # Try exact match first, then case-insensitive + start = answer.find(pattern) + if start == -1: + start = answer.lower().find(pattern.lower()) + if start != -1: + # Get the actual text from the answer with correct case + pattern = 
answer[start : start + len(pattern)] + + if start != -1: + spans.append( + { + "start": start, + "end": start + len(pattern), + "text": pattern, + "confidence": 0.9, + "triplet": triplet, # Include source triplet for transparency + } + ) + found_span = True + break + + # If no pattern matched, try individual words from the triplet + if not found_span: + for element in triplet: + if element and element.strip() and len(element) > 3: # Skip short words + start = answer.lower().find(element.lower()) + if start != -1: + actual_text = answer[start : start + len(element)] + spans.append( + { + "start": start, + "end": start + len(element), + "text": actual_text, + "confidence": 0.7, # Lower confidence for partial matches + "triplet": triplet, + } + ) + break + + # Merge overlapping spans + return self._merge_overlapping_spans(spans) + + def _merge_overlapping_spans(self, spans: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Merge overlapping spans to avoid duplicates.""" + if not spans: + return spans + + # Sort spans by start position + sorted_spans = sorted(spans, key=lambda x: x["start"]) + merged = [sorted_spans[0]] + + for current in sorted_spans[1:]: + last = merged[-1] + + # Check if spans overlap + if current["start"] <= last["end"]: + # Merge spans - extend the end and combine triplets + merged[-1] = { + "start": last["start"], + "end": max(last["end"], current["end"]), + "text": last["text"], # Keep original text + "confidence": max(last["confidence"], current["confidence"]), + "triplet": last.get("triplet", current.get("triplet")), + } + else: + merged.append(current) + + return merged diff --git a/lettucedetect/detectors/transformer.py b/lettucedetect/detectors/transformer.py index e289744..3ef8c23 100644 --- a/lettucedetect/detectors/transformer.py +++ b/lettucedetect/detectors/transformer.py @@ -43,6 +43,11 @@ def _predict(self, prompt: str, answer: str, output_format: str) -> list: :param answer: The answer string. 
:param output_format: "tokens" to return token-level predictions, or "spans" to return grouped spans. """ + if output_format not in ["tokens", "spans"]: + raise ValueError( + f"TransformerDetector doesn't support '{output_format}' format. " + "Use 'tokens' or 'spans'" + ) # Use the shared tokenization logic from HallucinationDataset encoding, _, offsets, answer_start_token = HallucinationDataset.prepare_tokenized_input( self.tokenizer, prompt, answer, self.max_length diff --git a/lettucedetect/models/generation.py b/lettucedetect/models/generation.py new file mode 100644 index 0000000..694901e --- /dev/null +++ b/lettucedetect/models/generation.py @@ -0,0 +1,71 @@ +"""Simple hallucination generation using RAGFactChecker.""" + +from typing import Any, Dict, List + +from lettucedetect.ragfactchecker import RAGFactChecker + + +class HallucinationGenerator: + """Simple hallucination generator using RAGFactChecker. + + This provides the same interface as before but uses our clean RAGFactChecker wrapper. + """ + + def __init__( + self, + method: str = "rag_fact_checker", + openai_api_key: str = None, + model: str = "gpt-4o", + base_url: str = None, + **kwargs, + ): + """Initialize hallucination generator. + + :param method: Method name (kept for compatibility, only "rag_fact_checker" exists) + :param openai_api_key: OpenAI API key + :param model: OpenAI model to use (default: "gpt-4o") + :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers) + :param kwargs: Additional arguments (ignored) + + """ + self.rag = RAGFactChecker(openai_api_key=openai_api_key, model=model, base_url=base_url) + + def generate( + self, context: List[str], question: str, answer: str = None, **kwargs + ) -> Dict[str, Any]: + """Generate hallucinated content. 
+ + :param context: List of context documents + :param question: Question to generate answer for + :param answer: Original answer (optional, for answer-based generation) + :param kwargs: Additional parameters + + :return: Generation results + + """ + if answer: + # Answer-based generation + return self.rag.generate_hallucination_from_answer(answer, question) + else: + # Context-based generation + return self.rag.generate_hallucination_from_context(context, question) + + def generate_batch( + self, contexts: List[List[str]], questions: List[str], answers: List[str] = None, **kwargs + ) -> List[Dict[str, Any]]: + """Generate hallucinated content for multiple inputs. + + :param contexts: List of context lists + :param questions: List of questions + :param answers: List of answers (optional) + :param kwargs: Additional parameters + + :return: List of generation results + + """ + results = [] + for i, (context, question) in enumerate(zip(contexts, questions)): + answer = answers[i] if answers and i < len(answers) else None + result = self.generate(context, question, answer, **kwargs) + results.append(result) + return results diff --git a/lettucedetect/ragfactchecker.py b/lettucedetect/ragfactchecker.py new file mode 100644 index 0000000..cd8f5b3 --- /dev/null +++ b/lettucedetect/ragfactchecker.py @@ -0,0 +1,260 @@ +"""Simple, clean RAGFactChecker wrapper for lettuceDetect.""" + +import logging +import os +from typing import Any, Dict, List, Optional + + +class RAGFactChecker: + """Simple wrapper around RAGFactChecker with a clean, unified API. + + This provides all RAGFactChecker functionality through one interface: + - Triplet generation and comparison + - Hallucination detection + - Hallucination generation + - Batch processing + """ + + def __init__( + self, + openai_api_key: Optional[str] = None, + model: str = "gpt-4o", + base_url: Optional[str] = None, + ): + """Initialize RAGFactChecker. + + :param openai_api_key: OpenAI API key. 
If None, uses OPENAI_API_KEY env var. + :param model: OpenAI model to use (default: "gpt-4o"). Options: "gpt-4o", "gpt-4", "gpt-3.5-turbo", etc. + :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers). + + :return: RAGFactChecker instance + """ + self.openai_api_key = openai_api_key or os.getenv("OPENAI_API_KEY") + if not self.openai_api_key: + raise ValueError( + "OpenAI API key required. Set OPENAI_API_KEY env var or pass explicitly." + ) + + self.model = model + self.base_url = base_url + self.logger = logging.getLogger(__name__) + self._setup_components() + + def _setup_components(self): + """Initialize RAGFactChecker components.""" + try: + from rag_fact_checker.data import Config + from rag_fact_checker.model.fact_checker import LLMFactChecker + from rag_fact_checker.model.hallucination_data_generator import ( + AnswerBasedHallucinationDataGenerator, + LLMHallucinationDataGenerator, + ) + from rag_fact_checker.model.triplet_generator import LLMTripletGenerator + + # Create config with defaults and API key + self.config = Config() + self.config.model.llm.api_key = self.openai_api_key + self.config.model.llm.generator_model = self.model + if self.base_url: + self.config.model.llm.base_url = self.base_url + + # Initialize components + self.triplet_generator = LLMTripletGenerator(self.config, self.logger) + self.fact_checker = LLMFactChecker(self.config, self.logger) + self.reference_generator = LLMHallucinationDataGenerator(self.config, self.logger) + self.answer_generator = AnswerBasedHallucinationDataGenerator(self.config, self.logger) + + except ImportError as e: + raise ImportError( + "RAGFactChecker not available. Install with: pip install rag-fact-checker" + ) from e + + # ============ TRIPLET OPERATIONS ============ + + def generate_triplets(self, text: str) -> List[List[str]]: + """Generate triplets from text. 
+ + :param text: Input text + + :return: List of triplets [subject, predicate, object] + List of triplets [subject, predicate, object] + + """ + result = self.triplet_generator.forward(text) + return result.triplets + + def compare_triplets( + self, answer_triplets: List[List[str]], reference_triplets: List[List[str]] + ) -> Dict[str, Any]: + """Compare answer triplets against reference triplets. + + :param answer_triplets: Triplets from answer to check + :param reference_triplets: Reference triplets to compare against + + :return: Dict with fact check results + + """ + result = self.fact_checker.forward( + answer_triplets=answer_triplets, reference_triplets=[reference_triplets] + ) + return {"fact_check_results": result.fact_check_prediction_binary, "raw_output": result} + + def analyze_text_pair(self, answer_text: str, reference_text: str) -> Dict[str, Any]: + """Generate and compare triplets for two texts. + + :param answer_text: Text to analyze + :param reference_text: Reference text to compare against + + :return: Complete analysis with triplets and comparison results + + """ + answer_triplets = self.generate_triplets(answer_text) + reference_triplets = self.generate_triplets(reference_text) + comparison = self.compare_triplets(answer_triplets, reference_triplets) + + return { + "answer_triplets": answer_triplets, + "reference_triplets": reference_triplets, + "comparison": comparison, + } + + # ============ HALLUCINATION DETECTION ============ + + def detect_hallucinations( + self, context: List[str], answer: str, question: Optional[str] = None + ) -> Dict[str, Any]: + """Detect hallucinations in answer given context. 
+ + :param context: List of context documents + :param answer: Answer to check + :param question: Optional question for context + + :return: Detection results with triplets and fact checking + + """ + # Generate triplets + answer_triplets = self.generate_triplets(answer) + context_text = "\n".join(context) + context_triplets = self.generate_triplets(context_text) + + # Fact check + comparison = self.compare_triplets(answer_triplets, context_triplets) + + return { + "answer_triplets": answer_triplets, + "context_triplets": context_triplets, + "fact_check_results": comparison["fact_check_results"], + "hallucinated_triplets": [ + answer_triplets[i] + for i, fact_is_true in comparison["fact_check_results"].items() + if not fact_is_true and i < len(answer_triplets) + ], + } + + # ============ HALLUCINATION GENERATION ============ + + def generate_hallucination_from_context( + self, context: List[str], question: str + ) -> Dict[str, Any]: + """Generate hallucinated content from context and question. + + :param context: List of context documents + :param question: Question to answer + + :return: Generated hallucinated and non-hallucinated answers + + """ + context_text = "\n".join(context) + result = self.reference_generator.generate_hlcntn_data(context_text, question) + + return { + "hallucinated_answer": result.generated_hlcntn_answer, + "non_hallucinated_answer": result.generated_non_hlcntn_answer, + "hallucinated_parts": result.hlcntn_part, + } + + def generate_hallucination_from_answer( + self, + correct_answer: str, + question: str, + error_types: Optional[List[str]] = None, + intensity: float = 0.3, + ) -> Dict[str, Any]: + """Generate hallucinated version of a correct answer. + + :param correct_answer: The correct answer to modify + :param question: Original question for context + :param error_types: Types of errors to inject (factual, temporal, numerical, etc.) 
+ :param intensity: Error intensity 0.1-1.0 + + :return: Generated hallucinated version with error details + + """ + # Convert string error types to ErrorType enums if provided + error_type_enums = None + if error_types: + from rag_fact_checker.model.hallucination_data_generator.answer_based_hallucination_data_generator import ( + ErrorType, + ) + + error_type_enums = [] + for error_type in error_types: + if hasattr(ErrorType, error_type.upper()): + error_type_enums.append(getattr(ErrorType, error_type.upper())) + + result = self.answer_generator.generate_answer_based_hallucination( + correct_answer=correct_answer, + question=question, + error_types=error_type_enums, + intensity=intensity, + ) + + return { + "original_answer": result.generated_non_hlcntn_answer, + "hallucinated_answer": result.generated_hlcntn_answer, + "hallucinated_parts": result.hlcntn_part, + } + + # ============ BATCH OPERATIONS ============ + + def generate_triplets_batch(self, texts: List[str]) -> List[List[List[str]]]: + """Generate triplets for multiple texts. + + :param texts: List of input texts + + :return: List of triplet lists for each text + + """ + batch_result = self.triplet_generator.forward_batch(texts) + + # Create results list with empty lists for failed items + results = [[] for _ in texts] # Initialize with empty lists + + # Fill in successful results + result_index = 0 + for i in range(len(texts)): + if i not in batch_result.failed_indices: + if result_index < len(batch_result.results): + results[i] = batch_result.results[result_index].triplets + result_index += 1 + + return results + + def detect_hallucinations_batch( + self, contexts: List[List[str]], answers: List[str], questions: Optional[List[str]] = None + ) -> List[Dict[str, Any]]: + """Detect hallucinations for multiple context-answer pairs. 
+ + :param contexts: List of context document lists + :param answers: List of answers to check + :param questions: Optional list of questions + + :return: List of detection results + + """ + results = [] + for i, (context, answer) in enumerate(zip(contexts, answers)): + question = questions[i] if questions and i < len(questions) else None + result = self.detect_hallucinations(context, answer, question) + results.append(result) + return results diff --git a/pyproject.toml b/pyproject.toml index 8995fb5..ce255a4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,8 @@ dependencies = [ "tqdm>=4.65.0", "scikit-learn>=1.6.1", "numpy>=2.2.2", - "openai==1.66.3", + "openai>=1.66.3", + "rag-fact-checker", ] [project.urls] From af2432345ce8e0f923f8e3b7db02a935eba3c3a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Tue, 26 Aug 2025 17:37:22 +0200 Subject: [PATCH 02/15] Added async support for ragchecker --- lettucedetect/detectors/rag_fact_checker.py | 12 ++- lettucedetect/models/generation.py | 36 +++++-- lettucedetect/ragfactchecker.py | 100 ++++++++++++++++++++ 3 files changed, 139 insertions(+), 9 deletions(-) diff --git a/lettucedetect/detectors/rag_fact_checker.py b/lettucedetect/detectors/rag_fact_checker.py index e0ed6fa..fb6cb73 100644 --- a/lettucedetect/detectors/rag_fact_checker.py +++ b/lettucedetect/detectors/rag_fact_checker.py @@ -13,20 +13,28 @@ class RAGFactCheckerDetector(BaseDetector): """ def __init__( - self, openai_api_key: str = None, model: str = "gpt-4o", base_url: str = None, **kwargs + self, + openai_api_key: str = None, + model: str = "gpt-4o", + base_url: str = None, + temperature: float = 0.0, + **kwargs, ): """Initialize the RAGFactChecker detector. 
:param openai_api_key: OpenAI API key :param model: OpenAI model to use (default: "gpt-4o") :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers) + :param temperature: Temperature for model sampling (default: 0.0 for deterministic outputs) :param kwargs: Additional arguments (ignored for simplicity) :return: RAGFactChecker instance """ from lettucedetect.ragfactchecker import RAGFactChecker # Use our simple, clean wrapper internally - self.rag = RAGFactChecker(openai_api_key=openai_api_key, model=model, base_url=base_url) + self.rag = RAGFactChecker( + openai_api_key=openai_api_key, model=model, base_url=base_url, temperature=temperature + ) def predict( self, diff --git a/lettucedetect/models/generation.py b/lettucedetect/models/generation.py index 694901e..47906db 100644 --- a/lettucedetect/models/generation.py +++ b/lettucedetect/models/generation.py @@ -17,6 +17,7 @@ def __init__( openai_api_key: str = None, model: str = "gpt-4o", base_url: str = None, + temperature: float = 0.0, **kwargs, ): """Initialize hallucination generator. 
@@ -25,10 +26,13 @@ def __init__( :param openai_api_key: OpenAI API key :param model: OpenAI model to use (default: "gpt-4o") :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers) + :param temperature: Temperature for model sampling (default: 0.0 for deterministic outputs) :param kwargs: Additional arguments (ignored) """ - self.rag = RAGFactChecker(openai_api_key=openai_api_key, model=model, base_url=base_url) + self.rag = RAGFactChecker( + openai_api_key=openai_api_key, model=model, base_url=base_url, temperature=temperature + ) def generate( self, context: List[str], question: str, answer: str = None, **kwargs @@ -63,9 +67,27 @@ def generate_batch( :return: List of generation results """ - results = [] - for i, (context, question) in enumerate(zip(contexts, questions)): - answer = answers[i] if answers and i < len(answers) else None - result = self.generate(context, question, answer, **kwargs) - results.append(result) - return results + if answers: + return self.rag.generate_hallucination_from_answer_batch(answers, questions) + else: + return self.rag.generate_hallucination_from_context_batch(contexts, questions) + + async def generate_batch_async( + self, contexts: List[List[str]], questions: List[str], answers: List[str] = None, **kwargs + ) -> List[Dict[str, Any]]: + """Generate hallucinated content for multiple inputs. 
+ + :param contexts: List of context lists + :param questions: List of questions + :param answers: List of answers (optional) + :param kwargs: Additional parameters + + :return: List of generation results + + """ + if answers: + return await self.rag.generate_hallucination_from_answer_batch_async(answers, questions) + else: + return await self.rag.generate_hallucination_from_context_batch_async( + contexts, questions + ) diff --git a/lettucedetect/ragfactchecker.py b/lettucedetect/ragfactchecker.py index cd8f5b3..c62d6f6 100644 --- a/lettucedetect/ragfactchecker.py +++ b/lettucedetect/ragfactchecker.py @@ -20,12 +20,14 @@ def __init__( openai_api_key: Optional[str] = None, model: str = "gpt-4o", base_url: Optional[str] = None, + temperature: float = 0.0, ): """Initialize RAGFactChecker. :param openai_api_key: OpenAI API key. If None, uses OPENAI_API_KEY env var. :param model: OpenAI model to use (default: "gpt-4o"). Options: "gpt-4o", "gpt-4", "gpt-3.5-turbo", etc. :param base_url: Optional base URL for API (e.g., "http://localhost:1234/v1" for local servers). + :param temperature: Temperature for model sampling (default: 0.0 for deterministic outputs). 
:return: RAGFactChecker instance """ @@ -37,6 +39,7 @@ def __init__( self.model = model self.base_url = base_url + self.temperature = temperature self.logger = logging.getLogger(__name__) self._setup_components() @@ -55,6 +58,7 @@ def _setup_components(self): self.config = Config() self.config.model.llm.api_key = self.openai_api_key self.config.model.llm.generator_model = self.model + self.config.model.llm.temperature = self.temperature if self.base_url: self.config.model.llm.base_url = self.base_url @@ -217,6 +221,102 @@ def generate_hallucination_from_answer( # ============ BATCH OPERATIONS ============ + async def generate_hallucination_from_answer_batch_async( + self, + correct_answers: List[str], + questions: List[str], + error_types: Optional[List[List[str]]] = None, + intensities: Optional[List[float]] = None, + ) -> List[Dict[str, Any]]: + """Generate hallucinated version of multiple correct answers.""" + error_type_enums_list = None + if error_types: + from rag_fact_checker.model.hallucination_data_generator.answer_based_hallucination_data_generator import ( + ErrorType, + ) + + error_type_enums_list = [] + for error_type in error_types: + error_type_enums = [] + for error_type in error_type: + if hasattr(ErrorType, error_type.upper()): + error_type_enums.append(getattr(ErrorType, error_type.upper())) + error_type_enums_list.append(error_type_enums) + + result = await self.answer_generator.generate_answer_based_hallucination_batch_async( + correct_answers=correct_answers, + questions=questions, + error_types_list=error_type_enums_list, + intensities=intensities, + ) + return result + + async def generate_hallucination_from_context_batch_async( + self, + contexts: List[List[str]], + questions: List[str], + ) -> List[Dict[str, Any]]: + """Generate hallucinated version of multiple correct answers.""" + result = await self.reference_generator.generate_hlcntn_data_batch_async( + contexts, questions + ) + return result + + def 
generate_hallucination_from_answer_batch( + self, + correct_answers: List[str], + questions: List[str], + error_types: Optional[List[List[str]]] = None, + intensities: Optional[List[float]] = None, + ) -> List[Dict[str, Any]]: + """Generate hallucinated version of multiple correct answers. + + :param correct_answers: List of correct answers to modify + :param questions: List of original questions for context + :param error_types: List of lists of types of errors to inject (factual, temporal, numerical, etc.) + :param intensities: List of error intensities 0.1-1.0 + + :return: List of generated hallucinated versions with error details + + """ + error_type_enums_list = None + if error_types: + from rag_fact_checker.model.hallucination_data_generator.answer_based_hallucination_data_generator import ( + ErrorType, + ) + + error_type_enums_list = [] + for error_type in error_types: + error_type_enums = [] + for error_type in error_type: + if hasattr(ErrorType, error_type.upper()): + error_type_enums.append(getattr(ErrorType, error_type.upper())) + error_type_enums_list.append(error_type_enums) + + result = self.answer_generator.generate_answer_based_hallucination_batch( + correct_answers=correct_answers, + questions=questions, + error_types_list=error_type_enums_list, + intensities=intensities, + ) + return result + + def generate_hallucination_from_context_batch( + self, + contexts: List[List[str]], + questions: List[str], + ) -> List[Dict[str, Any]]: + """Generate hallucinated version of multiple correct answers. + + :param contexts: List of context document lists + :param questions: List of original questions for context + + :return: List of generated hallucinated versions with error details + + """ + result = self.reference_generator.generate_hlcntn_data_batch(contexts, questions) + return result + def generate_triplets_batch(self, texts: List[str]) -> List[List[List[str]]]: """Generate triplets for multiple texts. 
From c120bc60c0240da6011aa04947eedfdf1ab19fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Wed, 27 Aug 2025 16:52:35 +0200 Subject: [PATCH 03/15] Added generation script --- lettucedetect/models/generation.py | 56 ++- scripts/generate_synthetic_data.py | 526 +++++++++++++++++++++++++++++ 2 files changed, 571 insertions(+), 11 deletions(-) create mode 100755 scripts/generate_synthetic_data.py diff --git a/lettucedetect/models/generation.py b/lettucedetect/models/generation.py index 47906db..939b3f0 100644 --- a/lettucedetect/models/generation.py +++ b/lettucedetect/models/generation.py @@ -1,6 +1,6 @@ """Simple hallucination generation using RAGFactChecker.""" -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from lettucedetect.ragfactchecker import RAGFactChecker @@ -35,7 +35,12 @@ def __init__( ) def generate( - self, context: List[str], question: str, answer: str = None, **kwargs + self, + context: List[str], + question: str, + answer: str = None, + error_types: Optional[List[str]] = None, + intensity: float = 0.3, ) -> Dict[str, Any]: """Generate hallucinated content. @@ -49,13 +54,22 @@ def generate( """ if answer: # Answer-based generation - return self.rag.generate_hallucination_from_answer(answer, question) + return self.rag.generate_hallucination_from_answer( + answer, question, error_types, intensity + ) else: # Context-based generation - return self.rag.generate_hallucination_from_context(context, question) + return self.rag.generate_hallucination_from_context( + context, question, error_types, intensity + ) def generate_batch( - self, contexts: List[List[str]], questions: List[str], answers: List[str] = None, **kwargs + self, + contexts: List[List[str]], + questions: List[str], + answers: List[str] = None, + error_types: Optional[List[str]] = None, + intensity: float = 0.3, ) -> List[Dict[str, Any]]: """Generate hallucinated content for multiple inputs. 
@@ -65,15 +79,28 @@ def generate_batch( :param kwargs: Additional parameters :return: List of generation results - """ + if error_types: + error_types = [error_types] * len(contexts) + if intensity: + intensity = [intensity] * len(contexts) + if answers: - return self.rag.generate_hallucination_from_answer_batch(answers, questions) + return self.rag.generate_hallucination_from_answer_batch( + answers, questions, error_types, intensity + ) else: - return self.rag.generate_hallucination_from_context_batch(contexts, questions) + return self.rag.generate_hallucination_from_context_batch( + contexts, questions, error_types, intensity + ) async def generate_batch_async( - self, contexts: List[List[str]], questions: List[str], answers: List[str] = None, **kwargs + self, + contexts: List[List[str]], + questions: List[str], + answers: List[str] = None, + error_types: Optional[List[str]] = None, + intensity: float = 0.3, ) -> List[Dict[str, Any]]: """Generate hallucinated content for multiple inputs. 
@@ -85,9 +112,16 @@ async def generate_batch_async( :return: List of generation results """ + if error_types: + error_types = [error_types] * len(contexts) + if intensity: + intensity = [intensity] * len(contexts) + if answers: - return await self.rag.generate_hallucination_from_answer_batch_async(answers, questions) + return await self.rag.generate_hallucination_from_answer_batch_async( + answers, questions, error_types, intensity + ) else: return await self.rag.generate_hallucination_from_context_batch_async( - contexts, questions + contexts, questions, error_types, intensity ) diff --git a/scripts/generate_synthetic_data.py b/scripts/generate_synthetic_data.py new file mode 100755 index 0000000..9fa7cb2 --- /dev/null +++ b/scripts/generate_synthetic_data.py @@ -0,0 +1,526 @@ +#!/usr/bin/env python3 +"""Generate synthetic hallucination data using RAGFactChecker.""" + +import argparse +import asyncio +import json +import logging +import os +import random +import sys +import time +from typing import Any, Dict, List, Optional + +from lettucedetect import HallucinationGenerator +from lettucedetect.detectors.prompt_utils import PromptUtils + +# Setup rich logging +try: + from rich.console import Console + from rich.logging import RichHandler + from rich.progress import ( + BarColumn, + Progress, + SpinnerColumn, + TaskID, + TextColumn, + TimeElapsedColumn, + ) + + RICH_AVAILABLE = True + console = Console() +except ImportError: + RICH_AVAILABLE = False + console = None + + +def setup_logging(verbose: bool = False) -> logging.Logger: + """Setup logging with rich output if available.""" + level = logging.DEBUG if verbose else logging.INFO + + if RICH_AVAILABLE: + logging.basicConfig( + level=level, + format="%(message)s", + datefmt="[%X]", + handlers=[RichHandler(rich_tracebacks=True, show_path=False)], + ) + else: + logging.basicConfig( + level=level, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + + return logging.getLogger(__name__) + + +def 
load_rag_mini_bioasq(split: str = "train", filter_min_words: int = 10) -> List[Dict[str, Any]]: + """Load rag-mini-bioasq dataset and prepare for generation.""" + try: + from datasets import load_dataset + except ImportError: + raise ImportError("datasets package required. Install with: pip install datasets") + + logger = logging.getLogger(__name__) + logger.info(f"Loading rag-mini-bioasq dataset ({split} split)...") + + # Load dataset + qa_dataset = load_dataset("enelpol/rag-mini-bioasq", "question-answer-passages") + corpus_dataset = load_dataset("enelpol/rag-mini-bioasq", "text-corpus") + + # Create corpus lookup + corpus_lookup = {item["id"]: item["passage"] for item in corpus_dataset["test"]} + + # Process data + processed_data = [] + for item in qa_dataset[split]: + passage_ids = item["relevant_passage_ids"] + context_passages = [corpus_lookup.get(pid, None) for pid in passage_ids] + context_passages = [p for p in context_passages if p is not None] + + # Filter by answer length + if len(item["answer"].split()) >= filter_min_words: + processed_data.append( + { + "question": item["question"], + "answer": item["answer"], + "context": context_passages, + } + ) + + logger.info( + f"Loaded {len(processed_data)} samples after filtering (min {filter_min_words} words)" + ) + return processed_data + + +def load_custom_dataset(file_path: str) -> List[Dict[str, Any]]: + """Load custom dataset from JSON file.""" + logger = logging.getLogger(__name__) + logger.info(f"Loading custom dataset from {file_path}...") + + with open(file_path) as f: + data = json.load(f) + + # Validate format + required_fields = ["question", "context"] + for i, item in enumerate(data): + for field in required_fields: + if field not in item: + raise ValueError(f"Missing required field '{field}' in item {i}") + + logger.info(f"Loaded {len(data)} samples from custom dataset") + return data + + +async def generate_batch_async( + generator: HallucinationGenerator, + samples: List[Dict[str, Any]], + 
method: str = "answer_based", + error_types: Optional[List[str]] = None, + intensity: float = 0.3, +) -> List[Dict[str, Any]]: + """Generate hallucinated data for a batch of samples.""" + logger = logging.getLogger(__name__) + + if method == "answer_based": + # Use existing answers + contexts = [sample["context"] for sample in samples] + questions = [sample["question"] for sample in samples] + answers = [sample["answer"] for sample in samples] + + result = await generator.generate_batch_async( + contexts=contexts, + questions=questions, + answers=answers, + error_types=error_types, + intensity=intensity, + ) + else: + # Context-based generation + contexts = [sample["context"] for sample in samples] + questions = [sample["question"] for sample in samples] + + result = await generator.generate_batch_async( + contexts=contexts, questions=questions, error_types=error_types, intensity=intensity + ) + + return result.results if hasattr(result, "results") else result + + +def convert_to_ragtruth_format( + samples: List[Dict[str, Any]], + results: List[Any], + language: str = "en", + dataset_name: str = "synthetic", +) -> List[Dict[str, Any]]: + """Convert generation results to RAGTruth format.""" + ragtruth_data = [] + + for i, (sample, result) in enumerate(zip(samples, results)): + # Format context using prompt utils + formatted_prompt = PromptUtils.format_context( + sample["context"], sample["question"], lang=language + ) + + # Original answer (non-hallucinated) + if hasattr(result, "generated_non_hlcntn_answer"): + real_answer = result.generated_non_hlcntn_answer + else: + real_answer = sample.get("answer", "") + + ragtruth_data.append( + { + "prompt": formatted_prompt, + "answer": real_answer, + "labels": [], + "split": "train", + "task_type": "qa", + "dataset": dataset_name, + "language": language, + } + ) + + # Hallucinated answer with labels + if hasattr(result, "generated_hlcntn_answer"): + hallucinated_answer = result.generated_hlcntn_answer + hallucinated_labels 
= [] + + # Create span labels from hallucinated parts + if hasattr(result, "hlcntn_part") and result.hlcntn_part: + for part in result.hlcntn_part: + if isinstance(part, str) and part in hallucinated_answer: + start = hallucinated_answer.find(part) + if start != -1: + hallucinated_labels.append( + {"start": start, "end": start + len(part), "label": "hallucinated"} + ) + + ragtruth_data.append( + { + "prompt": formatted_prompt, + "answer": hallucinated_answer, + "labels": hallucinated_labels, + "split": "train", + "task_type": "qa", + "dataset": dataset_name, + "language": language, + } + ) + + return ragtruth_data + + +async def generate_synthetic_data( + samples: List[Dict[str, Any]], + num_samples: int, + model: str = "gpt-4o", + base_url: Optional[str] = None, + temperature: float = 0.0, + method: str = "answer_based", + error_types: Optional[List[str]] = None, + intensity: float = 0.3, + batch_size: int = 10, + output_format: str = "json", + language: str = "en", + dataset_name: str = "synthetic", +) -> List[Dict[str, Any]]: + """Generate synthetic hallucination data.""" + logger = logging.getLogger(__name__) + + # Initialize generator + generator = HallucinationGenerator( + method="rag_fact_checker", model=model, base_url=base_url, temperature=temperature + ) + + # Sample data if needed + if num_samples < len(samples): + samples = random.sample(samples, num_samples) + logger.info(f"Randomly sampled {num_samples} examples from dataset") + else: + samples = samples[:num_samples] + + # Process in batches + all_results = [] + + if RICH_AVAILABLE: + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeElapsedColumn(), + console=console, + ) as progress: + task = progress.add_task(f"Generating hallucinations ({method})", total=len(samples)) + + for i in range(0, len(samples), batch_size): + batch = samples[i : i + batch_size] + + try: + 
batch_results = await generate_batch_async( + generator, batch, method, error_types, intensity + ) + all_results.extend(batch_results) + + progress.update(task, advance=len(batch)) + logger.debug( + f"Completed batch {i // batch_size + 1}/{(len(samples) + batch_size - 1) // batch_size}" + ) + + except Exception as e: + logger.error(f"Error processing batch {i // batch_size + 1}: {e}") + continue + else: + # Fallback without rich + for i in range(0, len(samples), batch_size): + batch = samples[i : i + batch_size] + logger.info( + f"Processing batch {i // batch_size + 1}/{(len(samples) + batch_size - 1) // batch_size}" + ) + + try: + batch_results = await generate_batch_async( + generator, batch, method, error_types, intensity + ) + all_results.extend(batch_results) + + except Exception as e: + logger.error(f"Error processing batch {i // batch_size + 1}: {e}") + continue + + logger.info(f"Generated {len(all_results)} hallucination samples") + + # Convert to requested format + if output_format == "ragtruth": + return convert_to_ragtruth_format(samples, all_results, language, dataset_name) + else: + # Standard JSON format + formatted_results = [] + for sample, result in zip(samples, all_results): + formatted_result = { + "question": sample["question"], + "context": sample["context"], + "method": method, + "model": model, + "temperature": temperature, + } + + if hasattr(result, "generated_non_hlcntn_answer"): + formatted_result["original_answer"] = result.generated_non_hlcntn_answer + if hasattr(result, "generated_hlcntn_answer"): + formatted_result["hallucinated_answer"] = result.generated_hlcntn_answer + if hasattr(result, "hlcntn_part"): + formatted_result["hallucinated_parts"] = result.hlcntn_part + + formatted_results.append(formatted_result) + + return formatted_results + + +def print_statistics(results: List[Dict[str, Any]], output_format: str): + """Print generation statistics.""" + logger = logging.getLogger(__name__) + + if not results: + logger.warning("No 
results to analyze") + return + + total_samples = len(results) + + if output_format == "ragtruth": + # Count hallucinated vs non-hallucinated samples + hallucinated_count = sum(1 for r in results if r.get("labels")) + non_hallucinated_count = total_samples - hallucinated_count + + logger.info("📊 Generation Statistics:") + logger.info(f" Total samples: {total_samples}") + logger.info(f" Hallucinated samples: {hallucinated_count}") + logger.info(f" Non-hallucinated samples: {non_hallucinated_count}") + + if hallucinated_count > 0: + # Average number of hallucination spans + total_spans = sum(len(r.get("labels", [])) for r in results if r.get("labels")) + avg_spans = total_spans / hallucinated_count + logger.info(f" Average spans per hallucinated sample: {avg_spans:.1f}") + else: + logger.info("📊 Generation Statistics:") + logger.info(f" Total samples: {total_samples}") + + # Calculate average lengths + if results and "hallucinated_answer" in results[0]: + avg_hal_len = ( + sum(len(r["hallucinated_answer"].split()) for r in results) / total_samples + ) + logger.info(f" Average hallucinated answer length: {avg_hal_len:.1f} words") + + +async def main(): + """Main function.""" + parser = argparse.ArgumentParser( + description="Generate synthetic hallucination data using RAGFactChecker", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Generate from rag-mini-bioasq dataset + python scripts/generate_synthetic_data.py \\ + --dataset rag-mini-bioasq \\ + --split train \\ + --num-samples 100 \\ + --model gpt-4o-mini \\ + --output data/synthetic_train.json + + # Generate with custom parameters + python scripts/generate_synthetic_data.py \\ + --dataset rag-mini-bioasq \\ + --split test \\ + --num-samples 50 \\ + --model gpt-4o \\ + --temperature 0.7 \\ + --error-types factual temporal numerical \\ + --intensity 0.5 \\ + --output-format ragtruth \\ + --output data/synthetic_test_ragtruth.json + """, + ) + + # Data source + data_group = 
parser.add_mutually_exclusive_group(required=True) + data_group.add_argument("--dataset", choices=["rag-mini-bioasq"], help="Use built-in dataset") + data_group.add_argument("--custom-data", type=str, help="Path to custom JSON dataset file") + + # Dataset options + parser.add_argument( + "--split", + choices=["train", "test"], + default="train", + help="Dataset split to use (default: train)", + ) + parser.add_argument( + "--num-samples", type=int, default=100, help="Number of samples to generate (default: 100)" + ) + parser.add_argument( + "--filter-min-words", + type=int, + default=10, + help="Minimum words in answer for filtering (default: 10)", + ) + + # Generation parameters + parser.add_argument("--model", default="gpt-4o", help="OpenAI model to use (default: gpt-4o)") + parser.add_argument( + "--base-url", type=str, help="Base URL for OpenAI-compatible API (for local models)" + ) + parser.add_argument( + "--temperature", type=float, default=0.0, help="Temperature for generation (default: 0.0)" + ) + parser.add_argument( + "--method", + choices=["context_based", "answer_based"], + default="answer_based", + help="Generation method (default: answer_based)", + ) + parser.add_argument( + "--error-types", + nargs="+", + choices=["factual", "temporal", "numerical", "logical", "causal"], + default=None, + help="Error types for answer-based generation (default: None)", + ) + parser.add_argument( + "--intensity", type=float, default=0.3, help="Error intensity 0.1-1.0 (default: 0.3)" + ) + parser.add_argument( + "--batch-size", type=int, default=5, help="Batch size for processing (default: 5)" + ) + + # Output options + parser.add_argument("--output", required=True, help="Output file path") + parser.add_argument( + "--output-format", + choices=["json", "ragtruth"], + default="json", + help="Output format (default: json)", + ) + parser.add_argument( + "--language", default="en", help="Language code for RAGTruth format (default: en)" + ) + parser.add_argument( + 
"--dataset-name", + default="synthetic", + help="Dataset name for RAGTruth format (default: synthetic)", + ) + + # Logging + parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose logging") + + args = parser.parse_args() + + # Setup logging + logger = setup_logging(args.verbose) + + # Check API key + if not os.getenv("OPENAI_API_KEY"): + logger.error("OPENAI_API_KEY environment variable is required") + sys.exit(1) + + # Load data + try: + if args.dataset == "rag-mini-bioasq": + samples = load_rag_mini_bioasq(args.split, args.filter_min_words) + else: + samples = load_custom_dataset(args.custom_data) + + except Exception as e: + logger.error(f"Failed to load dataset: {e}") + sys.exit(1) + + # Validate parameters + if args.num_samples <= 0: + logger.error("Number of samples must be positive") + sys.exit(1) + + if not (0.1 <= args.intensity <= 1.0): + logger.error("Intensity must be between 0.1 and 1.0") + sys.exit(1) + + # Generate data + start_time = time.time() + + try: + results = await generate_synthetic_data( + samples=samples, + num_samples=args.num_samples, + model=args.model, + base_url=args.base_url, + temperature=args.temperature, + method=args.method, + error_types=args.error_types, + intensity=args.intensity, + batch_size=args.batch_size, + output_format=args.output_format, + language=args.language, + dataset_name=args.dataset_name, + ) + + # Save results + os.makedirs(os.path.dirname(args.output), exist_ok=True) + with open(args.output, "w") as f: + json.dump(results, f, indent=2) + + elapsed_time = time.time() - start_time + + # Print statistics + print_statistics(results, args.output_format) + logger.info(f"Generated {len(results)} samples in {elapsed_time:.1f}s") + logger.info(f"Results saved to {args.output}") + + except KeyboardInterrupt: + logger.info("Generation interrupted by user") + sys.exit(1) + except Exception as e: + logger.error(f"Generation failed: {e}") + sys.exit(1) + + +if __name__ == "__main__": + 
asyncio.run(main()) From b8d9cd528cd99aa2281be8b50d59da434a6df440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Thu, 28 Aug 2025 01:00:19 +0200 Subject: [PATCH 04/15] Change to response format --- lettucedetect/detectors/llm.py | 38 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/lettucedetect/detectors/llm.py b/lettucedetect/detectors/llm.py index 9bd5baf..0d8e90d 100644 --- a/lettucedetect/detectors/llm.py +++ b/lettucedetect/detectors/llm.py @@ -12,25 +12,26 @@ from lettucedetect.detectors.cache import CacheManager from lettucedetect.detectors.prompt_utils import LANG_TO_PASSAGE, Lang, PromptUtils -ANNOTATE_SCHEMA = [ - { - "type": "function", - "function": { - "name": "annotate", - "description": "Return hallucinated substrings from the answer relative to the source.", - "parameters": { - "type": "object", - "properties": { - "hallucination_list": { - "type": "array", - "items": {"type": "string"}, - } - }, - "required": ["hallucination_list"], +# JSON schema for structured response format +HALLUCINATION_SCHEMA = { + "type": "json_schema", + "json_schema": { + "name": "hallucination_detection", + "schema": { + "type": "object", + "properties": { + "hallucination_list": { + "type": "array", + "items": {"type": "string"}, + "description": "List of exact text spans from the answer that are hallucinated" + } }, + "required": ["hallucination_list"], + "additionalProperties": False }, + "strict": True } -] +} class LLMDetector: @@ -174,11 +175,10 @@ def _predict(self, prompt: str, answer: str) -> list[dict]: # Use the full LLM prompt here, not the raw context {"role": "user", "content": llm_prompt}, ], - tools=ANNOTATE_SCHEMA, - tool_choice={"type": "function", "function": {"name": "annotate"}}, + response_format=HALLUCINATION_SCHEMA, temperature=self.temperature, ) - cached = resp.choices[0].message.tool_calls[0].function.arguments + cached = resp.choices[0].message.content 
self.cache.set(cache_key, cached) try: From da6a6c46b62750a2613ff4d0653eb69efabbfac4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Fri, 29 Aug 2025 16:23:33 +0200 Subject: [PATCH 05/15] Langchain integration --- lettucedetect/detectors/llm.py | 8 +- lettucedetect/integrations/__init__.py | 5 + .../integrations/langchain/README.md | 186 ++++++++++ .../integrations/langchain/__init__.py | 29 ++ .../integrations/langchain/callbacks.py | 325 ++++++++++++++++++ .../integrations/langchain/requirements.txt | 6 + 6 files changed, 555 insertions(+), 4 deletions(-) create mode 100644 lettucedetect/integrations/__init__.py create mode 100644 lettucedetect/integrations/langchain/README.md create mode 100644 lettucedetect/integrations/langchain/__init__.py create mode 100644 lettucedetect/integrations/langchain/callbacks.py create mode 100644 lettucedetect/integrations/langchain/requirements.txt diff --git a/lettucedetect/detectors/llm.py b/lettucedetect/detectors/llm.py index 0d8e90d..4c4b010 100644 --- a/lettucedetect/detectors/llm.py +++ b/lettucedetect/detectors/llm.py @@ -23,14 +23,14 @@ "hallucination_list": { "type": "array", "items": {"type": "string"}, - "description": "List of exact text spans from the answer that are hallucinated" + "description": "List of exact text spans from the answer that are hallucinated", } }, "required": ["hallucination_list"], - "additionalProperties": False + "additionalProperties": False, }, - "strict": True - } + "strict": True, + }, } diff --git a/lettucedetect/integrations/__init__.py b/lettucedetect/integrations/__init__.py new file mode 100644 index 0000000..de7bde5 --- /dev/null +++ b/lettucedetect/integrations/__init__.py @@ -0,0 +1,5 @@ +"""LettuceDetect integrations with popular frameworks. + +This package provides clean, professional integrations between LettuceDetect +and popular AI/ML frameworks for seamless hallucination detection. 
+""" diff --git a/lettucedetect/integrations/langchain/README.md b/lettucedetect/integrations/langchain/README.md new file mode 100644 index 0000000..6ff8a42 --- /dev/null +++ b/lettucedetect/integrations/langchain/README.md @@ -0,0 +1,186 @@ +# LettuceDetect + LangChain Integration + +Clean, professional hallucination detection for LangChain applications. + +## Installation + +```bash +pip install lettucedetect +pip install -r lettucedetect/integrations/langchain/requirements.txt +export OPENAI_API_KEY=your_key +``` + +## Quick Start + +```python +from langchain.chains import RetrievalQA +from langchain_openai import OpenAI +from lettucedetect.integrations.langchain import LettuceDetectCallback, detect_in_chain + +# Method 1: Use convenience function +chain = RetrievalQA.from_llm(llm, retriever) +result = detect_in_chain(chain, "Your question") + +print(f"Answer: {result['answer']}") +if result['has_issues']: + print("Potential hallucinations detected") + +# Method 2: Use callback directly +callback = LettuceDetectCallback(verbose=True) +answer = chain.run("Your question", callbacks=[callback]) + +if callback.has_issues(): + print("Issues found in response") +``` + +## Real-time Detection Demo + +Interactive Streamlit demo showcasing real-time hallucination detection: + +```bash +streamlit run lettucedetect/integrations/langchain/examples/streamlit_app.py +``` + +Features: +- Real-time token-level detection during streaming +- Visual highlighting of potential hallucinations +- Clean, professional interface +- Uses local transformer models for fast inference + +## API Reference + +### LettuceDetectCallback + +Main callback class for automatic hallucination detection. 
+ +**Parameters:** +- `method` (str): Detection method ("rag_fact_checker", "transformer", "llm") +- `model_path` (str, optional): Path to model for transformer method +- `on_result` (callable, optional): Function to handle detection results +- `verbose` (bool): Whether to print results + +**Methods:** +- `get_results()` - Get all detection results +- `get_last_result()` - Get most recent result +- `has_issues()` - Check if any issues were detected +- `set_context(context)` - Manually set context documents +- `reset()` - Reset callback state + +### LettuceStreamingCallback + +Real-time hallucination detection during streaming generation. + +**Parameters:** +- `method` (str): Detection method +- `model_path` (str, optional): Path to model for transformer method +- `context` (list): Context documents for detection +- `question` (str): Question being answered +- `check_every` (int): Run detection every N tokens +- `on_detection` (callable): Function called when detection runs +- `verbose` (bool): Whether to print detection results + +### detect_in_chain() + +Convenience function to run a chain with detection. + +**Parameters:** +- `chain` - LangChain chain to execute +- `query` (str) - Question to ask +- `context` (list, optional) - Manual context documents +- `**kwargs` - Additional arguments for LettuceDetectCallback + +**Returns:** +Dictionary with `answer`, `detection`, and `has_issues` keys. 
+ +## Detection Methods + +| Method | Description | Use Case | +|--------|-------------|----------| +| `transformer` | Fine-tuned encoder models | High accuracy, local inference | +| `rag_fact_checker` | Triplet-based detection | General purpose, no local models | +| `llm` | LLM-based detection | Flexible, API-based | + +## Examples + +### Basic RAG Pipeline + +```python +from langchain.chains import RetrievalQA +from langchain.vectorstores import Chroma +from langchain_openai import OpenAI, OpenAIEmbeddings +from lettucedetect.integrations.langchain import LettuceDetectCallback + +# Setup RAG chain +embeddings = OpenAIEmbeddings() +vectorstore = Chroma.from_texts(documents, embeddings) +chain = RetrievalQA.from_llm( + llm=OpenAI(), + retriever=vectorstore.as_retriever() +) + +# Add detection +callback = LettuceDetectCallback(verbose=True) +result = chain.run("Your question", callbacks=[callback]) + +# Check results +if callback.has_issues(): + last_result = callback.get_last_result() + print(f"Found {last_result['issue_count']} issues") +``` + +### Manual Context + +```python +from langchain_openai import OpenAI +from lettucedetect.integrations.langchain import LettuceDetectCallback + +llm = OpenAI() +callback = LettuceDetectCallback() + +# Set context manually +callback.set_context([ + "Python was created by Guido van Rossum in 1991.", + "It is known for readable syntax." +]) +callback.set_question("What is Python?") + +# Generate with detection +response = llm.generate(["What is Python?"], callbacks=[callback]) +``` + +## Production Usage + +For production applications: + +1. Use the `transformer` method with local models for fastest inference +2. Set `verbose=False` to avoid console output +3. Use `on_result` callback for custom logging/alerts +4. 
Monitor detection results for system health + +```python +import logging +from lettucedetect.integrations.langchain import LettuceDetectCallback + +def log_detection(result): + if result['has_issues']: + logging.warning(f"Hallucination detected: {result['issue_count']} issues") + +callback = LettuceDetectCallback( + method="transformer", + model_path="output/hallucination_detection_ettin_17m", + on_result=log_detection, + verbose=False +) +``` + +## Requirements + +- Python 3.8+ +- LangChain +- LettuceDetect +- OpenAI API key (for LLM-based detection) + +For local transformer models: +```bash +pip install transformers torch +``` \ No newline at end of file diff --git a/lettucedetect/integrations/langchain/__init__.py b/lettucedetect/integrations/langchain/__init__.py new file mode 100644 index 0000000..1de2c08 --- /dev/null +++ b/lettucedetect/integrations/langchain/__init__.py @@ -0,0 +1,29 @@ +"""LangChain integration for LettuceDetect hallucination detection. + +This module provides a clean, minimal callback for integrating LettuceDetect +with LangChain applications. The callback automatically detects hallucinations +in LLM responses when used with retrieval chains. 
+ +Example usage: + + from lettucedetect.integrations.langchain import LettuceDetectCallback, detect_in_chain + from langchain.chains import RetrievalQA + + # Basic usage + callback = LettuceDetectCallback(verbose=True) + result = chain.run("Your question", callbacks=[callback]) + + if callback.has_issues(): + print("Potential hallucinations detected") + + # Or use convenience function + result = detect_in_chain(chain, "Your question") + print(f"Answer: {result['answer']}") + print(f"Issues: {result['has_issues']}") +""" + +from .callbacks import LettuceDetectCallback, LettuceStreamingCallback, detect_in_chain + +__all__ = ["LettuceDetectCallback", "LettuceStreamingCallback", "detect_in_chain"] + +__version__ = "1.0.0" diff --git a/lettucedetect/integrations/langchain/callbacks.py b/lettucedetect/integrations/langchain/callbacks.py new file mode 100644 index 0000000..b383e70 --- /dev/null +++ b/lettucedetect/integrations/langchain/callbacks.py @@ -0,0 +1,325 @@ +"""Clean, minimal LangChain callbacks for LettuceDetect integration.""" + +from typing import Any, Callable, Dict, List, Optional + +from langchain.callbacks.base import BaseCallbackHandler +from langchain.schema import LLMResult +from langchain.schema.document import Document + +from lettucedetect import HallucinationDetector + + +class LettuceDetectCallback(BaseCallbackHandler): + """Simple callback for post-generation hallucination detection. + + Automatically detects hallucinations in LLM responses when used with + retrieval chains or when context is provided manually. + """ + + def __init__( + self, + method: str = "rag_fact_checker", + model_path: Optional[str] = None, + on_result: Optional[Callable[[Dict[str, Any]], None]] = None, + verbose: bool = False, + ): + """Initialize the callback. 
+ + Args: + method: Detection method ("transformer", "llm", "rag_fact_checker") + model_path: Path to model (for transformer method) + on_result: Optional function to handle detection results + verbose: Whether to print results + + """ + super().__init__() + self.detector = HallucinationDetector(method=method, model_path=model_path) + self.on_result = on_result + self.verbose = verbose + + # State + self.context: List[str] = [] + self.question: Optional[str] = None + self.results: List[Dict[str, Any]] = [] + + def set_context(self, context: List[str]) -> None: + """Manually set context documents.""" + self.context = context + + def set_question(self, question: str) -> None: + """Manually set the question.""" + self.question = question + + def on_retriever_end(self, documents: List[Document], **kwargs: Any) -> None: + """Store retrieved context.""" + self.context = [doc.page_content for doc in documents] + + def on_chain_start( + self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any + ) -> None: + """Extract question from chain inputs.""" + for key in ["question", "query", "input"]: + if key in inputs: + self.question = inputs[key] + break + + def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + """Run hallucination detection on LLM response.""" + if not self.context or not response.generations: + return + + for generation in response.generations: + if not generation: + continue + + text = generation[0].text + if not text.strip(): + continue + + try: + spans = self.detector.predict( + context=self.context, answer=text, question=self.question, output_format="spans" + ) + + result = { + "text": text, + "question": self.question, + "context": self.context.copy(), + "has_issues": len(spans) > 0, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "spans": spans, + "issue_count": len(spans), + } + + self.results.append(result) + + if self.verbose: + status = "ISSUES DETECTED" if result["has_issues"] else "CLEAN" + 
print(f"LettuceDetect: {status} (confidence: {result['confidence']:.3f})") + + if self.on_result: + self.on_result(result) + + except Exception as e: + if self.verbose: + print(f"LettuceDetect: Detection error: {e}") + + def get_results(self) -> List[Dict[str, Any]]: + """Get all detection results.""" + return self.results.copy() + + def get_last_result(self) -> Optional[Dict[str, Any]]: + """Get the most recent detection result.""" + return self.results[-1] if self.results else None + + def has_issues(self) -> bool: + """Check if any results had issues.""" + return any(r["has_issues"] for r in self.results) + + def reset(self) -> None: + """Reset callback state.""" + self.context = [] + self.question = None + self.results = [] + + +class LettuceStreamingCallback(BaseCallbackHandler): + """Real-time hallucination detection during streaming generation. + + Runs detection periodically during token streaming, enabling real-time + feedback about potential hallucinations as they're being generated. + """ + + def __init__( + self, + method: str = "transformer", + model_path: Optional[str] = None, + context: Optional[List[str]] = None, + question: Optional[str] = None, + check_every: int = 10, + on_detection: Optional[Callable[[Dict[str, Any]], None]] = None, + verbose: bool = False, + ): + """Initialize streaming callback. 
+ + Args: + method: Detection method + model_path: Path to model (for transformer method) + context: Context documents for detection + question: Question being answered + check_every: Run detection every N tokens + on_detection: Function called when detection runs + verbose: Whether to print detection results + + """ + super().__init__() + self.detector = HallucinationDetector(method=method, model_path=model_path) + self.context = context or [] + self.question = question + self.check_every = check_every + self.on_detection = on_detection + self.verbose = verbose + + # Streaming state + self.accumulated_text = "" + self.token_count = 0 + self.last_checked_length = 0 + self.detection_results = [] + + def set_context(self, context: List[str]) -> None: + """Set context documents.""" + self.context = context + + def set_question(self, question: str) -> None: + """Set the question being answered.""" + self.question = question + + def on_llm_start(self, *args, **kwargs): + """Reset state when streaming starts.""" + self.accumulated_text = "" + self.token_count = 0 + self.last_checked_length = 0 + self.detection_results = [] + + def on_chat_model_start(self, *args, **kwargs): + """Handle chat model start for newer LangChain versions.""" + self.on_llm_start(*args, **kwargs) + + def on_llm_new_token(self, token: str, **kwargs): + """Process new token and run detection periodically.""" + self.accumulated_text += token + self.token_count += 1 + + # Run detection every N tokens + if ( + self.token_count >= self.check_every + and len(self.accumulated_text.strip()) > 20 + and self.context + ): + try: + # Run detection on accumulated text + spans = self.detector.predict( + context=self.context, + answer=self.accumulated_text, + question=self.question, + output_format="spans", + ) + + # Create detection result + result = { + "text": self.accumulated_text, + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + 
"issue_count": len(spans), + "token_count": len(self.accumulated_text.split()), + "new_text": self.accumulated_text[self.last_checked_length :], + "is_incremental": True, + } + + self.detection_results.append(result) + self.last_checked_length = len(self.accumulated_text) + + # Call user handler + if self.on_detection: + self.on_detection(result) + + # Verbose output + if self.verbose and result["has_issues"]: + print(f"Real-time detection: {result['issue_count']} issues found") + + # Reset token counter + self.token_count = 0 + + except Exception as e: + if self.verbose: + print(f"Streaming detection error: {e}") + + def on_llm_end(self, response, **kwargs): + """Run final detection on complete response.""" + if self.accumulated_text and self.context: + try: + spans = self.detector.predict( + context=self.context, + answer=self.accumulated_text, + question=self.question, + output_format="spans", + ) + + final_result = { + "text": self.accumulated_text, + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "issue_count": len(spans), + "token_count": len(self.accumulated_text.split()), + "is_final": True, + "total_checks": len( + [r for r in self.detection_results if not r.get("is_final", False)] + ), + } + + self.detection_results.append(final_result) + + if self.on_detection: + self.on_detection(final_result) + + if self.verbose: + status = "Issues found" if final_result["has_issues"] else "Clean" + print( + f"Final detection: {status} ({final_result['total_checks']} incremental checks)" + ) + + except Exception as e: + if self.verbose: + print(f"Final detection error: {e}") + + def on_chat_model_end(self, response, **kwargs): + """Handle chat model end for newer LangChain versions.""" + self.on_llm_end(response, **kwargs) + + def get_results(self) -> List[Dict[str, Any]]: + """Get all detection results.""" + return self.detection_results.copy() + + def get_final_result(self) -> 
Optional[Dict[str, Any]]: + """Get the final detection result.""" + final_results = [r for r in self.detection_results if r.get("is_final", False)] + return final_results[-1] if final_results else None + + def has_issues(self) -> bool: + """Check if any detection found issues.""" + return any(r["has_issues"] for r in self.detection_results) + + +def detect_in_chain( + chain, query: str, context: Optional[List[str]] = None, **kwargs +) -> Dict[str, Any]: + """Convenience function to run a chain with automatic hallucination detection. + + Args: + chain: LangChain chain to execute + query: Query/question to ask + context: Optional context documents (if not using retrieval) + **kwargs: Additional arguments passed to chain + + Returns: + Dictionary with chain result and detection info + + """ + callback = LettuceDetectCallback(**kwargs) + + if context: + callback.set_context(context) + callback.set_question(query) + + # Run chain with callback + result = chain.run(query, callbacks=[callback]) + + detection_result = callback.get_last_result() + + return { + "answer": result, + "detection": detection_result, + "has_issues": detection_result["has_issues"] if detection_result else False, + } diff --git a/lettucedetect/integrations/langchain/requirements.txt b/lettucedetect/integrations/langchain/requirements.txt new file mode 100644 index 0000000..faea511 --- /dev/null +++ b/lettucedetect/integrations/langchain/requirements.txt @@ -0,0 +1,6 @@ +# LangChain Integration Requirements for LettuceDetect +langchain>=0.1.0 +langchain-openai>=0.1.0 + +# For Streamlit demo +streamlit>=1.28.0 \ No newline at end of file From f7561e561737c68f8bcf6c0cfed9df042d7845e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Fri, 29 Aug 2025 16:56:40 +0200 Subject: [PATCH 06/15] Added langchain example --- assets/tinylettuce.jpeg | Bin 0 -> 38335 bytes assets/tinytinylettuce.png | Bin 0 -> 124185 bytes .../integrations/langchain/callbacks.py | 3 +- 
.../langchain/examples/rag_example.py | 234 +++++++++++++++++ .../langchain/examples/streamlit_app.py | 242 ++++++++++++++++++ .../integrations/langchain/requirements.txt | 6 +- 6 files changed, 483 insertions(+), 2 deletions(-) create mode 100644 assets/tinylettuce.jpeg create mode 100644 assets/tinytinylettuce.png create mode 100644 lettucedetect/integrations/langchain/examples/rag_example.py create mode 100644 lettucedetect/integrations/langchain/examples/streamlit_app.py diff --git a/assets/tinylettuce.jpeg b/assets/tinylettuce.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..9c043517da3d76664842a4b95242979db6f43507 GIT binary patch literal 38335 zcmb@u2UwHI)&Ts{Y*;`LY!p#JK|l;eK?tq{bWMmDA{`Wz5(K0x0fJ?fqF`HrAjAzC zAdw&fhK?&uqzNIk5Rgs+p@%>q@V~fs@80`;cklQA&-0%=nK^ULnKSdgnVECu%)nab z+IK+eXY=#sfQSeHh=3npZ9t^+{MoZE7cU{r&%D&kvCtv9_R)NOoQJ;0bt``XJtWL*5eQS z$U5%&2Yz}T4-L8*1j<}o#{;Y{odxl0Ab!N-UvSrd!QE~Ktjj+FWlsCt2wrb%9oBoi z^|~(_1?HQ;?|vWy@vitA%dR7?bfKOa|Q<0jD!nwt#%36gWV|#6-aki*68? 
z+^|6m5D^uV7MIwt@xZ2?I%j08uHM-!tKxs+>8^u6U2^?RF5vDn`Q3*gXHjl}_i}&N zP5)5YGJRN8pZK};{__{0!hP#4ferrI8kmC$*Y$x+6Y2HVM8!q6h)ak{NUY2JXzhRm z*p!uw%2oe6Pk+n(y=8;!iRpuopDy*w1?)QH`fNrQb(h$>)(vb0^@>W1Ndr)TXVEw0 z!9ivvghz)i5dy_y!?4K&yf*E%QsB25_y%NA--z2L$)2hh<%+<`DD9qxm2VPLb4LG2 z=^Jl4uK~J%{A83;AY=|O8O0@u3LBamjp4{I+t&cS<==YbCqtKBOZJUKe3Muv1OX7} zaWUvc0HVB+6*(3vHt7yOx3OnAN*jj{T>|wlzuwrh{yaMhfrw9lNaPq0(Of9fH?mGs z3Y-Ks0KoD(=Ldj76+z2UdkA>p59K0}qa8=C$WQ*!6j)*bF5m-1DFrXTmgJj^h20f4 zQ>`LQr{+w?MEOgviv%KknCaFi(S87sH*QE9ioRxhPil|92}P&pRjdccLrGWs^QfxT zO?Al_9jNjdf7L@$DBywR_YF4z_sA4N)6BM*dI+Rv+w?I$rqX%q3H?odB>wZ3vXU0)S0# z($VPI4EvqS*#wA2-H8#w)^*l?5M4M6r~}6qo~uGs#a(XRn^4{9cH-J3d~3obIhyCwKPi)u}ZSoNJMCSh2DlA;vQ{31_tw(0ViLhEX-~+*}=!uGncQpKa zbHnE5y8U?@BA;!JF~WWx9lIt&`|`@-L$Y|obLCCie$N2lX#Aq`6XUY2(g{yC$2f)@ zcLzSZv9MtoeorKIRN;5~A@h!(;h#1fzb@jRHXS8e^^^9k1<}ZdlgirH0Na`q@TUoZ z%5ge8$O^qsn?~HSsZd9H&eSmi{fuhdTD&cVMNUAX^688;RpazYF0CNsB7;hzn&69Z?*R2-sk6k}}(M(=}{tcZFObFtO+s>;-Ji4-dNR8!q1}jjzxyx-~ zr@XQ04lDEF=5pG=Q~D`J#LC-_pq1>WVo@@|P=zDCxF+a|$R-X4$m#g0I}?hidA z@fFbW7{RJkdGttG17Adk8b0qPJkfH$xc+MZ63a=)H{QUe4pk)yM8=R4g)gRNMRC{c zVvs!|9<&c4EbQ{|R?*kMwd0b{p}GKQ03~0n*RuxgfUQ7gIkhfEth27C_M6Ovq_TAK zjw_~iVvso_Y?1(ML>U}1MTE0Z>rY$4!crT~J-kx&ngY(w%1QCBS^;+?Wu)#$<#Nj7 zHSTQY8z&^kf6ANttk`M%iGb>oMfLC}v9Gm@Ofrg8wA*Sf$s12^KD6_0&IdrdGkM|Q z!tsr66VeM}#luDsTf?K8+=^G(l25mWB^kk$-~68NTMXfn@>b-R-=mk(qFgVzZMVCh zl#rwhm|gcc{-ol|PI(!w$nu@L2jq07fGhJ8Axp2fJH02AzFQFc5Lwl*=gD#8w}!eX z9acn{%x<8z(RHUuM54}|RBc-=Lydp%+g6K)&kM&lM=#4rPqsc8QmRZYDB}|dGwerE41Qn>dZgiur>TOim$U0KPJiC*i%Kg zV%wbcYB?kL%d@Zy;;`DJBq!-b)ym6*oz`-0U0~PcC!dt}64itAvAGejZEkdf8c4~xMtTG}-sLYmuV_4BsvTG={i5lN>-n8#jz8n{xN}D5NR~s& z4}T-zMXAVQprT>w00c;Fx8bWP9T1;WsBM@SC=ye>85g~Dx7PneQG(T0 z;SRF~k8ixK$G)2JcewbU=RpH>Bu|M8WRwnlzRQO{HD0b`E33PkSQIK30(Y;ZRoyu% z0+i4GjD2=M+!8<^kKF`pM`>v!i5qEO6I%ebHa7;OJrjFzXmw2F3E=ou3y3)^|6cmJ z%w$|z@9VhUgPS{(!MXVtNXpz73FJXGiSjp{*hmXxerqx{BFXpsSb{+*`Of&U@=rel 
z$CN8>8Yj3s3w2x;8H@U>`Z^{KDpUu`zQfn)B7o$*$xTO>zbFIRAAk>LyG@5y%FTRj|PZ>pRTM|{0Z3nW5odR{{SLqo{3xqoX7G`nF10qRm#Bo zC@pFCh$rVDOwk7AH(^oYf@gqe?akwH3CR=dwG_VtB6qH?*ZV(UmqhK2yGn&WR#yOs z`Sgdlc$4WmBI4?Q=rsUnU6gns=cf2QYMr)jP|KwNV3!2%$4i!YVJFT5DC#ubkcg7U3<-rmz9&Ag3f$wwYvAcgL_?xuQ^dF30ZG5PQE)5Ec9cCeO#>l(EE>Zy-V9zYOnx z(XbW7wL7H_&4V@OP1hM>U4cr6MBGMxUN8K2Fa=HhGoX+MTbPUzj{^QXfOFM&y<`6X zo_qy4EJ`xriE^Qc#AiS|n})}pSYDC(huDrQ66XQqFMw!-sobBm|ExixmT?@&`{mQ2GdfX(_sV|= zX@NiTZ(tznrhhVz10u?R$Rt?Y1AdL8z~n!GMDsPrYf@hdmrPY1b0M50#|81v$LbPp znjQz8EYP{ySQu4S?k)A;V4reXd<|$lx7SWw`J}#Ekn~9{AaKt19uTL{v}7DrI}x$H zMj1{zG-ceeK}_q!DfmxUto}oL|AQpfDRLd09XoARAtgp|w{ zAR5TJ>&C*ljcG(z6a!l}ft#uS1yBB>IAV#*B1eGt_rs#fXYZ$d0wg)HVB>!Sq8YT` z<|TNC7C_&AU1W5W%K? z+8VXNGzuI+fDs^)5WL~k`TLbm_V|B3bs_r8|8DEkUnI~d@3zTAAw0eVZGI$|{4UT+YIe7X@UJ@LutA z5J>ld?1;-NuyFOD44XjDism(-^>Hr?WyW2>G>?wY;X()KI__%5xigUY^GWd(>6Z;V zP8&>B)l>H;2GMUgDNHAnT4vmw`i=60qlF3+%CG};+s8;bYKz38j1^=qu?HvHL^Q*M zDmu|klD+)Q8tn3Z%dpENs)>RZNz|}tm-GM+kM$W*6yDQ0Q^5I@QrF-7RxbnqONW6;ss{#K#^8s{BY3O|1Rl1K%i{i%m1q8WK|Cqx-Ny4zN zLjl}XB~D)FR26QUu4Uwh7V=;|LTNS_Wi9YX98JJiOUJ((ZS_+UYL33DK+q#2YEG%a zPlbHUq`hGlnd6UjGddvA#KuJ-#CjQKW$BUcj@6G;(>@TEK7dMRMGll?Aj7h2X_htK3PR&xgi)1PE*V$Aj`?B%#zN^hS z473eC8^+S?&W3eo^peL&v1CFR$%ED0U(75ft7^B@TVpfmGA#8B>6gC)6P?Ph)h2fX z)@Z4uudm~kx4h50zpS_hoRN|T*SYc^bFJ|6A7c_s6{7xuiuj*|HD5jaH#6%Gp{-gv zKol^n7_W6{`w8GL8;~h_=ZO>$5Pfe)Oyj8>IrgZ0&uxNk?|g}QeSRm!iqekpo^A1s zUIUsuT8}@ZXi-PLKU;NARp7*e!52yzyoGB=o zoE@&2qvwM+9Prn3YD2B~!v{^y#~!S}56f(I${tF+wPMbXuw+#Qmt??KOghWZam@Ru zX};D41SB?#1X+>kReiN$h%1;;PdIeLW#4z!_UTl*4`RQKTkA|?oF5U-!;?KzipxGv zxZ+)lXAHDiTP4?It@X+@ZWm>pWj(E|c&^`QR<#h0Kii2f( zrlQfx8CLhnnlz|hXtaML+;f4ew}%TG&&7L_A;VmGjpSFby5!Kvm-80!EpH2u=LFtF zHd5vh%&4Goo53j)uHW*!V;d}ZESsS)KuPna%Onw-y+T)YJ}+&&7}}_f3M+Y?vdfDa z=@##Q;rTARuEAx)$JQ3?GHO!VTyM&$*%m=5qv_HVq>nMcD|NAo^hetfR&zP=t3}P} z)p;kom3GA3jMVsjo|a$g=0ST)LD^I>Fcd1KJKL#ws;?pdQxujnV`E)ESru_uzM~`q zPo@T1kJzeZMZ36+gb9u$!ObdPO8x#|>&(WM-9sOzcuEiaeKKwj;jucF>|EcyVOpL` z=CRRJZz~X1y43de?*xS{lX7LpxIHhA 
zu98=5Ks(nF`r0vQB3*V<1}M!H?q*vV*NYIjp|Ot%XzJ$v!~ zSD~%k$RnV2gq+VGoW}%(y6eo`aN-ejkZ%Te!K}Kdr_a7KzB(;ZF&2hvFAJ$i4YF~G z)GbjvN{Xd=hnyqy=S3PdTAShAAl(pO4;i0{LZnhb74(=v7(|;v10Ki?s7=e z^N)^&?YTcc|HN1>GG5C{9c!{vLZqRFs& zLxf^HBkZv`k>?L9xttk8CUeMGl(aMiVoW?RY8*CsX7L#>Y+F?9-!a#3sZPr=^@F9WRkwkrlNNR8ZY~mEtOzH zx?ol#^xEI&5H=b~$@qiiM`Aq+>YR5BW>h{D9dNL*EIvTDOrJHTxY6lvTDrP#tu)w3 zk2|5!jtDfvx)F`m9l?;@Bb?OsFff|-L#=cOq%R*E=26-AoeA6hh=w|A{?Lo5o1*yp z>)elHy81`x?0Es!-n061Ggmp-@-4^TVTGP%JKRH39cii6!YzgRl2wmT zY+^`dpSh3~bPa~)(Txx9I9T>&_PhOxM{r^=5nq^iXEh7MMcBpi=zEd6Zo~ScYk-VNQEwyOZ4JJ)oF=ly5|hqhU;_a-JK*d^mL39}*bki~>s=0m~EJZxrn>N8$f zZ6)htDuSRrh1Rt(!b;=GQ2$*^(onBQ;Mt#x()z0u-QUkmSj;(s(n1Zej;>Fal-EiAk7t-Db zp<;r!@T@M={J)|6Zq|PM(9lR(13vm%Sq}~$pWIK2nQ02F)myQ6=_6-goF0^?uNUSp z?~Z2^aBr&kG4V4xGTUxXpAVp(${trcN6K1sG)lvax3oYz>6k#YY%N`oMJVy82oa(S z^D-%Q+&v9p8Mm&#ydSg%5awJeag>6aicZ-beokA4@DpsF{OJy@8#Y>&US&%C1TI~H zJj>8g4D)DYH+2O_cqFXY~1@;pIGhhRMJi%sgsH^@eGs>2;)0Dc1S54 z?<`KGb0a%SxgU8i9P(VMXx8rDdB)?>ONf)PY=SKwd)y)_Ezd7vH%D^~*pO<;(!%Jo zUB9twDU&^OgH>wx{hxK z0~-G?KmrCxi7E#~Z>$|&pWr5qrHn2=L-z+gK^VkIacVHc6KJ&>#;d0lMA`5R$A4-pS^vws>yrh zVv$^l+jL9?ml^yuyJyHKjhTedeDSC9y$Ez7>8268^mzsB|^msTPo`W3jn=5Q|mgoYsIT^|#-72HtIb!+26?l?R{u@{+Je>?!AV zd>!_MjqIw03$+ag8+e7V*9dN;gsMiYs#TIFkbGa1VpDz6d~N!Oe?0WfK)8q305gK@ zMj@701L&e7_xiXqw9;I!eyVqeR6|!p{W+1zWEql=;osP zChciv9o9l(a&THgO9keZ*28B3ggA3vooZ?8kA5%gY94UJhd+00JXxcPy~-$3x?t$i;7N|Rqep6%(yd)RobuRHj1>;?dRIdM zUC-(i$^Yns;@Q5+zRwn#IfIPWI3s-`WVkHPyI}8mQl!6`jsBpp<3h$KsQUL{*y^3d zcO12yi-Du*XAApQR-a`p#E_C{yM<#}G+{;E{;wxrQj>;9lM^=2rL#``@~F~s($^)% z;(QjK$An6ZZ|K70=-KE7qte?Om43!$B~uni6-D{02Ujev_Pm=}Et`LyJWmfQvU%y@ zkWO77jO&=#Yt(YhXxkP;8{^!Dm=TQ`&u-wiG0%qRI48Q0(yiPx1uc$nE~40nB&UK2FSSX=uom2v&hVe77)vA zejCJ)k>NkcI=K)_ZHkC#wQa+(sZ;%z=~F_;wCfiW1>bO` z-Oh(}^~tLwo-iFPt(fNb&=I}{oO($&6Do$?vYJt=yul-6y-L8NaaV_4RocC!31vze zI@?fkrK=cMOf(78Xr*X3+w56oKye>+^v2>n>At#MB{rTk+&sMbt6D1#@4pvQ5P76% zcba{qY?aSIpt)15t6G7Tj7+y{L_g74yBJpN^ljY0WKY24^OrX@o%Sa@;!G@*$f9%t 
z`{^cY!0$Rr0|m$?2j)2RSfv!N*$;1!8W&Ok$GNyJyc~&G`4!~^9d`27%!Y+sc+p2| zPi|gZ$j$K_xvnJX6u>Nn1sOf!U9gzua(Dv2JH=jO^Yn3UWZc3GSJ4xnwFYdn{!rPx z18`UaR(32t=eRr=?=VrUtMfrM+!`)wK*5}{+8l^buV~>r;~f#9h{(qXvY%CJW(+@< zU@8{2iFoeeYl`Th=#3Na-993(*b=V2u$@#h6L#$3A8IaC6^%Fp22Xx~{6+?#dghAM zXTj@#P{55H{K{r3^4FP2y>f5C!#ejYnxEIeUYBMD+MjDcbnVFT znq^qm)~RM@+_sHLd!*OKZN4>AF>rVE!pG$fU6%3~mFv4I3`^#g`Gs&^*(RjwHX0le zpb*_<-s$^f+T%3}=hBjC|F~E- z>i9q#iC^fdQEhKbUjuC926A*mS0^rvmby=1^wKVsnFTnK_RtpXNjODx;@m zqN~vX@z-k?em=r|95U>IkgbZ)An(pdT6{O1kjX5-?l?T2xV^%49yOzB-_bbHs0Xv^ zZ9}usI#7?nPTY>EE&BsTYun~OJf5-{X$*Tj=ifH2L5&RDWiv$JJMsOd$NSd+Pa|aWWuvL!6v3|+c#3Q0E5~;yZ&wJ*5dRpux?^le;yjrr|VUKQX2LW z&ZnJPIW$ft#Tf^79>ohyH08N#z>LGA6Tu2srU3Lz= zd)`cVHRJ}`x*-(rZnT_HV#8hBmlMlCOg>%wl|z#vx4X0AQr`IPbzC;Pr+m@sTq#juvrV?$VU} z#kQf-+lzyGRl-jOQnyK)*z=vr8+@7W5u*dl=&H!x+wN(}y!toBZmp{xq|`HA{BGiD z2K!y#EGD~lal$6x%cNEQ2LdzH?(yrl*i7<8z_f3Jq z3?{R=stOl+Y7L<9{T|zrvy!+ntXid#h}7%#K}&`!9O%c;1dm$;IQ$S@X7|gtL6GW~ z^hO^>dRM3eA+K8^cW5ax5E zr^LN*3L)cLm^^0zhHiA`1cssZr`17kn7W_&)O+5*cgFepM>}RgvDVoFoJ~k7>`R5r zOcu#)z^pnY5YnCVtJ8>@IK8cjIDhb2Cz}>iMo!hPrS$taRwB$pgrAeRDbJ;2)#n=q zbO;OtwEC1&OG}vA?CsRNI%Btr6evSU$szQ$Z%7+-+agDc-&MjrGVdd>*{HI@Z-SlW;4W z(_2SfebE&nY}SFMw02h{%?pEIE`qY={tJ154`C1?M29DMD}z6sT1NXJI59hfs7IBiDdhClt>$8+Tr} zw9-YW$0&EJD_R|ejHIMA4F-NkK^H`;@}$ZKSR zHx!lbWe#&hu$+Cq15B6vrRSP+7t@=$@L4ESh)zmkXC zB$=Uqy383xg}S6enM}VhG){KTZJ<8ff$Y4A?`@U-J?&tygGA^g?5h{8fxk^rZCKXZ zd-0Y4aq@bv@%!0`%XkCDjRPBQZ-0QnXRW^Y<{9dMQf!&Y$0g<{*-`BqOM_&H*cG^M zgytZcVdF(@dXQ@DGG#y8yi}}}@p}g%$gr91n>JONbKh5Xm146H(LDsy!-nMOMGy;v z*8qGAtDDS8;KTI7LkRaAF-Oy~dCcI{v#kt7+S&IL#pAagjq9iS1i|_I+{mO@Y()QGr8Umjxm^;n25e?kXv8e_eRK6eb_{ejGdj!VW47;6^fp+m zK!tGB@*NY#j60OZQ8T(U4}P2u)M77XcbelsxSHS0RML&;>u38~qN$5X{Us7sZdpAJ zNz;-oC&^{w*GrtAL;D%*a?O~8cOBb*&*RPg#uOHoJ5#4>z6GbkPeY7nk#^a30 zBHwBzqbf2pH$XkbFgjt%p8C!m5;MW>UOhXYhWDC8`<<;7AoE}lbmQfZynQuOa)YG| zS=4Z=Eq4lSlRJx{p#>TJ%X8h6gy|;jLf&%bz5?=macZT18{-|R#~q@Ce`JN}u~e%k zkyz4GXe}o>sNQ9b=;Zi3OZ|C9?e>l|RS6)Kw 
zbmQs9FkcPK=us9)tw0__8cBg+A`8%$Nh8n+m5Bbg!s8jW@@E$(ACwz6hG^Lk^B^Hp zGAt@ObhdpIB6kC$k(3l2-x>FUF2f$-T{F3NtA*X6&=(qM+!66S?EC4_XTIohzJf2; z)dPlYjnmKWiJo)F^JVBd5Y=NyD#>K;#!EG+a;L`W9r8LdiheFz8-m%r5CsEo;_$A> z!@AZ9eiA4z*a5_h2sdA)>^`o^zUZtS_T%7uE*moNuGZu(%cR zIPZ*`_c!|%uL&65Yt9A@Zmo8bNn{S;9RrPm+9k4+30_(@kifL$8l439Z_j2msuF(= z`|TU`=vRT?NAKWQ&ohF2LngZ8TJ$jb$wVKFPqUB}GU?E~;F@DVf|%3>xh4CXU|Y+z zzn-+@xdnF>I9!hn5>OyXlsYpjG(<5Mt%uTO!1*yTF;}$E@448f(YD8x4sQy=no`gO zw&au-Fl(E@S?k6xNd(81kT$o4D+!xlJ$LF)tbVK@(K4=url(Q7rY0S#u_pY6Z031 zR!k{5{RrcuG#TbV(}$JX*;wd&1S?~jH}lIE4N{uZfY^hXY~w(|;I(cF!--__+({0M zC+E*6`Fr)-8>;E}8hb zgOO?-xVJ&VP2Xh{F{gH*^p}eKptZ0UK+sveP`+y9&6zw*^7-7e zj-iUsGM`-tqnC^SJGUQ!!)BhTpsU z`b$#v3`=B8=45O>Q{5Iw869PV8q@sY`BlHjwwV$>X3AzhX9u+;Se9OUa-6c5l7Lt&8~@ePoswcsfbeGxT5?(}=P+cC zRebpjWX84)9{g~mW3g?;^GzZh9A`cN(_^5}( zEp;I0ys5plJp9NornRx+);F8?Jo$(0P|a+1edouPmiA)3+MzxBWlEe)&IBb>^qPx&9+89rj&KwL&2`h1o z9AaR4rwXmC8&eoPIrEKjw4smHu$;zfNIGF!a-q6d-6Oo?^tR26)nFuZ$T)jXN*er3oIK&*Qv*Fb;t7L@^2?N zFKBv=@?)-Cus7+mjnGNr`tJSpW@ zM7CU7dmM6vVsb&B9qUOnv67bF@pLhKMj*I1uRy(XS)hYX7Pq7wD*hU;HT zqRYyXCei2qLE8DRtUqu`X+^e%VtFQ5+W7tlX$!D1z!Z6D|NCbfBB|MJzLcQBHx3fv zV3fGSa}ay|RIHyjPnWs)E9h?0S8UWlvnQ<=v6s;?wWo(*)vUD`JpH(4V$Sz1yfM2u zfWqTCh8U}pdADaf0$%Y+9`*16^*OZAVe{0gQn3xao2OtoW7mce(Au{68?tstn@om6_ughMtkf=) zYX&i#p&i7B7Con(R+YnJvvSC93qlb>;6o(YsZpECIsv!%z-WYEnH{rryAXwB@Q?MMhq zBD``SPvP=ado>^kDA`GyTwJlY%z3EiU}7L*nS_5F3iOa!ALHyJ^u|MQIuNswAp~SJ zLw}5$#GhjIfo{QWMV(u?m!qf0$KFU-{jn))2jx=rD9ogYfhtuQsaDpvxN; zqCesoi$9s>hwU?@W#xF&NO@>Eh^vY>Y540HO}C+Hp~f}xrqHNEd&a7jdwSfkx}V}- z@zFMhyuhx}Hm9=42iuD?iIk^sTsC@ph25KP9CB>1&K_=R#^hJ^n8zuH>3&rQEBPlG z^xS~d$^iC)Fo`tMS2w=~yd3KFaJms0Re+U3ke29q|9wl)i)x*AnG+a;|scGX;O zV=Q_gFj*zOr%F}AB89iFznWONykdLnUh3D!kttOnT&&G51lmg9$L4JV7Ewskb%XZ% zjo#BLo7-Ls9;!1TLt$jsdTgsH6nZ z5xO-hN28;o1*Zu+84hICs;*(O8ns1dGTM$wN%YH0D8CwE!)P;8y%cDi0Z}@NNqD3r zZEmgEv$dz`5!r<>~8->ty(rDhV#2IG-0 zxk=!W0EV1IHHoy(BD_GRdsPg)?Wd-oJXXa{I=1K}FbbIXiryMrW5TK0{<(hUGkYpu 
zt~*pqy08ja*)~zkS(Q&&xvY%{f=3*kU7eovu&Kecap?T9VCI8|fo)9u6t}>Mf;h#6 zp<9N?{&TYpRmgla)Ib6BwfWp*V|*Wly>SsEs#vty3Y*e?csY}dFKvNYnb=hgM5sxh zB#eCcOb%o`sar6#SK_GL^{dUV`C}Ijhc((HtdHgS1l>&YX&e zsZRTGFIrOYth86=tkM>R5!Pk!9mR4MYK|taa$T~@Sr%n-Cw&L|l;rT5A%czzE_{XS z`+OhT6!tVYwAdD9CKG;@ZA7hJtf?Ny8x`gCy$g;>*c?Bz{BZlkZNuEH*K5Mh%sd_> zc2nQ`ejN46@Gc1Jt$>B(Fg%?6^4aX<)uZGh(oudA6*Acctu)Fg$=NWSo#%^PjW5$5 z1>;;UsueJQ9hc`;*08Ksp#Q=s)<}5kgP2zMZsa;%E7L8QVWin)cg*;NOp$wZOU?W6 zDSNYFB@8Qr;9a9KW(>2{Sg)9*M+n;nBt`#N7u}AQLGPkr7-W@LO(uA(9P6qhEv@)e znzbua`qvQyDT{;Lx#i<4omB~yDbJGKXqu0gJB6q-=4iJ*cuS?*qrfuK3_dAqou`o zaL&Z80bT>~RcnMYsUEtu?CHb(qahsqWt&INh1|Bj-q7T^X(5RV(f?Q`3$HGjr`s%$ zR3jC!RqmrC!~48w+x>}3wRL)8X$d|u_rB&caIyi^0%0K2`nDzCZw6Ieqy zsoI$=UW5aC)d^ZY`^gwPZPk)iglH5zF240phZw1q^6Z8+EN%^u@1`uUs|}Wyo3ui> zPMX>SWx*^%Xj`M7nPE%AN0YO{eVG}^H^W#CWKwP%`it^%Z7S+edvIF75?o%z$gXte>Km{U4Nv`29M(cD@I%sB|{+z6pbxDpM zTf3jtFZsqkJ~`O8mk~C4umxpFousyn8?eIZNhD!r0lbG!-!)o?E~NP;KXUhg80Sb7 zWYN6w!w$%EgudUuWO&Y%6!fLeq=Dy!;7Rr#lPDYW#GuxZQRPl9ztwN=DWQ!z{wViU z0^{Tw;QR_LGt#FR9h`HIH-68vsCEac`>m1EUqmHg{Jr=rAJ#>jZv`EFon`Dp1K%XKR9fUK6q|y_Tl=5;VZRHJH7xfQI1tFXQ>lQ~>H?Ie${F^(Zryvt;P34h?Slh;WOHeh1)dovb2U;2W zlU?0YR_3wQU%0wG3`?)%7+zdZN5c7}mnD%Z&IU9@ZCFn!bUqH7bD_Y8hki3uLVY>< za^&MmNREYXVOo3UD|&!ypp%vEXgz#1eKBa-;Iq$7uDsLA)j8(=jd#aYQc~Qzz|H$G zuOwS$4+&e<0o!G+ir1v0yV+BU=c<~s`;{IZEi-drgn~zP?Bx^DkK8lZLu){sOSD2g zTF;+)xH3dj%iWU}h``zeD%o?2q@kFrwQ;M*dX@EsXua8ZjY@vwF@_+M_O|2=)s5|B zpn7l$PK$f30AD!~9L9ofU?1Kub|HsaNM~~=vBsB!_ZAC@+TNPgHW-2@ek5xNqJ}=b z@0Q2-_Acz1`r`BNyDI{cNxl6AY|n4w$C~+BW$s7G{#YB3vO$WVkjr0v+_Y7 zD3jD?g^)Ib>A=lZRE%6r6699AZyg#=+j+_)F__8Y^QqfPdhz<%eRZ-dIv8?|bg`1| z)-p49b(8IohDgJ!t!ug>6%yeWLPFC^Hrk`WcXw=kb!`6bSp@PwfAp*Zt!VDnlDv6=+9~Oafr+a>gbKLcT&Jl z^%j}DU=owbS<7Y@mMuapNA-}(tOe&+okHu$Q0SY`8#p)LTr@@FHoq19+VP#}*-oUlIOoXd1dbtBCJen{-JcSFbM>jushpdmr3Bc=7T@G4wBoJi zS!xNQ)UbJ^@RyRvB)<9V0D81H+K+ap_dw{q#S82Q)mgdIIUVo2I#?OQ&SVcO@bp>u zZeS%vpPt?s;j~1o6P~fdzf`tEO{@t)$Ld+e-cT*B1ZN zcK!nXt>ijQ)D--y$7?QE)(hbMmL31R+9HwkNg1U53F3clIkD>$QNia^;dfrI8qyF3 
z=oKsQ{@w%fkYne^z-Y|`X6{9x(LfKZg0r|P|Bd33!pR0_=bH9f8 z_)-MeCo89tqM7-txWfu%5QKrwsz$z0iSo>^!=PD7cyp+rs$<`Vu7a7yV|<}BIt6Vb zZPG`M9i7sy`0jv7f3cW9?g4G1%d5rvEaMbg7@%Wsu!~PUIJ^ertpTr6%e)+*Af|C@Pyz2!h`>Zv^S4SDsAJ3v1Mwer^VbVM{Tq;b7>s6Qqg*n zoFNZb$0XXH|fTfJ;CyWYKz7;i>Ef@z3G)yyLx0z)p#ys^=M#+PH z69`AXn?1_*(y+C?WkW#xC*0C}8!5}>9Hlkf{yy&%zGe-YNb-{Me+B*AJG`e4_hb8U zrk`)EJMTkMt-P$21MYCouY$P@=@SxxZLQw>g-K3sUT#?Bb=Zp{k2j4@2M1Hi>BW7) zR`sRSX1R4~Mirobrl}83V2J8#YwX$`&zxLJ?nmAXJ!E6ZcsQAGPSjl?u83if2dT71 zeKAZ~suonv0jyps7c5tvNz_XoHEFHJ+7HN8{9`v7ZLvIW9Jjm|sXoQj@d`(}Im``# z7lk47n$)3K=P$S(6ZO<}6O9@`a~DJH9jr!&O_b882o;zbs`xm&OTV<8Dz6g~?f>ecnR#wJ%ztMIqB3)1zE3|; zc5?j}g5kupY&caXwUEWuNwcvm=xMcSGGmAGShG2@ZbCL{wNl;KjX0V?d_D4$X9)pH(D;SVqq+Aet?y%(}NPo#EQbPS?o4_+aOc4ANP8^EZbKV{I!@4 zHlEo0Zjio#9k&>0#!}TQM13w$cB@7u&(XWl8J(9)DeKbXOyCH?!cS4=D36<+T399- z(E1ng9z0F?*&$;SfU|yX9X-|ktu>C$0$$&%!VtAVGJ&NpKlHAdBNv4@MWb4BR9DFj zyoj`d)rff-#!T((mFCZOx1%1;=e5_=$5oEZ+B!=}y=-_O19*PVr3mScn>XjW>N3h6 z{4(@Ky=}EzQj$n~!r~B)D;{v&Y>eoR{;-N-yCqZhdOQ|leo~&*{b?PH{r??EgOos=6(yIgUO)5`Jjw| zUqP_dP?o*BobEiP0w2()Jfba`t9C{U-@RyCO|9MtE1FlFe`eM7M9BBzg$Q%F!8nTa zbrmdMZ6K2#Pu>75Jo3v6Ha|Qbv^KG7E)~w^%;txW-{<1mie}J_+CoQYU$cya?j~|lng+%t9VC!U0>83YVFFwGq8m7s8 zglL9gYjXoK29RkZ9nBwL%V*na!7aD3%T3yuAw8nLAfvp+;tWjL`lwFuT%A|}!1;E3 zUd=Yp@8M{6yDqZ^SSsGJC1F*lC}A2gApbsUbCjTspG&@joet(Bdve=rQ`(YdwF)PK zbEGegrZ_Klo~D^1rbb`?xvKr+ycM{DUjf`;Kc+1X=+ht1K=QbCc1!omkH*7~>C4|w z|K+%A=V?2NP@zw3+ncVbkphd3*sb*{kGOV>NxX>b%95hY`H1F&65 ziT4p@AL^~$Yx_rZ(%|1&b@Wk+yKTl2Fn}67U8-@M4pBF1U%v=UB7>f}@Fth%iP*?l zwF;Vv$FznfZ+74qp=v(*P$1sYOnYH`gT15>gVkpuJ#$kZjiu*ldA@)B*;zpD?n}zt zJUsVvucj*3Qc%JRj9zrawLj;3QUE+{Y^TB#e4`zeXc$l@?Es|Vr0H%lj~h`Yg{%{7 z67P4fF%qknE^9Z=%N8piZTt$l8E?nOejXfA?dW%^@`Ie?pqRR$I%nOS^0-KKEvb~y zz@MsdtDRn7gJnZH!gXeO7aopPS#TjQr4G$^9|WbO5Kzu=lmNJh|aG3 zInmJ}%M=JMkzo>S;bw1|aXejdfMViBCl+eh!%s%#L$ZZgs4-mOQJl%SB7^hPDpa$H z$i8@R6kYG%fK)>a84T%b-;pPzM{BnDdRW>U+kElM(6CkLDBo)B$JWKwg`f(Aq?`_Q zRFvZRu52jpVsI`Yn2DFX+&H=7HZN9_-z_e5Y-~N1T%XITM=~I4ZpguNDd0zxQewK) 
z+K`e9_*XKgC&iW)!6uL>=h;vVUF(?-yzj*qE8#4UUfH2SFz$l2E;>IxkD1=-Zw7c; z1~%+*^c_nab9vG}J{#dd>1-!I7-281!;Cqv0MTU+?rGZf3YXtUV_}7}oYtjApYe-A zM(AQ_?{u;ng;Fev#jx%t1FoClP{&Tbvf4R!;R`E))0R1@lfEcmMWs$qJ{%U$LUYW< zNhyKF-T6ypoeUKeXG^+CkQ8TiH?UiM>3KgS zG+Zn?({41TS#+ndI=;UW`Izs)3hRixP(}yzf^P8g*z|NfapvWaas95&4XxQL3In#9+Fuh*m~ ziplJnR}|u6YBCdMrlXx{iG;UO!gZc|qe!-H8oIjVH#4Xb1U4T~v<+z%mu9bDa6Gwu zfu4{&ey%yGSFHFXr`ZmHEpJ6=jbi){;1j_r8^$<8>X6O#mxAM&Is0_sAHXS$|fQ21+#{S3dv>T-#O2Thh>U-TY%vA;SXs3#etw{AH^ zsi%+(h78Fk7b$tO!$T)p>y|$qBkVD1jmb9URZBf_QEOmr!tfnYzH56x8or@(t@F3U zbVZmo!YoDP15Z}ge&*CR<nLzrE$g_qX}6_@MU%ek>Gb14ZY043BuqSofAQB4z+iMVaG z`29u>f9=F{vyrzCHy~Oa*fGM^#w<`2Q8?^nwTjvNv0^ORkYLmcZ)s_9UhkW&^q3wt z?GdqB>uB}XyDyd?2+fbJ5BZOJR;f6&oB5b}69dxHWnMp1dbuPLb@_O!1t576zM@09 zaEr#%bF!Xn9f*x}8!iFOv6zm+X}ZN8TF7IS(=sqKotp-W?XL-jc+9!;c@zwV+yE?D zMbd%FL5egoFRx>Nf?1e{bpyZTt%=(JaTT_LFPJ!nR~W9h=ubzhN7b4JMXR-{mvgO+ zC`n~$ogLn*>T=sQarsa*vQt(8d7vo-Jkgaw7{z*1weL;nVp3RGMyZ`nYDgh?gk`XOj1X5<8**ru zX3U+mv@9>Wql?PP{rH|RTRUtAWJ8P4cF`}*sL|F%|L`j`jU zdzPIgS^rws^eppO6+mb82kVQ{nSbjE@Ms<02U7As@S6|pq3tV0^CnJzdk;35hcg0a z-N>T}+1?4aIm&t23^()lwB&n8>ypjj-Y*S2V|NS5tjV48MlZ@0VJIG_Ylw=#4g#pc zeAEnNI#RoaV+mLnn6B$!=yW?QPT-CUe~I(!SX@X5c<-r6FmZHr1NC)2|yrTJw z#$gjrNii)vg=WXf^0y~RT+T|-*f|zv*dm5LU;@#;Kfr`S{m0G3W>PaV@W_&OXdF`V zq#;IryV|bq{3FrG+ z^fOd8rl~y{k*@2Gt1xivF`8~?m;duZb2Izk(+#La&9D*S=f1G9ED={K^Z2%@dWF`91}t*c z1;}#15T-06pa~8Mw-lnoZ{z)S-+NzYlRbiqyN{38aD-OaiOhs^{mLqkx{T(Du~#tV zbQw$)Q6#@OqYao=s@ctHnVX3siw6#G>A5RS9#VVHK#Cu@VaR(z z=gZ;@`GZwIInu#l$iorZbx^Og(Ru{sHL^DQX|Kt(Hd!@RS!X||v-P3`Kf=V;ZC;#3 zfW?%&bo>Cupwp*5S)}m7yVxI#%85(vBt9xi(8kYnyJl_h;bxxxH0mgUXLN$DIn(4wxrK1 zG}M|~f(Rj9fpbN7#fzpG8@r3bL`dAPps6Po^AslhyQK_^VZO_gAu9BrOape}nr=?sFefHKAU1gj_;)%loEs##l?93=3-Auvor*o0v2QtXobsg^_ zcb}7|K+d(d%PrpyGCWC~Lv>jS=iZdAAloHEY5JzJEP|q&5MMaC_LNCh4lb=#I~m1ML7W`|^Pzj~)O+Xg;vsjFji3n6=bfV`D_5AX%6T%WWc^sm*(@c-pWYAJWaL z+HiRED=1Ae8icpXp6u@dSI>2%tBe~>NOupcJ#b&C;HiyN-SKAFBiK+BEWc!D-On^8QkF3$z^|yELc+ 
zOmu+$5HRiirS2b>yNcL{_qaoycYX_voz?yTP$U6q8$GOT_@`{RPc5vu6lLD$* zL5GFlW?EGx(up7{B)JlP)Pve#8AA%97%U19f&0-0zt~+JYbeQ*vo4F)-1w#^E^=ST z43J?>^P*^#AVrb9i5JtIFL!hVywVpZo6zVWY2)$vPy1v$tU|+!{A*`X&SG^!Vm%65 zR7#C|YrO7a=CF`chM(b^K^VB9@RQKFhyQ7zbCC(4Rd<1 z5;|>|j?cz|S*^fr!jz^H{Li0qmJ3q;RzExywqNAHpSDUswo*SI#(NZ=6D1SBrpPVW zFba9x4CZ^>fS(h&mWL*c?O)tc(xhu+DX5@i^PUVYhYlGLbgA_Rm~-@j)^^3m~= ztD}Ld(5H4|C%a?s-e2r!VR*{gjYl*qviB^&eATVBEh{!i6j90$trv%s6LiZ~Sw8)Y zmHLq=1nFIq>9XkDonDC5OoZ0I%kKMBUk?s#a57}OBr(V$E*)NlL1D**F>54SK zy>>`6(6r}n?&7*rJs*WckY3dJmGbksMUIa)_wnopQmE%^u{1SJ(k- zyX}`#8`oRfk51qG6%>3=E~I&r^CabBKzFI_UUJ^ioj@w1A6P)+Ejm}xy5^_CBSAVQ_HC* z_Ii3Q`IwO3I8S=F5?{_NCWoO)D+e-W0ehW2U!e1K>>jm|F}6-dW^J_S+bXKq7iJ1| zvxS*wk3};i%HGI>4Csc0l)ISA@SV4*qd$6A6YoV2n_sdkC{bO?C6cAl}AO0pv-zOyl*WJZat zci&<8=BJY|LHTzhGRm0q$MYx+Y#8EHr!8PZ4S#`iVJ~{?kv9@x)iVQ!!m+KnodRIK zsde1%o@Szas!<4Yhu2!^BXtZd78}dv(p4AnUg7F z_gvR2(h498eT;xR<^5%MI`DovD0||)BS?ag2z-)ySsQ!rgtGd`XB!4dQA<(1L-P5@ws)m$T`w$W2W=A)yoFXO!6;u3ccxL7* zDpC{mla1?chLm!slY2&BR(sN@dHsZ6?1z3vW91#`A#JBB$uWMYkXAIrI-i`*u?`s| z9WO2=mO~xsK22cE`BB?}XD$82B@#*v@3!B!jqOC&*IfWJq z&n|;n!=i=dg606n#^{#m}R5~%`NPH-JqoM^~k6`<; zQ?rwrA*J*~MF+V?7PEw@Tn=lU^GZP)T!-?IW+#bj8Dk=GNQc<>MwHH7E+jgcvP^Uf zyP@c_Ky*O&?_3PQU-|E(Zr%?8*E)Qe_T!HfHb7Fk^&9Qub|PXT;jVaD8KkPwxBv8S zRsZF%eeK#Y^yxc$Md4S^r@gw~b5TiudmjACnb$w)1MKP>(KcXx20ZqB1<$kBy%8#8 z1xb~WYCJH&$B_VqzxM6dN9%FyZ|QM{*|9BqPoIowXEP0)>r5YHL^h#1ED}s}6zwRS zt?y@bG}f{|9E%Sft$hxuQj63GkfK&IFrg?>J;rNmOM1m2af2kxer;Qnfl_ zFuk8*(ml{@yqP~ebo4n1bM(hVi$#Nn4VpK?ha!2}MN*uJ^EzD|d)(THYg2IvB5@U@ zH3R;1#8GyuIW*~DPtk(dBzjKiH905F`JLQ5FMr*n(fJilKY?eHAm=&46`u=3pt$vo z;Ufaht6n$QtGwL2?9KhJan1JqOb?N<4>KsQxY3FY`#&B?l*dP^Q}9n-Pe^6u-sJH;lj5n3e4qZ}w@V!Dj#|%5PjE9l z>;jpg4VayCNx2G_y}fQ`NQ4=0N}KH0`_kUYSVX=wHXuq;bMz(+a)UQq7c zL^zBU#a2+7T$I(EJ*izWV23nQ)?!ObQ-5+Hri0eY>xeh$P+P6&>L+iyx>!1eco*<= zJf|LWG&VI_e35Kp*WRz-5kK0Oxw>SNX zE^FmN>ZTV0+L?evg%Yk5XU0sCL==F3N!O4BxOoB?u6>hxX>7-HU*xK6xpw^6zdrrn zKl?|1*MZZY7F_)^yUR=nG~Hzn0FnQ|A^%zPm-Ci~TOU+swAX1}wmfLd 
zCeq9r%Uz?;A!e$pe(S!51&r{A16@k>D+Tjs`8OjelMSiY+M!{dBbtyQvzS6Fh70`e zm=Cj*X~744d?e|l^ejjCN`kz>M|PVla7A|5o8*kenyk;gn^G?crF8G$f1n+trR6aw zZ)OE2xX4Jcb*S$G&pVk1-Sl+nh5v0cL?DmF&2d|2#PJhi#@n7*HwJj`0|0J%rva%< zS?IpQi6bP$lTT&ER|}KmjRx~c&uWMS@q@I@z19?tInvh2;;ppRZ9;EF4@u5ts@Q+@ zD9g)D#BcY`#EyyfP47(~qDU$Nfm_&&^&%hZ<-_k-)Yc%8n4z3pfd5Y0b2KB8C@~~A zjEsf^ReF-bIN`Ae(j-NJh1IzkRcVFQ=`rebM!Aax&-Fv6pB7`$%BzKpj7oBSrx8rf zm|kfEa8#OuesTTLYTSsZLmXp-npg3-4r#pE#1&=MiUc zg!yniRy47968IH@!A&b;gm(1=&F|zQ={ON%Ik%%mR^N3~rse*A?RZT>Dl*=FtTV8O zU+|XaOF32rms&{?uJ8ur{uYtP#5&x!o>%`tB#UH_fhXhdro$h6nGU_!{7uyfz~*;9 zDsT*-0U>OfPb6fYqkgClGl`|?g&my32Un(b)=!Sm>m%%KeO=eAIbw!iazCR2aF&aN zV|5LSE&I^ec(kn}#MNcM)+v9tAS(f0(lvA~!t|lk-n0(yc`B!Mb9-C;W-V3|+e)sq z7~E*e$&Y+OSp<+gBVH8y@ECntD0qK*du(?chajCsQ*WnJbo~Wc@V27`^?NUxo@*}~ z{aj)2%K3S-J13%EbML^`&*ufj_z{+rDE+!u@3gdO1>cAq#8=2DD6G6umfZE&`GGHKqy`YSo1MYZ0a0|Y6UXX0m4xo`>?4?5ABfahA$(RiV;sUW8VZ+4t-yylT zKdIid?zJzCJYHhKZw#!PC6xaPdNUQ(8$W7K)Nx6X?e2@okj&wWh7IqNL;Ee#!EL!{ zT;8HiW`f|HuqN1YwS9cXo2k=)5N^z!FR+xAi!KkG)HpsM->3{GyRc(X0Q*t&pugOu zmWQ+eAnlBvM6U&Z9&V0@YE8=?$ z8Y}Xc%2LSNG+N%G%++bG`lrO$96OLiyA_fY#?hw<}VW_dX*i z!%qO7e4|m+WM?V^P+YZKf&JAFRF?JO=^M*oVZ(7lq*j}3auf?vmezy(>YNTt5zMO$e|c_Iv6>1g5F5-Xwu%|luyWvlIu(-mZY+2Z#^`=j% z7meiyDeHrBQudFlla<#d@(K_|L%+N=t%!R*Z`oV4oi*1e2pVbd3LXq`CSDngsl>-3 zajh=nOh|n&4S0iuwk4tU!@mTq1gU}>T{{G`PZl=e<~(fGqft%Fbx3W{lIXP}74d|R zqA^!$PyoftL`JCFfdbwd`OAfZLn0*P;?#J?q%ChQPdZ$y)hbovXy;I5ymY{*Etf5Y z&}Yk5dioY?Z)ORn1c%0KYZ{BY{!N^X8`BVOTZ=ZN)Nq0E~{*uSa>Ux(7;$9rb>(~k?r5(_qPt_ilxN4 zqclzYAX9;S@&;n=7Y8CAxt zlX zS?h@@oJN)0FC(eUU_NK%#?GN`9^BGRw=1t>8O$ zCZUgaCjC&R*l|i*;&$FLNpGCq8FBM2cCF|r(~FkkIzadCA~bDoREI4h0NMQ+%73&_ zKh&iZFD`&Hmgg@+XWkago24)M72!KW5{Re7q7+hL>Mg(*a%wezv2%leU=1=Lw{Tj^ z%9Myt`c*Vt|o)t&B`vNwk4!9r2j@=9jD&Z1C!alry zYE8Z@5ATjNwfno88>AjH{;Ji2%|U!fll$rHb`_*N)&$J)F$QB1Fw97^DDS4+xzZy> zZmt-2b?P6$6|%aX^L*TzTHUEO;Q1xkM)@GZ`*rPedxL$cHN!Wo!@ zAsOxnUmb5MEQgc)MpTyFr=%(Gad`lzb63(K^uD6mC%v1O#Bqh|;eaM2LPV@hhzZ(JExuMAOHM-3YtXY@cF>ivTGZ01B>@xg?9s 
z;SYvNO9uQ;`pvBj_~k{n(2|0hgMt`-DAK6y<$jq;5Iero5#AS2K5mw=; zuu9T~UugCo278A~G%hifRfH0p(m+_t67<`715Vsjn?r7EJZJQT+kylJ)x?2W<{HKx^K>~Z|3F^TGf zD8T@!^1uqV&AV-e`_#Ln$8@kbZ?hU0_*l-gVxL`5g*7CeVcQs{aq5I|BNpHr&*&oX zRe4JbgG?PxvFN+Lw$$$_@7>br5FsyDAw7U!plFj7+MnCRxm|B62*AEw!DV-ZF&lik zE095zZn!++l2NgJ*GKk9D;C~qrFLm?06#2?Y5V3Iy!*VqHcnniNwjS-7(J+YLsniH zG}h=mO892==z5JG+}{JzQCl-W>z8gix_FN<*pxDy>YJ2ShbM;$eY#t4n-2#{JV>K9 z8B0wNrxz=?H#N&Bb`OHdI?u7Q=oDG?%*gyMvlJMLnKfZ!Ef$klRglQEBE@d6>` zkxyu*JjawuRX#TP!O;ngt<`n&FS}@OzaC}LAv$OSS z+|&8%Z4LEFQBeS#vY*Uo>lK?y*9aw&Ig-ib(vyV~dRQY4>vpcxE#JX?h1D^f8r?^> zKy<^i0e5}h>nXKK!)Kdk)^@Llurw?ZatM9ZaPO2L(Q7ote--NB%GcTK=%Sg7UZ>&g z?@zDp9`c~i?=HcNN6$%Fm-|;*H7DvPC={b!j&g*8#HpiWGaBx7p2HZD%0{o0Hrp-dX)~p>(Pkvp#c-&Uac+9$PO~~rsrOS-U3w*c zTI!S{;{ss!)vrv4(b-m_$2{%8*|7us;;b>AKNMk__XJkGHT%n`kSb0^2U)qV8WSe?-Y zh^)c4xWGzb?XLW;>yRh(b24Ws(popKcwE>E_}Ss%Tqv%y3l-6w)~x|{8yhg06}Z(F zX)z=r%T>S3T$}{_CM@`Bs76beCC0$sBdxmxZ|Crer#8rhI<8Ifdkjvo7b6xoG`-3y zUvbZ^OKqdtaAR#?+1D~R=OT`0rksc6D-)BZ-SEt;r#5&6Xfermz;XuqCZI>EjvzI~3K^89k;p+dVU?1P(& zEl(1yFTDw~y}$WkVxfc1XtJW6fYdRp7IUgl_4aD@X4bZRqJ^VlTY|4wO_Hm4;uWXi z>*%x5{igE0DZGF>r_T$YQmbpjMf%0>y(o)rF?PM3d}Ie5X@55Zoq`bgj$SOo9}Oxc zOa|cL@Qr$7C-X1jAR|Iw7aRU7n^%u|{BA4?E@;`lQ%&*fKdbkoJpprJbjDX_q!l?V5!?1_C$z&t&{}?W>?IlRLMJ zO%>gFy!ri3w*wJZZP!2B0*y6W`1bQ75xJ+&Y0qyRu`c$}x4;FXz8zgixFVdQWPc)i z)Myi$d`Cah3^I_CQLy|$poYln`l|Cd)StrcbpD~l%WXtuLrtuwbZ7mW0d0x zJRh4A!0-mpz`LWps$r2jb}(}}wu@6OEl3F9hIpA~^x?yVHA{;Gm}P0JdI;d)SDcZ2 z8|s)utU7>zSS07Z0Bk~vvX@(}^cH3h?cO6PZmylh869fWr-&|wiDkmfoLR0bI*>xB zR*OrP0Q(t8Ep*R*%Z=hn)@H_`+r)4{mCTc<;#ojxD5a7DyIYGnu0sb`b6nq-}x3se1{mKF{(hIm--~=6Umci@Tb1 zLt3K*$|spfm8|ua5ubt8=Hm>E)$1j||NcY-^`4gyS6JidHX%>%DmjTm^l96!qAv1# zrP|%+#wnxoxUMmV8~Q|YN!}7QE(c@K9$U zRE9wi*}Nut*{*Zg5*fAra4<71%q-QC;KJN$-ghs>Xbc_vNzhFy44T-KZ{#d%Um zxp^Ph@s#wu_Uh@-!Lah>N*}?@(1H2~rIgLrCJNG$F!;k0me%n z3}NY_x=c1#F_S$tb+Ld@4ATk_W`$#qJKGC~&A}1U8A+IkWNFDiRWRWfC>;?hYiaq1 zZP=cqNn%Xpw&jD89LE%G0$-SroKsnlXoF^^ZBo*chSfIpMha}7=SW_hH06;8YP-IO 
zb*c~b#7Xk&dJ$BB64*^Yhz2NO-V!Z3I|cX>nNvoxO9NsRt2UBC$P5 zRc?BLUsIHKBep)6dj1Y8~45u zCv%S3WoCoQ_h%p*?Z+!=S+IP@aXfg1t};QxpmS*eo2#Fil>yZ*GBbL|<3Z~8!=_@o zmzB6WSXVw^M5;GwwVI~n*YLHfAyELm8_&_gI{Sm-N%+u?x~Q`#^3ci23sycUENKBN zC)CxuH>g}JVa}KJ0G2iZxwwuN8GC>_>WDEjJ0-idpf${P+sljE@gjBqVAS9Rub`n0 zR>b<+p+!~-#6_2U?RD{0vD4OnunMPUbdDtLA1dFenh6V5gl8WC+Db(WwTxIAD`9vwDXEI zo?dabM2ChZ?77i({Q?X%4frIgvVo<@+1z{~FKMzqjr~J=Ug!JY0$~65^!MWRAMG>u=edKvg%aNntk*^-WD2a-7`tiX9h%5r zL3SHSI<3`ekQnrPK2k+l1$&$#a^h3^XL-^WL7P&0yS6Yg=HgP19h zGDh-{*=D6asUd|j(|lw0ffLwaI(65eg&D4k*Q=G}>cMwW`xS&Nts=9W19RE_9Q$sg z$QyxjPf5;k@`HffT;Z^=GVxNzBms?T37K~qX^*W6_GLu#esT+{Ti{I&m3vZkJdG&F z@eMZi!qJWv2Y5_l%ca3JVcYSlhl7@C0eWYXqwjn74AHP$m>I@M<~T?ha+n!SI$C9S z9U#STLX>6ueldmrL@-R!&t)b2z=OpREAO6=v^UsAX`~OAxV6=ry{HvI$~YP|>)M3; zdkTKM`R)|45yD)}yCA{7b9H3WIhEbLYdRI5tnxC>IX}FLJ~R+*2S-u`;tBTj#CsGA z9l&s1Ehaj&u=(l4S@sW&kxnUmt&xWgX(>wY2V>hvoKBmRM|7Y0x)86FBX3$?lky|W zgtL5XX&YKUF{dJ?JFXz1+5j~F=+&u-jxv!&lLqc~CG%k)k`YXK8{$$~<&T6}0Jp^f zzyhct>^)0P=E_Vp_K#R1P*ycuPR9YsK}ij(OS7B-QNc1>FcV3nvfNEx^AkWLq`#|# z>T_&0`Xw)F?0K^uFMb_SKF%sEtmW%?Fg6f6bPpiX$c@SaC~tDiTMD=_tiOs~XDJIhXnQ4zJ{Jix2JOjmxiC?3zUQ#QjeUMx@|as(w{vY~ zj$t2aqidv+Ka{`5N%|GECImRVT$c+R(Yc+YdSAV~vy2NiM{}%5k!fl19m~4%*VLQu z_MH9p*v@}>?EaEzb_Aa6kMS&jFWXMcJ#rO@FaxK&{6hQTu7&gNAH#^AD8H=n7Qp0U zlz`A}|1E(FO1Q1BmCkItDpfMeRnzy=z5&%&g6^x^w*2Y!1m%(FvvuqV7Ip_n_X9mi3fial<`MFA{WZn)0n)w2t6KL~3!9N2iDIG>#1ma62N z)=feHt3(~RP)h?q$#!2EWDcpNl-eJx&u*)Uf_c7rTPdjthtDSmU&w}JB_$sv9Pn*{ zThKf0pzY>#S4h*NsDw+yfoVm2?`kRw_Ht9a(R-H43cj1>)u<&VBqMY#y7YKuPbazc zqe|2XadYnm3Wv)?pN-qtu7#Rd$BJSP_73-+3+ z%mE4p%+rZ+X}DaoYOC8)i`L8eo4%9U^B>O~z5Lgkqm*#?uUQ}-;GeT#*RiIdhu;AD zqqWCd+T#tD;E*K%ej%Is7+9!kI&*gCFZ({(>rG2|j7#%-YW(owzYOz7W()ff=>7Sx z@>D*sNLusv69t#s{M535YvoIqJ{Fj80NXTylF~r zU>UFt*$7PYPiQD9}U)uDaj0^p{ms>=|y2bvD^C& z@}SFMlU*7jU3BRJ5H!XWqi^tZowda1dY`YId3RpQ%f?vYxqNj+t&BqJCNoM?PdLpOzB>35-wI~_J%vs})PNTX0N=m3h|ftl%*`)d}}LGo8@ zi2E|Ty4ZaGBG_+=UpJ2sB=6k}b5AssA*Fp2$ySmV)?T&%)9a>~htA|GOYEDgO`}rT 
za3|d4V`c(rP~cQIUBmqqWF*A4dL||3B#q7%y+=f3!9!NVKjcLk2bE9zSy9sE7Y>sA zs7KyI*Nt$8Kii*i^}kKGKgvp<{!x44srEB?DX8h`)m^jS90Ot~iw&Nt0HJmdgNQyI z{orRFvza?TIsLyz+MfQLcuDC><~Km^C3SCfd^gOs1 zX&*#Gw%6Qs?e6j)sq>VY;5w3>2@|=ppVM2m&nhe$jHQ+^{7OJSL$gLf2nn^4NhAdc z2v~+ucgX}|v>EX#g(B8T^(kNQy1zs(EzF2%-0f>vc#aat#>IZ9WnCUAaL7iehIbiO zhr05_hfH=;m33wjd#l7#VrjBQkIN|am466qO^zKfaO`v-kA$=;-!!-1DXNyC5d4)$;(g(Yu1A4=zAq?hMs-_Omp<;hoGe81p6qu?`-{t-{B-Z^IHwT0IsadBv z{(L|?o8-WZv=}oJ^4Cr|{iuC+Xh?2X{g^drAh!AyM4@1PdarD@RhZ^%CNo)&sT_3a z8pNVruE>geKZ=Y$+3Zud<5UlK!y=-{(&xrT`)lmfG`qXcPc$~YVM*;7{6M%+2)@&= z)*d@B<$#KxZRr-ziAMEj3BQ6&D&diCt*hqa2n!LS{AjN7X6V6Uw7-hV z!9{G2pH>m;?8RcL*=lKPl0q8oXS-kD$xQ9YKcAs59Y-){{+a##3JAISNBUGf=|{k# z{P-%!70b*z`qciBjPC-U{KvbB(#Nj@0s;G%|Cx5}Y`jYR4y=6U z5?gt=cdN4gmmuiA(oeQ4eR*u>u|4K@jnbf7cBP;G_x}I<%|MMt%|D~JfOZ}G$Prxm z^tT>LGymT4Kl8sh1?urngU+}>ey>xqe|N{Y1^>_#+;cH}=IVcT07{fT7XO>y)Yd`Y zsNCOLYYZqye%!2~mB3F=`AysfeQf=|lkuhW(`u3TUhX26-An5(6VL8?+o_*$m1({Q z{4An#&*4*-^1_uOuKw=bzy|DN0!TQwbO-+Zj6WZ=1%3tOzU}v7kdj~TtVBgC%hCUP_b-jxdzF)JUG+G<2)h3<9JX)d zwza!rUETJm?H-W4*V}#zz2OwUmG4$#PKSZJZ_gh7YxmCY_dWZ|Ob4pmvw-%XEo1$EX$As9TmvE)DE~(S;#Tik zzqak_p9SR3-@b@?(zk%mj9J>Jt4cDF`e_5chDgc3cj@ln?QKdIKW_R@gLX-FZegPh zz9@PNuLC{Zw)U`YZOe`;YQGKSarHFtWAa}AIVuSBwCEv_*}{%F?|$p3`nO6+hYJ$6 z{G)moJ__&#Mb3=v0zof-UOl|$BgN3>M$)`mHYbntwC;%U#M!+UdjsC3E@NT_4d+Ap2*) zVao_`0Mg%L{xbIOMO6#iJ}z+EyWi{sZPk`sg+R8suRZ)I#6hM?zc=}pBM>9eI}IpF zxCFYjwO1+pp3nT?wlsIVUro_9m1!K&Tq0T?{YN)qdIZ^5cN} zK<9P7-TvD6-KY0nZefo^?lA&Y4}I=UUOIgZd=b6>(_3f01-{ARmLD_#L_4soES&`< zOZNFqKsT=L+fIB7PC73K;or{Semn;3fp;nEShOb^Crjkpp8pK~6%_utli?~!_tHL( zpH#uyH=m`w8vJTUitAmYf@{03s_aK6JD(@s(q-#TC?%&xt-VisdEh?ro=#-VHQZs~ zvp{vv_kDc;Y7Flr0xJhFwXU5}LW9^Pcbt{BE`bg!VeC`Mk%s2CTwk<-2Ych{fr>lQjwtav0)1TLF{|MDx(*OQ%PMz(1 zkgNL(&;bzmw8s9-W!=+huW=W0l2c3dFJ)aaKit?HHS1XT+?HlO<^Ku#H2c6^2feSh zwJBqgzy9PjNd3s)_FX*@;=g0|#+ih5eJF6}tCaR%Qugaj*98?6cXNS1_^oU~_p6w# z;H<8Q^V$#LZ`Sl9r?uO`ty29|`ORy5DB3}5RUg{lcM?_^mY>arR*lI`A#$B zUu&67i_>M7#2_qum_Z4F^qe2Sc#nOP 
zX*5H3)CbC)VsY`$U;D8y?L7l)LU;may^ikD_tQeM%JU9zhV|LEFJRzqfD{o)wwSE~ zHV{Aypm1s#r0X=Pp`oV<0Q3bw{JIn>12#dio?c=sHsNdnC>o6~;rLNeL4qD|MyWf1 zrE(;Z(kv`)HpLjgVQmkQeb#w^!?o95-zulorF*dyC8$A5k=c6@Ph;GZ%rOqWrP!aOe z1L*j;2o0=zB4woj2X$U{>Qp(kF?&x>7BD~kD%^E*l$hK;w!Er(mEwNkxBj@7SX?^6 zO$zXY=2GRAldUVv?@8M8eiPOupx|$`Ck{_D1<#zJa z6V;5}ab#|1-U9=<9n(8x5$Uh_D9An2{A~9Ne)Y1yX5{#(7z6uuQE65cYRU0YfZBiC)JN>tbJu7!>y$Q;CL z0tx}ul#fB8X<+4Jvt4kqE<}OTs)QMmo~~2IZn=bk*<%7~XpMOUV}4~?BqJO)B%HIB z-kf_)eZ5xt8*5^+XVygJ!zWTfT5%Z2(9sF2i2Vac_a1syokx@>AgL_f`>8s4q0pkD z!irRUsCayv;u zp?~7f{>(pC>vppmz&VFFGV)YrA=FmViu}6f0+V)^)<7ZIl#P{>OPr~2=a5Mdk%#1n zsFv3nka^pxH^(D_NR9z1Qvhio=DR7qUtmS{2?5~vnu9n1evs9`Jp?6v>$S8poF>wj zbZ)Fc{K^Z(EOPUK=W|MnEfS|HZVe+oeP(n6K3igtH^x+mgO_jtT%hc3DkS5qrx_X9SgYhh%l>5&TA8>V$0^_5oECUD_wrv7Alz%&J;=EJY;^ s&j%^3M*u!gmR~VU1gz_jmd5p=x|^5`rp2P09L9R@&Et; literal 0 HcmV?d00001 diff --git a/assets/tinytinylettuce.png b/assets/tinytinylettuce.png new file mode 100644 index 0000000000000000000000000000000000000000..037b21f0dcdfd7150b596175d2c7a6edc167fdb2 GIT binary patch literal 124185 zcmV*GKxw~;P)4Tx02q~HU|_tHSWu9|V!*(Vn^#g4!9uPYT!d?JY6B6VMWN!hoHIk6n=}7F{ zoYG{dy&xC!q~?VN18EK*mSHGn2x4$&aKfR+1gyrHA)ldup^~A9A(J7UA%mfWL4m;$ zC{nhA|@1g zqR4v&RP-t6L;S=bPnI|RCV9E(>*=Y zGt;v>GuwB2d+OG$I(6#Ys(+t4b*jo5kH;;8`?6ZCT{4(RB)W_*L}p4r%P>=7*<`CJ znA4jnn8T#(hKQMhnX7DBvZXiY9f)xE{1{J96ng_vXk%>R$_#iN10^n?I%PgxzsDa z)juKCEEL)Y3G@-RvbFDAnpz*YEM&=)K(<$qnUa`niOugTEe*Hkr)IKgDRZQ6reF?J zN+!W9f6(e9_59|KyA} zz#_By-8{-noZpySiY~N={K|Z&Q0p1CGF#8UwUvRRJXGF?<}*#^X0aHL%#q|uyX}%r zHX(GGH(TOfQO-uhs71B}-NzdYXQN{z$fT2*C6~nJG*B?#W)@M18E6Ug3|j-ufkD5} z6bw=1C1CEK=5St!9|t1+X%E@z+aH=nmGeTxpjHU|I)7B9)%2=e>R@=L7`^)F;O zArcr`?|o=-dqQ65aW1rH*gBpJ8TeReDBX+Q5L8++LvGm;q^0lY?Qt$HlUo?*vZe-O zX<52VOk}q7+2Yg47H*HT1ajH|!gzT`U?8~cvPlg@LBpFSVjQ}X>;v<($pLMa!@IQ% z(b^3PZ7Z{zWyWav78+>x?JS$zy3f_;rvp@+LAKAY+n|uGYMOG7$;{bF+-H8wbPD1A zOCVR+8lV@}t9H}>+725E*_CJGaEmKoOJ>tnGrv1TGY_0NUpFov}#r0HcbL~ z?`B2^PAe#1w|Qr{Ka34@W&6Y1$(gr@{kC{vy=p*4;vC;}j>&j82IZ6UJ>^*I-oCGF 
z^6#y*bD?8SlSbcolooH)+DKr;=m6<6A7vwwmbY*_bS2P(w$NeP_16w8%o6$X%+L!puGIK&0%EpB@to1Xp>dY1E?7R`V{~E(!JywsL^o!pT;zF5gRDN<2A<k({j#~fBz{LC}c{YkFdq->9Rj`8JSsXT8xnkvRuYfBi@KHKQ%{YrZk=o!hpFJ z1I@X)tidCSY_-l8w*v=H*<{!^&ULPA(#Td;AwE~w(vt1lyD3PyAb~Uq6dF>RKnl0L zl0dGorHL9p3x#IaD=DWXMvOWB%$_JY8!tMI^KAU|Hhmh|!finUy^=uhV5?W8`;gdZ z{k`oTeJJb1W-)52*~nJk6UAii{G?UnI5XTOpG0MJ}QrAa;k-8%?Xr36yFdUY5B%klkQDExr$w zb(U;&jDm8c+%X%SLx#aKqD*TWIq-gxCdmo5bfck3^|tk#q(bd|we!_odtY+bIkNH7 zb?V;74mABRMx>E(pd~Q0=y!btMnPEp ze2aMzU|xvyvm%A;d657xq_194D>Q&?;_6|Lm@V7!iRZQPvMHWfqYe@Z*+|6cUdn(>;~~JlNnD>!a&C$F-Q9xC zp1%4+WEkW{2*aQjg({qI33N9|)W~HtH3Df^HgMc`Syq>k1II&NC>zq#)Y5MAQpA7~ z$veY&Cpe(hA0XnHLP+atY31@XGfP^Ulj^oR*gCO-;60g9^2ENjY^9qy%!|0vYMqUK zHh%KLLxIQ%mwfuSw|I+4D5Rgo_HW3MO- z*ThM`Eki&E*Id{PlCZaNE}q} zz-UZ5a6J34xx_&wlarYlqdg3?5GbgCT$w`7Gi)8V*O^(90gXwkmvMXCnv_wZu4X5d zctC4jAYPEb;FCZSwvJo(8+;NN6dWOcGcXyi!wA5oIl-V{+QfvJPg8W(;9#yxR;CB+ zLHE+&X>Y|lV4MyX!g+y^+kB!L$T9-lmmkLFLFmo?$D;Qjc4*Y|T9z1KGN3w)6uXpTKzb&)Sv%Q#sW+*T8%@ygAAhpX(lP$7K3i*vRn8{J>M*1At$dPQ0)H0LxmY+-t zeT&ic>QCwYr;rQh$6wnT=r^8Atp_g6FxR&FBe+~%e6|jgOWywQR>)hBz{xCulNYu? 
z7BlFWfyNxpZ{skRm|w~TY6S_LJQ5h5u+?YW>2t<@wwM!DZa>*BWG_hIgh*g`!d8x= z1T3`r(`VG^ZN}cF&#@wfWCaPFR1z2_uw@Ls`iTquM6rA|9iuw=B3PhPkic+~z%YTW zKEIMKVQ!FsS05_XWp(;Mr;w!}fx#?+lO49Yc{c+AUfI;B0^D@-A_|XslmHGECzZ!( z`hHtI3P>l&*3FD=nJqz%^z)U>;3pfw)_Jo1D)y?3 z6PS3=)+-xhCs0;}qKBUZ=)c3VrS+g~uIk%};OBAD>zjN*O{D&>eG_ zHKa~QVD#-}AQvr~zGW}uDoCIpfq|C5FzcatfUSX+c!7RF0tE^5UjoAkxsrp#Y*p$} zx6GEY|5DBig+b09b<`a8DCfLLqd??DNg&(%9sKR{L`kgsatqp4_wpIor-fEKu<|WX z?<|2~)80Dmx7E3hL+LJ4XUUAAK0Us+Y`==Vl|{CK@dYDPfmTkzzXSR$XSB1gAa9Zm(q?#5#7+5XvkY5Q5 zs&_qnU@NWXaEqV~S?Bl)&FRw!=1Ae>NoJN+?@#1N(*{l+u?_i}>i2$Pd&e+=tuBSA zk*m+mgxAw0*<1+rIb$xw2SXY&W~KPaePmO>tdz`#_}>!+VZ#KrhWOhruwRhCpq4=Q zKf}i#yoTFMA`;X>?0+lh{#Es#{?Mh6F9{D_2^2U#u@V?o+LqCs2x!nTpvQQm#cjl& zSm_VB($mB^@SU(&iLk~jmRLLyw^{8L zi#?IBrDi-y5m$%?Ngf^V%V?I_<(z+(j6(ZQI+N`@63>OblSSLgMHYoLeU*Sl6`fll zM<*SpORDB9T_)%h2{c&&p+wvg7y5R)-Qjc@g-9>5DuO9s)BQ@pnN?vH7-nD=lzUft zCsy)tliBU_LG?t^NDBOT?M+yKtrPjOPk7nA?>{MENM{`&FtES~kfwp*N9{~|gXwK&kvpc}RD+%=2`OQG4Wk#0F$gagnI;|-s!3$hsBp6qiWwk^i zQHO*6!U#}jkMq}R=PQOvW)vcrm2B+LlO$aiHZZY9H#R!3^}qM`DSn-O8`5EgnzU zZDP|}_ww2sZ~Xd(jq6)l+lxvpW5<_Go;ud;wgXDFHT8`RZF_dLp;p;#j`QAj!MDD7 z%dA;*tTv2Wc8kEEF;avLJ}}e;;9oipQ?zqoz*X=hpznhpS{xzU~w+;qXX!Eb&8 zM)&T!{QJj{!9d^0WU}2)g1u#9c*0h1rEt=w&uo8M6tqGvo6W{|JRS!!2=GYt>}2-_ zOGqGNEpa};6shCFubefK2JXWA6$$1)$$3nqC z%Vn3n|F!k6mRC5x`t>iK`?k~FUQ1(hjny7?xop9(AC`i_s3qZPX^gC1wdUTxtvSV%Osmk{+TKB8y*(7sP#G4yjT7_e=n0_kQB)}pY0TGBrqW09BoMef|W%%a6ZX{E9W}PJg1%MA< zQ0=FnASn_KH#RgjG&Huiw>PS*wY8NnS2!G|lxQ@{H$drhI$bW8*Xwn=eN~mCi+sf+ zMihCyu1S-}{oxO{-+1FUD=MwGeCHeIo_Bg{Tbris&>)d$ZE4=SchBCvdw1^GzIpTJnwp~x4Gjkm9t!yV@mL(1(P&I-R}>h<&B})V zm5?L=B_1(R=ybYmR=3q^^LX6F#YI(>b5M5PO`H4suAGBmR_N1B~}1b!LdrG`^DOo-N5j^Z#>BU7{^`Em{mTHG;^aNbRV(3#~ zk06%}7?ml?YH2!l?3Gv6z3}|=FTS|u(4m7(%?*tE(4JU+qs{1q?GB5t$TD(N)r=X_ zr%W9;aZ*J^Wl2?4RdI2#TU|<(vk{~e_yH>k1cSjyB!ZsBUxt#kwe@W+?G4A8cJA19 zC+ZS5AXSIP>9B~Ut|XvVDR<0n?nUod&{lu_YuGpeD@QuN5G=kNRb zOQppX&pr3rgh`Vbj5=I;O^~b{$|i(i8J{m$(d|b-3-G5wsEdHU9XQbyU~AxHJBTRj 
z=G7~PwAQ0ltt4RK4kaT*JAY8Pc5UCWdiClTUs|(q~!>!H!y4uFWhwEN``L&%p_Uzw(=)nF)eB(o5G&rg5%$en2}nasUQ^J z@TM=q8Y{WgP26bBSo^`O_O^EB{;XK|ZH`Dd>agpu)4^U4PFE-#sjok_W5>=1ANbem zRqI-sEDoD|!Xe9uQrE0mGZ!sh^6b85y$X) zWC->+S4+Ry%tCw1PXV?vWjG9D6b?|Yw4snF_NGXE%QuVHOh1g6t*zGP#)endz5M$> z-1Xv%&)3x+m9DOgTG*#%%$T}(@!a#yKbx4xA76d=$o{3v7GCg zSJAa78u5tUmHKF2vVCh-b>eJk277$3VItLl(kiCun8B!f%I1`2Qr+h=r_|vBY-P%1 z2*e2dXpm7ZO`45X|HtHFSWL`{rOM#ZQph@;J1{N6j9xeeDunzKX)BAHc(Zw7l}rl4i9sfkx1O>5J7>Z zsksMLa8l?@4seOTz6uPx(obDnksyg&p&(k3%k9~){*62Dyz}7) zA8KuFWF&|gYDAf3+44F6_JQ}Faptn>krj*?@xZ$M|L$N^_TG!$bMYnbn=oN)G!}{{ z!q}tGu%fZBQ8crNDID#heWo?^P8CrtTH19Em%tX&lulP9M8%43{r-RdYt>7)-g^Bz z&VSFd&#ZmufyY;^dZM*W2vt^8UipcuK6TY6$4wZ|Xw}M|I7uB)G(Oq-CJmQ_YpImt z0$fXbtfGg=S2xn`Lk*-cb-vQ*rzOaIk^1GDTXko89d|lS0k%3MGK50r7u+Rm(NN5d zL#bEPFQTAI`X>mYHl;$Rz<|Z`L?96Kd5d()jc>m8om;-WX3dMC5UZ0}E|Zuvx$2Tj zFL>X@Z#!kdjJCEmY)wJG?ZF3D-Fep?%T8T%&3|20Qs(vfJXqx;(O|+NDhy$VlY)|n zsSV&M1AuWx)h%UXGcEQ@9j!J`-e)4L_-=LF5KP`drDvh5abZ5eynyf*qb3mslz*c9e43RtO zHU=7)cs92WqjB(@@s8C7zM&~#-6ppzC)Nl>GIR?eCn4LqmtXzS4{v+!xyM^tTkxa8 zabfAwSyx@CDY2G2 z(N-`{v2S0;iXWwF8QNB_C&;BPT|oui!(3&NiXgDf5@*8G84lqu=8Q+3U%2Lr+qWOO z?bdIcdDiLJ^-D_1_U$|Jv!DLv;fL0SgV^kC6DQ94>Q}%1u}}O5$nFoc`f%0|%?sLq z6ql|Dq^W|$bj6EGI7Gg(!Ae@ZQ5!Af?w=Zo33Q**Xqjf%A{SE4meC+yfGtB5!zYl| zhjqnN@f}V;}wZ&o?*v zfAr&v!DMY81%c{uJ(5SiZkYZ@w!i+b@pN$)X`ePVLz*I z9CqK9xAuSeOE*+jR^9xquTGv`6$&@u&A=El8jk{n&}C{*TnZqp+NNrPf>{P&X+iTe z3#6kU&^ zH}V=OyCIU6Sf}hBwji69%P5FI0k+K27$)H?4G5IbV?#_t$}U605e+Nka3vmk;2$^L zc+!1Jp=Swbq?`N<6SR~fY zkrZyPgBe4I6EGCxle9L`s_5P@q72)hrP8xf^3^R?TpU!I{%S2o`|ezEhKSZ+u-)Tz zF^siaJ?$-_va*Ut9)9k+FZ}$x^OpbjPrn+C*L%Ep)rTDRVq7L7VfXF7z328{|1}hF z1OxF|v*+ISgImu#|9pJAL{iH6B2@*cY$`mg{vyoIV$51QL+m<5H@Uh@X~0%m0ft!9 za+#B9*s|Yz^UdbiLUH6D3oYxjePstw>|sZ^WXf|*FJQ``&!#;Ty9pz*aE?J zmz%|d?A##gl~SoF9KaIf632EGCQ4l-&>wAJmsU!AUIgBm4x?bPvI$sQZQ)R~q_j8~ zYMDKIZbN)h0+EiQH*wWJYAoHg! 
z?dP8Rb}`H=9#fnO^PP*!OASsW5d$XrR8)jCY#9H?KmPIcuYIMq=CF*F?6D7h_=2x~{qrM7l{YpV zD=u+I<7_6=YPYlXg746QrmuYU8>J(BH{JY|k)w+fmN5RGn6%U>gtBN+2AfF*gs}P~ zH7p)VGZWQH(l9Te>L=a43wKkUxStZx*_AvQ2s70nvkg=q^P4s9ZEta2@u5$(GzNeD z+y6dgae2bphM$Dhiob*>j7M7B?zETw=!bXQdHaLWFav7uSu4-^-uJ&hZ~iF|5ycIE zE}3oMH-Pm<@=GADv*NbjVfM-k|Hq$*l2}KvIlc>XE^`)hd0t}fj=^pmU< zROW)V)s_4PEYAFD2ci&-S**F?cnDs+(Wy{+5@KeiVI&$FYOneH=YI6VTig9Du~=x) zlG(rc?GHZjsSn!h?cr!r0U?>!Q_PO-8#iwK!WXWccgmdae*ebOGH1dXu%McVzCrWh z14*&_D6~SuVy_v|v(z|I4;2YZp;Fmo76S0WQx%uQHA#>^;Q&*1H3xhTYtEb*8#iofYHHrKd)Mkm zR*xPvYVN%GGF}$_5j2#Ip}7UQ04xIhFzDO-RY8mA#C-R;7CmO{J^>A-$4jRvz*dif z%4dS^Ut8`aL8r?L3UnbbiylP+Lkpm@xt@Z(g!_W+yA@%Ge7$8 zZ&r@P<;oh32cuDcoBx={>kI`kW;tJ3yYbpx{}$dm%EXW>Zu)M?``zVuzY_ijIWq^7y4^{JG^Ug1HWv_-r@hH;%QX+qwchlB|)Wa zNkF$c;ZE)Zz&Zj(SU>8=x8?yQOZ!_n6A7?Y3c@)9l$kSU&gw@V+5Fay)0UrESv7(k zBZ8rJmy2YU=H|xY5>F%=z-R5f?|oM^65FzM8*78sz4Gdg9oyf2{&}qXkskp0Ns!+L zlq}fRLPpx$_Ji#cpS8EIpttF%VK-TlO`%&c=rje`>Q+dD_F<~GOFJ+zNw;BONY5m| z4x%os&Ek#_W#{xC{_xgs-uz8$)hua#=ey6k`<`FUJ!L{T+U#>Vnp%%BT;k+GRwK*o zq1F5Jlds%-)2*NW>?I%n)vvzxS}+*eylKmG&pmg>8K;gYD~UuM42C}uufAj$!7>=NJM0TE&2}^gBPg;-BcNj z+uEA1zy6DV|NCD+G_TKo?RB5N_PVPgG4`dx9hBrT9JRm;kH;fh726#zzWDN&uKmt` zU3=xffAszBfkq7YYCt4gkwQ{Yrtyf*VO$Dq^t}Qs)1y>+{&vztG(zdFuEVtcJi!*= zCnG*tW5^gb;b7SaK!(c)=nVKKqK>v^+kbrUi|l28`(58$uxKjl5d(pCFx^+gZm8g$ zjJ0t@v)jBbXT_FHhj0AG4>xbx7ZHE@Nx%5jZ%#X7rQIO{!HlRkWkt40u}g)A-etUk zOxAhCtI3SdjLGr!$*5i4PC8BVeKt}B2Z&6u51klOGcvzynwL_q60#{MLpZw~?dj^z zUH$jJ-Rto8ds{FXvF+M%=(C^w>Xn~- z@5euJd2@5KGPm255l6HY{ky6foFJ9lie^V!Qbv0IQmVA9GmmJ5o(MnDO3WH zHUtczo5SWv6|ZyU{4E^~2MCN=SkamEWK_#7;Kdqmo5Nn@b(gmL{cF~|)!KIG-S2)E z{`3d|q>@~+30U$B4t&{o#4PH{m8WmpzV*2yg;s@ zu5e|(f^6`iUyzZ$`((!3Oa|0D?;f_&v!w2IbLsR0n0i$BI|L-4fdNJZKT;QFDcL36 zYGu5GHL9lO;AcL4)k6(|q8A?-@7I z`oi-&*1xtDR}a)q9P71V-o`;d;Yc(VMFnGPX_g6%n^1b|ZC^e2ym|3NTf4vEbDz8F z!3XY-MP+{r?1x(6>I$=x@SPLWgD+8!U_=6a^^SMdWaX!>^Ob(z`DyY_HLHSct4rAo z>PUIJrVg!te_|2YXhotyF7b6>yU~WLuDbHEN1tGg%lw6t|McfyEm$-o7?QDJEFNIc 
zL?>qS=XAJnf@h-#mhW8k=})aVNdQ&HyjLHaN)TYOE^lc?RbqyHC=8OjIl0} zbwW0q1AMQjEIxh3l70Jj?Aw1Bj9b0>@zJBI7cE+3iHpfkIk+-GArcYxMIx%oR>UYN zpsAt<>6emZI)_@gec;$k%^cRGL|(dWAS~5%G7HkVjPEoa3b2(XkRftwwT5KDvvf`_ z{f)6n??nS{H5@zo+0S0}$Rn%Sym-YK3-0*ck0(s7h{oG+E{()kvcrfkdaR)yyE2B0 zNEooNfBoxUFDWVJc=~uk{07jWq~mEE(LG=JzN^wG&?s4l0R;oCR9Ik3kfP(GbEINa zOVyAh9ujJbQs3$d85uPZsbm6g+3!-(Y92rREgx0;d3k=#j{Hdh?4csxfVk>VM+{A1#?(e$%xSbk&D23rz`!w z(?d%EwmQpWDBSU~(SsanOLmR~baI*wGy7uB;wVGhxZ}}C+jZCd*M0Xsh@oc5vT48l zzaLGST7}~~Lm!5R(OA12dl>RdZ(@$k=J0xp?zrRswr$(?z3+X$sL03i;|RkjQI3o@ zr?G=?J|Kb)=&*8wNX8gpfr%7sser(ek)Cpa0yHcD#VAFXQ6HUBeH%o0(u+%e%oS{D z@FmQ2s=!v1rIik*A!L#<9Ey(~HU9PWZ|&Y)TU6{=x$+FoRixqwL@5ElkevZC1}qVH zJnnKixE)zNdgiRz2M_H&bf}iK<(#@WdUWN|0=+U(s~ouCfWadyBV@%p)#+Uw zdQsiMmsI1C=F1#5I8Be#LNXrrB&-5#nM5_{gS>3@zLgS$XK6g@l1W26ssb3Z)Jzs# zC3e$|U;pDDe@|`3j4l7|9k)%MR>=v?tRiH@69~4TQ!s2~?G9VJ@M&spe(SBx_uO;O z_rCYtapT5>!t6W~VS7?=M%mD1j3on~P6l;)p#EsLcs)ARP^A$=cTxQfBdJ^YtHYSX zU=_MX3?V)#%t`{5?(I@v(&c<<*n*yDRs6vouh3Qw&!Aq>NDxKUVMBPHP%!rV^RLv_ z9=__7}ss0X-AR;PaMLplnwBY(X@XQx3bXs;(M8VO;&Oy0n%g!oJNhx{7WrWv+;{|{&RXs^A)k>3fHHXdB*YSMofcX*O?uAu=tu%2B z-dp4C@<#bYVqq*nbPIZglV!~ocE#h2c*~D}blVSq@MC(7*BiU@cRyaRcv3XpY_YL5 zd|No&jwN3CG{wCh59i%56e=q#|Iv?r_^D5QV#<_>;YiTwLKz8*AG{0?b>k7pP-tWb z$7ocV7xi3{Xc$r`qij`zP04^6db9~&vu!WegRKPy!`lDHjb$22e7N<5r5$lp(8$D6&Z$%JogjgdU{ilBqpa z5&6;{p*UEf6LD57x)n<3;-w2Pc)j-OhL&bOQeq~N>gz!<;0{#by~VhXBeL;sL!N$r zYd8{`IB9II=HxLsa`OQgX@gR^Ng}rQ3jomniBZTU z3Y7jRJQ8h41Cu=0$xL$7z@gF$?ie4E=trisnrufJ$dX1wn9gWQ+*43Nt>#CDoCb0+ zC}uGLkc2cC>tV&SI}!^P&Mhh`I&k2~u3dXwE)RgFbOk*|wz}P%>mdsd&`eQBDVcLU z__^RHBldT8xFR3C{GAIHk74VGb?aWf=9;V7@!84Yk)qvc(P8lFiQU5UUP_=K(OwGd z#3k21jFgXEhD)%8aPpTtarf{4`0zsyI~~?pvqsB9diM0%_< z^hxQ)GKmLNB}z+2ghKS?tr)F{?SYDX~oGjx=Prv@friwhNx$|~p)t*0wM zOe#CyC^J+Ok0rsFJ6g2~U!#{qciYwV3g9+Ji#6MXSFvdCq%z;7R5gXb-l75Uo6YE--EYlf9 zZj7h+fHHJ-01iCW;K&ISxQf96*oAppa1UZ=Rg7}&UM7i_hpS+}cw3|`(uYUEbzy9?e<|$#9g+x6tU3lCl2^7TIC$Sx`Omt(k z8&o8voODcKJ|zTrMxR 
zXZois(UXA~IYgJC%emcNu>grh$zijz!)KAxS?qL_u)Vs&R^)UPxtzrw*9blYoDQGE z?k)0_6HdgP(TEi;fHV1^h%w+yz*q?Hb667{iKD(4P02(Yd%d8w$yz@FOZbmOm7lB} zqzHrxq~hQzAVO=DQ5a|_yAQGiI?gvc)uX$0*HC)`EO}cpRzx?H+ER#C2UI-fCx>nRFGO{4v3}T!qpE6sI3C}k-w1j_(%Z2qF1AAV?%0^@m%-0d8fB z8+0z*xbRBiqbxpXb~3<{iAvCiQ6lgI@+ieYK!_vfaR)%P5)%(IW`Z_W3saD?9YVq` z14#&BJ7V{jP>AI+l7MughsnTAfC=9y^6(Gz)LJA@1S|jsQ7}m*@Iv`2f;=e%kt7`# z6gzIZ2thHCaMwo$=-AbQ-0C=6za!Xz{W zUHHSd8Y_uKnUNGIDez;lu-=#nH55QDEZKBgmc>+66_=J4RaaMy9XF<`suEc9`n)A2 z-jbpbc&=h%<2xD+;SmOSDJ1k!Cq{*`bXxQp`A{HO$sc7nqoX9NoPr(-&-IcDfJZ`B ze{(D5I!FVETsGNTni%omVs+D~(dA|3j@p{&&g}=5E}P46IBtPk+(b2O(P=f4N#wH1 zD#f_dVPmZ|!k9U8T2oUUza}fsox6Se*3BDgpLpW&U;Of?99(9R^JNpbaCM+xu9YaH z%PN5aY-JVAV0b_W16D+_QddgnO8$W3fc3{e{C@4)mt0QQzg_aK;-cyY?tNM}f@ii_Ec(AU<^ z;votVk5KkeV1XpCjNh47#{d_>Xc^K9?g-X&;xWamFe5mk5_CigM+#nqOfsXUKtcj~ z2rG)D9x)<`k`dmj>hi;f>UQqfXS2CuF~87|($%F35gsJF#LD98csvvdun!})!Lkv> zWg|+Pnp%BDuFF1n!H@Ue-5gE)&rg2*j<>&S@zOIH@+RpF{XbgNTI|%u2&BD&fv#u) zwgy_l{iUyU9H@woDD!~QOBvd*7isv-H#h$8&wt5}Jht!8SJNYHsnj2N(z9%GBB#Xm7U!{cK4nNEZ%S0s%`yz2)fPdW)qVPYX~C zIO7qBRz0#}`t+%jCXbytXX2P~mD8q89yflpuhx8U*e0a;in~4bG2=$R_G;b!14o%ijDdG{EO#jaii+0P zBo~vgOfZNiM4VI2+S;334%e(%v(~PC1qoGDc`v=}+`Iqq4EvYebmMn9A&D`zl_P%& zSNWNT~#`1%BZq(@66fLE32xs&`N#8)gIVtr*{U!EscSu#x_Qyb#=!M>_6Oa zthu3~<;daMy1E8D0a!x=exdX=H#h9w)xZu`{*VP*QdJd7Ss4o5;HS*sn~fej2%;M>e6Lc!4r18*3W_F{{kRauj1(dDV!r ziU!$K!+z#D3tn0G`o{Imk3aF~U+($KM?Ze0%E%W58*D>O6HGl!{?!S(e3X`QINu_L zZbVSnl=-;6dqbBtv};{FdWF`iusz#EU1GNrFH`XJJ8>7F_t`B+4(|B%Z+`jsqfhPJ zxdUqv9s+DJ5DHmJORVMPrL$&Fo<3vzq9rpXO&K$3(nJ6XCkas*ScMpg2s&BOg93Ca zAW1fmaN;}(T^NxIHsFm*g6bm*PSQ-ya~aR!iWv+0VP8FP$^@lquyEUY9GuKr9+Z;|oHWIRKbqV2QZ2?H|sm6yLz8j*OwA z;k_Z2D-PPBURraMC9c_a}G`Ja61k&==UHsh~768!I5|J+z-89RFN zGta!pj|aaKOqjxwLRDleGq569E(6M>ECXtFS@OK!m$?8lvGgJg(dZ^&PTGCg$k=_d zw5LWXyi>%I_LN&ADvf?Y+sc&HfX5hb%P5Kdh6+RX!+K@0%MMMUV8HE?wQsDP;k2u} z?)v3Ve)^rGN1MGau}Kw=u+N)6Ysu0DOO`EJz4}qkWq8Ls&+-+coUn5#>QFEgZ>Mt$ zps*)#3?ESOm|`P&zrT$m=4k`~pP&ofMt_7Ax;Js6sbKiQ*8&%Ox-Lsq$cqsw97eM! 
zy6?Vw_wL{M{);bcYCaZjkK$F1S1Xhvct42876VuX!4CR$Z1S{HhkfMo(`T!d$+&vP zT3Z-q23}vkasR%C z>5+r#z5rXt`*n2LCiwja8eqj~cQP^pPTU?(B*-oTu1Gle<*$7H?z`{w_$*Z;EfdC# zyX2DhUHI;EDyqtgii+?0{aw>%Rlo1jmHt3O!s=p3g@3k8f#E2n27@?2xm|8H3#5^* zS@Xi_r=LnUBLQrr`cJa?ni*3DAC53(){oswkI+-|(9 zoJ?XWS9|8&lo}`4M?C~4s1a&WJdUdcF5_$&-yf)}J+^=U z!5!On?cH;5-7BvjJ>1f8OePI5kk!|;ZrSj{-~YVU=W~~iD4KW5jOC{>S|9Y-1eULp8w|S zJGO5Q{s5&wTEF2Bzq|C(f0M=P30Yqy*p53M+jA*NDT;8XNelVZjfy$$UmaW>_oCAZ zl=hM3D!^72!3<1vmNp?nIc3j|f(3Lx+BaPTH^_j$_3EoX^T30DVX^M$v96DNDGW(l$lIBo@PwKxe_gHHDZVpBUZek+CF;Rj5E%f zi%UQIEHpO;4(&ho`ufc;t$BUh);;xgEm+U$>Z7%F?WSxop#T6t07*naR6BRoKKay| zk`d10QrDch)7T<;+46PhF-w=21@^_?C*7C?B!c=#S=HP>fzq8oCyPRc>C%e%#P^@7}v@>#i4`e|f{3TMiy*MGLDr8aaF@vTxUhryhOF?)c-> zX;}1TFFSSK-1$?O#bW^`u+Q0!tPem}Mek&ql@Tbe_RO5(rc3FvM+IDBC6l>U*noc@ ziNO&Dm8qRwWvDBOWnzSGA_HLsbL5ps>Z6fNlv?ngHBh+IS!}ar&VKfVefWPgx7I8@ zbTtNl<-u_iW0UjQXP$3s zZ+Y8!D^a_us>++2+kC!a7B32J$p8lokPsb}Ph)%g#*J_N=Vw0+cBrn%6W!^QxM zCv{IES5lz>vv8qz*kp|!3d@E!H?_31%ApVVzdJCK)8S)Irw`W$EJZ>Qcu*I;7qC*@ zUeJm-X_4X!CJ+Xa%9@@i>IQ2Yv3Y}M`ikL(LE?Da-xh2oAJ8-Vlu@%!8GG>u&gU?+ z-8&Died+aeFK^uN=JtaJ8vK5Ycb3=J?|b8oefQn_X!U5v)am0^oUwG-@&(xSIaLR} z67|#P^WcgPN>UrNB2r5j5}<+ojo#C)qBV#B%MXFvOCb4ycM zh4p*izhTVSvgVefxDKfF*szL!cjsTBVjXI7oK@w`3(VDiV4P zOA}p>J+f-rwCM~M)$R+jipTE6UL{y5Ju!I^5@u8k!=)Icu$5uJB4seZi4RXMUc3mm zOslnlWPZQD$XALp7rQ2aO{yNcH^ZZ1lUTM175oym>GKQ~8BbxsXHpWAG>=5Vl9O`8 z5C(g2rWl|actzRRXdxV6qEXPC8cm*FI&;puK5^A0ZLJ}cuXV4y`PidRA3bucu0Gb% zVrgiI?%2BT#b@@9D0^c1jEM^t&O!?tKe4j7lw*oQcxIKCmr*r^m8?1xX8539!5YXO ziHh%NFoe4XF(8p}qCrYfH+fMH0V!-~VyqWMjDIo@rg|E$FTsold!O#zx2w3Q)Mg3I zIc3tqCDof=t9$g(f4%g=ODoP?0dfm+W%N9wj8s{J;;*TB3_L$t)s%92K*|i^4Lom! 
z5)28b&MkN7SvnHra=Jv}dgJxiUtIG7j+$3q`QaHeC)#XnMa8UuYXffZjo82I=(er< zzjezu@CJ1_U0$!9109&g7XXxrGjR?;y`ghr?TOnW&pr3t2QT|D$_qOju$EBZQRXi3 z7?4hw>Xu0!;0o{)-9*!nj?LZ=v6o)l@c+Ja9cCl?D}z~P`jws~S`~ZU7}#V~1WrU@ zK~qzpCOsQMXp*W`qMY!TUJp`3fn4wd!!Sukv7IZ%M1fJe&4_nJA{@ZuL&4a>r4tt~ zoAIg7Tz>FC-J5T0f9~0r*SzrB!TtW$mP9bna^%pawJ&ZeF15^_J89)POXkd*TvF<( zInoG-vmmLow3KC)7})?WS=@uf905E;|x zhyEoSXg-l1W#ul90&ETR8_9`2dJnCfo~hm(dndT=`O7_Yn^B{RFZc?mcNi<%NzwpqDx^T$8q$O`Lp7Q0IL&mhCoqrINX2^14)6M zBmwd!^)+xoZs_Eql?Xz~lT=T@5+sjgd`c`(i^jr7Msvi7&0BYCF_Qzx9;M;X)nOYa zc3iXYM-5@!o-n0!^0c!qy7=6tM*pUb+n#v*nU~kD-@dJ}tsM)X<)s()ty{aVvdS^{ zlzR9GCEbr`o@>fCV8K*lP;W1Pr5I!(soH~(POG_&C#8`*FBj9~&W;Qa;s?XSH23QG=`EL~b%U1E0xF^==X=*+Bu#`p4>XP$odyWd0Ki-+js z9Qcm43+)N_CE9^F8z>wByXbWu+@t2qnL;lm4c2E{`kJey{^EmTl9tp@i$V*{sP9P~=mN8pcnYvJsG5anKD6k=c*O76 z!N_s(lDWPjJN}e0i};S(9WMOCYijCh>gvut?=;r&@neoJHwzTSzRVzz?KhLE7JUH4 z1P=u7hBvpJy8KjMk<9%w*C@J^=v;Ig)o%n`q%FvRPP87vDvcWU0D$bvAoln-Hl2DZ zL|is2CoKYeI7G;ZRHzBhX7ovQz@%t-D&#u^Eo=Tn<5Q8kbiR`sNGc^yQyEH zh>+rvhwYprCg5DZO&7>GH z(1~Iya|;4eA|EhIxZzu=1&Bi8?RHmn_2?rt4MoMMqCvbam{NS~ku`_+haPzFzE56t z74|Pkr7B23u9Ogyw>q-$jRZu-ouyKMtrOEQ6l8Q1E$R=y+8~HPKy2H#Eg%9j=+i+;|#W+a8TeSGs< z+b_KEZFs$_agEr>DMZg_VOC0EhcKhKl12hGcGQA0!&Tfjj~=S6Ia<45{(PqGSisivpTKokyDjIMPu4UtBAhJ-q-A|)u$p#Y9=$;*%# z{8dUNswgp39rj2id$HPGt{JmNesT7d*L>kq%s)Qy_;Zgx_DoHUA1{jauN~U4%pVA5C2I#~ zYLF{)4DudPvGCrCL^hz?f}}2IsHnsPavj*WW#e0KtY81?iWMtlSlLliU3ml{Z#XH5 zVev<|bCF4ZE_u~*$uEVv3b3W+aQwd00LxUcppu}LCAw$NF2;?F!bXlPWx`ueYGf$vhai28 z-i_LT>L&IhX-+f+-5PY;f(aS+5C=SoRo!CWvu7Xc>Bfv12|$23WQQ4mmCPD&1xiF~ z(-b;WOEtArO?e6cyVcq|dbnVd$^bR#{HbIxUIBtaVp4zA!y*1< zG<5=qC6Bc!z*ZiG*)u8QMRraqFBL4%Z69r`wYAajXA6{~l2W%ixsL=2Kg3nh!D{3c zXDq=MhGQg_9n>N^1`B!_CaLBjLn@_qd9W_m)g7s3d2;Tvp_hLw&H#vkFo%t(X_E5+IYpk*AEj5idUXo}ywOKP675n;4ckjT>J%d1}?B zH|n2x_Nms^mWqnvWF0#F(Q@b%b&}s05;0UQ1d`S=PI3{Qu<9p&id2`R@39;gR!U3R zX9n9!K=yA@7(p*Z7hq3?1@o8i!>;@f9I_=mLzslyX7y-Q3m*m9YyiJ-hp(olX4oMoexGgeW)S^Y(Q4C0R^^L;3KDRunvbBr9On2b_5)BksSq75hWRL 
zfhA0ALyZm-XLB$B8mk|G24Jbvrx~rW=*PzN0td3%9R8^@DzE?ARsVeCcR%>?)wAb~ zf_5m#mSOg1o>_P6cYpEpldpz@c#7GBq4wIk{X2JW*tY%kox8UlJ$eNHHLqs`gHR;{ z>Pxrc_X1NLwUEvAS)at)MAbrEAg06VVU!(?1|o6);wAIh$$7_)?d=x&h zAW!Bm!K{wA-608Sf`#jZOF)KNGF7PhX2R0c)C6F$3!R$&Sfn#v(*ff#lQ-IzIzIIa2KC(L=0ZWR8qW>sA6r^825uXsZUI zhGS?iI9TCIs*uQfq~<6kV%bqco`g)OrS=gNmIc71_ohAo7PqQ89g@|cquZcDGyyNN zq*8*0QnZ16>Q*9&AzpnXxBNFQiWcE0%OJqSv?A6erGm*~azsHSbw%YN8Q!IFON?Fp zEVfYG(iVyyE333$@^9z=>F!_r_zxn$9yMMp>H@~=N^P7i5?L+}X zHnvKvsXMZD$JQOYb{#x)NRXGcLFid9DDwkITd#Ex6C1Th(_?}%!uj3|s9{-_{X|b$ zIMrKZX=`hHam^a_d!U`>I%=b;pC&isX+Hx>fQBTK#0IFWezMwg_RO~1bDlzq9wnf) zCqo}eb!_tKhLRd&m?D_Rmxa*$2`5CS{NvV>95;)|uod!UFgBrNQN^a#x zCQi9ZIbzI#gz#^$xjeRazIWM$?>_yF_1pe-&%@6?^Gbbfbo2W9z1yCcHFv`D)91{Z zH`eA3TWqLK_PVCp`o`MYhT<`!M^=tzKZ@0wrxB4-Ix`#X1RpE8ww+RGe{D5vdUyuro} z4$(E`Tn2m-Xz>JPxg9$<2PRJ)%TR?0M@vNBH7!PTj?{%>q7C_wK>vacU0Lkz0#*Q& zjRSgQpT7eK=FXe#@ydQII5g3(M591ik|7#hPt4)EEv70iEb(6tgH&m{?n)qPPJnDG zOJ;viX(W$MtZy@uDXJY`nF)x|AJ9_~5fQ;EP_CdsSeH>SGZ-9#ivZ>?ny`5B^?P>J z{pt7juU_>`Fc?{jKqR^PDB~7EO)D0>Q9tfOdMP5T?`cd?z}n9<`|B!rp2-QU8&xW^jmT3VX1 zucNh(sj|8 zAB*~pH+^C3*lLE1(%%v>eyznJDHd;OZW=pw6#IE%cmg_n<1$za1qS~=oJ)8jygqmpg{SgFpGd7hyaK(Z>E_~pb3SiA6KRawK%f|bR z@fmQB8eLsl=4FSnmtI;UZmh%WlIE4VjGF1DV%+8>P=Kwxh%px;x?5(XRJ|yUAGXc) zwXfc=efzf7w%A8M_TI}raFJRP$QE~ii!B_&^5vp;u@~T^NfTrYr~m?(B2t#W$rwcp z!!mW|aPh57Iab`4SoW+iLbBk8#Rppt^Nf7NZ6by(pGh>vOD>k7;H61o<&P(fuIu;?Jrim6N<|?_AfhmPY+N=&@$Va1wLK8@ceM}A0 zm?)G_DvP2a278%jWVTVe)GN~IUrCDT(Q>a!Q4dmGsgq$vzYjsJxCKuU)|E5yT2fMm zY&qE@9BGauYEM~I@x32@=Jvb3vGUv*9DT$f{?Ugv|KQtyf8ogk(LlAsUXGOzj0z`O zn*)bm-}J(U&9AlMsv2h{HVYeFO!B&&9(IiuZiF4#A7bqIYBFtnYXcj@D(;5!*Fazv z@+u4x2c*%^PjaU#{l{im{qX9WZ~g`zl*>;W|KC5kIbm&gI2mXJl$OI87N|L5U&y0_zkoipl^Oa;FS*X5CK363I zpMsd$^d`h{1(B&$aahHtlr%CBW~}G;x1xrQ994PYg%`}5H>dVUOPiOM>PLi)_MOcw?VL^7(i$jcdyGV6`;P6*Y;s}}Ya7*wp10_2tPk$kwx7}W zWgob_yrN3@O0sXRg*j}l+5m+`2>m3TdfgplGBGPNO-Jh9gbF74-bgDWZQovlQm~S= z2-99-q4>{#{y!FLxU|f6%XhE$7IBhuTQnYuB|Xl-n4tvPzErMbPe)z8L${Mxq%f(!}O(2|iONJHtOdNA(8 
zQpO$#xRv8e$#{@WUDzSV>x<)QSz20FT|E-S^tV}C+|ib%sK;AM(YQp(o>3fYm_P>< z4=Dz=OwWnxBqKzbyyIIz0~$uEOCCn1CCl2`Oa^r3|%;Br@1!bf!5~ zkCH+&{-oruC7LWio~jZAn$j;R-4sW$QoIb3t%*pCNn6}^*?OO09fLcck5yCRnZnlZ zp_MD=Em}0|(bX^9b?4s>>}%Qn*1?1O4=p`?*1O+-MtK#0RTG!p(9jyA^-Twly>S%g zN0e5(efB`eFUvaPv0{z`2*<{bs{;HR8;@<=v~lY6S*oTcjtapXBv62@9E3E`NvxKg zyEd<1zXmM0=%Ncb%)PE*XHl^OpKA_{pl}ocwk`F#O4xU^xTp%WVz#QG{@9VjhdIJ) z_ny7`_a8d2|IpE+b?t32&X2&YDkxjjFw=+2sU?htq%<0Ef)8DbK_DGk4dvukIvZm= zrUYz~o_z~A5KK;lNMN64-5&|K5_jDGhY@APrKLW!kFjG%kKx1UarEf&iV`+}VMr@Q zsv?v_t5UlJH&QSme+?W-X*~+FR2o1AASDqD5dm_kun>@Y_2GugAfj)3^R2Qs0H;fmB%MlE=Fxq1@?D5(@_@N8WJ?E@n{_owZ{`quU zOX%6hH}BfH|6LcI%`VchL>uc9{2?~>!9P}fPF!AhIM&YCo!L3ALvhQdQE_y66j08F2j9$&ATI?;#v8-yFV#|@k4eV>mI%I^w5z}m)N)<?X|V zRz`^wovg^<{*%ck%)_#d=Ud;JE0GqdQ)VVJXUPnAbgj&1g_u4|Kx3A4IW=2nhA?Q- zgc+alQSXed6gygTNS!tBs;sOG2IOczr=uJ{^o=@>Wy7u=HGB6Q0Yq4a zLaXNE_1eaaDIYbuYU0Fk)24GU{ixAns!B?{W#ycn<#l;*7?Pc$q|38qprDl?LUI5_ zNOcb#3C_YQyz$N7NGKi-#RC3NdwZ~{v8BGgkz>I)bo$t_hNhZG##rRJs$;Beq6vi>1^V(p(Vxig$#J5P`-f{bXVpl{Fh(;PFDH6_S&9D z9(}Q~rHN(IJ9llp?D7le%$>qHuO6>6Do5~I>@J*J?EdzMUCvv>J0#}yIZDfXi0;tA zLo(4JKX1aAyc*Ttbx+Q{N+?$m6?`Ve|`eI7kqHSW2l6KFK z1l7pGBzVZ>c9xb@Fr8QA8{u}9xE#ehwr+iO-5Wc$?|X60`U3}QfGVv?(XGagw@jK+ zy>!{42@}R)BcC{N;)oHY;sPqoo^~%EQ6>a1ncWLx$HMvxWa$NnikIY%xnzJJtUBsf zk#EAXNd$Iy&Oo%V2H302XA_lwgLGX$he9A7WIw9bRu(+&-@m{1Xx-jj2M+E(eE9Iu zrlw$Pi)F|5*1fx5`Paj*pqeoOH*wOK1q$<3>82O?ZDH!_pCD zQV30sQ_ztCz@{0)6Mu9CC5L2U>MI(Stm5WFL1YKE!suZ(ImFCy zw5H+tH7|YmBOgB2*#3|Ep7`Bef2*sDut00n7|TaL{=tb;$F>KLIei?~9b#2B+7^ym ztZ;YRN>)F#_D}z}aq{>HPdxF|rNG-Xm^-4C~1s_#>*1cfVDG5v0(A}zAjo(z`+q-@L$ zNGioYi|!cos4EPYVKEi4VR0Z5gtEh98#lRV;>_)yKRUHjI*#t~s# z_U<{%bitb&k8Id@Mon`#CaN1_#pbb#gr zmbakZaX~olxKv%GKFQ9|@kBzZdYz&qMiwf%9h|m}-3;eg#ItMH?iFX8A!jLqN+KUA zS3x#FOU22BsF`h00i}Y0Ww>jhcOvLu#LxZ?-s12TA9>rt#S?$=^FM5O<51lp%MWk) z+Xt_B=aSQAL@f3E2w=}cZDf?_a=SR;wrXUh+hb|>w>P)6$nRF_qBN!Rby=fSq9O8E ziLQg!-1|;Pp6sClY-N{?=}B*euHPBoj1wV4m#6!qz^Lgulom@%L&I~=Kl9gn{<8kH 
z^-Kujeh>)CCZ$uSSFAYm)blSmbJ?kL#!cWDg;svy+2soRcr?*29#U3DRcy^1Vh9LK ziA{)&JaNlZXoc=Yar8j!M8ZD36HpOT8kclJ!L}6cNCLT%V@7?$H%U;WR2IQ1`Zhfo z^+(e|!y^g9QfeVchQfz`0D771>`t#wuxE29+9IAA&iI5WmD6WUe9uMiz+hKf)3|l( z&X?D|x_Ki<%O$`lAcz_7DOBP^5C)2+N3f)CVQo}}m z6i5W-{eSk(13<2--v874%=VgWl1(<7P4B%oAcQ7O5kwJ1c@|K7zNbDxeJc7;d=`p; zNI;NY1(J{gA%T$IH_2|w_P#r_(|2Ze{-58uyPHk2!Qem9z{@4f-nnz{x#ygFe&74+ zKc(a{p{F;R>*|}ZVepRNNllngbnm@){qg?4zWCf)B;~*SdHLSm`>wfuL28yenhcSL znloc0J2fOI-H0lan}tHk;w$%*O+6s_tG=B$26gb`@xegOYDdq*)>)Zev>H4&SS^fq z-8V>*$lPqTY~Ql^nWvv#zWfcm7YLBXYt!v9r)65^&YwAb=GaM7#u44d>)l}L@U~Z> z_JVTBAA<`QuWP%*hD3(a8tF#t>2O&jtr0O(a@0uFo1{w?&jY2f5;_wuN%$%3{j!o2 zctWG>3sopNsJnxTY}7#`gPRZ%Q9ja#dB0`gDlTYp<|3s8j?Fg%&E)h7R5D_h* zG{S2MV`2!NEVsY2-@u9U7mp|CsI{SW*N(kp8QrmcUuC7g@@TZ;@cO?#u|6-~J#o^Q zi!PowW$Nh6oSaTwkTg|vR0EY`9sNi47&5#eJRLLDMOEnIi3G?LW{blO8I0cv5|I=XJce@>_|)P=dfiqHCH>FE-bJqDfT<=xOw8Zv48m8vLi>@ zwr{GeuKeq#Z(N!;#Fm-uiX?-SwPhA@Ol8t|H=x_1dASeY<#F(-?kIOXznyXh&coIz zr{<&|WZickWO>OtJ86@1Sn@C-P(AN+zKXzT&{KkS`SLfH{rS%yYGo9LRTEm&MU za2{SjvnEZPQew4JXgz>T6pcvoc)cNpaD!YT2fsOWJNOKk=R|h zA<66sAtE#40Ku&#h!@%vkx}a-;DnQ?2tfifJ;Q)VWIT$5yU9ib7WOgu(@0zj1&ZAh z8BM?sx)wm>0CJ*YK;&u#Y071veZW;B6NPmVZ&+0L04NP~c1H2KRA4IYXXIEHUO08h zB@4sBWNB%|`|oYr{K2mB@*_31ZLhqt;ctK2FmRB4;i8!rU%D8jp4CpezJO%=5~fn_ ztA^Dxysn<8C&Dl4M;Vyf2X`bkh_N#?Q3rMp0ry>VeSNdTQhy()x?MA9`?St_NPtEa9%%-ibGdP@c6t;TC zsV$&0IAE=*kJ>&fSrJSRg+K!`b+W)vfurw{Wd^fDmIVOC3UdLIB7#&BHv<13AH4t3 zKRsM=w1Q?*QXC^kjvO;)^o18L&d5mj`@MB_H7`8#8W~O7B#pV(*WTjuw**6N*w>&k zLfcf9JH6FrprR@^$Mp1!f`R=~Q$0C(*|-Z8sNY#0R`ghs`D zhSeXBEx?Xm@s(NSH@FS}NGO@HDa8)&e*{Eguti4HNmC#m;wH!|XhoJT#5E1_Z#+SO1W{_~$!J^A=5>R?}d$)d$erle-rP3AbBpL8GKL`H@eB+&wx2%@PzNY)+6 z{iU*ADVSk6alKLW{he_N7cn5QT39Xi&KRNTmQ9=Xj2tzpgN$bQa?ve8EPBZ%Jfw7( zO=N5BxpM|HEc-NvIimV#Exv0Y8O$0kz+klAXvHxu5NYjS==#asUw!h4m!JOYa<5nS z{IZP)cQ=0ehQ+BFwm=}N*V&{LFD72lOF)vA<>s`+Xvh>n1rKhlZvXN4(1swhrDmj}L-`S{64PPnAcujeRi_1V{d^+A?V@0G;zMKvq-KJ-^8c(8VZR%C@C04*Bsu-ov> z3H<4ihkpON|7~ojvtVqq=*Ep5J$m#Ayv)nW4*c?e?yRh=;zAm+m~g_fq^4$&tsynt 
z-G4xy%WcCs)J`@`dF7)#hTI|q-$L7gfPeq~eO1*Jp>QZWCku7~zAQD(jqBE+!U36C z>EdLNnF$2pxr&nkumtn~B~-nKWuk+Kl@K1F1VXWHsO!fD&GS#d$xK>$NL}>dP$*g| z{HoXoyl6x{1w~=Uo`;$lA-$gMxG$_^jV47V1sCX#yaoso;wEs0#DWK~*SS)47cHH! z^wNcXZ^sAk?^yok+KumT`(V?K_uk*}@T1lxOXppB*+nBq7J)>eP`hMUrU;vu9|V6v zWO@k1gX=h#*)#`Hk)8FDFd3*+Jp!YFZ!}8yZNUWZFD+Yq;WR7@GCROAkF;xms>6I? zWO8BeQS+S{6ED)r%If?)>MeE7~U(3;q5UT4bK_g8@>RbJ0ZdiqY$+yKS+dSkUg+GoK&t9oCozXJ={{ z=V9yb&!jA9Ig6Df%!ArEz)C|R=wz`Xr4hs|JQwd=NV1dgWy{k~J@Ko1e!XwsK5R$$ zBC#@UPRpUg`^w69+a0(vWlxw?a`C0p^9Yd1&(6q9ae18ZQj%xcL_3O#NQEwb1272` zyn;XCj(8g+KpAvLKvFaU^q51zj>CtKmhL-1g`TaOm)F zDY;1|hkt6Gj!jY59zUs7tC6B$7=X#_!MTAt+zUPhG@;Tx*3!5xsy8R*EF5v+(n;vc zH*9!s&6@SA-`e@7hgUrPQKfGCW)LY`Z|Sn`}XhW;J`sS)YukwP;pp_?~yrYEZu$9K(`yQWsEe4 z%1>8OadhIu2_)OXXv`=WEnrGFSOcPOOvY1EoeLMuN=?i7-EZ$NJJfinJo3;3FWhj$ zWo|;;^KkNdQWc5)A6%o@5H?o{+kL7ax6Z>R0(0ss?fd9B7c% zxe9OuyvR!haFeKtlJ%jEpxGY3=#ptmE}2(b6w+_h`>#; zU^)}VB!ogC9I>DmTsmP$Z4OIZ1Xy@E5Y0j~0qVA-o|%*o8wO>!dt#B~dmGjdD=x~) zOeb%(oFK4!Y&-tZ^OH6z?Hv3~aW{?od~N;v=ke4;Bwlx}z zuGw=Zbn0&W)1!agyRV_DI{w0QZ;l)`#$vS_#Jr4$9L8RG*MX%XwiAesAHNjOJ%=`V zRDEs7&r;VJ@+7)mX?o^Pm0CpB{NouM1#8#B*Tyh}_FBzi86rF(XC{u{+Q%QKg+V3YSJ`U)Qy) zTe8il$U7VwWWDmj?o5D9NxcaIAhUv5Tn_O0(!mOaAZc98m1lz_)dIP&YO#aL1{e~C zg5Q*~jj$M=Z(EzUyu7@$bpN69BY{A$sA%YJUfpok736laF!a7xIZ#mwZ`Qoc=>MH)S(%>lMLr&|42{q4x zI%<J%{v&HwZ(O^2Jpo2TiwA%1#%rd}oS-w1qgASk z0eg0*op%5%n-XxzMNJuPkPFVvPs{+0Xe8u`Mf8ad^OtY_)|bC@)4YXaI`xE80XS+b z(qCZ?dp_xvI;P>Wwu>-lsjR5K>&~A)_~89EC+t<2hXCpliNv*fbZ%1E5~Ts1CloP} zmX>kPukQ;4OmD5+SXbRnsFfjMwwaxO``h2fO&Bj)ex3nsbBUBM>YiYAx3lxXJ+yIY z%^w?kpSSCtgcE<%ChFv0`#jFUlg~V{#nbzK9=3WvyyNCfn|kfgaVKeCdcB=PAs;5~ z70Z`@=Z=4`t8FqGdH04FEuL`W=dYVFeNwc;Z?@>sOsR#!`cyh&r8g5EfJzt9rYUM= znF$L9Y=LUDsa|rT1LB;XbuW?92(Wac3y+in>lg~qi1bkYiM38Tg(o~xA$#b{_>~zE zzC-Xrm@nv`gLFn6$3#E`R0vxtoS#W=*L6B0LEWyM`(JwL#VuPmlnfvIm9O4ZJZuO6 zM+HNiK%_HfG8ZB;(i7eTh-3KoVw0tOWYT7FC*$^2E7p_H;^BwyceoHIMM&&aDK+_WdsbBDhJ zTRrFXxzlTwx_&=t_WHc;A3u0rNbGy=w0ZCK8;_Z-do^{haa@pKYaCAZ&wqN?FYf-S 
z;-qwwCl~$T2Va>!V=N_FgOPeD+@zOcd5AD1S*WRLJX%q^zw~f*6`|49P0e1M9&zW1 zQgH!zQOgPbf*m{}MKFXzfI-8?WYl3mN8RfJyqvDowA8%(?Ed|-`}NPt>z6rjP+tH3 zId(g&N)&+(5TMBC8YcH%Xg)^{ow}87#MQJ1|WQw2&Sl>Y!*{65OJ8(o_qF{ zOE0_F<*~xLQbk1YvU^iQJCQHwV!67on6pi=t}|v%h0CL9=`^-c6HY4#xmMmKVnf;0GU?O$_3PKt)HM9^!s~8W`0C3mwr=#|ulGOq+}*E#!R$GUc(oW|4dp!~xv)(Z z!SwUnsb=6jZ1piSCt()nu&foL$mvk5B0j=dV52&sw|x1QC!hKYiuTMb^Vh$5^OwGK zT{J>5!zPqXY3W&BZ)o4XqdRx(*|cfv-n|EE>jK0ZhC(`4CZ9IDn9#u~;16IzHe&H2 z6(WWRr-0XK43|p^DkD1LI6!ER&eYl#i?^B0RZtI0oUTwb)Z@{mrkM)|6_gAgI(F>n z;-N!GiIwWfgmXczBo&Q~U|x!jO47^ zNL1I{UNY72gOX>3@(J^k5@Eu8;rqc)beMCLz$iHmi96>6temvsO2Hcg30gOE=JZ7i zFW$Uq`+xlVe>FEpFIqBl;RW-Cj~I*<#Aw5hUF?jCrQ;$Znr%j1eCN&sRn-j(7S1OT zBM}JP4rJ3*?i%BuA(z(GUZ2u=@JmU>#)1RHv4pO|Ig)3QiDg3IOgWiB zF+I$l)kZExcZ$ay4ta+R&Aj}oS(zCJ*1ug=cBt%@TfXwf8*2s+9*Ag6NPvop@0z7v zw?21wwHfQPe>}kt`L)kWPV?ZoE+qC0LmNL6p&dD4I!<-G=kgPFdd@%L_z8FE`BkBm zQt{{{pC^ezWv#<~=*_o&_0~T>_Bd2IX=>g*zrJ(a#KAsqi_M(c?u(Z0D_^_zy?5W; zbog*hz|ZBP?Xuc+XwyL}Pl_`uD?KYa!|8Td?U?N#Y}|IxOQ9bzx?QOo)^FOr^#GV~ z>6KGR2o(%;_}cy6K&!W{E$9nHLUEs07YvB+1@U!YiYTCXQ7c2F)1k}F%P1}`nl^RP z_z5G6iu;=_Fa)UkAu};@@m^;L&@-G2A<$rl!{KCFs;a6#_~3)L-(Gj9{3u7}&Y3rR z_M9O@3USCl&LwmQ-!xHm!!J?l3`88phoiw+L$G&M30-k_!Fow1H5x#%{WKbDQu_YR@GrN#N)|eHKoL()(_s_ z_sBy}?A?2iyvg;Ab%zgmXG|M={S8-NaPh=grw{8ScgN(4elQY>IGq_ceg5`|6DQvC z)z47RHPIQy@G5@qOc1wG(NWT8waHKbAtk`_6P_TTr_JW7tHF7A`Hi2yHqjZeSdHNb z8MJYHXY6t-T2lbc7hp?xIqGa146b{B^~d62gNsU1D=NzaeyTK;zx3>`kY9Jfq9w~7 zdpsk<14AzF27&mqy>T1qYxl-@yN29-`0b4z@(|bMjo{Cc)?|n}1 z{hsrCA7o#<(aFDR4~N&bg!rP$k-}W8M&mtq-+k9zKSj8^{8N+f{ont#+7V>}O-(H? 
zJp0zW?`+<_y@t3PA-7JvOm+GFBrBFD)k!)~hl{8Z-u@6NA;}#g56ORE6tHDM97Cdf z^Nlt8_qO*NV88u8u20W$V59=|P<|s#B_eU4BNC3a`@+pl?RB-ybv2DG&A$4&W>P1^ z#IgS2-2~C_hjQxj@?BG>O`JP-)~L}XS=m&1h*R+pZzt#xQe@z$!(bCotKr%zR#Bs= zwZ*$})ku>OO;EPLkYkeGer0RV1-yp$P^n;PM1{w<(YFA=`&BA$x(=g3>b?gE05L# ztR*7`f=XCLg&XandJqah7*-3OOf3~+hGcq4G-!MLu@_x#>%2uH_LuF@WAyafo?iC; z>g9WkNyqiq-T23c@9&6&EmjAw7pa1Db~ieuEo$vH=V9wiOgC$#cQa=kwt6>y^8N{T z7c>(M3C@k`EvmFJSc!IR-+aLZbG+@5DbxD>{tv%S%SbtJ;J}M7yt@3&HO);@$A%KE zx%pJ|EG#Z5Oiy>C<`$ii${_j33VivKGLm|S{2%$la;lxYXpi=Qh5N8Myt2fj)tzG-> znpN)}Jz8hCIcLwByI}s@p~Z!1X||+3LI^R&d$ZX|j3grhjCgdoGAZz4;zSF^^Z|81 z4=sa?qQ@bH1>pyq-6_<yE9^{7Voz(`PlG3e z#sbe8SQ06hpm_y-<;!1Ow(QTD*`{TWKS+4ZqmMoM)~a;~gCsi6%r=Z3J7U!6p-vB3 zE(4u<5|W~pMZXMTW3gj#gE9b^a2#NSmRAOw7gjqf^lh{TeDOyfTFLF^E*SawTNZnR zwK{0lLTep@3!*6}DTCHiW(Zt`7sPv8Du$Vjc4;bZ3al2^e z5hUqhdWNpBFn!LPsS6j*CKD+vB(abH*FY%0kSYWw{s$%$Ov3vp67ICyNMo<7scu}e z`rWrytS&FFL-9Rz+W5s6%o{y+82wO832z!vv?Gd2ISjasE-jdFUI3y>=1Q-LIU+7n zc|t(ZqO9-oM*|>)(-Gc;qGx#Ulb1}K+VQ=zm?%nUYH6*1d+j@~zVz14?Uh*>nM;;j z_^Fu}?-c+5KmbWZK~$?3JKbb`ZV83kaUyZrv&s%OKL6~>6>q-p^GWI}?rTB!6if1$ zNLXgG8y!ybFaPJKUSAXTQ)~bPQISzYx~JX&+7hB0?C@&glL_IIzJZ}Q;*y>MkJfN7 znwFkI9XNdA=r2!9SmqO93s#gD4MSm4q8?ZFOV7OB+S>Z%uU%Euuzz3Ko){8jr_0x5 z`sJNZHC9JTic4O6>E%H~id0Xy?yPQS47Rjho(_?o!SS5yuyvZ#(E~K6dH#pbU{7m3 zr^%|8*b&rP>P?vJY^txBFkxI%Q|r~&TsC6lh=(42prOe}C=Qh)X3iNudi;<`M*w3y zd;|oHbsS=*dLeIama^g`!7R!Y!ZeX9B0+T?0SlcK!Z*Q`br@4%KBSIU(8z#w)sVAvrJyk<5sqim#9xFaf2B$_8J|PGf7c??7o8 zsjm)~c|!qITk?;-*{r(3g}DnB&7>v}Dloks1$-1mGDa~%i>OVFG{ij^2%-@ARv_s^ zWk=SmUPu0h(tQnCneJJ$X3kwOlN6?TdD#F#B-)PeI%va4h3$eFlWgfylaV2l!iUy6 zI#CAU9V*TZG5~Zx!pj(eV>)j(*`m>y!%h(nPDnI21~$IGclD}un>N3X*gIuP@fBAr zMO<|{t)Z~bV#A{iBZHx)()Z%?@4WHqmX=oR9WiH0^5V;;pxAn6&9=H4odr8AF^wI` z!GjAXPc6RT#w!X3XT{>w6GJdWZ4Wld46=bxGOKh8FcZi3Zsj^41H^JCy%Do7P|CYX z`cIZrVkE?*$h>RZ7MH+bqLSm;n3Qux%w|b_^3fG8hx_`QmR8j5J6O3l+(B~rgxl6{ z&EIzY@pr3jMk|hB-~9I1aCZ?IQ~sU3?F_>f9c9MOUUyGu`P_xA)0u^n@#S=`I$;V< z{@O>g@>CF@)XL<7&e+>)R$X@4rL1Wz?hQ>1q&LXUb&i`@GI4TA2WsF>@+h;y=?;yF 
zrfPJEb4AJM9G#i8 z@)47bc2J+uu3S#jnlJ@9iC~4pl4P5ZjT^IRJNR=WMZzjtElxwy+SU?0xW8)G_5)@6 zE1K$29*IVu>OP26qekZ~UAk!T;(7fGav)MNFQX1ewgXbYBbX`}#&`#pknPT$h*_$u zn%At_ux8ELvht(kH7qJBoH}jt#EIhw@FsaY9YWZl8?b?NPXkjKIHESy=0Q^?vNH*` zauA%2MdQFR3<}&lixC;Ez{QS2k2wDOhZtFOfzlEJ*mBl+G9O=WuCbsXCAib8_)X8_3*=5I!9sa!@At-H>kGPH`zVS zi97j1tsU+1lYi}c8VZDf8K~{07hbse=9{2ra-Am)@xr3axeF$_Q(2=SLa7mZ1V*&} zC>&v5-rnj(&SBQGh={zn!7=1AZp;h6(}Y}vy4kGvtbMb5>*jqanY!D*^TpI`bEg3% ze9UZz+#v>>tSQv0u>&w#vIId!HeTOIcC>}`!fg>={`M044mL-ReS$4EZ|_ zmLA#s!Op$AYrHMucmaOF4#K=ln=yRBqUj43&Y?OM01*m%F`2*88aqKnlgUU)UoNAlxPG#Q0+$m zLI5rj#{0zi<{R%l^VA!M4p+jNB3$P7*In_M#Y4vxb>cb_BdiJ*)Rc%O?UuB*rqJ5A z*6-R{)>uuNMIF)BE|+=q=)%u_?y^bKhr3glH*%Q~?IhJVvG4yB zwgjt7D|S>j97VI~G?QaH`?)7py!pxw;vC*wxqj^U2`4UVp5htaKHjh;4Dk8wG-rT! zi7ZY2HyAxBX*_wB9k1x|6N{(K80}1<;6!^oPVfz9FtKXwflshm;xH}f#kHW6eDI^l zX&CP$+R$Jkctvlwy3Ka8x2>V3vS#=0Lj*e8od&BDqoO&Q=zwWMT5oI4fSOY?mvnsx_$fZmtKCQrG=!!!pEhgTGKOJo)kN(B{pKB5cjaZ zwY9br3Qk&!P#{X~DO7LS+37=v4qmiy&ZyDDi-s1G--5u*xQ=RU$k8Hpq40v&3-}W6 zyuJ1DWv?76Z;V9sNW)Y(y!eW7^Di7}Pt%8!t%&qYMlc+(+r)s;p=+_F=&!k9(b7v7 zZ+Y+N+7%yE9&L>IVq3N!+`ju)<0cebe#J#IXHCY^$3&unWcJ{sV71s`?yNQ&Mifs< z`r}VLg$h0+E7RN7jH3n*0*(wA7j{&_uy`o)lHlofEdaH)2n0Zn*WT_WECI0Rp?JM* zOdxMSGM<`^iGhXv61pAKSlqs22e5UT3-Vk$jk(fZ{AaskA8**|nIq;)n<;Il=lH3n z?^Gw8e0f*Pu>3d+#gJ&^Mp*NQj~wA}IfLQW?3^_6XJuw)wfU+^OpE}9mI?_GG?8mX zQiGimVMzjLaLR_^QEV13YEBR{put9}UMdRi-Bop9{~@A$8X7``^H8>MNSepuBs*0o zmc&Lz}cm4#VWxgQv|K-dNwVdDGUNTT5%o6S1(asZO{2^<8hiwktd5&+`|~T)23~thwWD z_C%zkt)nwa$PMjSaO;$wB%&QYx|Eq?%E~RCycvMiV zthEJSSz$&2T<{k`EWcmI_zA-Y6%Oo|pF411L3&yWJe}A;@Qds8M?2dk#cNXU^GTv- zt2xEr9$oSF*5{sibJwm46ZP&p;2x8sr{!IE>GXoZDY0Z*n4CODJ3$A$F7TP4cgW81 z&M*<<9l9X3qh~G}I&IGI?c4UOTDj)P;fOw}TfeSq_x2|yPF}zGqUrPIO`$F~`50jc zO$MyDo!pf^lceEjW!+Lzx`E1L)*6PyAqwn zIe8wd&4oXd%|U^AiC)o6=_qTEQ+V(QEMonWWHczDk^}}(X|TDup|!dF$f4@G>L8K} zUZ+4)JdFJdJ69|#HyxETvXsO&D`zp9!YNU-VPZU#SGs_P!YaNBn;--PD<&>r9MFCi z?hJGjhEI4sITI!!83!3rE#aFTOZZUbr{=@(?Lj}!ls5@pejx10tNLGcgkk-wZvEl+{e-~Q 
zS)N~>9{j(EcnT;)-m^KZU;5IQHoW)Fj-8vgY&vxK&=C|RnOQ0686GEl76(cnq#<|; zsEF4u3z`kvc`z9EdfRN!Hy+Er!Ks5a>e1g%^P>^*jm=oAXjbMT)$`AbC2Bj+OXmQ zOD~yw(UMsM2ILqlu|U*|0tbjee~qe(s3!0fGn%-KbYi{(O0e01nA{w103CUdxg{n` zs?|qh68|VaC6gYv3o}3@5TzYeMcbxN+MUzQ5vvRZ6AckCKgJc2G#K%lX?5e0x$^btl} z($<8-R22}lfx|{(NG4Ba`}}sQ8TfdsN(2j(3vO)Jk55 zkzcfov5hVC2x{;85N`5lRXDvl*?knmb`1SAthyvA!O zELDA$vhPY$FGxnuRp$kfxRn4Ly%uy(fTzX?u!0ywhbnE!>1LrTAF?0UpfWJc8v-+LQTlUq~>qrE| zFo1Y8WyQm@=gc2FXqAmhNhye45>=%gyyno-(v}*r16MCO!ZB^W}#R z{`PnGWM(@kD;)?5x2XQ{PLg}WM=PD9=Fic!gjki#l*Cr(9pC-Vo!|P#cSesH-q3n@ z-Nsc3W>+4P#gw!W9lvi|^&jqez9Ve8@4i3Yc;goU*|WUKs_1nKwK95rXSX*M9RJI| z{@W)9wtCOyKRn~m`N`LE4A>H&5wwy81%4sbAkeXLJEGyz(tXQUyuR+8wTBLs`Mj;H zVsRO#p)O<*RhGO}4F;n8@PtAUR?v@}qXUNw9y)sTn4;n#2!q@jIY~&*Vte?3`@VI@ z?PiDWr@#6_dX6y?55OSc!i|zxB}z4=xINMhwE=-j=93$u=mpx?w0rXjqlbiPw1@`= zr)i5-=uP>mlCoLAN7$CizIDh}(& z#Dg;w!Ww-;UK=vB|M+pECQKYd(J+!y*d3_EX${e@gQBhQAIyOqf<+OJFsbpyKGq2E z3zZNGZ_;D<{o&oa4}7q3+m7vf50=%mwF)#4OGLCxG^R_<&`qB?c*d-8dHvE+5`x}h z5+QRLT5v|h4a6D(*B37yRG8{^JKau7??gJn{-D3P1?5s}l&s0Tc#zqpCjvKe19_}W zuZ^rY7K5X7&!K0ZdgI9cIOb9}^)VdwfZta%H22@X_eH(A-RahoYLNsSL)&=jq=l%!pf5dFn?JRw_^w4gz+sQ4#cazR)iaR&G zv;U#ruCN$fFTMQof`toJk9x1@Grs?KV2gpA@mrq320xjw#RQ$vc?Yk=$w)&XXkWpF zP#|iKs=F9koxk?%}peki-lUN!dcd-USa6*C7 zyf8^3IsEW@|M8pO-Aiuu@BQerS^1WfG$)ZmFlD^x$?$9vqq{^VvMH*QHXuPr25JGP z@V3H7lnsd#EfR^i!X4qR+F2qz%AFL=qEI)(ov|;2;1#dhWCx~6^l_rv$7m>ip zQ&Pu3+xn!Vrqa80ro5!{4blNN+0f*Z=nv3+~LH~ru2g))%03GRwRMl1^ zpi@^>*j?rwj28IT#X9jjal|9oC(KT}r!(Ss=dA-TJhQyLEtZJs+zxZm(EPqiXC~?D`6l>T2X{1vuL8kvmsvJPz&|ux=n?J1N#qj=j3NvEbw;#A<0`3#JZrZ zR?Q0rX&aD3N0hb%Cv&q4S{LhqC|Mv~BH++=Qx?b{@Zp4Wm86lu_GdDMqCtz9yfLB7{??_}PFZ^OoSN#EeY?wd@7M=^ zwKVIZz;8@fQ`>R$XzA;(m7=rbA0aj5{IXM*LUO3eOfV(Ll712+hDZb^lL7u~*~+MO z4G3Bty8Z)nMZ*dRJQ`e7;7kFifMdVpji0c8jr-` zEk&;;Z!e~SiNVC-usEEfO0vA|-iD_7rsgIzi?Dh89;N;pCBQ;aVyeI~vZOhQ_px_e z{h9eFww`+8&Bp3j>Hezb*4Fa!nnf2*NX>MT2&l5Eyt1Oo?M~0m%1uj4m&d0wD4PS` z4v31(WDLhq?#?jDh=1Luzd)o!OIstFN{iiwX;M}N(@O&BsIL6rQJnZoh7Qlq&y$rS 
zYwcgKodw!CE2H>?z}8uwZsAYZ1Zs#$6wy%dix)n~B$&nR$rYu1J6VgE>AM4XG+qpJ zQrEKyxi>3qehtR5s)Jsl7#d&i%n9Es?>ep zWjAsOkHpoqt^%r+tio5-78Xg)>!0gFm|{bn9}ES&K5v^(e5GK2AR9d2834Luu~9M- z3Av;%;u;30#s2^rcMy}louDG@a#6sDtBBO;SLn*l8*$On>CKJ4ilf#0OAqZYJu$RUHXk)Sn0hgL-{Xx~%?zjO<8{0dx2fdJp27bvZeJ2FSQLL%{=#uzkMZ*Cs7Wp~(1$l#Kf`A8yiRqfjyJS427eFJBwjZj|MGNc4+tXsnf@fA6#70Z^hqs zu3NjcuF=1I#nwHgrI%eXf9!+-6s~2uDHG@QwqT7-PtQuD=91eZvMOMtZjP8p8KLsB zN~$f9%b!xUhmRhB=`%a6+>yx##91xgV9eJVA>wNGtQiylmPt9kedr9Fqp+pXAw5@* z|3gRcPx(<6q1sR|vCq1K$mM|Om4xLHh)^5cijX5@seSJ{s=8u^1i1O_`9s)#T3vl+LzJ~O3K8iNq)dN z0C5ins3RG3XPU<544N>#m|&N7U!cCWxwXZITHV{`_qO_AtH_##IYPYFq}CRpGsJH? zNII61>dwwd&+C_ymf^|BOeL`&WpVIYNObx-7^Z>%qn@N-F_Lb9ozhK#{>gBDND;ZC6$jgV4WzAy(c)nvf8xnP zC}P=xvOqiHu;G$Rm#BwGPy1LTGI3{Z)0T=xcRkC7yPbUGtaafdG<=T2mNtUk)Atd^ z^iMh!%3;$!yMThHtlgZW+7H@SJ{73Qx04qnXNsl;z`)!UkD(g#`>Cd-qZAnoodF+K z4H!8~7(5LTxrkE~$Z%=bZTZ78(_Z!3z2pb^A6A14IZ+rJL=)_TcB{pT)*3t`uY5~u zYg0=zOaT|;Nrf2@0E4Yl*^@=#Ek0jBrLZyD&3HI4Ww1X;lvo3UXo>Kwl(HTMlRMp< znwhIh=3C4Y(6!^CCaooid_)>nT0+T$VX)dv)VWcmWZ{J{@F51aF;+&EARY$hNNx{S zk}iY$NLg+=fs&0*;&5v-S;mYXO{pQagH(q_!qSAwV z8k;KO$q0@y*gN4>l`7wa?rit={n#!?j2bdx#4vh{u2BT_^V_Ls;2edmlL6~riu%IV zbZ-!YKt}f`hqd1{!cb=koM@Z&S3b3Cv>o-W>kRcr62FKBS^TVxEv?PsjcL#g7%%|5 zP&fo7@>e52xsl?c#5ABaC4{csU;WU#X>Fg&F`mJ53Hu5}hz?9|kqpR4VF{zn;K+9l z=syV7sY$lxc0a}I>9gEbF_MUkU;>;H-#{=Ceo=P`i^c}pN?Z}07gaPOBw1XjriFkO z4z_a%ZVA8v!XVKsh(G`b-NvFQUR|IE><61_$zru}7qP+7sFZ-_K1i>ySjw+K7oi*Y zb9p?ec{%+u(=&k|5wy_ZB_aZC!kk&eW@z4)+bb@=a!&t&X~AHlJH-UpVJi;!L!z5UTNpRK^un5GMAv`dz{=Vqv6$CnHQ+}p z15d`S1{?JoTN>LB?mGetJ$LThwCqeeDNY>cw-23xkH-W-Iv+ZUv;8BOqXCe*5g^gE z_3Rwiz3!v@?cNmv>e;k|$9__$DY60{jfSSCM!cm#hV1Mtt^+5OT_5tM1^FskEv;pt zZ8_A_^oQ>VuO^p>LkF}?RvBgdl8p7@64rrM$bt!AP(dM9xAt~lV{=2Bx0#H?uu8(A z5fsT~h-YHj7oZgOhs+Y#={m%(4e*c-&>b!SkuY~^Tmw>@@KEqV3I`=VIVQ}XFoqmg zAPTV(As{9WjjNQEsT^mCOu|nsO3iPS`c&@9&dN)1r%8!6m_gbQwhXRN`F+YIN(7T1 z0AxodsR51h0A!r4N3EYW`tllh%fb?me^t93G%`HCiN%-4+_+b&hM2|6ANSJ~V z=XV0jJZ|5kVqOR#a4f 
zuxbC&OXjDfxubCk{K)h$>%bPqSs8Yxbb!gue#dQ6o{ptF0>`DkpHqO*+j8Ho9f^~JZ@B+XV zeiAHjS-?u6BXSL-FXX3P*mb$I(A~6fE!{u&z!^U(J_`z$jssqD4$uV{A%hCzA!!ou z7yvXB79=csV^&I9W~!$%IVc)had}2@jWthzzc>5|1u;v4yqrNhnI9vl8EVDX^xE4jLeK2 zmjj125|ZKLMml^&{G#NeSIX6&i2~NSm&}OVl0Rxn=$CAuEcmR6iZHg??dS=K`7PA7 zPKf;>Co{!rarEz>-PtK>JG(8#O8~i*+K~vhuzslX8U_?u@BG;pUU^}|>#y!?YSg{_ z-1dEY>aMwNVQzm5*zWh^r)CcL{Et8J>YyQ>IrAqZFh^hsjgfW@RJJina1%>{-RRl5 z?I3=*nfW=BCQjuEW7uI5yPv1l>+_!{&cMe5wuDSr&i}Ga-CCoBu0Hg1<`{OI@Jr7b z9PipcaXoe&=8v=jjj?CYW8Pv1rM1)PVBm^B5kyclD4wf;i@HLa6QHc`jZ3v3_#-U~ zZFg~JG$INhxuXbZa+=USAS4M7sE#ALA8}rnJ2`9)ykzt83P7J$Vv@oUFIn7!?a^3B zogg9^*U~0HAT5EcLY>mAx~+T)ez?^L}R7S6vukng#ST?#KSAa zlbVr{;U+7U$1TieCwT+~aijtESPvVsVDDOpfJ5NPWb5BVaE%ctrS1C+r_ zdV4L6vh)%?I-T#db~~(K6pqhe@Oqov9$aZD*c7?) z+Bsv!jCuB{w-4^G-m|-=vg+}<3ra3nGQ(kH4en~}=y5Kx6p8dLZv{0U(&_VCpEGcd!j>#8Av9Tl+SYr&``AD2 zuTYvILv=?*1l{-M3!T*Z+4D*76}!=W(y?og4XEcg*?F;b8rP8;aP0gFM(rfDEM7dB1lZ|h{mpT!G7uip4FUo8Ku3Ve|b;)q$ zZunW@Kmd5DYm&Omv@~6sz9278d?_Mfd?}D6aik#Q3Ot(Rzx#MEI!*>YfK}j=)M$&57p<_vGf+8 zT4v6iNxevyXJk!XMNM55(MU?Er%O$BL>Vl03!|+g(O5LXbNhc>yms}L*IwT0ZPmU0 z^8N#R+P`w^=gSW5*tak8?K>{c$g)LZE#mg4PnyXCZ7>M~Em`y94wJdEswo;wxb60Z z^A`%#5^n@>Y0swTnf`z5FvdQwK67{Iv-z`r@EnCLZQQK>Q_cHXpO_E1LC;F)1s5zt zb%l^a*G`Y*_90Dm|D;$Dxs=G4rshVd(rVM=8~{;s6BrON;)(P{*e!D+!X2O8S08sL z?e?-RfgZJL<%pajwyP>R}qXO2nVD>=u7AfIePbSx=0C+hQ3zew;9k^2oGzQunhxi+_2}AJ^?1 zg{?D%R;+hRAeCWizM5U z6rUgwZu7`|&^c#n;03P)?Sld0`7bp%$V?D(+1;U_BpRk`@=vBuP)ax-4$!6MoW6)9P*RnNM+kGmNm9U3*e%iatG2X1YFB!!HhRLC zxo7tQ21IW7xRr=;ViiormX;$~Sss_u!m9%7XeyBwf`Mf6yNAGPk$LaE=z2Qdd#7i7T?f^5y-zq{ z|KtXx1t#Z&^G-f;!U_M9ujeRi{Y&QJAGm1`W+^NZGG~D)CAYl!k(9dHRPT0ZQNH$C zf*NL#gOuUt%Vo&S$nTdwI2vunk18pKe{5R(3IPL)=#?e+JYo~8N}2V9 zzo22s5{Rg!@+L>|PuCH#n?YUJJ$oxEE2=9hk5V}XlE})+DjqfzFQK%|6oOkK9X_X% zbT3JNz~^+N5_>2>)zeQQbG`}W@u%lg4UMldAYsl}^l}y8Dy$(~5`Fcgxrr#sOv~az zta8X`1Wu6t;1uO`zmWhehB(v z+H=@kg9a3)rKHu?)zvlDa1=zQKPrPpOkxd4_+!w0@s`WSj5+l11Al96CIQZl>YBqh z-F!vALAkzA6WMQ=D~5wl9Fg#TUrU-Zy{@(iu{4v?l^N-RXc{@P47%9wKD&OZgT00% 
z%j6?$=O#9N$N)a!uyv-Mhe#4VTd&=oW9+~k@CmWONT63$@C2p^L!^Wnk;t?YI1|q- zD((Ry1YN_7_}kkEyv8d4;;gD{3ix#XR>P~$uTRgkrDxi+bKRblR3`=~BXK9lVnKym zf>1!292H?sJg2xHad6xdOP0%?Qd!yj@=H&xTKR5$b&E;>1y3P0#3GS|!(sNMIA+h8 za>I>R77oqvc^irEPU`%zL@Pmm$_c)Q9}(E1=Zu5TlQ+77z^MtXsP)x}axVY3d0U(g zI5=XS3CbZP(dugrY}i26pUqxx*yjz9ogLv3hA}nOL)F_+ql?E+7}dXjR$98pZZk!q z5`M(EDA+|x33S@rT>=$Q&Kt-;9~2-H%wl5Uxs-o_czX$cp4`t5JHk8)4=KWOK|w)A zMmho0m>tOih+NA?5(|&qS?R-=`&P_l*mou;f4O+VhDwC?9>e# zmZyp^&K)~$B*P>nsnHw}cBa!HCO@e_8YG^njth|+Q-GTv>q%^*E|Neuefp9BXaR4=-8F3z^6(dIP zM_B-iF=f^Atxx}DD=z?tOLxN;FPt%VL?qTo9$C6U;zDDmqq#0|=XW30b=dFv;a#`g zb_X&&(~Wxw?F$NjG>(bqblf6U5AKvB?s;&Wai(7Fr@Yx&_~ATkoz|rGe$2fuP~w=< z@`^f#a)-jb3RNjG;HslXkFH(4X7%dTJ9qA^si{G!!H+1FS^X>*Ry;d+CqnN?C>FC7 zvcXN0w=oGCph>K3Xt`O#a1}@sD1|b!%mWAYpFeNb%vqBrOe`^5W1(;hB8$=xS&*n4 zwv1(ees0-gPy2jftJMKD0%{o<9?bTVImTd!MibF6%o(*d+8s`NRrMjt%0KYXJtIdC zApHSlZs6w7k}DOE$bf9x@cE?AY`ted?IS@Vt_snjapqG_98d+~^I7=`*I>eDmi=k1dMEn;0o^nJ}9u z-C=DgqzJyig(-4{)q>R|-2zZ0844A*b^5Tyki_>wgDo{)30I&W>@!~FRZHC=A~hB7 zED|uu2|LRVA82fy*=Wysoj>@{^(ghbT znO!g-m-WX>J`&P@|9k)a{)Qb+M~amq&G?nu%x({EM-UXnq9LqK>h_??_Bb=C^hNi8 zyf$Cs=e}^sx9<3IA{iicJLrU1tH|a=%0I;_m*6C5SpZ8yYWUX$TO8+JNpC2U@bLU^bO55hmV$#-vX}M;dFCJC=zhE#L^jPk7cA59y;Lr-(N4Quhta| z(tYPYKbx70sE<*V)bV*gz4Q0wrEQ~!kA8RE27Kd0K?w7#xF7gXF?AfjG5M!nhaZ{S zI>nr{{Vqdv1fIE-$doRtlgpqWCc1R<_mxpLXEWh>u$3y5Kf6RylB zI5lSU=pjRf3>{JgY>_e$r$bN-E(46GOdgq-#+O};c$?U1qWwY7S)6=;w z62>vg7zkpQj8bWWrjT{)Rv0=`yyyVKU`R*fa6c*)gx+Qs;ppV4J89C6f!eW?;WI>{ z6n+Q7V?vOU>g(#ZZk06fB=0NeS4a+uw)P4s+Kgj^BxL}vr6vfd2j){X$K#Pn!>1L- z1W$@*pLuQ9&b{CN;WtN&&WU%n3%7#A7>yt>GJ6VM6<`TsvVmW8;8@>Ihx8blLMQ1o z*&LaF9t!*yf22jEHZW1##H5r7m=q2YXl=uymdsUGT_qYio^e;dmFf9p zj`nnhaoXpc^e9jJ%5&qyPdK-AjGT_2?vMW1P9_6cS+tLa9K9^u(F-p;fB*gWZ{ECF ziGCCo)(;vqXwsxfqZME|?G7|q5C`zY`y4q2VnA9Hf=TMJ`T>6^WFcJ$63T(yfb3zv zPfYmUhPb7xcqLiC?+2zPFZ>x~4_U 
z0$^3QoTyY`(g1L2HaRPfHhk{-Z?-fMpKQiFN}|z3(vNIKxr$Ca)#20#Q&tIZy*E$~ZlNr2Fn)KWTqIA>39sEh!GW*E7Pc<%Xo@BQ`8T|1TcrYY5vI(znMsF++Jli-)1qXJ!-0KyM^~21iHIys#E|w~H`^+MYbuWJld)ciUGkWaANfU+)8IYToMY>>DV7KF248Y> zbHdgJwSn>AKfe*};Pu($`V8tCJhO!3V^7mZIUo~5L9hH*vpg_Mih&*gMwq!Jid zTUYh7A3x;x#c+lShMSPKaXk~(M_fil<)j30l!kakVc!t9bh^~jL#=x!e0KjTeMP+w z4uUdZmVlJ-adNpfWOf)82RJCbO-c=FF5x6rLtai^b4!cY-$L>p(X64pBl;R2vIK=S z$(pZEOG!ukYio{f`=H$0-uC7jYlD7rJ*WQS7r!9g7_&{lBPuOTHiz*Pm%5XXt7mbZ zFp56EYA1IO@WfMknmW}9Cth%legA}TTW9JqoT?q(>R=7O@nm z2k*W29$%sO!oh_XTyVj~7cI%k$oBgQd{Kr%E`z9mG)>>EO6f^!$Vlp~9*$uAlA;VM zU`4#1M7<>iT0s$9v?$akwU1fV$Pq$0sPB_j^2(Eq9d(f1ZmO)RSiO49j_td`;STr( zl5@u5!K@tTf<@D2&Ym!L-ZZcYP6}+oW`LXs4&t_nU`UNZ2pr`%qADB*vML`UuPKse zt)8%~8Hff4-JjYCg5%nz&gZNybW=(wPdF5BX>Nb+*;kGnu1-(O9x$NDTRnh76tz z`@8n-tZS^5n0KC%Ja7_rE0v$lU^nDG^}xHUmv82kla^ukQrr#+Ug$3MIMTeq-s zbSj+>H^NC6XH<67bkcD5wD#4B))I&%IL&u+MYPp@aWNVe9WMh!Y;C zc0TNhwv_qW+S2mF|N7x$k3Gh+1CIs{9C*`BH(h=8)x5F2ZEbZmbz%G|#g~#QK9cfS za}fZi;65NWzu({7+}!RX?I$_u0>MZ`bFCx|6Z{2fObFl2$;q`@9pr%^IuF}Ajte9p zhu4$zQ?g2tM_AeN$+cvHZ}<6Ey|sGFmd$VEI4?=|KVWIPue0qX&!oWTV3l|iFpd?aHdzd(FI#Vcd&}E zKOJsYYP%2T2KXokr(h%{f)L9C!FkeuVtACe%!L-SBNXU3Twe3uhV5%tzgN2N5Dsl* z4NW1CD1lEr*}{Bx{p9vV7fuR>8!=-bkLfBPc9%Vk!c6B24CbEC~-_IZ1 zT}xO1%CQbIKj`AeO zqXBRVR6+^^F-5xu>5=ONsu2Ld0!GF<92B~QzT;;{V5ko&h5;0xPa!m=Y0|Vya)9%h z1QppRqD`bX>Cy4eQTo)^rp6^k zVP#;UA>lu}phu0W_eC;QqL7o2D+OS2e74}WynZj5JTm$43yd|r;x&+n8DoCeZ+`S_ zBIIuK`kA4z<46DS&L7X6y?{1jG2~nT*G^|U;v%2QRBBH0=V7b&q#wV39=493kM3_L zEO&?s*a9tB-@pFVuYUgXpQ9&ckxiaF`Nu#0@r)TWfiu*`d-v`IxgbEGK)inRdgR1i zdv@bxT3K1?Yj5Wg*diK%wjc}MAp<;DmoI;F)mtkdFBCxT6yr~R_RVWP zeG!5crD)K<5LgF55FDc{hES;}nDEg=FcpSF9HW3OA^TpN@Er8J*O8vxQ^FQ4BOEhQ z1`gmdX}}1E1>sagGbGtjxsf(L{+dw5QTK+Lh&tkIFmwob;BC;<=zaaQ)sH;%SBmTy z_4a5iFl}b>{eQe0H`GX!1lH0kI5^PQpeIyNI8d$sCp^cV9-pwoYyemoL%@B4sQBar zP>i`yXUbIc@LWW4g0wu5L^6tSk1&tYao676!AKBp6@PlLaQKXf`x zMC9Si{`|8qe)Ai*;~RSQRi8qnC_l9S@Zqxh`X-h*|HS_>2`ZUNN=hbAoIG*jWV^%V 
z_v17~elB5=a1KzuV+L|6R8~|x`^xPpPRbQmaCoSHa=Xs?XErdqD-YwPM{_fOih=>B%=VX6?~24#0RCQq3>YxeAEGiUVc-#<4m7aW650Sn2{8;4LWxB|f? z<>n58;~=JI;RQ41&70~EG{+NRa$4=&xqIb`x3_QG+th%nKVC}dt~$M5}mjiNxC7ljHg>U%1D~w5qYv*n2J1&w)jxMV3+Q)-_hY_Qp%^fAIdo1s6`6Hl6HMgvCG!L4TO^RpM)(NM3o(H6$4L+bgf8r@9~c z<4P)w{rWd|8uf7iF(u7IeP$Ryad3yJ;C$guM8hqkp8If(~*}GJ~B2SK8S!D=K1KtRupl!dr1q#B&(~=n0(3C{=Y2L(=Z| z!jQoZTQV~a0zXVTt-)lGn@2=N;m=@lMaQq*<)eJ|4CKU%P6lZ04E6X+0&aZ`m5~wWqpVwN+J5J^j?a(o#|Yz(7C08dKca?66&P z?KPjh;remo#yMPgDv-4)LEb!}clG`zZkSaf%S|e|78jb@u z98~P8@;Cv3%G)$ zO(BVgClzxi5N%SYqTZ8%Yk4aK#Hoo*FPU@=FS~Epb>F`KXvA)pu7oD#8|d&-gM11*OZqZ%FM{ZVvU#9;K74|GYo=j z*Wxr7Aa~Nr6)S#q&pn8WxG11+zV^CLuX*R4pa1fIX3m~#aX13XrbuxMTp^Jv&~@|4 zg6;aJ)~J}*@V!KEFj2l0Vuk>)C*et?DX{4cE1F6*k$5tQi+N7I=SM&J?mO$>_~y5- zGuz3R5^Zj)edyu)9(woz<%CI&7?S^zNDtI7QU7bym@!}b`fVOhHd?dSU)y%aw}0Yo zjXCWZpqnCX-t&?YhgD8UAZd}iXjDWP{z-2X*=j(*C-|-%>^cra$Uot{WN>m3dw_;8 zSMq8U^}84+8Jq*HIaX7t)tW#f<**u4t;RI7KGm$}Gu>*;w41Z6#&o03)fqG6(}ZHn zVkI03w+2Mckj-wynIjTwy8OxodHvGx>L3*i*kNHJ#G8Xm7~JRtnpb!VMi42kxux;KMZ zdq&!|ihI6ylO~A4vSUSJzy@dPYip4WckbGaXXMwu@r_%)@)b(1vfL|<9NF^0Cb!cS z3IrZ};K9{v)`S57gKpCFsXzJo&%S>9Z8p^GRx9Z@`3G--Ws0Q-`$o&$lmE3H*;hos z4!4HHYbq;WeeMO=f_WFrDjr@a#bdxXktO9+$cgj$AgLfVSuHlh-1##+paZ8dxL z9D#xHdc7Mqtam!B1qJ;mAZmfW<9e&vPPLh=tjrN3Mr_@>g`A^h2kR;-4$huEi!5Gv zvv$NN%h(C;0AxEHZlBjrPrA7(&6|Pli!i9VEnSiRKx|#TXVQezz!s+d2{psGXr8O+ zneo-Yi~zFeI<1{Chgt6t7l))Z(P3_B3?1B8yJO4Y-8(Ax@2TCj{pj9Z6^F|kd~H!O zI#^5&tI5G9vM<*X_CsFdK!C7BgV|~zudzh)=}6T;$4D~|B$56BHV2J9U0{=5s!wf4 zrXBp`li6lRJ#IOHkb=X9j|?4J#Pg9!W+?Jc8>VWbt0}ED`ISu@Kj8+|b(I~75W*sl zO3fQlS;99V3%0k!-hA!-Xh=6}*8ERhc_mz>T-n`n?_SDrt;b!RRUOcd{=HB2Tza2G z6Wy)!KKx19|AYvMr};Qec)C1Tc{ldjy6kI`YspqyQ~miHKfinTo)nMg>$iRL*029t zT52kUjm)@t)295~yt0D_@4xRp68)KM7BY9<^0iyPariW-a z`|*F>y?0lQ%VGKJ(@)gYR)6ZMYtf|O*+|ZFY;*WwqU8G4?caIukN4F#RIFIO!{)f> z?qB{V{^zLINVh;9nOKMUMgS zPG*|m7XU|}@87>m^Vrf(39lq>5c1+ElUolTCNgYz4cbX&IF^!>-jauio4k6uM3W-t z*0an}JxY6BZf>sXOioIHY>Xv3McZ%v{$4$Ba>3UBw`*XF5a}4l45?YO?pd;Q8Cue* 
zS6uOnTW%()0t0f*%2hkJZO`l2@r%zsfBlU&aKZwxI&^IR;3E%>IeRQW>%4B%Ua~k9 z3KDGmID(3{NQ5ZKqZA__sYny)t9_vXBni#G31$JeFMO#FjcLv{(;oN+%GjN@dRILx z2AmiU`F;B4fBfm2PyF%OmuA0*#%j@`ugIKv`4v~&k;!xtsx6f<5M0vc`k&wQ^wWQG zd#gW~yDBH=ts8It8ElH)7(mh?3>&U77|>c0|(OKoK?3Arif*tY2%c71+m|} z4H`(Pd&rhY}zdbPkEJe5vM54Tv<^oQfX9&Zb5G5PW4(9@X^ZF?ceEvLoepv zx%+?Wo|Y(Zyoz%T?w<^)`|rE&%{Sg;=iKwoyKBY_I1$vz+qQ1S(Y;fC{``+Ve&dZd zFpc>`!9l}@y!qZc!$yx}xlsv+MW@>+NkrM@b_7Ed zd-mz7suB_fQi$8*gUYxiE34g=S6*W=#RGV6yf$y!mO_UuJrc4gH(0<{@V4nuP1G)2 zRdXF6JjfEZONbM^;f+5deEQMisgtKqzwVB$n+i;Z zM58|5Y>v08BY7(Q^|0Ix4#w^>R{3!i`f$vu0wISFwj^jb2-G=jD{xq0KpoohD5)z*Y( z-u3(SYxWv-$zTC#W2h>OjPO&!Qt_>@i3$NovW%;+3#vxYskVXEa#oJ>kERJ|iJ_2X zIvm!t)hl*fdhzs`znW99yMZNctR@|Pwb*CPEP^W83q>IWp0bh|EFmB905RbwVL_kI zBtRS=0(-z431Tn|o4;PN`O3?F`NX4hd~PxXCYcO)jnK7N7RcTuk(F>!h}ueY1#1Lu znl`$g3M04+Dv87jP>!Hh`(jlOohyfQYf5p-+0NgK5hLl*$oIHu7{ z6ivI;a|IUZ1O&~XKU}c)AkAyvzC&()UaSQRI%k_TKB)D2Tpd_YU&zo9HLq> z15d;oGV(GrGw;3kzFxh0W7DoEt3W)=&&~U4;lhvS&&MOd6YyVq;|-5K`9xxyw6H$f z;B}*ehv1+>N#+1usCiZ52QlY~-#J0TvIHTtqv(Jq%Ep8E7CMrRY*|+L4vq+qruZ+u z+0@yX{);j&*b}BLg++)%wjwYQkZZRly!Y;hLk5pnu;3pVZBoee0Q9h^@?T$>vwp)G zyznUp?<6fMd)Tm%y?gaD8XS9el})?y)@Pr2-KbC0N1dp2sDtu3g@fQ3V&6pSYr+?F zH{x3)@Dv=Iq$9yupC7j`7J|0&I73{pF_h z1-eFu#ppnjC}Ds&sRJL9(_MP>aLkJRqDSdidYkElCUF$$a_iP@{rdG4%#pVtx&D-d z7<)k+ZQghb+CDp+=3-!roJ7)ee#pGTs|WAckqi(af3&Ki4j-@V?6xWC>5}JPZE&Je zf_nAMg~xVcPdM^$>+hC5a1!aZ7$uAqt?`UY#x5h6F{&M8U&flz^s8Up!QxzGC7n6* z?z7Gsi=r1MWeLvl4(GZxYhQTb1yQ4#jh9|}*?kW^h-p3`gApGe8TDif)q?F1v;;!6dwh&V+>c%ItWqdtgc2p7}`A(0z-CMz-C^(4sVghx zkd&E+&>82SKdG?rKxNfIu;9;6yjN6s@R2{J@T81~a(G|rZ|dr#$Ecigpd`Mw0FgO#2U2_-qcEF!^59Qc)r zKPMiT9XjTA?b5w{`;H0mN&KWg>gsBV1K+l7b4f`N0R&hRfdr5X;T=~9U%*pe7j?uL zwro6jmttAd1%0Xp<2u*ozc(U7vJwfJ6b-n51M&Uh!F zE~F_^H%_Wo#u(HVlJGbyss!R59ClYSk2C2((g3z`V2jK z_M%*d7rlwPlad$@ylAc1mJiY%a_GT_KlJEdN8j7>nxm`T^6dZa{z(K|EngDDi=W!O z%HZT913Da@^YRPtzdx5$Rud+id*zi^;q6HpnXeYEfXIk9d|~!%<@sqCJN}$MJn>i% znF5s#q6||kw2M9#qdjBOVg_$OH*Euzcoy6iz=XRag{Jzy9?_yh0r|{oMDyuB^QMx#u2> 
zPf9>9&$@-RwRNNz24;a=yrQs0s(z#9)LAe>KGHbW`@t`tH2KN3)%EEq?UsJE;f`B= zS5g=t;h)Ei_a-T)B}19T>VS#F^p=;Gcm4$z(8;+u`SI~dEJsF%Nf@=pj_7qfdgC|i z|Ni!y^FR5hzP6qOKCrC;zlX)XNE3$6ixfT$wT-jxdc^Inzx494E>9V2TC*Y%-e;^v z(NsNEyBO+AU*eELJv}b3!(sJ#dASM8moFbYv%~ zu?b##M)OM&*$mOs9~bYyxZWWv@AJZ*P5&V+_~|Ml13ynOlE3on|2n$GVD zPn$OV%{SkM(Q6RodIfJ%%zRWGy25nWrAwC{J$gDF3CM;iiE&>q+4+mJ2x-?`PnZnTJ-hZvwlNBWomjlYhjRn*`#t2>&-B7tdJ8T-~7qf!l)Pirwe`{yB12kTb-|$5{NR0mdF$2r363m}i={o7HJD-uQ53C} z%p@9S$vt4Luw<~Y%nkIdvRdpQd(SDSbc;c-Ty~7DCC?&r4WPtT>aCHgpYg*!VE_V5 zVw@1Cf4Ub%;ojXv^jAVcB4$d(U&}kDG&X9&_c0#Udgwd!z)1>Q^bFz^q{t5Qo+3yc zDQJp|OGy86`|Y=rVu8V3R$94Z=PnXy&6zWYL|RBV`CU3c^2bLLl9N!$iy8wKijhd1 z2T3eR1CDW?#40j271bz+fh3?9?OUFt(2D0WYGdM`dQU7cA#!R|3$(GR zAV9pl{R-~nL@OoL=-p0L>6-Pd>gw41mJxLF%i9=Xq z_7^Q$)VJ?xFw{->tKmotO^ydEoaSC>yg`Q#iabIfOGpW+R`gu5@q!G61!YvBUE6kf z`T4P1n(u9~BSp1t-=_yoQrOa_MgWV!q|LyaHf{u)pduEnz%!oX*z(Ae#Zz^U=jwU5M@${2vnHgld!X*OzubJi7@l7(A_*l?o317u@DzE`r2xTy!#T2jMjXX;m zzJgi8st7dkL9$Sn2t9zOd;=>Qsvhw+HCjTcgc`oS#((GS_lLYXn>opBa(wjBM;Bf+ z(c|;SC0LDSoVnmQ6u%MG&A9vC3n!jSWxo3A%f0({N~S=gK0=Z@pFcAr`>eChMjlO0 zO0BMP#W~{t`T2$ypMS@0%|Pd6(2I!x9}Dr#lnkh`>S(P{KiD;pkTpIoWzWvi8NYg@ zx?E>5#reEsi{@Tdcr<(c-n{(I3l@An>)!j&l7UU63B+G29P)d-4Jeg(53r_~ED)lh zd@uZ-#e`BDy&}oT(6#qJz3&$bKc6u1+;AhnWW|{SAI}7*GaP92y5O$#5C8Tr8`kVj zOvnJ0$j6|#Esf#QI#{c=U>2_cJ`3z4r4OpT{sa2StB4s)n=jVhjLa5nN4x@l(q?rg zf-fmE7XT^QW(2l^3<92l2Y!7WVLe_vX}fgp#8jje0-hbQqy23C{$4$BlEM~!B)TLB zQ88T1-cLUHBnJVqTW`Hp#I(l7ef##I&tm7LmtMveh?H~FFMly;=nx~~4wU0|Gpl0p zvKmZIGY+1Td_?>l!6~JN*JgPkZ=NG@Xids4K`_}g5{Zd`O?aSjTaXrBug4@9xe<0E zsAr+WnRnh_TI@y^3;A__f9t)015X#4w2J2_RsuZf{lUP_U3*@6<<(oJ-xBn*k~?`> zO!MZwL(1k5c0P3fMhn-D7(S{^dKN&FmYOwe=xC7Z`MQA4sFd(# zsd5XzqAr42Ln4TXg@>*ErgFTL{edFM^!N()Lv zR!;K*vQDef?yid#7c@Nb&{M2nn>-z)YTRzlJEzUIQXr5V4P54P@n z_Z~QTVGEIm(W4pccJJQ(-h1!ybIh1A!-oyW^yc;YIGB=}`u_XxmseEaRyugt(Ccr! 
zK^7f^GNRFh_;_#%RSKex1qBx5EYzXKKDeY!(dvl241Nk-D8WVTbBxEa$w{Rp;XE5q zg3g$Oa9fOQ?E-$XX&11AaAdP2efYr_pUhty@3aSep%4f_Bq=I+|H^UO2HU?1Re+M~{EcOAO^czcMuyc2 z8Xp)e&?81$M6(23(Wf4JJ~`ghQ0KdA#@**%GO3}~W3}5nURm^%RdB|ibN1v*F5b0c z8x=(>j_V_dG+C>WGN|$DRbLks?x$cOH6+GJW52!w($g|{9F~Xk&b?sxkTZ)4s^5HV zu7TCREC3wN5N728%YXr8XQzXE#jz`j&wA27zgXC*b6#p%BH;`gFDy-pb+Eep&`+(UhkY}nfIFn4;BBIe zM{$Ra4Q?A{9exymKA*w@+%hbQNy$BX_vE!oU#s#pK8xG6pRtD>_J?PE`$p4__-NNR zAN=;)|L4#C#9<3$Xt}A86B{k)Q-b~giV%<6_3pdxFh6$h)w5s!flQy(71focq6$frwu@1gv4q~uv0Q_Q@DAf&OYz#3-a6NxoTYwxC^p(Q8>>eQ!usD z8LKPmNXUaVLDF)DB-u1i89H<*^{uWd|LB7c?w$33&6N1@yw3{@%1Oq~Y(jzY?pV(;OhlP-DO-A*2!3QL8M`{dM+-4r3O|HO!ZYuhf$bZc*f10o=_5Hg@cosLoH>!Jr>qB(dJw6rz!SY+qiTg6U>JU`HtvuqijV>UdufrWElR z3-V=%Voiv%8R9BS>Z&VA!WbSnWQZL&N54riaxd5Nhe0SzD@U}0?6>&HW%8I~Z>k1| z!4}tX@QCBbzPZJf$GhNv;P_7*wvIQU2}6Vl+$#Q!@4fqOb#*nI)5P;Hh)+nwMF5|8 zsFSS6EZ#+=OPlOAlYchF5g*S|JZN(n{`fawwU+?e*yxf!6m{r@Gq3XD2#Oo=MiLJ% zC^cqNY`%_5giBLnSG4)?YetgZh$kc&Fv;A&+89c8PY@!{K;L7x#Z{D6|NYJP@c@W~ zbobx?0OV$|+u+5>lLr#Aq%#f;x7~Wjvc=0!>D(2A9Zv=U(0r@VCEP}h=)1q!u;E~F zAu%?nkYLk5&OUuk!}9}+{>v}F+$pb9ztaX39;jRV)k>3*EK&G$*btnlDb0fLSbawr zwW!YNNO|kEd23f4aNsQ!HUci9@4nVx^)1vK$?rvpFCtFM{%{PWKdEa~_9SlT1T zpGt9x+63Uy#fKia|Gp<5e;k(9?;+~I0)IzhYZ?wdf>{B-aLCLTQ>ISsdP)z`hv9H% zCbq8^PoMk_iFKe=B&aoFO09X9Nm-XEh}jzcYtjdSPDWpH=s&|&!S z;m`@33(JTmr=)zfXi-IFB_V}Vr%vtBvj_9JW_pL2!VMY)Eab<;Ne?RWXeyeKBU<>F zA3RV5apNt)$VNhJy6EV?2!XMxl1~}F2#7&SqGCK(H9-*|44KikXwkB=@)}YA;z2fQ z%otofDH*m7$m7Fm7bulP=8bbc_4LyyUqBGr#Wx#GV8d8hRa?7y^=bi1g8`iv3>Suo zo;`aJ*|T@=o<)oQHTCk#Xv*iGErQ<38;U}Yr4@y3L|TutB2o+ZMH_|HBAY5XX~dbrbmyHjP zH33;NYDg9uxhBHOASLTJtUmLs5wM|fjT{xfdES@?NEF{<6K1FhRbmqiXE|cfxxH={ z59d5+$U6e4X@!#fn5WEJH}9s;6BFWw4I3`XO9jp?3U1x~K|S!(hpl6E4`?O4Ix~EE zSsBUTp`OzR4T9GIDX@z(&*NgcVZ#O#_E}k37fqTZ21h-KnIw@Oumv@+$O<$XV-{o} zANE5jq{NjJA_q8vP^2boK10bdR_=Gsf0N9fFLCJe3MMGIk>!_LFDL$C{ zG0RE7`c1#_MmRXcF|I%h1Rn($017PSfYTQAH2UnbPU(J%U;wY50Ue0_Uz{d4CtTHCUf(qomG_$a3~17LazeCz-UAen*xotC5zT< 
z+fpGJXdqxCLN388hHl-u-f+XuX$3vT%F+nU7%?H^4jntpd1;OSger_LXOZ0HkFb&DdKZn0YJc-RB7AQAi){YL+i6EP47B_){_eELOQ zjSHn4!W$S1Gz+W{-YCn~+UCvs_|!fs;jzVo^YU^#=XX)FmDW(z+Sach+yf^CY^lK~T_rj; z^ZDnW*VfdNV_?LHGvHVP{y=4Q6+?0TH|vW_4&vB;=2>I&^Ya<^aAu%YC>Ve~kq{w; z*ier_k^Es`L)OAJAx3d=V?^;;a6&L3LUh_gTYjrTgyVp&xPq!CB*arqC<=oYH)&PY z2!^!~%Sx)&uiG97hFPy{$nfEag=$Gr{7uDONH*x$Wb(kCDCwzl*Ul`5#SzwRM6Lk~ z;4e^AAs#lk8#Zm)NYmg%z-7>?UAuNT1ORFqH*VO!e_#LpgX-%7+qUk)KLG$jCyUN zFl+JwlGN#@BfK3#5nXkfS{@6kdHDdnIA%ji0#TsJi;#YeV_=a#v1kDDBXl|=`ZwN99$}~xE z(I^&B5QNNLTefV*B7t2LM1z3Q+;#8XgX>{4KmY8rkt0SD{RgQde2ME62_Iy64&{l) znp-+^>A|X{i`JQqHmA)>3DODC#+;m-^Dn#rX;wNRf?*L`pOM5lU^fe|!9x;(Ak9^q z0J_?w2jLlU$1s_3_*(GEr@MCUgdHX#ipV6N7eEBom@Gui=5o1c>pAC)cRJ&^J3l{< z_KJ2)+&&Gg?)AmzU*Rar>_O8*VZfTKURLV<=9_O0A3h{2D+6fO@R|y!Co7ee+EZUz zb)@Iyqv4m@=Uq^FkI#*eC{u-~86?(%hog~{_*A`d^Typl9}K2_)TogneQJdqD!g^? zhxWj63F(Ix{NH*%v`y8t!Ro4S*s^6Svv=ptUEnqRK3`o;EvEr5)|V!4UDvK%d-m=H zesLFM568hlD2{~4%&U@1SjMpSQNtTd6i|T@6G=UmH+u$8Nz|YFc%A%q|yiOfu-PkD;yT6#930yg8v{K_ygO>PxM=P`JH;7dMY}x z+}xbBv=k|TRa$KhEEOBS*->6r%hRZtDjV_5x#H_}ZddL2@n_Ofq)u=akcDjn!UUJ3 za$3E#Kd~R$q2_O{#CllQP$%o5foB3#h0UNp~`0mtfWtJbMqXIII6yP_g+kU?b^1jD6L_+Rei)BZ%a2t zoe|b-Zgg7oiH3-C!PV)Kn9t%maWSI{`jMq|_iH zHqNMv7cZthT1}e2IYNC@*bTiXXXkw|ueQ1xcN30NF?xpjav2gCIj^Nl_4?CJJB_7N zSwJ^2F^&V|dabQ{(a(BW064mfVi`nXD*)@Oky?5uHf5C)=L>n#TUC?zDt7^iR zY})HlyNA6YDj)|%k*Qt-La~R8rtoR9C>gK?s|C=*3#0HXEv~Mts5cp`y?ULRo{>o< zW4EhvTfa_75B&6DOCtIs$`m;VFH_EwXOloXAy9lF;YlPM2KGkrT*6dvo76<$ zEw*tl9}X&1;o8RJEQpn)O=lfn@R z1ODKKb!$)U)w{mh8wuF9Y$#f>WXGNzm35U-b2KgLPjHyq?A%IzWEK%4A&lgup&1#O znRr*i5S5me%$zv`2~==P7T$@n^elMs;Df&{Ei2bJFI63Q5#e2VP1K5JQZ;FHI8s(x zx_;d{u9T!@A|6UCHy4Ns9B%`)uvnL_ojB6qs!2;vQt4y;W{E>0Kc9}YqO^__ub?~* z_8-psm_8XfY6!0gC7p6M7A2Xy2C0I~4AeoZlJ?C{3g^6Z7YP}GDVeg+)&=St>P;4t z*dOF^@WPU{jY-8~Ojx^m8Rt9Nr}lR$tmc+Sz;Qec}Vxbfdi;!I_7lD$;*Y95JP0VDuU-* z#<;9YBkH0i#z~yB;H5~HAgX&lYWxdDupLGV+Ha0adCfg9gI*JrqK#k{8`k~0T9+av z;6*lY2ZanQ7`{<$+qR95j|)M7p&(LWaq+>|kf~mNY08u-1N!yHZ1>u0um0=HFOw4! 
zS&Bzk1dUBl&(x@es__fijkJ6f6=fYd653hO;*ORVIy!VPMZf!0= zDx)Yytbv%%FpU8pWn~p=vKKA@st{FpB#iV2X~AlNlfp9*2WK>T8XBP2kl#ON_UymG ze?@S-Bnh(}-%f79w~6wPsEU**i4u(-kGrt2utSHAuzSdrm;&q&(Q6r4GZ6WbD^Y`f%}j(V*SEW7|=K65s-*n#{wMiM=Mu+J?X*= zI^}nckB=`xi+1oJ9-+h^lPjM3NRvf|I#f03IpMj4F$5+yZ{7^6VxY(6T0w8QN#+Vw zdmYJ+vzR*oD|B1D8ciJLZnS=+OLV(Em{5t}dikY4pMHA(Q+nh_^=@94Nb~Y!VS>0z z>L3SWfF*zAX|honJY7)qQw7k6Zv?~Y+G;VgDCc`LZiubC91&Udjstru3if)4r#|z{ zGm}$Oh0j&W&_g9^<+gtRupT%`VT-|9S6_>BRyY_+PjAEGfWD9b6qJEd25l>p)1zm1 zK_5*tpqG^lfix%{SQ5u4>rC@egI==3iLzVqMT~zzt{4>@^?qU`pDcA05%~;t zk19f7tHr~#S_$t!IC4>OA+6vvh>LtnhhXw!Ne!_Kz)pmeBP*Wj26J)}Yi$r$E|KIg zmH-3U7Q{lJGvEh|d27o4DIq*wRC$40s zRjbyLpyu{FZi+PG)Q<9;n-$h5PhsivCX_m=f4EdYO}ys_Gr;-FsP_kbwRN>(z)?y! zDoJs?0&7rR#*$@UZGh6_KT3=U-_6n-$(zPqZ1HlL!u`xVlMrD-iVXVZ7G!pis$s(=~Q4L;& zF|NUtOydk!^=TTfa+~%!)HMv{*hd@IQaV1IW^;0Kie!Uv;l>0HD0$}~RRKo4PK6T@ zxDm-k)?dIc3MWk;%nt0BP&WcB;U=Ji*$mr;%uCfYJQe!qyh1Sc*=Q-Zl$0Dq5N?)b z$gQ4{l}XQtmu$2#DJ3CHMk_N6W=LlUEB|wY8?G;TpZPcE%uE$9euj195cyLrPNe8*1d*5kC( z`fv#48MA^4`Ds~p>;4bzfs+)r7^c|%F{q)2L4INEVR2{h0T8T555dGIBxGb{Fg5`n zp&9;Y8~e}&f{Uhu$3ln92w*8xi$Rl+f*SS$Ug|dW-9#g?l3M=JPBn{i>1{H|=j?KO zaoqx-s2HaNOwoUvB~quVx{9)KZO0)4F$&I0lq^cIMwI~wST?>jCr=5+R_^WcL}A?o z6k<cC;;DnUUzm(u zA0gHRbjo7dIO+qGR1dgegI^L$CtySW;Bx?Hh(RM5B*!x(|hNw2I@(e^WIzV#hwxFP- zq$I);QFzH1j?MK#SgP+uHOp`pnB#~-G#Tiz|LS1#z~`{Ksp;NhzdR8}L}Jp)*AY>~ z{M}IRuBxg^Ok>$m5)6paAK!?Sl;k3PwcF!@iXdrmBqNm{`Iq@?Vwq)pAp%elFa{5p z7@xond=RigZKaOdx1vlmHTcX6SEc+C5);Jr3673}(%ZDb$sTEtg?t=|hMfHDxCD#M zZiY}jZvUR$2jI^v4lPDvz^cY7VG}h16kpMjC~@sSqFTk}9g&aWAO?;W5R> z3JQjCV|gBvmGA&5*`)XQ0=NA7`k&u;MI`DaT`IyCSCyC6E?vCx?{9vvX+vQ`Qc``5 z2UjVW9)AeuahH-$E%DBH)J(tx(*j<)IEa>2RB%eF0UN|0>LLiC>LK5=X}!3dBKza>+PAQfR2;Nlwfh@9E&vnAlKFI1`7Z! 
z@eN1WzFm931s5%Bju0m`CqRW}Bqk)2ubGTj7~(qSrs++dsGd9%ZhxpM5Uz+z3{Sjp z_`Hu_9yh)p>6P(~Qm8IKuy^ksD4B*NIFgK{|1w*%voiDZ+PBF_!nKMs6pOWiZbM+> ziKzUce(6eGG$$vAxSw|I+9xNc#>Xep-?;3v05_n;YY;aS7Q+FPB?}*tATO)&U{M*% zkF{@?S5{ihs!1R)Izd^+(qKAp;2;^B$4@vD1pwHhWN6YQqPYh_cyq9aOwPg>XuJnx7DZ;A1_4Q{Qs- zSacTChYrM7<=oS_&P=u1!uL5F-a%sOT{M;+A0L`>+A zv&TiM!)|4`F$}p{pic*ACV?SPMNL&Lnlp_f5Ve0e-1U^sNL-lnwr}0WrI-e|B|E!q zT3Q+yOChPL0}%Z$+t>0G)p)`#Zi3#pwk(Atc=mG|0#Gf;BSFEgmMs_)&BkaaD~Tz zB0n#m&IC;HFGCcyTFrU69RVFp_aw?7UO|8>@$tfA1E$Ox=XAg%0$Y5P2bp2epg}ZB zdJzUveZ;7zd=bD|lhttGKmlx2+jboai%RH2e;7M}jtu%VytcmP;>j0;B0jwl0bC$k zl>|$R97-QktYD>jL$q36wR(BfOJ0HCmmH;diQVB33@Lg45PHQiS-ejGLn5?U8pT{+ z9o)I)06wn0d-f*LGq<6a1N4aZrW0Tt4}01F=8x|-lK#!Y|Hp1SNnwjIhtmU+8KW6e zhnO|;;O(Id;u}_rYnssHM}Mi>gZS2}!6M11b)bv#@(Q6I=s=+-Kz3T+Q*r8o2VAsh zp^b29WGEHJNJ0p%A`S^Ykt3{fN>P-TmDPsKU5cYYoi7{5*^u23!GH-K%JqnEz!v*# zoJHbe2aJHpOiD@=u+WjR3InCY@hMGN)a_~L+&Pb=s~8OEE(jeh)b?%LDk{p+bM@_a zS`eifHa<=Xg^NQ&R%RxQ5Wq;PXmC-~Lg?UvA(orQdydLzj#t2$)+iF`uq3Pqzasca z$e$>=acysGv^gxHfP3YtB}vH$pmDo)?P8WeJ_clvGRea_;*5d4`*dZAAh;?X!h$|n zGZan<9JToHC_yjjh1hY~(>Bz9A~?$eu7=9W3KiM~6GmVr%MQU7Gv9?RhQ!q?x0aT$ zZnk6mgmWEnaVXM|3WaSGmp2;D<5clY#kT(afjw}N!WN?lsG{#QYyzsFQm_H);vhS0 z8Ug>n{{4=(Qw^>JFBsiS)Je(7*f-HeS65aEA&N|dV}y};KC5kJr!M)3A&VC+WbGQ@ z3CllKJ$u|ZYL8L}XLk50DG=5z62cENH8lyKLGR9oR)bK<$;ml6IcUmwC`Y&$K%~j+ zYyALDaUtZIp03Yso5_tD?GYaX3Bq1cS(vk~-8!eYL1QgB$Vk{iuh!PoZXx!7b=wB? zO-)V_Wst{BK6uOpObzYYwM|P+Le5hXn53qHRV0WqjVdN3iS5szrRAWm%DzuiCouHEWj-9X1H-N=11k32y*iABIkSI4RjWdCIwgU_DM7 z;V{Ykl$(NDd8p})v_vi`R33B)&g3`$VLU}`uViW>!093mB6q`zB8T#mZo@jVWFbLF zy0r8*lP6E+0+nM2iCbySq}nplTDKF}11BqNDThYpQV0MthF0lQMj;zu!9E883w~@8+q1@= zO-5`dhk9?OHU;^G8_tb(R|uS^Fb~i$4%^Tgi{bE z!p)%5`?qP63dk#*=XwDwN`_z&{nFEud!O2syI`Ntk*NZ7^XJc}o|)Mhz5Dcn_UrKA zm+UdZk->I!Xx}!sW4ky9JX8cFQfhKSPR9<2s~AaP9(g)W8z41&-VHZQH(0PUNIEe3 zhZ%)5{&WKHN8S9s`Yl`6m6jHqJ$B5BuUCM#tk4E~iO7q(vFE9svfCvg%r-{xu?pa; z0EWOSs?O(~0!8Y>4pjsc!OPe_XSI)XA%I0p6<#115J^`pSIJT413_w-KB67BZ!Rh? 
zB@LNr)Tq%Na&owx`V(wGv0Tn5Qqre-a_iR#>VcCLw$zGYj6vuY%?>2N2D$Q6laJ72 zXlzIf(Y|MQbJzFX6iP~Z1JQ073~e$pK$XhMDikV2{AvV%+!hS`$DU25E$m`}x88c4 zyz#g;L3eH2wZGuPN#zw)*b`iCH-tzzkQa0g*|%@s?v(D`x^(T_?Ub(FyPpELMeArX zo@>A~k4AXpa`qACXe$f?+9>FE$k2gGJ`?5(FqG>>+7rfyfaEX>uMX&ql29NI@>Cql z7him_fB!ym*ZgeiW!?bmM7Znf2~Zc49MT_oircl#I<@xcYqThtUHrBLFO(Yr z%plpg{DlV!R<2xe$t9DQE?rhuRw-*N8s!n#2GY_jrw{JyaH8oaQJUxh07N>Nnk&o# zshTm1Qt5I|$}t+wZ?ORLQ)UnOpim=>90fnH#lS!e=K`rR>yR50_UYjP_ zX{=_b!+@DCUAx_T@BJ*%T2otJUS5f(C!SBB37izfMl*pDuuK>h@~+!#F1M$us+ymm z9_wXD1(bph-UNP)?47J&o0*knMqw_7NTe!}MwML2qDMIEO9q&}{d)8{t&_l#YSDnVlj$rc$3d!n#`DCX!}sz;e*kuEJQBgBf} z;x{HMrS-y;9_Do9}U^Rd9=7E+Ysx%tei1r*x^ZI}f9j>j|=>#(A zGUP24Q3&ZOrbIY6jCWpt!`0VLjbNv0lQ8Y{sbJmPp4B^Dh1r=;_FnqcfM6csGA@0F7Dq9^J=| zJzI+A87f8!`=go#h{06-cf@0l|MNHlO$JBv*7BO>gD2I_$pu@7Uai4^wT`ed;*fzY z#sR0G5NHzEVx$pLcd)pav5eJ=??X5KID6nGkB?OXJkT4EVG-P*y6UQ0BqU@Osz*Ix ztfG1}T)sPJ+yUZ{vf&SpKD>9|F5rmt@otxw#UrNQcoQ0>ipuJJ`wwj1vJKl}4M{(} zWDoRIR8A0LSoUIG~YHo|5_Z z_8YHv@7AMp{wZ(1{tjqG)@%!T4Oyktq|eFAIAios&{$|&NtWWMCB6uUzd4Z;wT%;U zDGVLI*+ZdDpTOE7*An4WSye$AMmc6Os-eOnMdnDb(PE5SwQMsvsd1|qKmKf@#F`3J ze|eb#h+<38ejj$W^~VqEfs+xov~fhFG|_X&Jm_~Arcf->3NvbIYHFNQ@PT&lbd_DC6%lXRE|2R)-+aESJ6vZ*==AqK`1C%#5A4WY9_rh zYmF&PF`;M+`vb0oWa}?)zn(SZS%nSJ5{}s6APO5E2j^Ad z1cDVM#ryZ}x&GQ4?zr_%cfH^1@!Ks{qwulni8Qn5EUB#hKRKAKU{cBW%%WjOo<0w5;rGIEV7e3XNPj&JI|{#12=pWBYc} z=81#|EA-=U(3DV`rj;i(GwSO21lR;%OVXkUiz9?5c^ga_M*j0UVG!sGe(U`O>bki+Tdjpl9Tf~cRBN{vtNDnm47Y# zYT%$jlA0>2%gvRPq);p{fgu%Na6~o))vW9cN#}Pcd=JhGml4!b!n^?VI8^`;{$Rry zXAYb+Wu!kOjuNm&Kpa?Kc;EnOr|Rlz@P`;RX4IxFn}7S8-=?Ogli8xYvKlcKyQecg zF*QBo`svgExp3ix^UmW10M>y}&?hkrcryrC5Wo!Yf=Wy9B@$e}Zq1|%FS>Wuz1z0z zu$ZjCC~onhRx)jWHA7P0SZzR#wFj+!f5f%p0lYqe%4tVeV&0W*4zqR;4#V7MWAl~Kq zXJ7PrB&Q0xcg%=qjT?9F`R9}P5bmkEstTtB7T>0Ua4tBXe!XHPTnIEzBp?qLPD24Y z3m2(07$l--LxOgm$fmI&0G6os$s2)soQUH;ti^g9j4g<9_|?Uw^%1H9?wQzlVqFf?11M1I3Ubi!mNq2@DUr2z?8v^^Piacj(GM@_~%K=ZE0_m1$!V? 
zyo4HD4UBL`E9?d`4iU(tfnl|-y}S3Y>I@|NqmBR>+DM2Zo`^1(!4HNot7m7m#pI7q zKtqEY)rv3#Tp?HE*cJANT%<82MEUvIPi1AN!C(CA%g?4=eR)M?DLjG4<3dG6#zDx1 zh3VUO?AWJYziy}Wu%NV#kpC}YbH>4r!BLP|r9;P#nzsii1S*Mk-0QK~bRBcrV=fdP zM>2WIhU%Zrpn8-|eYjY;1dv$Wo0Mw(;}Z`B!#GJ;ff6*?@P%+ND^{$y=9;UPE?tbq zobI(HCiWjZ_>w6qlFq@ifry`I zps;YyukW~h<<~3W=7=iASV!&b4zt%Ex$^1}7hif7!QMIpIy=0~XgVM1RqeyR!m{SK zNmt58`z@?4%&mIU)Ss@#n#^KOz!nV`6q8L{Kv<-)SU$0nl#g#vM_uaq4t)L3SZ0`%ev)FKWfTaLq00lm%wD#}YziHEE zih$oyi4w&Ngl>Uc_wL;bZ;_nrz?!M?7}{|zR)##>AY>^ukc&#T#OH7)RF{ZgceDf=~;K*dFkYfwr|@@a1kkMaGodLFg`)lc>@OJ&${o{sL>VC zH|UHcUncqo)e?v!-`b7_ObWc{Y$c5grpO0AS1b-{>Z0=00@jEm?6mmB{f}LO&tUPvcc$RHI$hhuhCE-4x)|tlGEeg`|z!a7o8IbyLax| za>uW4y?WXe3l@9=aNraG1#%NnEuPlqDHtbKT;~) z?Dq#zFd>#c^6*1vju|;`?tA$6BkAJDfT9W!oGKAejngSAMTB6w0AEf(3ZDd4`9gO^ z?(5h8_R_Gyr~mm+FUT?%s$bE0`6FRxoP`ATow{ZI@lU_CI-|h|Jf>#ClTKEBCEv|E zu{Rcbf5HuFZ;uOTsemI0M~iT{4f`vqN~OcZ!Ygbz@PyMtW5`%kP;p>iStw{Y_0+y& z&l%5K5o}aiz*uqeaNd^o82O>L*i&P5kY|7QPg5DKe^n2hq`EBz3`Coqo!z~A_qD6n zZrZfzjA0|7Ly>^+&oo(1?S1N^C0}jZvZegs!R(xlfEr6bGpi$;kiE}lwTULA$rDxS zMq<(cB+kZo{O%jKh+89KNKkw=@ic;jaAVKz&;bt67`33@!t5XLyU~BcH&G3fi3*4} z6Fdrp9@3GoJ#zoUGtQoH*253}ap#VMU;J|cK{olhod@(E)S0bIJ~$ZkhhZOTYOD6` z+xx-`vq`y(yEl?DS|q&1P`Bd=QCPIEtg<+)3-uk?%VI~?!$OFnhj>H$n0O*VJ&-nq z3B-eC5_kt2BCEr4iOUlV-Ffddp+?v2=RS!~AonRO4(Dl3(CM&l-?H(B>!&^QbkB&7t0nrWbm06iKak}^uhEM3s&flZq??}Lv&`f%6wJy<2;Y*tS_9vH$D zlaeql!RC+nhmPs|`^WCd>gWtaYK;~HT$r%TQb|D^P2EHVwb#L$I{fAEQ{Emq!o=A{ zZ^nizY@jYwR#nU#Ag-=PQ;4-QutJ2%eq)P-EGrf-q+K{BPn&jwh=E|O3KHNNVU^_( z8e+*Ph{=fZctL)x8gg8&Jmx0XG+%lA%Z_>P$#U={g)QcK-Y{cg=+L3S7AtNM4d<{q z7^Ea8Nl1)m8T6&gmsC|&{bRv`8*jNKAa*3?Va7J@QTBjOLiwQA7DMmQi2VLzM^O`l zl`$@IR74iDH<~cI+3fcEn%d%`62I4vJxkohfe@T02|0pRcs_(eQebkmqea$5`VZ>; z?%dZuoVVa_ue?!IP_}LR`b`_x+iW5*0v|vS#gd;r5FpChNIt~={rlq`Mn5P43b(8X zh=$}vUQ)cVFyG-_Pb+CSk5$qZEf&~1tc|#27>r(zn<^#5S^VCrhyHk%J9i^`~h1k*~ua&|-{e6>9zd!98%2!WP62DY6ka_tcY5u{QdS z9XkdN9E2i;fr~M#UHkUgZQB(U75@F5cdwg19XEBa&rd)MH=+xR!#PvLH6SDQ2Aaoa 
z(}C~(W+)8e*N6wwqWHA*bTZnz>nqU8V`~dX{Swu3C_)ALQ^+M!3cMGt4A{;ZZE?1r zU4FqulO`-*x?;hn|Jc2AcX3gfuoOD7Z^ADaQ`6dD!rQg006FXQK|}E@2dP0NyeKK* zn+@MUhY5+gp+g5E89|1?BXxrT5cdFiB7_NYv9ZU8)l#{D+!9Xu6MuNK$yQdmd)ECo z_Uzet#+?tlaL6?35UJEEL|E!Shi!BzwF0T2LsqCOKr z*cWsGxMJD*x@s`ge)ZK?i%nKV zp;KpeTHs^L(p&dWd=H$Yuocs4==B2!4$R5PDJm>lv2rCEdS+34O+hh)g8>5u6c!b2 z*|L@JnQ`Y&gc-nfo=FvT7=esp+Jg^~+4Tf$8UQlxDH3u8vm&7&e2~)_hwMhM9^ZIr zE6bR&YC)7D1A!U>S^_4bk%4x|JBl-RgWc*FKXKHUvxX4mS5a13TkWc?aoKI;wn@p! z$(?-3&+z=u&FR>+Yd0|l#uD1;c~DW|0m|#%J+EE+OmCn*GqCvwq1t?*89^5|gzA#xrUSFh7zYY&PIHT`?u5N!R z{;+ljERRoOGpT_lwF;ifrgf3Fe-x0#4y#MW6AkR94*rl=`90wJVus-Y(n$AUG2!W| ztE#A5zj_;#7?9zhqtfJ-;EZa;nx9M0|k_|N;VdIDn(JqW4Ypqt&h zcOjk-P2vyusU$pC-@bi8D2C(g*|XuT2v$ZuLA8tn5hGQQg+VG*9s6iaQ*zt)ZlyN# zp?`KX4h{;018LW;9kf(dR?aFuPz1aR6_dxtsFGblEqN#s8mbk2V<6;3Cl%58tLsXA z;fA!+ZDX+jZf>g_}1Ou-0ndzI|{qAlEY~nsJ*Xu6*sfugUI! z8v4w$&X8>DA|Zl>5GUW%3!IWb86_tQ!_`Ej)B!(La(j{X6VPeIe?m{b;&63#N5@<5 zKJ)4u_jK)^9tiodXJX^?_*@8&Mg!a>H%g{cm`IdbSO#%|Cz+Spr+7Lzh~#UrqI~kB z9U~1r%QfD3%Xx3U^Hh)1+V~@7HV3JP#Mr20JT&{^cxMVNppw*_?Yq4Rs*PexoImKm z%IdOucO7mS*f#-mT>5Y%A)S24uiyO5-rDLg>cq>ZUa7^dGK2&x6=r?6XSH5+0(#&i zg)MYj3^@7^+u4;@UWtr?ufVcp%a|}Zg3cQ_V2#z0BS&KLU$yMmKv{ zq8JTnOZOuEe-{0X6G{)!$x%dD=JObx{jvaHL85i+;9+Bf>!kM6@ zVuR%I0$Y43RTr|31n~(N5Zlb+ffj?u=f+9G8>q*ZT{$p#AAjtL_;@qvn@5Zo4$mV@ z8-5W+Obwn5>sPT*5guEk#|#UGU^(S6REX#BjUGqi%V>>M^3W!=Wff?Pnf1Y}bS<7F zxNY*dMxDE&X5ZKer_cZF%|Aaov&X3&%=Rdk1j6;fuqP05@li34F%YDUN>IZ+g$y@- zl3EFF=zL~-kcZkMpHhF&!)A4O++GAb>5ap- zrZ;rh!R8;rE+R7XI1DuvRplxi2*W6i2Klfz;Pi?)#I|zT7N1+!HoGGUa~MCWW3&lF z9>=h0F0pmzN9}R+E82Pb-9W4AqzXF7_zW88fIzWr8Q3h%J*(5m>{SG0*NsSe{r$ZQ4Qkw zNVQ&DQ;&KJprE!CjmAgubb=_zlw6LyhJ&hN4Fp_R8kU9SWqDJ4D$reH^}}aNA10b` z&YYJ@N@_js(6C{{5qo(SIDy6r$#lz>%~jRq(P&`!h`t?jGD#Q8Es|+VTn?y<(rN(` z(m1V^(r)EZ37%r;V=)W6&r8q1?2ro?w=1d}am1O!(S}At{dto{eEiv~^A^mWcEdS+ z24tpXsWs+JIdTN-ZeM{|+-h@%b8lu-#R#mz| zCRR%0rvMX(EJh4?t}PpjiwnYZ?$paJ%gW3GskPU^NT3cddYV@L=)zl%obVnvd39Sb zTz~~;L5OSGw5t~^SWr`2y9)k^j?dmnJ%z5cocizcb&2T6- 
zt2Kx`g9%A!KBh;3R=$lgzV+2H?h*eeL3lvnmvB#FNzukmojT+3;PKSrC=ag5M}^l2 zk_eXYbZAYW1pE@Uj!{yT!Y%?=T#O5;03j$sX3lW*U~%P(FTBRB+1XiVjv0%^4O|nC zaFwtacXp8kO}a^!oP(FSB&?43QEaI+45G-x+NK)I<9IIns>NW9Xzh(4xZCXm^f?aZ z(}l`OLYT);$OG*YgW&Slb?%<_o8R3W_RCsfJGSrLy{n+8u%focjoKTxH3CAhduC>) zKaZ9xTetsI5Bw)!OBj>ys%7|3wVR>Jk8z2Mz?wB{4jwFi>d8N!d+xb8`FT_kRS4)n24h$XVo^k;gd*ezY}(YV zDN2A5k;o*$l`b_kO@x$2-GPF_(?^{a4T)PBfJEixX`*HlDTXi5fqMkPSP0NWF7vRB zoW!W)WWiUn-`9|mn0fPk54!51dEK~i=fD|(+uY6_fYeshHhcDgaNT>Rj~v~f^vFnO zqDG@+wHhlAQy)zP)sv@`R1K;SG9nOBU<%E5=L)3FIJiGD9iC{*Q92NF%p?n97Tf$6YwE*);i~icR~*{*`Uyr3Q0a9B_3Mf4TBb& zsqkP?ovX&;F#ALJYDg#&?-oUX#T2(<*^bg8cO+;buNG^*aU-vc!uh5sHf`sK#~tyL z%Z@m5yr19w%Hx&r(>|`vNGG_Bj6ZGD%4PoqKi~%!2Qh19fId_Y%iv>=KgKXUP*k{P z?HcS>q;TRCV-Z{4dFP(T#9LEUJ@c*^@Foy0A^}P+hCK2bOK4~^BMKGDq%`4}zV`;) zX!oc;5S{v!-X=3CDTS%Os;Y(yD479G#nee2$48JWb~R;)oq%@0j1U|XM4OJs1HY4# z(%yb+?w4Pz(Kp&r&73}HNGK@QI_eKVkhNj)l7Au!@ra8io#TuL2V+aFsyC+|0gol| zz!KAghTPTQ!HWW~PExq+-dQyLhFk8r=Qo_Cf~=^B5*^nn=?9Ubm90%~C*TId9G_q$ z>Wk>TVVye^tq(TVgml$mJ)bIfpg3Zz^@YoEYp8dZ8O%Y8UAJ__S5-A-5D7nW~%VZOxja+_Y=25&kvS<42a?`ujiA143ZD%M^+W)u-7NQ+GJBuk(nXthvsb?MS&(13vi$9%PL;S-NP4gp#S>+=WL zfnp$Wk;gP20X3ZA#EyRAw;rJ+p#vee=1s6jPbMtftTJ^R8*<9+@H94qAkRDps87Bxau%qemjR zQ2}&57#8s?6>X7X1jUI~ctR?VAriMt6T9$m5u$pKMfnJO30p&?4<^Gf`Z$Y?lv|dn znragDz5l^`S6n>>HcB#2Nc!v`TrzB(1WOAu6#FocVGg5>BprhLJNC@M>jtXnPmqf$ z@e!QEnbv5kWGUT->Zn1mij;uZLq$^n6gC=)_cv_aSZcL6hL0FFXz*b5u43;=L1b+I zTmA@D9BccQTUt+jOAn}-@LSIRuv5ILA6CGRdpE=a@4(1o4reUhcmMs2e2jJMAKbeOujx1L{pME(JIg0Jlpp(MSmOB5ZNVF&6%#d_a3wgSf~UxA0_B)2@w9+r zQ+5OCEDd%wI95G?oM>gX0CY*Y#ma;4y+61Afc~Ak2Ri65^zcu2|c( z-oq>aU=d=@+dM98Nj1PwDkp{)m{XZ7$P-&qR!Yes+$@;T$Zx^eFqo_+yIJpCx^RQv zP5ML2v}>-Ep4T{B_FA|9qaHZ++x9f^o32nHIVq{dy-#Hz>Ksl2GQNInIBMBxNV(y~Ul z(SYBFgB3=JfH$&o`T8|0ccv!mF1u`!!3fmAoWjTvxF=dM>WN?&CG{2JAr+>tDXkG? 
zA#;`{6vw7Dv55-PBbVX&fYUt9#6@H8-u(v;mR)qw#UzPF@KnpS29?7E;wKSK6oP&Z zw*w02i8elH(!s>zb-sX<+B9*Sr_c{3YNF|Z248X}Ie!iKdgwv&RH z;LTBEM$*@`N<-V@J*D;dkKF@5E^IMEe(VPPd*#O@1%)}xx@dnW2am1a|NarOo-%e} zxQIFy_f7`5!N5MApO;U)9O5e2xBvR(0ie3NhV(T63giS)2~G+5z~Au^%+j979$Gt#$!@d6>muglgp7xO z`^4LC{L5;xBqyd_d(AXdLJ0{@4`BcXW1KU7$Byl5)~vz>)DfqdEQ;>x8sPol} z0|sF_fB=nGl1Bh4B*2n2I~O-pz=##(VaJH<>D{~c`0?Y(PPKpEzMH1sxM%n7fZq?Yi0^`8-zZHe_1^(& zP0WdPst|>cpeTm(^74_JSmc%LIsz7$0@wtz#HCxis_8b#)-0iodaJu3m>8Gw@(XXj z`1}WU(8SYt^;OqpWo8G+z3!*ZWM?UH^2dWRwbl+zU7q&tAs z0=B3KPUNDR0ykMT+GZ00t+v*+Ztdn^KsRChM6xrNm6t-#HXGTmfHoUggG?>0qA3|HU~Gvk($-I#tuA2n&D@3wBy8)22zYu;hv$C*eB5&2zG+?;{6g4ZDI^WD(z_ZBBCIcy}%zjTKEjjze=U@HZ15Y`u zPB81*Yp-K5YOs&&ra%u60sMXV;k?q)gDCg<_RG8J=4;TzV|G-|?heIgN%Ai-00=ZF zolzrcwsdw|oL(pSTM7%RTn%Kr(sk;bLmXjzykwHVsA{v>2`!*}dR2HSjl`+0fCaE6 zdwf)r;dq4c(KZ@D#n3GxlB=Bcp+2YeZfK}STw_^T?0SB0z-VAywGc0YiG*8~QcM#G zrEqvj;qRIrp*=`C=FrAvf|8;Tq*vmRptn4XFJp54WuOFuezAJ3;N?N{^BYu8+-CExG?N|QC z&H^n^5noD9fi#s#PzX#P2n3(`%U`;5>jK{~ci#JZ_U%GbD|wg9Hm2fWAacP4lSYp` zlaal8#i~m#n6%}ajTX`xfLsjuU;yDlc%(>#G&rCRoD8JM>#x6FzL@L=He2+;-`>Si zs?41W-meuwDpLdV!gd{-Nd&(5tBBCOqR>fU+t+hkj zs{amWm$vq&w%S^4(YkHjilVhDATnjIun7r-gzWL|_5YlE-}mJu2@nvYZS(N*$&IqoOABE=VYX1WTqFUJG-Sh3NzE>qABJK`P_C>Vey*s=0=>98o6B@*ua7p$X04S zvq;=Xfy~FL7Ya+rVr+gK(Cfky0`DM>w1t0kyWqUQ3BW_pM&*CTJZ9`@lJK!m%0nc% z7>)#GUAJ(wDwM3!h;gh+(Jzy1v2ePkLab%! 
zKfL3^P@**p7!GqrQ+@c8IZLB4XU{$ZFTD6tiAs$+%?P{f_$(5TQg2fgu6DfQeJ7nq zUuF*+xw+|o*Gpnl>rD=b{5o^yOyog=Wj+4H<3IY*Z5gaIF(g(qzDk&2I`^FEtOfq) zlaID++;rK+m;CyH-&}Rg)uCvZu&Jy}VKE^j+a&rUGw}y8u)t>%_@d_5gnU$C?1YRJ z5y?e$sLnK%$=)DOhufK(*QH`_S!u~`RNmkxN_i|u@wUv|7Di)8xUhN=L^G?41U6gR z9Y4P7tvBaTPqX2;;Npu;KKW!MN|KNWaKw0h>((vrzW27%jhDgTukQcJz`@-Kd*ig* z-g*1owQDx#WEa5Kz~Imh>dbOIm@k;1`i9zOPi@7Xy$!VimQL4Jd$DNu`9myzg!f@Q zfHJ7n!6cf4ZXTeK2Fj)nAaf``(E-VuI!VjDfJ@>*4&@|%8b3_AB6m2|SJa_S&0_*3 z0*kVNRv5M8RS~Dpn3&?)*<_Sg><-27!%C1x@i`%e#29v%T{Q6X#cOM-vCp?%d)*D` z8JRG=yil1v0K7zf881#{-MQ<~9*DzMi|V* zjyvwS2OoU!)?067mEv>HJ$uuQH#_W3D4Ag^?hQ<)GtQWjnU((D?AfTCZvDY6t5>hS z?f>0Da6{}=r4tZrtQbuZA;%9##}aHte9-433Mo{TpfLGnlUN8#SO}yW=^-INAOKnu zEt$!k=0;9jw_zp0P+8~)OMq{9BpN35;Il~I*wB#f&cqFu+3IR)Xt?smtBco{;9Lcl z%a>j@gSgNLH8@7i&hF~<;CjJQy?5`cuf0Tk!e`?Wku~6OCR_=Vfa|XsE2`sDHE{a}(%2jedl1=kZ(rN7(Z@LO1II4c;Dc#S!=q69o-o1pI!%$4-l=-~y)k#Ch z_2DtcH20ImEROEexA%;TFCq#x4d8>pixoamXK^Hz?|kMc^uUpGTP+Hg(eR(YK?W#g zXa~&2HP>8&*V6|ccwpPMZBIY_)b-chfX<%i_L2atZzy=$sgsBu{pxG4R@GEL@t41> zUAOM8d+t8vV+4ct*$AzIRdAjIq|kTzHPSQ>lr&KnS;Fj_S?=s|Keqz zufbP(#h$vV%0?{4G4cW*(=uGSc^S+H^zPjwJw2^^_a24$-G6w?ZMcgl*-^pNp4e{^ zqf1=e0RAitg1+e|8bR9>oyQrXFE~;JvksmGuvcQv3wQFYYin+99yVm?cW(O5 z>u$44uK^$fheIb zITV$RCUQ-2a+&|4u{N3dNd%qfE+Wo1`w9xPa6}_^-}y@4fr3+3zl4 znXxJ6=-s35HP>91-=(0r$p>aBJoK|~sPnz}{EJn4tHM$5gk$^t;y1V8c>&2u^!%bL z!*jfM;E--3M)gs~Xe_jJpasJU0NW@Q0I>uR(*(K&RUK`zTeAla>9Jt$=2a^;;d>MV zI98Wq5F%5Yh@w^mOtfeWFq*8{6NMZ!S^g%Q$?>5#x=1BrhEM(;g2|$UL{Z7Cb$N8S zsNTie9-kB5y;W7Ph{CjrWj~CzjY&hlH{`Us=6<}4O0!7e^2;ydt?`w>(uHghvGuD- z=G=ME@jYO<@4ovES*p-jV$+HBS@J7S-!iR>8Sai&Yv}Fmd$DzG;m`p2%_C82OV zVKM&r=erBLxg^k+--D(Mg&ROi)#^igsE* zy5cI;3gIFZ@2g)0YP3jpH0t&hybkN|utn4VFB~Gt!f*>*@)CbcbQlR`8|1xE$Odb?v$pv^HxGc4b1T6A-n%;T~ z*s`8}`e_9P`Jq5KJ2TJQ9QyMg|9bCT_y245XWk|rHy7l0yY`yvPdVjOvH`OYc@Xh1 z3I%3&m6rYEA203NRbsMuPds(t?;g6nXYZ_LZxtFaW<6P8h{*y52H+buT)-@`$FS!j z6Pf5UU{GXLHuYj6mD)26huY5VCO&lj8?XP%=L=-z+DDBUj*FX67{4*FEvyry0cy3N 
zuv);DqI1D41wZZi&Z#F7gMc2|?>uom^)CsMKYd+4!d_#g6fMrmOfg@;U%G3j1ZY*p z5HwbOPrx*%!<_q%=ilG8qbWbH2l`DGSD|8*8Bg9CZ%`gMY`d-0T6P|P@g6t?Y_$nS zZC<1*2fF^nYuUj$ASZ%ZG=~k?0?W>wKE1KAe({pUexHB!>NQ;p3yX@1@b|<56avzS zS-Ciw$-^FXV84N-B|Cl1&9zlkb3Xd`y;-w>415iCD=d^iSmJm9nF~T-FH9EdUFZR} zP2rjB73C?+ikX}^Q*CM@D*ZcezgbgLJ@u@!2KFDYa{1~%KlCb+TE*Gu6*%@zc)5kMPh+z=T3O=p`R9Zcd<~$ zW(SZ6OM?X|@la{0U=8IfW<(YVi3NjP5C9@83>WM+x0oqJWLkj5MqRu1@OdM1=C0md zzW$tZre(l^FD~-lL`==IC#K z``i2Oxesp4?Q{_!;SZ*nM0hv1#MHmxTtQiVo$cj+`4A%u0oV$4O zXQM_BudT1%wygv%0b5So=>S+Kop{>u$DfFiC$j`lmsHhGRC+vKN!;ppXU$%;=u^Y+ z1|!WkeEaNMe|()Qjfnw|#TNAkh^|N^93`R(O^en>jL5_?gFQ5@t>mH6xPA(`&+cF~ zj2Bx9@(1)n0oT>vym9H$hH2;Y`{Sbzs#yn*EJYL-I%J`BTi};4kjSbCo*dIkrtc8O z!3%nry%x-MZ>vEg4=b>cM+G$0xw_SQm&C02Exrl~WK3WdP2!*UGJ_E{iZ^c9v$w*X z?#7lF6+0SvvcL?&Z7qEEFLOTrU|nWb{%jU8l-g4)KSUKYy%?KTJPL?ovgu(u=ztT~W6$R>@Hy=(`EElHQCtvcLqCy2uZphwV$idanb zjdkma*AqQJUlD!^B#Rm@yQ8wq|C_sBuHF+pZ~8ZgHOD*A&ng_J$YEE zohKdB1D&vSuzpT{{Glx6GBgB(Q@mm0)mL4GstZ99#KGO=gkz6|IU+IBg_!n;PZbr= z#bL)Afl;!vWZAOMiC0=%TZ<?!FA4I^m?zO`bYz9Z)}mxk}%Jq#6(|M5q}e6nlL|l5lu%YS5@G?o(`+ zkh?$wzMaKA85%$))-cd#&LHY`=G0Y({_x0SWxKZh{LVY@rwsOCF;D*h6)AuMi^aJB z6N`)dJ}g>Uz$ng?!7rw6sXWJ0u59gut%GBh%133TW=jROm@VA0ZA(enPWl^xkt?u% zg#N}MAQOmhzA|^#E9+g>f@hw2dg`=kQhC8l=^S})lArLTQbD=cxqUS~&D6{ll=RqGqCa8=_Q@xo1Y9!HGJ5vxiPmYvh!H>yyaP_MgsBkk$-2;-u`Dz-EQ1oAtqjM;fZ(N{lhL@FlVpF z#vu|7VIU4b06zGh2E`O?L5ECb3ZJFcPNUL-IB5h@C*;Zo?SfoHsm8>C5Ysa6$VzP5 z21+8E+RV9DGe)rJ$PjH|?yw_~sm26DC;6&u6#WBENQgmkmBA5dw5Z6zVZzn{F^hbX zFOH(c)I?e+$|--)zjpmvU(nB~)xlx|pUe!$Sit3KtPB47?!Q*;GA53n{I3t*v&vV8 z6;xE6$ED_~5%hyDR~pp0eI-5630v*zcE&7}AbKd99VHac09Y$mtoZ3qe!6+T1YC+lSRuS;&z_p9YCK8d6_1c& z@+rqe6FvmjzZt|~&2ZRIU3JOAai84*V*+o+>LL-;G`S07!ZU^zwUD;T%Xhsq>#fqV zZ85`#ll#K%rk~w;%em*B;SV%Kqdwe)0+6`G!tXG10?B`D+j0YSaM}xriNz-pAY_yg@JSuU@Ts%63_1U z7fT5(5>k1Z29)RL=W`{2H6-4T+mZ>4mSAx}%;I6CL@JP*cU3-H1=ZD-lelClyO zonvTfciKIEFYC`OF^9v*p7-xn&;0EpbJ+gaAD(2jmMTyt4e1SjGT*CmQ!U|GS75(( zb)Ixg4|Kv-yZV_`Y!DSmAv5S!+Dj9f 
zVif5a?=skhZ_>-2K`rrS1@e#{L<6W0lvGOiEmkDM(MWiD7%mtF;z5W5XmnkKv~Z0TPSAq>?oqPZQHV7!KW)$evazJf)Bo!arL#6e|-CQ@Ct7sBm^GZjVKmm zI3Tz-rX$1#l=n2dgd?h!76PP8zgPx7>Qm zj2SbCT>b1be@A;&T)bh;nl%IZ4H!CfI8lGF`Vo{-zGASd$GrjGWguJF81(44P8DFo zmpP3G8qJ%+F5~We(O{)cCZifvQ3s`-@EZDq0yv2io*e*noSeGwfwq)tT-Q}eO zoL9g=-2#(nQS4*WVD7TAaw$S4^h6u&6dMH) z3ONy%RU#NdmP{iGajGr=vj9^Kz`O%Yr2GEb7x_tR^re*h+5cMNRZJ&6Lv4+M5uivY&m^xtpA?wWr zlT5|yH;bhJcIMbRq;YX>qj0iouKyT=V9JLyDW^`CR(8+DJ(Onv| zxRDs{vi=1kR8R)ohTcN8JWhU$;aGy(>G5zajZYs5RQ1!?_(wk!BKN;9S~!&h?ho*UR+R5 zOHu+@_+N+567mfhCOqu{s><{MM2o`~k5DV&SxB$6c3_jrK$P@;yPb98c&%|#ckmr9 zk_Q#{B;CtXWX)gs2|mU*k}%MVHh^m_AY~R8KyqA8&D*$W;C& zZ}BWYCFqfr-{gLo002M$Nkl zr}v^o^9l+Iu|KugU_8|vt7Hhe33e$BATLEtP^6aGdE9|L&^BxxvH~)E)h2Xx;H`DC zWcuW-13&#AaEIpTIeW!E$P67CtAEX&{p?dutzET-Xz)z)!{OMRwqE^v4IDCvdBD8f zf~>S0corc6tkNSswDM-4Nw7+SGGNjLJtK!g_QK@_f+(&V8X77p%4=(?w`|#5zGrtc zlZ+uB*6X-^FYK0f##twS^Xdx*4e1pKq5M*Q-=Q}%6LY*BK?+EYy2XeOAH!P znb1(ephX=H*Ps9N_>?my4<9}>5M(w3jNoxez^nu!ROQmv&E1iEkx(7N)NCc$xnEUTQtK?e(t#s2kK;!{*@YQuS)?C+L zyLH3HS6+Jc*=L`xtf~xz{Oi`QUbA5}(qe8-0Odd$zkWeZw}LK(J&THPt6*`)SY{~k zYydG|E!%+uTi`&Cz4jRcvOpA`r+yV8vjBL`e`@wsPC zot%^B3Wn<_n@+EhOYTvC{81_jhfyXtrS;Hgmo>ws zaGCLqS{eg-hp)-|(MSJ&;;(=CeEAY)-tc?FvOb4ZqH^&BvwJbz)kVWrGoHMZECa4! 
zL4*%YxGA4F|HT>$VUKt@sPEClJ018{jokjznv_o9N8S$KgC;Y4%0jLrLpm2OmTPEEAzMmi8tFpOD7>ddCp zKY^r@;DBX!-uUu+po6$A-H;Yw)Z(;7V-@`$&NsHj^Br=kVGHEqKyzLkAb{`%kBDw6 zVsco|z3}{vTQ~muqkoMUK4SRrk$ZMmRPC*UjlzYXs8>QkXsn^3o~cEs0>Kd2V#*J* z09>kPWo2WsRn)Efs8PoZ8#-w1npICd^*5wF_^g7iZrCe_lc`8hCXB%p7N@K%(&Q~_ z7;q3ALJ*B{k2wUfd!Sb_M`vQvumHFuP%tNQ8U2M~3!o9$Qk|#K=Zo}XoKYeRa@3%x z3NkrtuFadbGDk6D#BjAHN&L04##7BL#!FWaMjiC2-nX|hVlbhQl4%Iqh+DG8=EnNQ zdKxO6DCTi8g-BD;D`qTNwZ5*_>vCmWbIrBM;&_{1>MTKfThf`Xtsdx5*rEaujT(+} z&{lm~UQ4ELdE@JG&gy7AxIr$(>A*uVe})~Dm}1ggjIgi>nD^;th-t@;op91gCwo2q znwomdz8o&wr}O75Ts*I^u<)**-Rok3oZAKU5(NkSJ!^i5Ra1~(K%gKDB_e*7)R+l$ z__x3PwV~1T`SJxrM}0%wiJHPVGk`MPX*TiL05w9c!m|i&@sC3~8Uc3K?uvnfdg2dI zm2lBrW(+WZ zRosov05D}EISpvr@=GN+@GeR`A8{*$xCd?nTh+By(m!fVB9p4jfB?`|lOyOgFIljL zv$11G@peEv5mZGrCQGIhZPD#IaB-b?I<^No8n!fMCEM5@@cNn~msXrpNi8H|vIO;F z<*piBZg5>I*`<}ckk@n-1^dn_3Mi*mWzVn0N!SZHzQOSMtiO12-UMHD~L^Dm>6nU zw{6?juYVDF%E}@LRx~(T1x{HB@=U&%!#Yryr67h~3ksJy$W>_$%}~3TxoU3>+}0Uq z-UxKE{*nm@a2$JD;gTc={i70Ko&F`S9J*%u0sT673(8m|s97y)s`OTrBz{Jpf^RHCqe(`f4iig-g{!ViK?*O2#^N zO-!s?4J)cF*r%95H_b4_j*;;PP=nZ|3Q!kx+RRxF~kRh zKqzJj*|2`X)C&%dyJU?KLqTC7__k?d2@yzP%Oub#`eic=6eVMR1#co;o&PW8{cY=b!ft^jW+Y^#Q`bMVFq)vGecOsRud~wpu_{i_?R2%RvU{L7wV>TL21m zQwSp;XG#D+m!+q-oyO3%!O<=`hDe5l84#yr*I@%8N0TUa#` zsd>pbdja5>aX&9YC=f<^bXTs7jM1Yn5Wk=U$MVokR6>pYSLQATs5?>^hIWPCXzanC;7`Qfju0O}1nys!Z9nxSmYV`XKoT z2dQ^cueZ3Bn(RwD)`~4Kg^go2z)Mt0NNkE=qoK(5ZClW?r)6dYV-dd?5n1dm2kuiU ztE*rEh{e;ZPhXZSQbQ;WYG+IBwANQr@9JY7 zkY^NfN$XVxltrVcB=)q*TFqR%ek0&MU|@feXrR`uR)ax!z}BOpb?L3sw5Sh{kgc@c zkQJbGH8vko4H#jJ=9qo8WGr2@rnV-~CAaIPmtIbMOx_)@fCu@ezq1Is{20{kE>*&VWikLN%u_Bva=hfUB|D#MN*uIdizpe1WEP@ZcDX*LXUWp8Y=_? 
z2`QSo0S6SSbmF!}3zy)MAGwvgMimjX0`+UurxmjI%;NLj_t>7O5)jC)N0YlI1Q|r$$-9B{a(43qc zz-{Bk4TO5eI1BqDe(|u+!Z!f{f&$Y2agj~n44qq1_nx0GU5P#(?HSao-h9d{izn9O z;YdbBvbY4pCFP2J1_ByEj9}#qYh6vl+BIt@OdOAWDc-u2ks1jEm1G7&S6eWPpZ=y& z6B%xYHJn5w1b}dKVYgvsfh%otvsoQjfG`hXv$$=RjIEpZRP1gdve8vnT}8ZWDO>qj zRin7gLUmusKJ~pER>e9KAC(>ubHBY#if+X)(plMTwc@bGVP;{oSUfmb z#K%1)*9dZfSaG`{*aNgMR9{cbVW23A;%fV95Wg1%1S&GxZy#KI&ti2 zXGeiF!ZO7-o{c|F8T#G9>xoHv)I9&wh2zJK2eCTra{Evm%n5ogsp^8;O> z2vCP$R+W3o+!mu>L2hBD%U_u7hshDwT47H^qe0O2sHvj9u?eogWPa(n7tfwD_0r3) z6wjt%tS+r+uz@fRfFn)W!juRt%A%ifV<$u-Pt?~%N_UobD;ga3iKkQ2L8+{_QxK;) zXu%Fn6^e2I7Q_Y)9Jpl3vVnsx3x*owtQn8S%_hi0k?1~-=|HXA?|K}f6jrq$acKeq zv#cdzwYX*1uJYYwd+_(b%1aDoO;i}EO|){7q28-(G&GP|VroP1ELqNerMEtz`U3nC zcNztvpEn{?u_jb$W-R6?&EZH}v21zuUQb?b(S;XYXkfL)M2d8ju(k1y7Vb~akWel) zIbi7>Gp<@U^MCmYL-}AEujC+G>XW>0w$v;C3&*LgrlV-GT!}^+8|plbHMwr1Fx?rh z*_#mw5Gyg)2p3w75#0)h6?7S#mvii(0YiHhWih)IHDT2b$_JVeq*BBr zJ*htoa&8PCHi8hO;h?c`Ly6hsvY1%U=)kf;t&0}ECqeo84M?pNW?^X=8aHn2+BNIF z9-KgcbG$F4Gd6~SfSUTr9d+^O?C1|+3B@aqdFj&63ktg8!CZka5%IgNK`*OcFz<&HN7g;t^CJ zQ3e#HiCNRO)_Lmz4U~|vkQ|?RB7ULLS-);gQ*+GbaAM$q*Dde?C9pZ{ci_<2+h7R= z@DOMsFrUecp9eJ6vaC=!YqmRG;ZPLc@M1R>mvxwwKu%){C8XDzJBQB)pjo0VC{n`hOHQ|kGOKSIe zbF;H9zW7o#z=(mN^n)r-IucPDNnHfncuRFrsYU3z#jB7~nM9>J|9+W0a0nrhm)iQ3 zw0Tiivdvp(CQ0hgUM(>l)w)sf%?$JAdB4q%pS}JUcpDVd6g0}~}_gcaM z|AGbc=FItU_1e{oK3`^co2vuHitQEsax*e*R%aB?zEg@Fop$ayP z(PKslePfq^9hV^yf1(7Z5M4ahLs-Dm6kKuoSWJh~Cp|N5-1t%d`f$gZRhw9d&7x>r z#j}``X1*PHsX3L@^?RMRB2vT2i%!L;uPatPjJ@U0T?HsAl7`yP4ZA>oQZ5{Ka=ezPUKxx94q zW&>%lYZptp%igu1OP0&cW2`1L*^NN64;R5{F5~2rP8N6d__T{e@K!1!iz&<+9Jm$Y zk6FzQ)=}H6XlFv&GuN=;gWh@9*i}|pThp9dK`a|CcF>a4W?nZ zhY5xP%qJ4v=e!F}hMR~+EFhZ@8`cOaCA69Htj;017c5L5OY-7=2MV)5PSo#GJYHE}SsCT3cHu z>a$pkaJ()@+LY5LZ(LRT(Fco}(DXLOPCDU)tn73k9tjfUODQDvIC(kIp<4advX9z) zB>8OT!BOskPS|Q+hpJ(zNRcCcYU&Nru%aU;u9x}V`|f%4(MN!)Ns~uUn|50F?m0}s zLn?K9YZrX-*@AgXipv6lJ-*D!vMh(ae?fM)-1JDe!5?mLu%g$Rj@LkzWW#abh+PCI 
zxGG$8hOl)KGf^l-qWy^T1`~l1{XTqneSlZ`hKAtgO(nXKQUAlDf25J*E-0$hbI?>1_Oqi4@g|g93sl5s9zv1zDaCaeL!Y#<8 zc)K^HrMa%V;gU70imUg8a&xo4^{uOwhh?TLsh)mb?c3fZ8l#KS>Dx}(dG$;0fg^-1 zp@YNO+LJy$<_;&@j!LVCtZK6Cv@%H4PKx|u%a<*l`PAcxR`=ZZy-O}T*KWtJay?5A zWrl8O(P?L%7>zm>E?)Qc+aG)|`-5$j!A8&CirT7C!v{6`_re%tXXSz>a7@r$Fi7MG z#jintW~@(yLBtTG0v9F+jH|+-Uyx{iHdqj8Y9dS??JUR@pQZY%2VVbe*`5Z zL!b+dI07pEOLvy;xblk2X{qGDuX6ike^_HcBbVgVvc4=z_cb&Dj=Ox^w?Ud!P*t z{EKG!-}yYi!Pw=;z@^g^W^pdYo~C8XmQ>dSrk*|K!WmP2fhw=RI>y3hXvY-xM}5`J zRn5`rV@~XG+b=JF?W0G&{r@I~9mdYRv3W~2H;3Hbuu)ZCd-0_gKKtBLW-N=84^!ot z6E!yKA@ogn?}HIzW()@f(ZIH_5*|5)HkY9TvZJIJg;)Vh6J zX&~SoK724uQ>yL*=11{t2fW?Z4b3N}JQc&2S(B#$U)WZg zxGJ^cWQGR8Krw+R%l8l>3G%*W>xO}YdW;)Cf>m@Ii?>x*)hj8?A}aa)F^hiIG={cE zts;igVgA5BcI=pSYm4!#PVTA*_Sn;lZeDd@vZaUHoyiB|0~4q z<(BY{?Aelps?OT8)6eaDkfBA`S2L!cp`fv5_4;E*4Yx_3!1f&pH zV^bpyX!iJ6Ym}}NJDMITW`M=HAk=g_Vl0L`~9vxT~Twxpdm%0#}2FKUIQ^~ zoY+PR%`5Mka6k?_v-qY+F!-0hJXTdxdd%ql!$$PY$xTboFnc`4>Q(FPc9$3vfC=Jl zP+&{7PtJKz@8QKrqrxr5GPBaVbje@8exs5NWeliCXppo~vPw|#{s;O#nYQ?;()R6C z=_(1?GAl}-l$7imIdT}53E?R723D0~-vezPC0eoXX6q~a?(Khs=|f#Z1Ij@@0_j$) zp<#=$qPNt#rNWIq)@f3&>-1bteK2`3o@rZmTc_s=IqR*>yR9#_u5;@&hjT>@=J@v( zjdJ`5p$b?zcKpPyg*l&mG=KTBwb_}4evip*%{7Nzb{0*WiFg_eMLp@6DAVZ*^NN+L zinmr4^mN{K=eG z#toTp>_mJ&Jn-v3SMF&deI$xh{)3Dx^X%DgzxL|Oc+jE7vIvwulNky&$+<;woW|ST zsy_Nc>-242Nj%*~{%xl3yx5`#4h>r^9yqMi)Dd}D4<00On}g{f>5tMaJzU8Q8q+_O zam>Ie%7_sor%XPhvB9|craP{>^7`9;bmu*H{@-gae>8v2+RAdj#hhce<%T16qP1Eq znJ>Kb8dhCX&N(Tgi!EyPLuhay$c7CKjcBi+6!eCT3QCSOWsEBAOHAB z<=(n!(@!2UtQVT~w6yf}bT?JmvU$gzJymoy+U`VS=_LJTYTmG6WB2Yw zIXT(BfEOK_vd)*gUwlgr2F+xOxtrOHr5k1CGO<4ul?k!|KjzH=b(D{sO9!Qn9&g$ zK(wc)zR0^d(6f4&veyaN>nq6vU9kf_-kxf=l}0F1QLiRkO|IaAFkMc!^PM;Uxp3j4 zv@}zfyo^12_AXtz$LA$B;gWQxk(Za%wW!;ual=QB?LYCj@hjGCTJTv}cE0hg?_Jjr zXn?t4xsJ`2wRf*)`?j+D{GR#wd1#_+HlZ}|3#l_4AS>e^FTb*2!Q5kxDLnszDdDIG zaBFJx6P_AwYj0%`x24BU9AtAk!x4!%t!Qt*cIf^Ayx>Q$XeG?q;K76Zet*f1U1M@a zfL1Ku#(5re4uWyI-^1-VQN~|M<|NrmTtVq(TEBk7;K73lLDb~g3)?8^+R`DlqnwsW 
z^xKCS;|9mnAK1mgmUxX-YIXpKIaVVat*NO3)$K9&XY-0bcz6D^=_fTdS6zAaH{O}` z!KMu*e|qc>4?OsX7?ZMagG{R`%o1~4P>dlKFIKlJai%UNhhoc`?SGyerOC@jdx$18 z{+~Gb5@G9$^j~~9wj&Gu`dg%L=U{3__K76%5zCN;t&pLt_Ogz!x~k&NJAYPQUUtV% zZoc%g>0WPj#ooPZ)|Hf%R#cSj+PY=ivgI2Wf4&hDu7Z3c>PN?A3?Dh5d+!3Xy}3Ey zw^^K4o9p9`KCY|?OrJJ9J;N=g<;p+3cugS4sNK9{YssJf{0PoLFPt&0OMVuDPta$7 zdgjx6%DpgUED2t@e8Y(+kMadnj2{{=$|=3kJ>(|52=t6@jdPpL8VUv6Y3W0T3|X~m z-S}h2dwq2Tn#C@Y)*TUMDWpV~t3OA7 zShgzSf8;`Pd>`fZw&yJ#agpuGoFT$?#6`4tkc<*}smZFuZL`(<#v89y?%h59*pc74 z`TDeU>%hT1hmY!Y@iiy^`k@;id+Nupzx(hTZ$EU?cczUQRam*p2zgnNXnf;>>1KPR zG0-H|+-AGS8-4!y*O^ueN37&lFadu}cGu!U<)mUtPtKD1g z*=e#gy3zszhveUR*Z2SN@?Y<{`#QAYP4z}mQTJfTXNBvqI0-@h@rMhFH|);H$U9@o znJnK!lZ6bZ+{z;)h88ZG^U^>5)^A|WHP_Fu*$D7wuPASL_UYF_t6P5Xqw~)Dh8Uf$ z-;Q4gw<}Gp-~+j+o2oOsh_G7tz%htsrr!rqps~mYrv?uhR9R77Sy6*AlL(F~@{dxA z>82gLPxikXe?t9{lKC%xtX9X4?K|P)x_9rU49D;dP0@!fgzjIMI^zwfzj{y87#QF$ z;D}i%rQ>!2&JOm=YD!zSWIby?dldB^J9hLPx8GjSrRT>VE}cE=6PGKURo2&Bcgg6{ zJ?6}r|ME-EJDd&}PF@XptFDNbtT2mX)h>oWVY>1pajGOW=Ww8aV4M_-MV){J8!WPMzp-Hz5;*8QEaWk zHoMu8kzwo#FBDo#P;X53Oceu@v01KmlQyhX}26M`&1C#MS*Rq{^MKtSfz3F45@-hN4@qfh7_ zMcYy|%b+cUw(Z@!S4L(UIm=!Ct0=U|XO84xf6UT3NO9=neLj{k!Lrh10G&>*sS0dd zzZ;X3v(B6fUJn~F{FWd5n3x!kKJx6gEqh#UOLngP+uyzx6PCvx|MTuLCOrd~%88m! 
zq~_Ketm0uyxY8HSKUvuky>y-N4>y-L#i}BI= zkt4dINKx@v8Uqxw1yERtde89ldA%?Z%(!Rg=Ae7F;jbZP33`3uP@tx^A`$OW<+W+{E|y4kepdfSAAV-plAQ!%`0jVV3qc17du2y)uI=wluf6`yrJsL3 z@q}*E&OIp_W2(#Y{(JN0%~@So*!|&$A0>jss8M5j_UuiI7B5a3;YNy7%LMAu)1l}7PSUjrV|NBp^!zug_JFSa+!;I<0Sb@TuZqnZ-K=D zK?4`8ef#yqnLAt_6N{K}A&jB8r=5Mayp~w>=I?#)*a;_9Rt9JO?VqUcS(0$Z*(aPjdF;-eJOA*= zBX*|)?-2;&$&wRo=C3#gQVr*3viXOob26XgK__4(yW+?W4l;mRW|#yKeIwcR8YVbp zYZEdKbdl@&DfY=clUI@lsr5)+Odh1(ZGB7;Ew=V%jR(4qudh0vzEnf_XQ&O*6P`>n?GdCeB!0Y z{_o{K{_eH!ee3=w9)A^$(&QAROZ*ok{{BY5Z@KfXdj}2~1ZcX_Gmf1&4qj>L;Rj29<{r& z-+XJ~{Q2uqFB-O*n{T}+JID6Y3oor$vdAD19IP7gjS_8Cf7Qo8SX!`;&Qg7p*wcjM zm{eVKuwj*K*TE8X-aBv)90qJ1IFtW{JMrf_y^s(nmJ$>Gs^*3DigMa^P zVKkf;jij55OtU$|?asB?(k&KN>ZI|(cUs-sw`|FF8@spFJpAjK58ONRoi{$t%*ZY1 zTKL`Xe+Q3Om@g3K24y+44~YE!_rKj%Qhw>>=L{U&o2Y^Tf9#+Ce6MV0P6Wboi2nVDJ9 zuzVOo(a4aYL%(zLEq=f8=%Y{3vB8LU{Dk3`U48)};8+=okSL(2h(pRvlr7Cmp2+cj zn{NJompV_roF4eP!`9bmD8!i$_!k%%`3{Q?CxVl|%xMj2=1pIWAXTx~(8L>%8+$zwC;0 zckeEN;>=e3=LOUnN6Ra(zWnOzFOHi~c;Sq*;KKY~^Xe6)um5vFUS78c?tc)04!T0L zWS#PuF=L8~deqlNmoHnbl3E z7X@4nun8`;;)ztYI&<0U$Hf%EWGO8zg-7evyQea~Z!zzeEJJx#I z)pyyW)k3$$BNn(yS{<=e@2xK@tpP%hKkh`cm7umV--;au0C(GMw+$aYWX1A|f4=;7 zRz|nxroi<#TsdS|kM}?L*K4o6M%TqKnPXF!CGD0M&WV2U$-+AizJeY&%640+pT}ft z+Ps*&dsqhtAHIi`)nOzS;Y4d^g=7<-UUgu?b+3C+NCgKTS@W4MGM}1 zcODudebKl2XLVZXZf&VfS)qwPna zgQ>!^V19o7h~Xo5?=B;(!{t|Pj^+yYapx0La&<|~0R@dY$$KSM0B-Plpt1K%!GdDLU5^Yj6pGdMV8{xCG zbh+=^{zvg#bt&{f~_f&f`NeO;|2IJUx?UD&TU&a2K+`wdiKPL$H(Y1;%rzg zNR+7C0k7Zx{-NpTp7X+Uub(}2Nz~|E2wx_~1ev`&!6I=Gz^7=%HWjEZy{@+rD-Ejh9>O zp@_+|x2AIAmMzOZU$J)OhR+x6ZLHm!=`yTQBiCUo=$4=9av2f7+iB!v7S`MtZdeH=3M9jJJ?9g@of>OF9fL=7J28myLR?#u)lK?BKOSMmBC%m)?~^%^vA2!%@}CHR?? 
z!11J0Prqcwj2B*f?dh4X{^Gtnnws{WbjsLCr=2+ay-%O~>tB9;?=K~wi72zkn`bGI zAKgghr52srSJeYYI7SayUB(34A(I^_fqwjp$YpJz4YRX&zU;)xIn0C?fUs9OWll1PX6xqFE^P2%}t)2JNCZz>c6_= z<=lPGFVUvMXEirBLP}V2L!8hN(*iE1yR@`v)5h%%J8@&h?*hOEwE+#_o1$t>b%G{B zheG+7yGDBT>JGHwp@B&Eq!eR%S=+ep-RXCc=(m==khrc9L<$nE7EB3xrC_MEbZ5_= zMc7XzDj>-zMLi{$+hRNP8&A|r*JHohm`qKcW?cINZWyWIdf)Gjm2BIC3D?je!`*H? zP=lF>k7AN4N4Q>$#BTrb&%5NLzw(dwHgDRQmgbC^0yll=9{nY+PMQclb?br zT9mA8q7umqod+G;1D)K~q5E3`I*1-C`H|fxE7dnP_=EmFee*JjY>~mdoiAeY0!cZp zLLB7T+`(ff_Lwm7`hdss`pdH(yZnRn*i6!%Bz4jnTre zp~nmxe$28Ji|5W+G~u|BNRIGLpqu6|z!9rNxBw)uUj;xwE=_nCIz=-wU8t*@Qo)S zugB+3cX_?dHanjH>+lhyzj@^iPyY3fe|z%T2OhjN8Vw#ZYS86ZT=?9xuYUO9hu2(x zJ>Lbz;gNmfy{7;eRaGUo*)g3LzpNgRVfvr4B}VgCP+y6i%xV>xdDk0+!_SPlCTLT( zdrynXNjdF~fR~UwO@TnY$%4;=CX=;!+Vs=#+`vN(jkSKvnXm{58BS+rm;53$ z>kg+m3qKK#NOrEHci+79EF$-L+-X*)BmE!$c;~bE8;(DI!uP)aeIykLvBN|cz9GUm zL3c9y2w!8e+D|-@NIb@Z&z1!PNJn-(fe1xuwnCE@dxb&?IKY@A8elcz(4oUtuU;!) zWoEe|4xqInFSVry!z|MAY5*qiPM#4~3o8n<&nqVDEDlGDr0&Ly*5nywc*JMS?a@xtRJ?a^R$1)W@kQ^>K^(w0@-hS0KX}raEG+gU2k` z5IRkfam6SWkEqp6l2}o+^am+ZSS>n$ zYuuMm)zwus<>i&|Nyt{{@>OCIDE>nDngfm#a@9Y;h)p|tfnfpxD=Vw8YwcdtRq5J9 zt3_dC3Kk^a7YE6W1c_r^+C{SkJfx@UFP%a#%Vil$TO}352+tJ7O>C?A= z|GsKH10arsi2rz2mE+(HK^on>t$Vi~)2E+H%EgOTxLp}$OSu2Q9wSB!-@UuMs;ZiA z0?j992BUnmV6WgD6B~6%9Lir+DcPl7>^wf|J#fI`c<_?+@Jdb7`bz5E|B7SrZXjEH z_{srJQSQ3?S4Bl#{`{wRest?SHI?BkcW-M9{vz8SV4^)M-QA_RE?|$kS1nz?Vo{09 zYT#-CNzj>Q;M&1rbv*gRyWhUyS65y6-P?Y2-wV&qS~!3Gy47XP4OVk3f6uPq6MufW zuEx0J;%}aG;;E1#1mE1$1X~APz}z4(BK1%WZVetXq;KE;HPyyPAAW{UXJRIDk#Ih| z7pO_z4g{xk(h9{icY+ZU`kip^-aWIjGS{xzV7I%OivvCq?c`bgQy1FUsU8vJl1<~0 zDv>_2)Ya8zWTd;9eNgmH0(DCgw~^DnODaz(_kde62QnltPT4b!2%n|+qgW^qX=?E9 zt*o`04Gcsud?zKFnDR!$5>wI^2T255!1Vg-ucy(gR<6gtep6EeF1pgv+|A8iFkhr= zDM0EheJOkT=U~-4V7)tU9Hky;$*vzLTY2Sbd>kmjfAw`C4b?zlJk*B7D3X-LZXPsv z$lw3|PlE3K{cj)5{cy=yXHJ@W)}*oHhi2z>Zw}O3qIQ!%^xj(^{o%ox`1H#(XQo-) z(Xbac@Fz?<{^{pm+`M(``|rO$ch1Kf*6djH*_)w|7#8*Jlij^XH?JqKb7%E=(=WW| z?q2{vlm=UYXB*byh~*KB9^nJxh!D-l$~tY*lues9&7M8?TQ^^fM`tVmqESB<`UriB 
zKOnKLJtT-dy)+8gB@UO>WEnl?m_-Yho;~dhz)sl`aC4v52BOsiKsP?I*SW>(z(wd^ zDXqXPl3ObFROaMnWn|KB@%^AFUq34_pnxEG+A7b)1uCLAoqgviM=BfH0YV;+M=*=K ztR65sE2C#V4$?Pa`4l(2K~XG_*&i71g|H+ zh1!*+DK#W$yVw|@@qO=|5AMDDr*l94*K;#xz4H95Aw#{DQ};pxfesi-g6*wEBq z^vrVh?Ox=Mga{f&9Q$FzN91%V9G_JjagFK?0wvJ9#p7u><;;$zv>`IAa3=y=6_sd+ut_a6!Gtquv!&BZftrVznM~E{2sYWY3e>WCA<(( z)Jy_fItATgHM@3}Z1gr8d6_xm#*I^J(-FG)Bt$UkGL$PsItKL?+!uj!*Q{RI+}zZ! z-&p+q(~50dw;>^(b=FB3*3)~6OO<-5QLkRPG7C4jIz#j}JdMe)Q~M*gywWS6W@Qy}G6zO_))T zW@OonVZ-vf6c~xr1D8Xfpl~2OZtTPX{fDgEu;l&s<_sTsJ!VX} z#_{{b)pC+qkot=!ETMC1PHujGkYIxS`}aYvEZ(qn!U+SEhlZ9fS|L%AQ1wCbH2$WN z<3z@Y$y8HQi;$?S2Lv5iZNtW`WU}~UiKo@y{qk%**rXb%cp>;G8~BBgC?`VlcEMKby2(5IU)qGkAw}7QERwB9nSyRyXvKsN(yc^y zu$zsceS7rolHFWiVDqx~s5zPc+w}SQkSD$rXJN z&f~3i`+$K1p-<%-K5EAqX_&Q&cB=)s4^Q*ruL?p!ngfrx$f_U=4rCrPX1?gcOEwg* ze1G=G-@o;mm^p~;6s#Gh3Gtc>k#ne#i$f4l2-xO;>5!lppMAFI#8ZZ0n|jc1lYaFr z2LPR-p->?UuInRs$H%aP+6q`nj?zd~lLPJISefkKCIJ@lv4wP(Nk zNls4Q)TvW>kc#V%6Pe2^Vm?8RQYTweD%5${Aw3}Uc{46H<8Z#edgiNcJuWf2JFo^G-lie@JKDsFL_(8oU4C+0&TW*ds7HF#S zdTN8wfIkv$ge`ET_ZvI}gKhD24!0$07gz=?iSPgc?O~ruGzwX!g65s?KimrPkOCb- zy}~_-4Y*J!m4c5E=UzC2k4~LBEkD0-$M(9#iZDaA~`ucixt2IOj_NaWMO#Zjn1g#X;!=7UlaLB^>`M4pIc|(!rcub?F z z;LxyjklM)LN^A#7_Md)B#;V9-3|2K>6X)_*-I0-Pbrf&f%tzcBVG)nj>}#|~y!Noi z>~FN-f56-1WKIwIvRG>yG01XsE$Eh)lLrA2wpwzLPrAfHg}xBnP;AS3LbV23(XS|s zxJyAy^GkYag2n~O3&F=^6E+6}4TpKq(1FJtepbv4Mm7qUG7LWe2`h;=|ZcHMx@%%AALRd>i=DjtoRF-i-;GlF_#Y9^s!BSMLD9ZPG{qKY>OnC*5X}AID^DCSICh!_r?u zg9xI*#Tl@v4y)1FSY;4r$00ehf_m4E>VU`S+pCZAu|-(|tg3rOQ56jb>^4itA7G^q z?&N39dNnuKa`H)IL&4Z*^H;81x#h%@CXJtP0s+T)?+Rw=GT2h}f`H}dK7V2I?mM>* z?}0Eo zTvA!0>z_Dr+?Fld>+6XC1SG~(Fdsn~g;ZiDDov41@tr7ng!=+Ns1TIQvD&1?D4Qg1 zz02*AHGt^1V70&)=@&|-`mzK4q!Mx<4+~aFxyhftB{vb)Xd)4FTT{LO- z?Dw{A-F(6c_b3$Nf5=$dPEfyuy0}a0yH- z4%39Il|F~r17twn@`JcE#S0t=Q^y3q?aE89NO#-be(U|(TAwFiZt_JT>je7Kz!&0!R2Y z{lhHQZizXp@`LF^9J3;)A<%m8rf--t(=)9g5oY`NO;xZJ;Ye|MU@d;D5hocB(pgIQ z5?&B7FUEu)ybTA;|Jlq@TL48m8a;aqUXwIHPaqTtz(P7~jXrPK>}v2u9a#6e+=1qB 
zZDr%0-GmpjO*{L1g1MRDGA$?%5ztYA1)R<_47OM>3dDkX&p-Pj)t+|FxtM5u^6}i& ztGAza+T>}|&Ordj@Ga%N(7m18*QEyz0b9JB|H_v@d(ocCl$2kp+9jT?&Hy zp;lJmz$c={*7|Hpl}d&}PM34svE!C5T|(bVR^mt^6hlUn#pDVr6-*);i*&46CxxC` zkOVpTC(NJ41m}YY=61TR#LkJvcJKh@HS3Kcr1cq#>|yFpvVw0+-eZ zivz@41J`lfhXyT-{U@HMaYE*>GQ~kpc!&({4_XX6e3s3Wxnt`dyaW3Efu2RZaXiC& z2#14wFPP}C+igTVLMpY{UAX!GZ2r8JE0+%*G-%?46B-)5ufF;QIC|Ap*NA1D$*f(` zr@Rt*or%-rLE=K^-wy17L%`O7Y9eF!e`%8eD>$UutPT=qP*Vgd-M(eB5rWw;Ty``- za96Ud56%JVV;2nLWEmVV`$4amk)DyMjfoJa$nkI)F3ra2?iUylVp2PS1RSF~dUWsE zuUCIGJ}*4?Y9#Ey^)RdlKw!r73;hw0gk&0xInhaDNFmxP2|)zMgBigor<`28sko`B znVYDg!!U~$c?=~KNCs*|6t5FD{QM%OV1&qrA_%hsc~Fv}!wZ9GU?MD=1Iy2d;fR`D zF(N~n>=;f7>d2_0=F|v)0=}3T1-TR?;@^r{RAvdnCKh1gpA;Cx#tM8Te=&Engkf5# zDlu+@%^0jv2SVzaV*$I#nd$0QUfT5jd-Dl+;c_}RZ{DPok76VN2I0~M6%}g|nU{@V zyKA>T{`jL_Z~gh_pOcx9wPf*%4ePfIA2wvtq|-5&1tY0s;w|gWj(2$qc%@$NJnpz2 zI0S4R1fC9ZtK+KQfhEZ(PHY^<2Qgwt#|Q9ld!BdiuTgkvzF&tl0g z%W(`y4fRrfKC!+E;Y*NJNCNMZ$oL5UN)BpM6(rA4sH~)P=Z-SaW7E1_AH6>(J1sBh zW41rS50MPB0MUfO{lVj40YD3YEka1(W!+-w)3c}B;o7#Pgk>&h!I*DDR)h7BfIDLI zLF;%zm^;OV!H6mHR--|7ibuja13el%3$t}7wG?wE_$o*)7Ptacf?Lu>Iv=ryQoSZz zmzuJr6O&hBtPS81I}5#K9uQF&I5X_%ts*vJT{qSH-L@>l>-hMsPk(#o{`W~mvSaAt**=_UZee%gC@An%}IQ`ry9*_UcH{NY*G_JY!hRm!S zdV+P?FsAXYP5na)l3kv9z4N#Od*HAM1}1{)9WG|Ieya7A4s7)i7DdaGT&5nnLV1Sa zYiiuGd85r_}AUWrJL1`3Oa+MiHa^)Ou zP#jr=S!0QyC|qnlR- z8(~$ZNz_~9%;y0ZjS^>s=pg|QVW`d4=%trWPs^~ZSiWY-l9fY;_CN2u3qnDHEfEGt z@Uhj#C8=b*|6ASeywEW{&@R{#Vo;mLnvSX2*R~Wzs!}_sS2Wg!QCnBDdsi9inVgJt z%;Nb>V`-qf42nkC9D@3+&Tx!;Yh7*+7s~xldg?+Xcn-+8fJ`)6T;xDPkg1^Nxv2uf|@o30lp zotTyAlCSP>yI?C>Jw~rat}l*XNhnNa-ufU}TE3=AxlBh8Hqt$81ob7M0(f8r0Xo^{UY zXPgc4p&CN58PRu&UAcZ(Qy{T?QII}F5kO$<@(~a zn^v#dw0V1FeN|n!S@ek^i!r!|{kTCrGtA+NfRD8Zes3c=F#n-I#)|ThNl2z7Y<3LX zn6&Ud{K$i7&}PgypJ=>0N=oL<{VXpp|I$mYFj;I7SYxvl^%n0W@j20$CHr>2tMLyYKBzCbPX|XOs6bnSJlR zTh2T8-T$0>?m6dHVYDSCRRVjcgtLi`2FBnaumh_Ec8LTk)ES6M7~go(#+U#3gs-=6 zef6?c)eD8A)u+h`)^5 zq>AbsHmBJ73>Ca2gb8*D!7HlARWh2e*&N-yU1p;-9@1+Prl2QbHfQk|cB75WcO1s7 
zL`2tI*IIjE-}bHB_io?2d&lnPgGAK8ky7HjQAHr?FWhu}Z*R|6zI{i?8`L1mssg!L z=FVnM6qS}r?{GLB&g?iaB)K>NUa>GTVDt}v_}%`xng#QV-goZ*paT2*-=1!4>c91? zUoR`Kq&Jw1(ra|k;hh5c@@pWqu6)fZ2gZOcM!qzt#P_Vyd8l!jq~!Im*p$4RdNP$q zkS1ePAc2fD9TSRp#2nVS^!oaS1};!gv7u{>%@?%76k8mC66Qb5Pa;ubsdxfDEP?W} zbLKBxBoijoIg=R+l;I;CgQwKPbc+#<_ROT!oO#weZ@=~PfpDy^tGA-sUNO(Pu29$E z@zyjqALt6#Zti|-^J6OWV})h*r7M=6uyOs0Wz{8xE{D}%Hh?tx#fvsP_rfps?eE!F zS6f?G*U{b`@`M9CK)(nMgYbp^3Wb+6l|x~qHX1Z0qt=y^WwTjMKIx>@Yu3jjc8^;X z@DURk497lSNAzd$yvUc>v1-gDx(dr)d+oK`zII<%t6Qfv6&B@w_=@+>Ur_$`&YD+V ze4}nxb4^Wsw;NedH~|pawLC|aldmc(w77C|>e@V8w!gh?`wO4^+%5Lff@`n5aZi0m zb%84yiW{}qps;aTbjgxs%+PV}@%HyOH14PODl03zKKD<5dOrf;4Ilfc)n?kUqvn~X zUsy1|9J>c1WyeHMq2@U<}xT5=I~WCx3G(pHCNsVrWmt zk$l6_DomzNy^-WQJ3#TkA2opo_Se<%91c51TlQGA(>VA;%@hjWF@U&>e#jdLGHQ!U z%Iw)LQur_~1V6DZ7*I%0h)k49hu}*t2`&heCzZN#;lfpG*6n(IQ&*p-r@Px|h&qj$ z?1HTF{N)jYxvsUVrn$Yt?Q1*W`R|@h&;DtX*{aHMsfvrU%gYM$3yN%x!j|UVi!Zvm zu{9!Mx1hGOYK$7AUTmVF3GqlMoy7bXO%{rAO*9l_df4w4RNU|U%Nb{!VzoI6^5)sC zg)C99@D~Y&Bf*&0=jre7j|OA@-auD(Z(Y-Ye?0jwcW1OBzw%(+f$en%8}`?FyuLsW zQ%n_lwJd|GD9@6eWu8}?s4Doov##^mNOX^vwq}5boMCb4I+xHa%3O}#)hpV(KCcx$@(+)WDu}q zkQls@$Yu89@p$&_ujMK?CnpldG#n5TvEBtJabSH;BtN|&B8;|#V`6pnLT-t=kX5A0 zB*sqY=_xiK|D_aTQp)6wg*pvLs*6T0x%|pcJinO&WveZ^H*Rg_h9HMe2^ zu0kuNkv#RZdLtV?g0_i-0pku0b{FArEdI)-O%MG1=bPWy6AXkGFPVSi^iqYga_I*y=-RjUf&1_8=<^XmA%0Yr@dpv!V14O z>I+5K+vvwXxx48gJF}_mSt?tW(P}qz_7DZFK7Qw6%3GutFH~3<)keKeM zmMvY5DJp6rtOdliu!|BXHnBq}&Qd56s$)PU!9trj1xD1G|9RgB7O(o=-kmj7716R> zC-6aFRZS>>eFodw7}X(@PMeo!SyY%ER4wWcM?1a9K&Y}qfoQxh5N>Mk>I+4#y!f)? 
zR;|10@+;ojviI5-QG75XBb%B5AAEvmw@YiISqc6Hi^W>6LA{ z&R%A)*)FrYGtkrTKJS7J%a*TLxOic<3zJfd!NBf~ey7uJGUD8nc5E8!NjKTi{;-n_UVXlZgX0kD05ucCErD6m z6mzf^%n3w11xfcE$FjnVLfY!eyZXjY-u#)H-q^EW6UwW|wO9-~_BJD~LD=u}`rQ_` zIm7-m!iKRLms*>XWzgk106%X`8*>yt@X%kv5mk{bt7CuNo;O}EvKg~2mS`XtVIeRa zNJQKLHv)=4Vl2_zVU@v}_4IQu2IJ9NzxlPw@?3An!Lv_WW>b6Oo_?F!YSEfZCZom9 zu0_N_MDedOm~EcOyg#+R&{Q9k#@8*b&dqzJrL#Fc-)S+&Lo^VIm<)DSN5gSH z>jru=Dk*|fhgo)DGM8nwGJ$Pv>#E=1vY@i^eeboY|%2Wr$`-1cI~tXD|rG) z08~`bmtTARQ~!GQnI|59`R%s0B12_a{=BR#Z^&oV8EqCb{)xyz>Uf0MH`rmSV&S;Y zhpjxTcpaV|BR4_>DUC*0NC?NWb985)x}IH|(Ne`}hj=(37J{8tScmEzh5K8&9{TH( zW}5|jkBX}D-CMS8ShdU&X!Er+=_5Kbfhdex%ny7Kzu)84vDz34`02%w{$QV;4wVlY-HpFyqI59akyLtJW}{U18%z6bAa@2+KXEPRhN2?`P7A{xUpF@*4EG82w5cG~GD z|Lw1@J^sXBFS_JI)e9FPI>JVlhM6KFCrjE#Atny=&EVgOc9oKU+Bq-=Y$eBh8h$0y z$0R7VV6s$d;;=Q0nPJ#c$@)|8%2f?}D>d7w=aO-OczwlePM*<8<_%ad=?Rwd`Okg6 z>tM^T9{kmg9#!q;wz6hVPL@T4WfJ@AsKPO``(SSiN}ZT$_0q*f?9z;Vx7aigSq$pL zPkI^%jY`bm%|m4bQNr_vLp-f8-9iRL^g~&5*S+5(#OR+N`t6&|;RbK>{?5Mn#d&49 zS!}Inz(v_)#85vP4+v^C6v6$7d22kX!N^?LLwRCR8?o_4I4_?r(yzs?D-d$!7U-;o z=8hh$|815mtggD7I(P1>#n{1>Ul3Mnvhs_teE0i9Sy5HE-|O**Fby#qHMT6IE>5f} zyatE`9Si2#R5k4=x#B%hf5K|Wb~yX+IpHg19&&GU|__gLK|{r8y7AseC5@K;-z`F-~H8aq=RikB2l~(*)CiNzVI9; zE5dUCOt=VOaC_#vPCahpvs*XseekD0y!)Q-3!FlCM~~1kSkMd8ErC@jcqLo4NIf># z56a&o&w(*uYw84n7n8%FNi${z zoqbI{o!aMQa)Wj}LW~D3QC2*DmBpsL=7t+hS$0fPp;L-#fFY(N$ibuuP820tWQt^> z1YLwhPX={BWMIXwq_q65d%u6`X{Ueu#%uc{k*->AbDP(uSC!^kmsMB#R2n<_IN>^o zITe-vabRmEi5caC1+6V@n&(Oez!mK3nYg z2WH!?J>6X>p-`WDy{gW(jy!7|sRm;<;E~WG_0Ja@imNJ(7Ja|Bt1!Q^t-a^|`+xe@_HCD5cEK5Eo~$=m{5}t{ zYa$`B7c6_Nee~K7fAX!bJpJ_Fue;{@Max&qpQxpY(cI^rCd zBy5d@L^FpPa?K;PJ1pN)Q7odM9>%R>$Ri0XK7J~dA-CYqDFuv@WUK)}OM)!vT-;D=01I?0(3w+IbnSK5UvurXef>R%UD!qdZ?2r& z($Z2rzSHc{pw(k1K&(;h&4u0<<@^XuPA*v})WTt-5v*XlLqVqy8De2o$bZu(Kk@Km zkL|6kS+sQ7x$i&c*=L_VP`__qlfU+0|LdxLCTn?FsvNtjIM0!nU*Oc*9R;PaJvF`k zzP$VrlQlaU^R=+D7-j*~D1wce^(70k^D0L#u3)C4* zS&m3TOqQAQsp4V$ACWGFfrwe3ApT5P+aK5Y!>ZPnrp}HoqtUEU8FF$}o3^#Qv9rY! 
zR(Y|B2irBOf)aaqW!Yjj0$NyAy|ApX$YrtUnBS^HF|#(x8U)59eZBn)7c6vSnY+4r z|MKXQ7_vU|)b{+sLOA@%PhP!rX>}}0d@&W?M+|!)b-cjuzBl_Yj6C}kAM0NkGreArM>3un(c3Huiw|u*3ws7uZq_6 zYjpiO<6ezP6*Otys^5E#Mdz$4+w|O?jyK+RnTUi4P@{CxVNfqr-7Jt2ls&HYgyOya zcy3{d)@18t*%S}+h^8&7L#5~M_aJe>2?EFXapI0A`h4C#FUjoprFG@2mMyDx<>odv zw!iu2u3$)GHOKP`%_lBhP+3v7w7R;gVqR`uq19$f#3TNI8!bFhyRa)^7g|Dh5gDF{ z__&1|^~P+Id2w~+mL0qAzT<)Xg4~lfp8WcoudP2};q^CM64!Y6>G65|Y~aR1E)kh~ z`}&#tMq|N+3#;K(S67$lM=VaII6|5KswZFXWAJ2Rur(%w7%kJ@-_%LFdZsco`^}Axf1nHwziA;(p@YE|`> zSAR&YGIn+OYxg(q+P#PUv74J3YW5vytM~lvsmHGT=oR0+@7^EW^PSqacLw_0@d&QS zVx1KA5*s*%2*-oQI}pMBdc#Sp&%fwG+?Ds$*Cq6-XI^^w4YP{vxY0u~6UH_~xHj{+ z$Ui!j1#g|W>i8u~s>@tKU+<$&{C(%n{SBSbvZ{)&e)-1b%gfk$6NPaZE$* zwpg64E8wkgJYWt-unJUhv1kZa97EG2Ouwh;y+yE0qa2 zMs|_WYuL&sng}rzfdCrf1ZfLd&S)@UUS&0AW2pY^Z`}LFOK;TgYyHIo4}b0Jp9+RL zLMm@G*e?p$wM&o7S#jbS!cCh05#8gDzx1VB?m6S6jTc{d$?BDBFZ|#KiQI~;f}};A z2AxUlxx*6^mEMHeSOO?&Y-_va%b$PorROl_N?uWPxjiGSV zYPR7|iK%HYNX%%16mLgG01tje3`QfrJ=p8QH~NHw#PR=FPPC_#{8Qn;7_dd>O|<7G zQDiC)FuP?DBOm~oi*Gh_XHAV0W{k*MAQyYtGYC-3<2o$+S4-lRDk%pE>nZ_YqLOJj zU_Q_(E4DZhiY;MWC8A|vh-5RFZRPWC`Qoi#xcR2v{^p+>j$iwpb2oYdZ3KFeBIp4O z_)m(!(QIqMZW9+?_^ub8`|mSPZ~5_$9wg8nRoSqGFsZB-19lHiTei!Yo0XLfh5;aK z3$}az{=NI_{eIQrMfufLW%+sbkKgnWgIQy8m^5q?3A~AD$PuI+@F&ZXf8lu_|Id?q z9=PkH>sKu^Se?K4^`B~g@l1WgepW23CUZOz6!EyQ7QuqophvC)i80S7QkmE@L)u5f zW;gZrb+U{Z!9*@XfQ|5*Z@!V6<05+Vwzsxiech#(U3Q)?(rL3g=pD1s^~_WM$;!%F zf844#B;cWK5?e{`+_Bs1^H)~QGuv!J^GHxu0`5rr15Hb(D3we*2gZP{kuYndbcPKi zrXrV|Q%+A(pH@8WZZg}>{26Q% zB@ysIf=HZ}gPN1fm!DCHT_PMiHVrJ{u?cqcBUgX)KhHh$)YFfD<6HO5UplXHUam^z z4~E=$fD_{dTwt3tVv5E!M3?D5=Y1R7I~s5O+PB&|I~$u1wzRbbd;xc#TUdv&$xooW zxu;Q$^EO_WMq|~YoYT)*yJ5p|t5+_oC@H-4%Qr{kZj%;`UJ!fwa5OCN9S|4R=W%a1 zURsE&k19V$pO>figngG>{((oIcxLmPZ#1=BSdfF^8^#zhrnk@x_7Z22Ct`C}+=;Mg zW$gjUG8_q*ZF(F*Ls7lep7+$h{o92!H0d z1?TiBDa!X3Z+EuaDmt=r$Q0kNxHGzTS}4R=8&EI+HNLLpmn# zzc{A13zXWXkpn^lGD8Q5kUVCni6d4mpmR7b2`FM<9QsJ$B*T(qI4YOmn|Y3K(3KYr zrD_Ih8q6W;n9&CrB!VcW#48T#j0S?V-0_{eZIC_7>}ph=kM)vw+F)gk+?4u^F|T@46kBpEa5wR!M13; 
zSFQD{G^`gAJyqY**{L-`CBRAa%IzOy%^(^*aGgjpTD z;*$5QUSs{^WB>izzn(EWb0P`!hUl~KC*X+05+SWor8Tm#BhSJvi}r-hWp!4#1NwV^ z@Qbg0?KU=X{Mhv$DJ#q=E-VBcJ36`@c_n+^KJe8qfBTh}H=l9Fh7W!C1HM29>nLdV zjV5bjW5=H!deY}rUvS|i3#u2&X7RkVbO-ItXs0NZPlW^13|olSBT3g&p>jsc%CG4R zHX-myhAJ7UX0WM|s%G4a`IS~1#_>=U^M(8$_}~R!{L0y(eEz%Neb8XcB}lSX zWAyt277L!(M1aSLocLOabKZBh)0OqmAAir99q}yiJr0FE1k}@+hy;yr7G(cHf}g5l zL3J#kRfUK!!FFLRMa*9?4=XKH=vY6na4L+xL;y0@3os{B3mYy~+!Ijs`@<%i11+E? z7POmHU;paOME?55|32{ZU;d7b$}G-;xZbL>IHFo(m`S5vuQpo~MtfLg^GD5Z?``}3 zkAHjq#aI2}SAWM4|Np*u%ezlM-mDFmmlgm!2kKiI_cnk2=C5tt@-|VbKl`~)xN@u( zo8IGf8;v$%VchfGpS=Bc-J*pnuKDP7yaqs(tkU9S&b`JXb^TG9J|mLwoa=HKxke>> zs6ZqmQR-VUkis8SKZkBezusJ?6&TEsmVBt(G@ePpqa;T=9WKxT5yV}Uxca(l+q>F- z{Nww6`oO~&0o;1)$0CsgCKQBqh+wrEBQyk2wM;>O;L~6DE-YcoN56aapD%Ab?YI+8+~CS`5F8a1owvu` z)!}ZeZ`-}2u6Eynx&!;e(HLG+=bwMZ)t6sfn2prtjYPdJm%(P&zy8|xYd-Snx&tlQ zxvujsICtUVihf@MCJlCbUj6>gyYBq?3(vnRSF;M>dsp#B@*_1JQRYcUzaV&gC&kzsu%0>)$c9%H|2HjhA&PUIf8kchinIgQQD*u1jE zD`FbTqG&7x5TK7IEvktUwiF?&t-aMB3gg!;WKv?ly8F+0_ZbCwMc?}0@9o@Pzin^* zumAXPjw6c+V2oG~{)peBiUyDZMWDf|`DLfP<0RB)ix{$;B*%kS$|Y4ouK5n@!R z@L4px`O}}d_M_MAcysHLWlIpsa1BJ#Vj_lHH28tZCYDbk7&X(yD9e_@Gr;0>;xXOt z_StexXr@@5V{=wfMx~2uEYY|j*XeYaRc%epu^_u2S`ujNarrUodb*oeE-U@hZ-4mI zzyABT=l;33u0D(lH=BOzRXH}p!aP@Hap}s%OBODyTCk)-Z!-qs@sPhOE6bV)2fDi2 zEmlWPPPWVCY;AY{`1ad(?X9`(J9pf1+Yd_0iUJ{@*XzR@$z;kt^^}vYx%S3YYnEe( zhUTA+5iv6c4c`#8I6vxNQ{(iriV9dasD9v`$DtAx2+2A9*bGCY?7_9{pPvY}7USdcyf-^94cl!KutMkCWkK1_x1Ov3+; zS<)4kU;W329zE&AQ#;xNx7_m6i!Z$9l^1szb@`F7L9HR6mNgg&eEd^4J@NM^I=h57 zjI`K@h!v6eAcCrjuwCp>O(35L=Ag_*)r+dmqOI|>Nq+zr^{)w zq4!duHBVrcB_?H<^8-c9K!pb<+g3BH6q>VWkd;a-x#F=9GeVSZh)Ap|V2%b>0#~dK z5XB5Y1Q7`eghgt3X(0(69bLpo#WJ606b>iRQ}eRy*;+QHhFsA-Vktm`KUI!7#Q_0Z zqYuA1^*A#~EWg0xuU&`&(lP!bvyu~;0u2sc#0EWqSYyYnKkl)|9{bwYzLB42d-bJ* zmt6Rjk6m}$&bK->Dpw@J3@(;i;P}A#@4<`}`a7>#x|_yMxme2oBt_(!qP z2B;(w#D=mc)rD802%3*nhd?F351xU&LXZ=?jZi7jMH2QG@kn7&J`q&+?%hqS2w_rB zej!rn1{O=C&Xl1``hA%d_A8A}k^{3WZSitXlJFTQHYPD-kdGN?d{(LyQXnggD5x2_ 
zKp5hR&1V1N7r*q+XP-akoU_r|KmCtaF1_>M)CTaS=*Ewj5wd5lIf&%VsC$g7H8@JyM+%2+V|CnBDGRi?XJsW&#N2 z=H|0huw~13H1}w`gki3b;vkF=qsTHwNM@jP=Ff?7cKtn?|xTNRc5gn%vQ~otqmW!;)m}$`-cDf#&7=p z?5@v!_J4Qo*#FGa{{euQkQs;uK?GHy&L)&r7;i;_zNpm1arfyBiFcfF^46`Jt>~Fd zM1>LIdc<5Zrg1s)YxmV}e*MjWUqx&Ne=umanwcN6W+3L(0!k3s_?F};wDCf(g=7VF zD8J+nNCaRBH*r*S#MY29fj~Th%49YHYQ&gj=7<~?6GpHSDncf;N-w@BEP8NG_oH}# zW9^u!mBfkik(Y6{P=Kb>s7+ELkKwet@}-<7nKLcV&^psHWX7eMS0AyRa4u6_n!Bz*7l}NukLASi9Ym)Cx89RCyzgV{^G@zci!=n+S-=0&U$BY zNq!8EWujLxO+@TMv(66oVxFk-x}!@MuiLS0zo$QzlT#1~1mfCYdrMDCbH_u!d-R3p z_MpGF+EhJ#!MgfJmvf%bkkK0e5#K_+1tZaY2^V#_FC>9V+>;n6ri&8fNcl+;#e^ir zVxrTPhtL&Z*GDw?$c@w`ELsN&PyyH{HiQF4Wem%VE>Wl$4+k;@w#Gy2k;#*WMDl2v zXwym9N~>+6^~`jBCWcUmDhk~^G;@GoVG#$yfN|Es>ZQ-W@|@ljJ#ORT@_D(NU)$cW z&u!JI-gsmG%P;QFc51%))!*N9=kHgnT(n{Z0h8w~S+a;-R7DIxAOi5goiLX0czP`Q zs$c!=DTJ)f&dwb>c6N95_H+mOdQ?bP$V+xhb}Z;6D&dCp1Z?n8j9H#oj78`YE=l~L ziQhCy1QS9SI0c=OE^7d@Xbb28+vMqJ-%K-&r^U01e&l;MN$*|6!~OHQ-GfQ zk?>C3RE{~pfeeBz-o+z7$i)CSh7alS!7i~7C!vAhhL|?SvF+3?S-KochTi_3ifUJW zQPxG5ocq$hw>|mzj*HGer>cD6Yp-r@X>IoSI$rp1&A*|D#1chE^TDm^1+w z!j=SqL^~9e3B(d#GU~8cDWyqQ;FzSQ2ZR%-^R<>O+iJRcysje2RE_x8m`4cM8YnIO z6)H4ln@DpK7*d&X%nc4qGi=GYBfn!Z1BOMSWa7ZvD7~Yz!myzPLsF7FNJ@Q{R0`85 zPs^5CLu#o^JC{rrWTFP(TY3=YWda_K+i*NPS@-q(U`a6K%XSd!*a1A5jP^Ueb8o;K z?#Dr}XMgj-`aQd9dwP1CnhwCA zE8aUXM?~ftc=}B1ABCRbIWWzzHGCsV>KxzzgIN|vpjCJp@~5nFeq3ibk^dQK9wVlgS{Die8L^7Ws-17|OC-8&4t0H_udC0K?aa~X&1}Tu4|uV6K=&j(oQ-A{ zB4tBaJRBj9YCx_KPSo~X)L428n+X}(T001B`k&`scy-gRP!t;o<9Qce@a^00VpD4% zSOO-#qzB{Kcs>QI=D=$?Jc$_!TNC~9lYT&lf5I_S(*z~*lQL8a8?&CdpWzS}B@Bw? 
zWM`L^mG0QPDHsT6=ZGy)HQG3i$!>4ERv*%{QipMAgsnf!l5oZz-G=OJb5@p1(wK?J z5ZG!7UaE)UH2(KT?-oPsC)LC0R2t8!VQ7Z@9jpW6$0L&prRzwjFysUZ6~6v*#bT zVbyil-*oy}=OReqd&Kr(Lavl#t-)^M6_ekxBv=pLQT}E;2aXVI4TXjb=gc?>9w{5B zH}ux9i5bZUp~S`=nt{E1*|L{5ZSr{o1cz}svki;vMa3`od%FEWuhrsIGk3#4T^jy4 zPvdBVbps$H$!+WbiEbX1x@2p>luR;Sz)+rPS%kg9^s1%HRvAGdow48Rarde2|Jk3H zEnRl{NyoEIcF6BeB>d>tFt8`=1N-S1iNUI2Rv7jA!?=9!*u8IeP5q8twY7BzeL(<; z&9E&wh4W87<@8G~xn%8{mB?s_R;bmmx)4e%;c*nh0ca4VWXvKRl^3^@ml@e13Q>o1 z;0VFi;f-d>)lg)aM#|DaN*3RyvHqE>N>W{eK(Hsae*K2T&wi1J0U*E))>ydd#M%z_ zxZU1dm(ZJ|XlLGr8eRaDOcVJ~93c^jnHZG-S1eWvc?-!4ktY!&9EHfbVBRu~+S=XY z%W*l~ZeKX2`tsNADl05pv2@vS>sHzAI!G{^90*E;QSR;S*ZtM74)9AjL8BNybO ze}sWpvU+~?!o};@eQ5m#&de4YN-wnecqSpl;PSFjbRTfbnxjWS`tK) zM;n5OcCeSORK=v-0H6* zLM=lXO%;1AMiX{hc42;b>%j)B4l2q@;}LClxi1ml+0)%EgeoCG32}<>%}|C^CE`g) zi*?|aY+Hb&g{z~C5b|#TCJaOkL`_bSl`B^K`?(j@nCr(p74!3pOA7z_)XOam{Wst8 z<@pO1xcfcfP(o)gYjF%BSeKYvN>fo`o*@87h*y}H;UFzwQTj;UI(U7HO&N(_h+HSp zgB%DMI*nLW96S!44W=l6=LQD`f1IX?TI3Xt)XIjYq~4v-xfmoPd`7BAZhf3IBqQx7?LHgsJ=wn2oEK1>Hm1ZISDGg24RL=mL{7-6D^ zS2jfHH*Pq^X)}Zak#NwbRx=Ou8_a6G(a_S~6N%~Vj)L5x5@)Up`w64oC|k6P+Qf5; zDJNDDR4oG*)$w|OQGB%WLV>WCm)aEsu!Ak=Ccm6~Lp(3PAWBko9rn^rkX=glBh7(1 zBW+Fbmr=atDM#8RcNlm>6H`hYL#JW{BQ_zM;0Ya9l@^zGwR@~4>ykwUy?x#8T-*;; z?X4}6rby-)%#{iq{);t)WyvlO3y#E|XXUZHtjcV%F&SjrM1#rH+0h@32Hj}j`#pNS zS)dL6KN85u;?_m zbL?agViR6|F@iG3SFT*al(DzRlb2UqT2i)X(ZakuQ%g&W%neD*lJ)l!8vjT^LMvZc zS!uW1+uAz1y1Mw~V9C$r%0@@!_p>dd=n;X0BX`YU_on|nc6#dAeA1&FwkF!CjGSSQ ziPkgM^E1EGVqDHF&Y-DYxWH=FZ+Ua4PHRSAl%JoQmzUer)F_2N23EwZ52g+K$it=! 
z4ikc2DheGW_@>4@!45p;@+i&(#VWAk=(!ibhT%VEzi$ZKnB4TZ4s6p$h* zj1J@HnB2ouBAI9L;w79H6_!+#vr4#N)vD!emeJnciieP_K4)bhnOQkeH~yTD^{%BzptaeFGyTKv)P2S)z{bS z^Ld3vo6TA!?fTHiff>m}nK6TxQm7mWxv*VWyJjsr=2TS7TfKUn)n>`d&%ytvyPI(J z11~2c9_Tn&_K5x+?9cSSN0bPqu^}8dwqPp>RYUk7P;6-8v6@1-P?iO?XqvEWh(yBc z)~@gGQ|+xefOjE4W3ySjKCj!;PatL?m8HN_l4HXr9=a=V2ssT~E6wWFYpfPy-Tp?q z%}FUL&&o2oJ$>9FKQqNqAS>CSX(!2sl#`>91IG|-jSfts53{4P??-tkVNEWKsRWb* zN++IpGVru@%PzCYj&Z51H4g>@1Q#U3P{xkQOkxnqSVxG6s;YSoNA?TPzv}e_9d?H+ z+nMchBF$lsD$B&9(5c)yHaTz%!B%oC4IO53-zPJS`6`f8KGJ^V(=Jo%k#7x_o9dJ& zSx?$Yl7Q5vCvi?al#;(O=3KOBaY=E(rdPK{acmBR^7He9L6rwZ`r(!ZNn+%(6i-bd zF?p#osBs^XW`6`1w60vSW^YYLf3J@@DU(n~maVI+L)wx@CSkl!kgR{a1;)xp^TQs~ z-q2WtDEANJz%c|{hduYKUN917wf(UhXW@=aXqTXr*&Ly==FeYv;6SsdKbVMX*{ued zIL!b6BLqoAK~x9DR7Yo~zL0?i* z+}+jbcK2b~PQVDFI4Fn0fm9A0i?GE{Whx7%a-RIv9eOfswenAQ>MgloYRY)dhZO-x z97@LI>X$hrRN<09MF^S`j^B8&xvROUGZEJ@iNno?#Xc#U*okK}{nK#IBf}Qpi0co| z&!?Pp8ljlCzPZzGcj6&bkYCW>-{>x%K< z#gU|EA`dS1Vny8!xWKR&etog{#*HVUYkF(zE*yGTTQnL~{rx>?&5@W;-cO|RfxNPa zCM_pwR<2lEQk?(RmR+d$X^YLHL!nS-XNNG7r~b)QPb;)A&d(&sSBlIg2aYY+I`W*( zRA3sL6HEZXENK=9)~lB+SwdX-*EhXIXh0)}N!8WefuU(k1a_yk3H%-;v*Y6fe}G4a zGk4AE+I~tdcLTrC>V@3A8Zo10}Y~pawr@)#DQZDwqzk_r0+wFnZC18kZaOP zu9>DErIM^Kmu7QVL9l&vPDx4Wj_tb%1CRe8!Iztw8h{mcotOly$Wjo+k<<{c@XFqJ z!YSQdfrHJhAYgWOHZdc5dwT_LN+Q$r^~Ufu%``uUZ&gX1EC-G`*qSV(rl*`dJW}f# zs_ja>JA3B?1`0ru?89s}o7b;DuDPkLrKR0!wU(9@H#8gwhr=KXp9x7@DGwz;3FJa4 zxABCNO(y+YTec(L*=%-`N!``eNwvZ>Yyy&8O5Muo(aV8316y-?qL3zVrCrYU+XX75 zd<0&Jkb8LcfFR;R+{Uh5w`R@S{{BQ`V-umW@bBp+nzGj~#36A^sDEgK{4^joxgffq zMyDqDUydvP<(J>ostKTKW52SdCMJmju_V|cf9gkS^RO@Z5mJ*<9xC+#hdECzrJT9%d&O;LfL0KVNZq@m&?kTxtpwN*ps2f zhY*lt04HM~l1L=skwA>Jk7TZnR5kb^0F|(kCoZTIyDBF3_#$9@eC^uf*!XqV?zfF* zU14#7#}g2tnd7xx^XkWj)6!c;BrwYB zP<6RKk?rgP7+&05T!I<+4-Q8WXQsyQj{jm33bhvKVuujaS^)LWF*Xv1Af)MrD3-8# z!#0n%uC6%TwD6f-uXhA30@$DFWqV}GO0G;uim=qNWIce@?1sA{*nDU$+Y2{?ZXO2P z)I#oTmxGJKm*R$HGB8$}n2l{);7j}{ZCt^Rx`TNJ!?=4xD(-2}!r*Gue4>z6kqeJ# 
zwqc6d1;M*OB*DFhagf9HY~62|qT2eMfGY7FWP=TXU}Qd1v_Qx6<;(M$jx30G7>J{x z98OD1YiSa=xw@jGO2j^z?M6J#7Cxg+;km1+MfmHQ-!eo7^2l4s()+RwXpMxZhE#)w z*0sV(t=#mb;uxWCP}S8lpw!wSin+7|F+pd_rix_MDs2YGmgIIqM(ph5ULURsknGDl zgAK>OTJ=W>z@OA$x_A)>1t{D?WlRCAUC9xamK$#S-Y3fnO4-AeczF}>SWFZFmN8kp zlEggvTQE$>p@&R0*GC=306xv$l)>swy?r}Y{058r?jzH_Si=N6;YCvdLa85u7AiZW zO#WVel6rg=twx5D=GWUZjUui=?t`PNx}RxV9@`XOujh|RDBXQKWB1x{o|&&&BHW|? zD3bf=DLDsB}_f!c$MOz^7D z?FMGu-+KtgD9&u$*yKM8SS6xvp6Mm&qYIaGPuK`;f_RK6Rq59vh{`O?-tt~4%M(Y` zMV&oJF23b7)}30Nnw)J4GBBA=mqth5t-?`Rk7>dQrs+!rkNkN3cBI?89^WF>8kxpI zTQU@I0Mf8YDpp};onQmeRXmb+%@qu6fK$G-4Tq@0s_pl{wWq#^Rxn1hORGlJBc57g+#Lk`r~-yRzZc3AuqQTeih`yNdqHmVDWL0H_kLL8M~ai+#7q-B&{qq+K`w zbA+4}PFi}h@@Hd~^ld@}zUtZ7Ek)2GFK-k!qBwn_;^yR+!Z93}7H!>8qC#Q)y(!By zNNv!Xgl2X^6q^}XcxeZ5z#0!5wBj?w{ju}+uCd{GZZ4&|S`9z;f%hO3AY_aAn}=b> zp7`EZWRO&YS+X>r3K|btmp>k(r(dv(Wxv_w!`>aet|7JH`)nt7w`Xp19z0aG?Pr}H z&j>=rZiwi^J?O$|+0Cw)EBxLKx>RP6q<2%B1!=-V#&c_kHzq-we2hH>C>G>lPtQa* z5`Q|r-3Pk zjAMxVR`po0HjXr^_RIIi=H{1W`z3km6Qn=)|6P6FYoKMi718o|6O5*WmJZwhc) zit2scmM-psPY6GGF=Lf;>DRN12i;w~oQo!Rp}3^q;zQ7nw8@ZdqMk81PkgLWP`I+Jb%$b4cx%Z-4!Xk`tt zlH2A5ZavjrMjhZ2 z@dq1kS>+@;6S$ibbj_E${^O})3cA3S_Df9Vt{&gs5&2s-C&Zq{H_4clN-LYo=rN9p z5o`ftQuwiKrvI>L_Wan|`Sd|!Oud%7bmmI~u=3bxWT=d5XzW8uYD(RgFwjUs%8px` zu^|C_CWCjWCKynDe1*k2Z7=a&z+p@;R(g^)8uP?(TyxRLRErWv(bVs%#B<`i44;vL zW4Ly<$cRO*D9aAmUNsjm{+q`O%7vpW{QJ+RWLI_g+}6SD;fJ-yd5Z7fi62PR@Hsy; zV|$;j8W|bgEDZQaUJgia?^u%-9<`r7bp6YIGX1D0G+8A~DZ8q~=)yx6&pI$Q8X}5f zlw>ab*_#n1Q{I`%h%wSw!ZmTwoX3tYLrGRYcCp#{!Dy}h=sM`>$n~}BU~b8~djD12 zR}0ne+KU@1zvlBWvyTiL69y8azpi?p86HC(3nQ+Zq*myy;^dlm6#-H&mAAyLqay-hR8GQuAYfQ)Ohha&dmu432zZ3s^p_{ugP{XUi2Yd;nLURgP6M{*Rk5y72{gBdE7UE0l2%osvJ9Xe;8 zLWHnT4D$=U)1f!@_sUFDq1|J)G5vRv?J#^c)^ZQ8HW1P zqYL1W)L#?H!_-|({W?(VF{m$>6>Xp><5W?m6 z#kFOS-$hE=+Se!XwdXO{An)^j{eW%ILQ0Ar(}%wn?icYvg=ki!2T`Nr>@oTLSv`?j z%!mvtDlCn6c&%}0T|ME-lw!D;db-&-oXyH*n1Ovoj#riXW_}wgX3wL$!mY`vhTNSO zKUAaw;%2PTz8Ff9RdVGpkr5~{MDmOjk`yxPv#eqKVdshfj;^;K(=&?>BE(S2A;9%QX|pm;;kvylw1@SfqFFbx&*SqMUh(q=@ 
zC$9_u7rcEi-!l`?R*$oe8CN%Jmk{)N+x2i;UW#_(J>%%+YL{6EeRc;jDOGB%@!z$* zR_Ti~9db{Cb(Tmc3A%fcoOjXgdLq;>3iMvU?n&4>S1DVTN%Pw8GJ_9a3~Y4z)g?vb!Kokvk0Dy#qA>B-bWqQQDr(S9qN=1*vRv zlxGJj$5N%rF&7j5`{Xx0B@iqQ{3ArZL19^0ZO5~Wr>Ccp(ML1aAP9ue=EU64r_A;! z)x>cj_Y4kycB;3LRn_)Q>l4=68iX90&|jSa20v#BcV4b~P8e&NAiq6m7T@pi*<8?Mx$s^6%^yqN&mVBxU}zj0>re*s8RZF26xase_pBfJdS+v z8xAQuIXC6Uz4(p_4FL|!Y2wI>5)qv^z-|{Ws?nL*^NzHGsq#;1PZ1c0c_Pf$!*Sucdq$`m(Tg}FGwS3FAiH=yl278r`Nf4tz;(SAV`mG5v-<43YHFxBJ4^% z^Ct7Cywx^mZ`R_^yRfHIAlWl7@arH`I_Q(NwY8z2J(m%pKA@a`*?H;C54^MUm&ZWZ5LS<=8=^Pe zq|QRC_>>SY1KO#8b+#B#$-FH)_y1now%d!4x~@H{4cg;u4N!KC3Eyh>|4bC!#Y#>l z!)>po5RvhzbHbq>>+^fbug5j{+0sVX|jf{4w-v^cZPT$x0c5J zt>JRo4C6^j%QAXvIF?IsH1A^2U}bM;D5~s|A2-jCKu0>{hLh*;f+^oAHUy8BMFvNX zmx&+qI<9|c!(3$@!T==5RQraGeQo@yU%|DbZEwYM;rIR3#9NNRKR!NnB14&_XZvw4 z6vTyIFAkH^qM;7w3&TRSiijOQq3JRL4KW|CTUIZ^Ts!ZTd+~*hKb{8O4Ae)(ukeEx zNpJ)sj2nKnF*uK1JV+GmzFZWGF|ovN>E)YP4U4!5y$4dFSr>1l**UsO9c*v?SRF@_ z7x$Ko?2&d9G=4FurLc(nwS^D2M4~ZGOny68pBVh46`5RIUY?&nga9I8VLwv08eWp8 z>T(cCbkhY7l>a$iZqPkjj#J~u!31YS4N2JJ2_=v8|MZt~NL?zP*dtKFy0 zmmWKbhnu+gP$=3y2M^>g&{u3=b!mg5(d{;~)%96`^+uj+%kTaTA%dE{%_e;QeBBF2 z1oDvq>I+)r8&%sz#r(j5bkv(e+?7BluM>(9e--i+5L9HDVIVI}75#$7;82 zf1nZfarqz;DZzLL}gBLc%k ztrin`RaR2Xqg%Cn_aeyc!}d$D9bCUZ86hO@HCZafo@k_ zTl3-8T3VXeIsQ4_$PHA|FY}a54tqznbH+X=6db#cK~fI^J3qqgpV;4#41U~K8gbhX z_eg~THO-IeR?|VQoR+|sgn0JE5sH0ILtT93<^o2lr=-k&OtHyWE zTtD(WjZ45KsyBEqkX#CQnp*XDja>`@xY$jz7KefcHKzjEUM=$QG(gk~2O+|K%GN9hACJ?lwLS_Ab17yKC3#Z1p{0OHMs+ zGM)La`KJ)whH`Las7)?79KTwYkUTPLQ49mE+wC?1@_cy)>RdyTnmqmSl?`z?`36R= zsaC(9f4#Wa?CFusoAs8I)KS z;mR0|=7Vzdivqr2o51Nd48ue~zkp^jfTpgeSG3}i9(!r~T?rlmJC9~rfMviMS;*HA zJh*<}Ze>4> zXWcofe=%Tq!O24=qXJ~t`bA+J2dE!}L6oy7+5474bX%2?*^}8(O5&79U;7+&adV;|vUgycG6xj?(Bpn)ofDc z!#pzOx@^|wDqnjgQ6&SqNDrw71vIUiq(Ap75aYJ@9qTg6F`9~C(V$Vx;u@2OzYw#D ztK!it8Uzt#M8mL>2vT*Bh^I{ISF4{s4sYy?Td8OS( z6)3>Ci>^NTCy~{mfUnyaTsD%#mQGEzn_z^_rMQbcjgqdoYo5*&Fwa_3QlI;{3zHW$ zS$Q=;uLDxXp9ZV95rn;AV%?>!Q_v>!Qu^am&dQ(k!SC8=dI>Pz7-okW8!@lM8m->*Y~p9$#-;C8ujj4eM(iYP$v=# 
zk0+B@Y&{X^QB8uNz|>HJimi{!zm&fK=|o_dtPC;PCzI8TG^}!Z%O+8b>>9a)ez)o) zs)0iiL*E8G`JVj~MRjnCa787rYF7T_#|XJW*f8-3W%Nj@lA6Jel!QylxrV#QF_V89 z6@u5I2$F*KvSetRJ0=?sbB3X3^oMLusM{ozXcoE2Yy|g&B#7kiw-Yte5wJ|_;qfQygrVbMBVjyb_KsYa(^mGnu&N;sb=I^gpk{G(%%9g`BL7tg z|1W$LNM!V~Lapb4pRmHb!q6?UxJqWM4bj7AGYu?;(%Pv=$TL=P_|L_D&Xz zlNpV!IY_4d5LWl~biE;TY-GM3ev^O6VnN?tFw+Y8@%`pxxnczp0g>#a)u5%jm<`sb z4<6?kU7-h+p2gVN2logoX*OqT0_-?@dzC-o2h{KQe^xhV zh@KF451E9PpF`!W)|LPbip>~P1RA-{A#=$Ya3h%HtBsgA8@`}oNcwEHl}dXuE_De9 z)u6OLmgvN!?W>mATO-;`PcH|^uyk@=Wa%IwpY~I))UJG%=7lW>b~4oBr}r2ojMoA} z-BgsEEuWy}kdWUw=%(B(k}`#CK+WD$zpXKubL_7ny>Qdw!#q+Vus-Ps_mMHQjZ`tqW|&b zwsx0ejd!lx&8$fNqO)3T(2TB_b4O}lQPZWpgY%!aNmhd^Ad>=LERp;w#&Hy#GUJj^ zJsq0@?e3{jRE6bC?bA3IcTzloVx@!fCCy0~bN60R4tSvNFYC8z)dL&Fd_Elh5`Ho< zzO4Vc;{A_hmKCTe!UIE(a&g2_Nrtuch9ua`N3DqY+`I85v(*Vw)qqD>dO<@`F=ATl z97W$Omm{^9B4@^@hxA3J&9eexm7!ti5D?N}kCZMQND$M-jaK&wberi;pxBK8M#8ur zxL$E}QJqc-L|eqo%M)q1Mc;!`%5+5B$o@A79_w1;@n@~*FXuKQ=VFfI-Z^r&05=rp zqC&T9)DCse6g($2{x^_Xbt;mgzqa0|Odpy3_`YAVR3rM3!bN(LYDpPBu63@6PE4+O zyZuw5w`lip@-hDiveYKEQ~3lBn~VT?p*k6d2-)flcHK|y@Vya08C%aWfZx}OGZ2-(jt7;p;)0?tenuHF*)ge{*GW!Uc6V35r|DYC&6dO`2$_Iu! 
zGHfd`@;H7>FnQ83Y3=XI_oM%L;5g7VC6hl4osJk%s#!G2YyF?AS(CH997QsvkhU=~ zW{xF5q?ZSEnIa)Z2(}4+TmlwNLEO?Y3u0t@6Yi8}bauLw-1ybI9V*AzcWfh(Kcu1m zi4f@0&Y~4t+|~w_l%=Ljz;}U}wi2u$Epd}81U&qFdq}|l=bOev+r$5acqozGLam5e z0g{kHxOvX}lr1T={EQx@EVXq{b(VO}p3+djTH%x88HPSUhSVJUo9{;56;Pj;o^Q}b zml`tN;_9|(9X4x$xTUvsN*mMZ#$tGU$oLfQU07wJ{0erl!xB|C`&eW(GHIJjZCEiO z)ts3;oiUD-IA0C?CoIbzmU;DZ~C2Zp+O<3ESWL%UyX7z8y6yVG8d-I zp!p1uJ)GPRabxyq$-YLO4G)TFUlSB-v!zeC(uVvKneC6`qZv<2=i*Zh+jK?TtVHsa zj3D6?jVr@-Zb=xR2$j!ehD`eQrW;-5KutXoi1h@oH|bd-;a!`khPdd@+X4v}WT;GN z;cK!~WM;=@DYAWa;G#XAX(0)kt1V^+rfdDDoucEhzTu>FO!taCzG^6)N-pCJ+-AIQ zK*vrD>1L`Vh!AzX*IFB0Q~jvc;Pd^%iTYd{vPf37AJku%b~=#vF8=5Gg7dU@;qZPA z%<8xR2n(lr!he>M5aL str: + """Create clean interactive HTML with highlighting (matching original demo style).""" + html_text = text + + # Apply highlighting (reverse order to preserve indices) + for span in sorted(spans, key=lambda x: x.get("start", 0), reverse=True): + start = span.get("start", 0) + end = span.get("end", 0) + confidence = span.get("confidence", 0) + + if 0 <= start < end <= len(text): + span_text = text[start:end] + highlighted_span = f'{span_text}' + html_text = html_text[:start] + highlighted_span + html_text[end:] + + return f""" + +
{html_text}
+ """ + + +class StreamlitRealtimeHandler(BaseCallbackHandler): + """Simple handler for real-time streaming with HTML display.""" + + def __init__(self, html_placeholder): + super().__init__() + self.html_placeholder = html_placeholder + self.text = "" + self.spans = [] + + def on_llm_start(self, *args, **kwargs): + self.text = "" + self.spans = [] + self._update_display() + + def on_chat_model_start(self, *args, **kwargs): + self.on_llm_start(*args, **kwargs) + + def on_llm_new_token(self, token: str, **kwargs): + self.text += token + # Update display with current text and any spans + self._update_display() + # sleep for 0.1 seconds + time.sleep(0.1) + + def update_with_detection(self, spans): + """Update display with detection results.""" + self.spans = spans + self._update_display() + + def _update_display(self): + """Update the HTML display with current text and spans.""" + if not self.text.strip(): + html_content = ( + "
Generating response...
" + ) + else: + html_content = create_interactive_text(self.text, self.spans) + + with self.html_placeholder: + components.html(html_content, height=max(200, len(self.text) // 4)) + + +def create_prompt(question: str, context: str) -> str: + """Create prompt from context and question.""" + return f"""Based on the following context, answer the question: + +Context: {context} + +Question: {question} + +Answer based only on the provided context:""" + + +def main(): + """Main Streamlit application - clean and simple like the original demo.""" + st.set_page_config(page_title="LettuceDetect Real-time Demo") + + # Show lettuce detective image like original + st.image( + "https://github.com/KRLabsOrg/LettuceDetect/blob/main/assets/lettuce_detective.png?raw=true", + width=600, + ) + + st.title("Real-time Hallucination Detection") + + # Check requirements + if not os.getenv("OPENAI_API_KEY"): + st.error("OPENAI_API_KEY environment variable required") + st.stop() + + if ChatOpenAI is None: + st.error("langchain-openai not installed") + st.stop() + + # Simple form like original demo + context = st.text_area( + "Context", + "Python is a high-level programming language created by Guido van Rossum in 1991. 
" + "It is known for its simple, readable syntax and extensive standard library.", + height=100, + ) + + question = st.text_area( + "Question", + "What is Python and who created it?", + height=100, + ) + + # Initialize components + @st.cache_resource + def get_llm(): + return ChatOpenAI(model="gpt-4o-mini", streaming=True) + + @st.cache_resource + def get_detector(): + model_path = "output/hallucination_detection_ettin_17m" + if os.path.exists(model_path): + return HallucinationDetector(method="transformer", model_path=model_path) + else: + return HallucinationDetector(method="rag_fact_checker") + + llm = get_llm() + detector = get_detector() + + # Single response area for HTML display + html_placeholder = st.empty() + + # Simple detect button like original + if st.button("Generate with Real-time Detection"): + if not context.strip() or not question.strip(): + st.warning("Please provide both context and question") + return + + # State for real-time detection + final_spans = [] + + def handle_detection(result): + """Handle detection results by passing to output handler.""" + nonlocal final_spans + spans = result.get("spans", []) + + # Pass detection results to the output handler + output_handler.update_with_detection(spans) + + if result.get("is_final", False): + final_spans = spans + + # Create callbacks + detection_callback = LettuceStreamingCallback( + method="transformer" + if os.path.exists("output/hallucination_detection_ettin_17m") + else "rag_fact_checker", + model_path="output/hallucination_detection_ettin_17m" + if os.path.exists("output/hallucination_detection_ettin_17m") + else None, + context=[context], + question=question, + check_every=10, + on_detection=handle_detection, + verbose=False, + ) + + output_handler = StreamlitRealtimeHandler(html_placeholder) + callbacks = [detection_callback, output_handler] + + # Generate response + try: + messages = [HumanMessage(content=create_prompt(question, context))] + + with st.spinner("Generating..."): + 
llm.invoke(messages, config={"callbacks": callbacks}) + + # Show final status message + issue_count = len(final_spans) + if issue_count > 0: + st.warning( + f"⚠️ {issue_count} potential issue{'s' if issue_count > 1 else ''} detected" + ) + else: + st.success("✅ Response appears clean") + + except Exception as e: + st.error(f"Error: {e}") + + +if __name__ == "__main__": + main() diff --git a/lettucedetect/integrations/langchain/requirements.txt b/lettucedetect/integrations/langchain/requirements.txt index faea511..361a790 100644 --- a/lettucedetect/integrations/langchain/requirements.txt +++ b/lettucedetect/integrations/langchain/requirements.txt @@ -1,6 +1,10 @@ # LangChain Integration Requirements for LettuceDetect langchain>=0.1.0 langchain-openai>=0.1.0 +langchain-community>=0.0.20 # For Streamlit demo -streamlit>=1.28.0 \ No newline at end of file +streamlit>=1.28.0 + +# For RAG example +langchain-chroma>=0.1.0 \ No newline at end of file From 61f63d906c4d1bb3e00fcb29b0ed80016bcf0a0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Fri, 29 Aug 2025 17:53:00 +0200 Subject: [PATCH 07/15] Langchain examples --- .../integrations/langchain/README.md | 209 ++++-------------- .../integrations/langchain/__init__.py | 4 +- .../integrations/langchain/callbacks.py | 146 +++++++++--- .../langchain/examples/rag_example.py | 172 +++++++------- .../langchain/examples/streamlit_app.py | 2 - 5 files changed, 251 insertions(+), 282 deletions(-) diff --git a/lettucedetect/integrations/langchain/README.md b/lettucedetect/integrations/langchain/README.md index 6ff8a42..e6070d2 100644 --- a/lettucedetect/integrations/langchain/README.md +++ b/lettucedetect/integrations/langchain/README.md @@ -1,186 +1,69 @@ # LettuceDetect + LangChain Integration -Clean, professional hallucination detection for LangChain applications. +Real-time hallucination detection for RAG pipelines. 
## Installation ```bash pip install lettucedetect -pip install -r lettucedetect/integrations/langchain/requirements.txt +pip install langchain langchain-openai langchain-community langchain-chroma export OPENAI_API_KEY=your_key ``` -## Quick Start +## Usage ```python from langchain.chains import RetrievalQA -from langchain_openai import OpenAI -from lettucedetect.integrations.langchain import LettuceDetectCallback, detect_in_chain - -# Method 1: Use convenience function -chain = RetrievalQA.from_llm(llm, retriever) -result = detect_in_chain(chain, "Your question") - -print(f"Answer: {result['answer']}") -if result['has_issues']: - print("Potential hallucinations detected") - -# Method 2: Use callback directly -callback = LettuceDetectCallback(verbose=True) -answer = chain.run("Your question", callbacks=[callback]) - -if callback.has_issues(): - print("Issues found in response") -``` - -## Real-time Detection Demo - -Interactive Streamlit demo showcasing real-time hallucination detection: - -```bash -streamlit run lettucedetect/integrations/langchain/examples/streamlit_app.py -``` - -Features: -- Real-time token-level detection during streaming -- Visual highlighting of potential hallucinations -- Clean, professional interface -- Uses local transformer models for fast inference - -## API Reference - -### LettuceDetectCallback - -Main callback class for automatic hallucination detection. 
- -**Parameters:** -- `method` (str): Detection method ("rag_fact_checker", "transformer", "llm") -- `model_path` (str, optional): Path to model for transformer method -- `on_result` (callable, optional): Function to handle detection results -- `verbose` (bool): Whether to print results - -**Methods:** -- `get_results()` - Get all detection results -- `get_last_result()` - Get most recent result -- `has_issues()` - Check if any issues were detected -- `set_context(context)` - Manually set context documents -- `reset()` - Reset callback state - -### LettuceStreamingCallback - -Real-time hallucination detection during streaming generation. - -**Parameters:** -- `method` (str): Detection method -- `model_path` (str, optional): Path to model for transformer method -- `context` (list): Context documents for detection -- `question` (str): Question being answered -- `check_every` (int): Run detection every N tokens -- `on_detection` (callable): Function called when detection runs -- `verbose` (bool): Whether to print detection results - -### detect_in_chain() - -Convenience function to run a chain with detection. - -**Parameters:** -- `chain` - LangChain chain to execute -- `query` (str) - Question to ask -- `context` (list, optional) - Manual context documents -- `**kwargs` - Additional arguments for LettuceDetectCallback - -**Returns:** -Dictionary with `answer`, `detection`, and `has_issues` keys. 
- -## Detection Methods - -| Method | Description | Use Case | -|--------|-------------|----------| -| `transformer` | Fine-tuned encoder models | High accuracy, local inference | -| `rag_fact_checker` | Triplet-based detection | General purpose, no local models | -| `llm` | LLM-based detection | Flexible, API-based | - -## Examples - -### Basic RAG Pipeline - -```python -from langchain.chains import RetrievalQA -from langchain.vectorstores import Chroma -from langchain_openai import OpenAI, OpenAIEmbeddings -from lettucedetect.integrations.langchain import LettuceDetectCallback - -# Setup RAG chain -embeddings = OpenAIEmbeddings() -vectorstore = Chroma.from_texts(documents, embeddings) -chain = RetrievalQA.from_llm( - llm=OpenAI(), - retriever=vectorstore.as_retriever() +from langchain.text_splitter import CharacterTextSplitter +from langchain_community.vectorstores import Chroma +from langchain_openai import ChatOpenAI, OpenAIEmbeddings +from lettucedetect.integrations.langchain import stream_with_detection + +# Set up your RAG pipeline +documents = ["Your documents here..."] +embeddings = OpenAIEmbeddings() +text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0) +docs = text_splitter.create_documents(documents) +vectorstore = Chroma.from_documents(docs, embeddings) + +# Create streaming RAG chain +llm = ChatOpenAI(model="gpt-4o-mini", streaming=True) +chain = RetrievalQA.from_chain_type( + llm=llm, + chain_type="stuff", + retriever=vectorstore.as_retriever(search_kwargs={"k": 3}) ) -# Add detection -callback = LettuceDetectCallback(verbose=True) -result = chain.run("Your question", callbacks=[callback]) - -# Check results -if callback.has_issues(): - last_result = callback.get_last_result() - print(f"Found {last_result['issue_count']} issues") -``` - -### Manual Context - -```python -from langchain_openai import OpenAI -from lettucedetect.integrations.langchain import LettuceDetectCallback - -llm = OpenAI() -callback = LettuceDetectCallback() - 
-# Set context manually -callback.set_context([ - "Python was created by Guido van Rossum in 1991.", - "It is known for readable syntax." -]) -callback.set_question("What is Python?") - -# Generate with detection -response = llm.generate(["What is Python?"], callbacks=[callback]) +# Get context and stream with detection +question = "Your question here" +context = [doc.page_content for doc in vectorstore.similarity_search(question, k=3)] + +# Stream tokens and hallucination detection in real-time +for event in stream_with_detection(chain, {"query": question}, context, check_every=10): + if event["type"] == "token": + print(event["content"], end="", flush=True) # Stream response + elif event["type"] == "detection" and event["has_issues"]: + print(f"\nHallucination detected: {event['issue_count']} issues") + # Handle detection - log, alert, stop generation, etc. ``` -## Production Usage - -For production applications: +## What You Get -1. Use the `transformer` method with local models for fastest inference -2. Set `verbose=False` to avoid console output -3. Use `on_result` callback for custom logging/alerts -4. 
Monitor detection results for system health +**Token Events**: Real-time text as it's generated +**Detection Events**: Hallucination analysis with confidence scores and exact spans -```python -import logging -from lettucedetect.integrations.langchain import LettuceDetectCallback +Each detection event includes: +- `has_issues`: Boolean if hallucinations found +- `issue_count`: Number of problematic spans +- `confidence`: Detection confidence (0-1) +- `spans`: Array of problematic text spans with positions -def log_detection(result): - if result['has_issues']: - logging.warning(f"Hallucination detected: {result['issue_count']} issues") +## Live Demo -callback = LettuceDetectCallback( - method="transformer", - model_path="output/hallucination_detection_ettin_17m", - on_result=log_detection, - verbose=False -) +See it in action: +```bash +streamlit run lettucedetect/integrations/langchain/examples/streamlit_app.py +python lettucedetect/integrations/langchain/examples/rag_example.py ``` -## Requirements - -- Python 3.8+ -- LangChain -- LettuceDetect -- OpenAI API key (for LLM-based detection) - -For local transformer models: -```bash -pip install transformers torch -``` \ No newline at end of file +Perfect for building streaming chat apps, real-time APIs, and production RAG systems with automatic quality control. 
\ No newline at end of file diff --git a/lettucedetect/integrations/langchain/__init__.py b/lettucedetect/integrations/langchain/__init__.py index 1de2c08..332cdcd 100644 --- a/lettucedetect/integrations/langchain/__init__.py +++ b/lettucedetect/integrations/langchain/__init__.py @@ -22,8 +22,8 @@ print(f"Issues: {result['has_issues']}") """ -from .callbacks import LettuceDetectCallback, LettuceStreamingCallback, detect_in_chain +from .callbacks import LettuceDetectCallback, LettuceStreamingCallback, detect_in_chain, stream_with_detection -__all__ = ["LettuceDetectCallback", "LettuceStreamingCallback", "detect_in_chain"] +__all__ = ["LettuceDetectCallback", "LettuceStreamingCallback", "detect_in_chain", "stream_with_detection"] __version__ = "1.0.0" diff --git a/lettucedetect/integrations/langchain/callbacks.py b/lettucedetect/integrations/langchain/callbacks.py index bdc76e6..e2f3983 100644 --- a/lettucedetect/integrations/langchain/callbacks.py +++ b/lettucedetect/integrations/langchain/callbacks.py @@ -1,6 +1,8 @@ """Clean, minimal LangChain callbacks for LettuceDetect integration.""" from typing import Any, Callable, Dict, List, Optional +import threading +from queue import Queue from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import LLMResult @@ -124,16 +126,16 @@ def reset(self) -> None: class LettuceStreamingCallback(BaseCallbackHandler): - """Real-time hallucination detection during streaming generation. + """Real-time hallucination detection with JSON event streaming. - Runs detection periodically during token streaming, enabling real-time - feedback about potential hallucinations as they're being generated. + Provides true streaming of both tokens and detection results through + a queue-based system that works with any LangChain component. 
""" def __init__( self, method: str = "transformer", - model_path: Optional[str] = None, + model_path: Optional[str] = "output/hallucination_detection_ettin_17m", context: Optional[List[str]] = None, question: Optional[str] = None, check_every: int = 10, @@ -163,8 +165,9 @@ def __init__( # Streaming state self.accumulated_text = "" self.token_count = 0 - self.last_checked_length = 0 - self.detection_results = [] + + # Queue for true streaming of JSON events + self.event_queue = Queue() def set_context(self, context: List[str]) -> None: """Set context documents.""" @@ -178,8 +181,6 @@ def on_llm_start(self, *args, **kwargs): """Reset state when streaming starts.""" self.accumulated_text = "" self.token_count = 0 - self.last_checked_length = 0 - self.detection_results = [] def on_chat_model_start(self, *args, **kwargs): """Handle chat model start for newer LangChain versions.""" @@ -189,6 +190,13 @@ def on_llm_new_token(self, token: str, **kwargs): """Process new token and run detection periodically.""" self.accumulated_text += token self.token_count += 1 + + # Stream token event immediately + self.event_queue.put({ + "type": "token", + "content": token, + "position": len(self.accumulated_text) + }) # Run detection every N tokens if ( @@ -213,12 +221,19 @@ def on_llm_new_token(self, token: str, **kwargs): "confidence": max([s.get("confidence", 0) for s in spans], default=0), "issue_count": len(spans), "token_count": len(self.accumulated_text.split()), - "new_text": self.accumulated_text[self.last_checked_length :], "is_incremental": True, } - self.detection_results.append(result) - self.last_checked_length = len(self.accumulated_text) + # Stream detection event immediately + self.event_queue.put({ + "type": "detection", + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "issue_count": len(spans), + "text_length": len(self.accumulated_text), + "is_incremental": True + }) # Call user handler if 
self.on_detection: @@ -254,42 +269,55 @@ def on_llm_end(self, response, **kwargs): "issue_count": len(spans), "token_count": len(self.accumulated_text.split()), "is_final": True, - "total_checks": len( - [r for r in self.detection_results if not r.get("is_final", False)] - ), } - - self.detection_results.append(final_result) + + # Stream final detection event + self.event_queue.put({ + "type": "detection", + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "issue_count": len(spans), + "text_length": len(self.accumulated_text), + "is_final": True + }) if self.on_detection: self.on_detection(final_result) if self.verbose: status = "Issues found" if final_result["has_issues"] else "Clean" - print( - f"Final detection: {status} ({final_result['total_checks']} incremental checks)" - ) + print(f"Final detection: {status}") except Exception as e: if self.verbose: print(f"Final detection error: {e}") + + # Signal completion + self.event_queue.put(None) # End signal def on_chat_model_end(self, response, **kwargs): """Handle chat model end for newer LangChain versions.""" self.on_llm_end(response, **kwargs) - def get_results(self) -> List[Dict[str, Any]]: - """Get all detection results.""" - return self.detection_results.copy() - - def get_final_result(self) -> Optional[Dict[str, Any]]: - """Get the final detection result.""" - final_results = [r for r in self.detection_results if r.get("is_final", False)] - return final_results[-1] if final_results else None - - def has_issues(self) -> bool: - """Check if any detection found issues.""" - return any(r["has_issues"] for r in self.detection_results) + + def stream_events(self): + """Generator that yields JSON events as they arrive. 
+ + Yields events with types: + - "token": Individual tokens as they arrive + - "detection": Hallucination detection results + + This allows developers to: + - Stream JSON events to clients in real-time + - Handle tokens and detections immediately + - Build real-time UIs and APIs + """ + while True: + event = self.event_queue.get() + if event is None: # End signal + break + yield event def detect_in_chain( @@ -324,3 +352,59 @@ def detect_in_chain( "detection": detection_result, "has_issues": detection_result["has_issues"] if detection_result else False, } + + +def stream_with_detection(chain_or_llm, input_data, context, **callback_kwargs): + """Stream JSON events from any LangChain chain/LLM with hallucination detection. + + Works with RetrievalQA, ConversationChain, raw LLMs, or any LangChain component. + + Args: + chain_or_llm: Any LangChain chain or LLM + input_data: Input for the chain (query string, messages, etc.) + context: Context documents for hallucination detection + **callback_kwargs: Additional arguments for LettuceStreamingCallback + + Yields: + dict: JSON events with "type": "token" or "detection" + + Example: + # With RAG chain + chain = RetrievalQA.from_llm(llm, retriever) + for event in stream_with_detection(chain, "Your question", context): + if event["type"] == "token": + await websocket.send_json(event) + elif event["type"] == "detection": + print(f"Detection: {event['has_issues']}") + """ + callback = LettuceStreamingCallback(context=context, **callback_kwargs) + + # Start chain/LLM in background thread + def run_generation(): + try: + if hasattr(chain_or_llm, 'invoke'): + # Modern LangChain interface + chain_or_llm.invoke(input_data, config={"callbacks": [callback]}) + elif hasattr(chain_or_llm, 'run'): + # Legacy chain interface + chain_or_llm.run(input_data, callbacks=[callback]) + else: + # Try direct call + chain_or_llm(input_data, callbacks=[callback]) + except Exception as e: + # Put error event and complete + callback.event_queue.put({ 
+ "type": "error", + "message": str(e) + }) + callback.event_queue.put(None) + + thread = threading.Thread(target=run_generation) + thread.start() + + # Stream events as they arrive + try: + for event in callback.stream_events(): + yield event + finally: + thread.join() # Ensure thread completes diff --git a/lettucedetect/integrations/langchain/examples/rag_example.py b/lettucedetect/integrations/langchain/examples/rag_example.py index be3ff22..96c295c 100644 --- a/lettucedetect/integrations/langchain/examples/rag_example.py +++ b/lettucedetect/integrations/langchain/examples/rag_example.py @@ -25,6 +25,7 @@ LettuceDetectCallback, LettuceStreamingCallback, detect_in_chain, + stream_with_detection, ) # Sample documents for demonstration @@ -59,8 +60,8 @@ def create_rag_chain(): return chain -def example_basic_usage(): - """Basic usage with automatic detection.""" +def example_basic_rag_detection(): + """Basic RAG with post-generation hallucination detection.""" print("Basic RAG + Detection Example") print("-" * 40) @@ -76,109 +77,110 @@ def example_basic_usage(): for question in questions: print(f"Q: {question}") - # Use convenience function + # Use convenience function for simple post-generation detection result = detect_in_chain(chain, question, verbose=True) print(f"A: {result['answer']}") if result["has_issues"]: detection = result["detection"] - print(f"ALERT: {detection['issue_count']} issues detected") - print(f"Confidence: {detection['confidence']:.3f}") + print(f"🚨 Issues detected: {detection['issue_count']} spans") + print(f"Max confidence: {detection['confidence']:.3f}") else: - print("Status: Clean response") + print("✅ No issues detected") print() -def example_streaming_detection(): - """Real streaming example with token-by-token detection - using proven Streamlit pattern.""" - print("Real-time Streaming Detection Example") +def example_rag_streaming_detection(): + """RAG with real-time streaming detection - simplified to show working approach.""" + 
print("RAG + Real-time Streaming Detection Example") print("-" * 40) - print("Watch tokens appear one-by-one with real-time detection...") + print("Shows structured JSON events during streaming") print() - # Manual retrieval setup + # Setup RAG chain embeddings = OpenAIEmbeddings() text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0) docs = text_splitter.create_documents(SAMPLE_DOCUMENTS) vectorstore = Chroma.from_documents(docs, embeddings) - - # Create streaming LLM - same as Streamlit demo + llm = ChatOpenAI(model="gpt-4o-mini", streaming=True) + chain = RetrievalQA.from_chain_type( + llm=llm, + chain_type="stuff", + retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) + ) question = "How does Python relate to ocean exploration and marine biology?" + context = [doc.page_content for doc in vectorstore.similarity_search(question, k=2)] + print(f"Q: {question}") - - # Retrieve relevant context - retrieved_docs = vectorstore.similarity_search(question, k=2) - context = [doc.page_content for doc in retrieved_docs] - - # Create prompt message - context_str = "\n".join(context) - prompt_text = f"""Based on the following context, answer the question: - -Context: {context_str} - -Question: {question} - -Answer based only on the provided context:""" - - # Track detection results and tokens - detection_events = [] - tokens_received = [] - - class ConsoleStreamingHandler(BaseCallbackHandler): - """Print tokens to console as they arrive.""" - - def on_llm_start(self, *args, **kwargs): - print("A: ", end="", flush=True) - - def on_chat_model_start(self, *args, **kwargs): - print("A: ", end="", flush=True) - - def on_llm_new_token(self, token: str, **kwargs): - tokens_received.append(token) - print(token, end="", flush=True) - - def handle_realtime_detection(result): - """Handle real-time detection results.""" - if result.get("is_final", False): - print(f"\n🎯 Final: {result['issue_count']} total issues detected") - elif result.get("has_issues", False): 
- detection_events.append(result) - print(f"\n🔍 Alert: {result['issue_count']} issues at {result['token_count']} tokens") - print( - "A: " + "".join(tokens_received), end="", flush=True - ) # Continue from where we left off - - # Create callbacks - same pattern as Streamlit demo - streaming_callback = LettuceStreamingCallback( - method="transformer" - if os.path.exists("output/hallucination_detection_ettin_17m") - else "rag_fact_checker", - model_path="output/hallucination_detection_ettin_17m" - if os.path.exists("output/hallucination_detection_ettin_17m") - else None, - context=context, - question=question, - check_every=8, # Check every 8 tokens - on_detection=handle_realtime_detection, - verbose=False, + print(f"Context: {context[0][:50]}...") + print() + print("Streaming Events:") + print("-" * 18) + + # Use the working streaming approach + event_count = 0 + for event in stream_with_detection(chain, {"query": question}, context, check_every=8): + event_count += 1 + if event["type"] == "token": + print(event["content"], end="", flush=True) + elif event["type"] == "detection" and event["has_issues"]: + print(f"\n[Detection {event_count}: {event['issue_count']} issues, confidence: {event['confidence']:.3f}]", end="", flush=True) + + print("\n") + print(f"Total events processed: {event_count}") + + +def example_simple_json_streaming(): + """Simple example showing TRUE JSON streaming - perfect for API developers.""" + print("Simple JSON Streaming Example") + print("-" * 35) + print("Shows real-time JSON events - exactly what API developers need!") + print() + + # Setup simple RAG chain + embeddings = OpenAIEmbeddings() + text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0) + docs = text_splitter.create_documents(SAMPLE_DOCUMENTS) + vectorstore = Chroma.from_documents(docs, embeddings) + + llm = ChatOpenAI(model="gpt-4o-mini", streaming=True) + chain = RetrievalQA.from_chain_type( + llm=llm, + chain_type="stuff", + 
retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) ) - - console_handler = ConsoleStreamingHandler() - callbacks = [streaming_callback, console_handler] - - # Use the exact same pattern as Streamlit demo - try: - messages = [HumanMessage(content=prompt_text)] - llm.invoke(messages, config={"callbacks": callbacks}) - - print(f"\n\nSummary: {len(detection_events)} detection events during streaming") - - except Exception as e: - print(f"Error: {e}") + + question = "How does Python relate to ocean exploration?" + context = [doc.page_content for doc in vectorstore.similarity_search(question, k=2)] + + print(f"Q: {question}") + print(f"Context: {context[0][:50]}...") + print() + print("JSON Events Stream:") + print("-" * 18) + + # THIS IS THE MAGIC - Stream JSON events in real-time! + for event in stream_with_detection(chain, {"query": question}, context, check_every=5): + # Each event is a JSON-serializable dict + import json + print(json.dumps(event)) + + # In your API: + # if event["type"] == "token": + # await websocket.send_json(event) + # elif event["type"] == "detection" and event["has_issues"]: + # await websocket.send_json({"alert": "hallucination_detected", "spans": event["spans"]}) + + print() + print("Perfect for:") + print(" - FastAPI streaming responses") + print(" - WebSocket real-time chat") + print(" - Server-sent events (SSE)") + print(" - Any API that needs live updates") def example_with_manual_context(): @@ -217,9 +219,11 @@ def main(): return try: - example_basic_usage() + example_basic_rag_detection() + print("=" * 60) + example_simple_json_streaming() # TRUE JSON streaming! print("=" * 60) - example_streaming_detection() # Real streaming with token-by-token detection! 
+ example_rag_streaming_detection() # Detailed streaming analysis print("=" * 60) example_with_manual_context() diff --git a/lettucedetect/integrations/langchain/examples/streamlit_app.py b/lettucedetect/integrations/langchain/examples/streamlit_app.py index 83e19ac..8ce5766 100644 --- a/lettucedetect/integrations/langchain/examples/streamlit_app.py +++ b/lettucedetect/integrations/langchain/examples/streamlit_app.py @@ -16,8 +16,6 @@ # LangChain imports with compatibility handling try: - from langchain_core.caches import BaseCache - from langchain_core.callbacks import Callbacks from langchain_openai import ChatOpenAI try: From 813a0c870190e0c536beb4341950dfe11cf882bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Fri, 29 Aug 2025 17:56:22 +0200 Subject: [PATCH 08/15] Langchain integration README --- .../integrations/langchain/README.md | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/lettucedetect/integrations/langchain/README.md b/lettucedetect/integrations/langchain/README.md index e6070d2..75864c9 100644 --- a/lettucedetect/integrations/langchain/README.md +++ b/lettucedetect/integrations/langchain/README.md @@ -47,6 +47,32 @@ for event in stream_with_detection(chain, {"query": question}, context, check_ev # Handle detection - log, alert, stop generation, etc. 
``` +## Direct Callback Usage + +For more control, use `LettuceStreamingCallback` directly: + +```python +from lettucedetect.integrations.langchain import LettuceStreamingCallback + +# Create callback with your settings +callback = LettuceStreamingCallback( + context=context, + question=question, + check_every=10, + method="transformer" # or "rag_fact_checker" +) + +# Use with any LangChain chain +result = chain.invoke({"query": question}, config={"callbacks": [callback]}) + +# Stream events as they arrive +for event in callback.stream_events(): + if event["type"] == "token": + print(event["content"], end="") + elif event["type"] == "detection": + handle_detection(event) +``` + ## What You Get **Token Events**: Real-time text as it's generated From bfb3f8dffe6277f7c286e120cc38341540c28c51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Fri, 29 Aug 2025 22:19:06 +0200 Subject: [PATCH 09/15] Added integration to elysia --- lettucedetect/integrations/elysia/README.md | 42 +++++++ lettucedetect/integrations/elysia/__init__.py | 9 ++ lettucedetect/integrations/elysia/example.py | 15 +++ lettucedetect/integrations/elysia/tools.py | 116 ++++++++++++++++++ .../integrations/langchain/__init__.py | 16 ++- .../integrations/langchain/callbacks.py | 89 +++++++------- .../langchain/examples/rag_example.py | 40 +++--- 7 files changed, 258 insertions(+), 69 deletions(-) create mode 100644 lettucedetect/integrations/elysia/README.md create mode 100644 lettucedetect/integrations/elysia/__init__.py create mode 100644 lettucedetect/integrations/elysia/example.py create mode 100644 lettucedetect/integrations/elysia/tools.py diff --git a/lettucedetect/integrations/elysia/README.md b/lettucedetect/integrations/elysia/README.md new file mode 100644 index 0000000..894f324 --- /dev/null +++ b/lettucedetect/integrations/elysia/README.md @@ -0,0 +1,42 @@ +# LettuceDetect + Elysia Integration + +Automatic hallucination detection for Elysia AI decision trees. 
+ +## Installation + +```bash +pip install lettucedetect elysia-ai +``` + +## Usage + +```python +from elysia import Tree +from lettucedetect.integrations.elysia import detect_hallucinations + +# Create tree with hallucination detection +tree = Tree() +tree.add_tool(detect_hallucinations) + +# The AI can now automatically validate responses +response = tree(""" +Context: Python was created by Guido van Rossum in 1991. +Question: When was Python created? +Please answer and verify your response for accuracy. +""") +``` + +## What It Does + +The `detect_hallucinations` tool automatically: +- ✅ Analyzes AI responses against provided context +- ✅ Identifies unsupported claims and factual errors +- ✅ Provides confidence scores and exact text spans +- ✅ Guides the AI to self-correct when needed + +## Tool Details + +**detect_hallucinations**: Main hallucination detection tool +- Compares generated answers against source context +- Returns structured data about problematic spans +- Supports multiple detection methods (transformer, LLM, fact-checker) \ No newline at end of file diff --git a/lettucedetect/integrations/elysia/__init__.py b/lettucedetect/integrations/elysia/__init__.py new file mode 100644 index 0000000..eddb65e --- /dev/null +++ b/lettucedetect/integrations/elysia/__init__.py @@ -0,0 +1,9 @@ +"""LettuceDetect integration for Elysia. + +This integration provides hallucination detection tools that can be used +directly in Elysia decision trees for automatic quality control of AI responses. 
+"""
+
+from .tools import detect_hallucinations
+
+__all__ = ["detect_hallucinations"]
diff --git a/lettucedetect/integrations/elysia/example.py b/lettucedetect/integrations/elysia/example.py
new file mode 100644
index 0000000..096b285
--- /dev/null
+++ b/lettucedetect/integrations/elysia/example.py
@@ -0,0 +1,15 @@
+"""Example of using LettuceDetect with Elysia for automatic hallucination detection."""
+
+from elysia import Tree
+
+from lettucedetect.integrations.elysia import detect_hallucinations
+
+# Create an Elysia tree with hallucination detection capabilities
+tree = Tree()
+
+# Add LettuceDetect tools to the tree
+tree.add_tool(detect_hallucinations)
+
+tree(
+    "How many data they generated in Kovacs et al. 2025? Please answer and verify your response for credibility."
+)
diff --git a/lettucedetect/integrations/elysia/tools.py b/lettucedetect/integrations/elysia/tools.py
new file mode 100644
index 0000000..61e1e86
--- /dev/null
+++ b/lettucedetect/integrations/elysia/tools.py
@@ -0,0 +1,116 @@
+"""LettuceDetect integration tools for Elysia."""
+
+from typing import List, Optional
+
+from elysia import tool
+
+from lettucedetect import HallucinationDetector
+
+
+@tool
+async def detect_hallucinations(
+    context: List[str],
+    answer: str,
+    question: Optional[str] = None,
+):
+    """Verify AI-generated answers by detecting hallucinations against the provided source context.
+
+    This tool analyzes whether statements in an answer are supported by the provided context,
+    identifying specific spans of text that may be hallucinated or unsupported. It uses
+    advanced NLP models to perform token-level analysis and provides detailed feedback
+    about problematic content.
+
+    Args:
+        context: List of source documents or passages that should support the answer.
+            Each string represents a separate context document or paragraph.
+            These are the "ground truth" sources the answer should be based on.
+ answer: The AI-generated response to analyze for potential hallucinations. + This is the text that will be checked against the context. + question: Optional original question that was asked. Providing this improves + detection accuracy by understanding what information was requested. + + This tool performs the following analysis: + 1. Tokenizes the answer and compares each segment against the context + 2. Identifies spans that are not supported by any context document + 3. Assigns confidence scores to problematic spans + 4. Returns structured results with exact character positions + + The tool will identify various types of hallucinations: + - Factual errors (wrong dates, names, numbers) + - Unsupported claims not present in context + - Contradictions to the provided information + - Invented details not mentioned in sources + + Always use this tool when you need to: + - Verify AI responses against source documents in RAG systems + - Implement quality control for generated content + - Build fact-checking pipelines + - Ensure accuracy in knowledge-based applications + - Validate information before presenting to users + + Example scenario: + Context: ["Python was created in 1991 by Guido van Rossum", "It's known for readable syntax"] + Answer: "Python was created in 1985 by James Gosling and is known for complex syntax" + + This tool would identify: + - "1985" as hallucinated (should be 1991) + - "James Gosling" as hallucinated (should be Guido van Rossum) + - "complex syntax" as hallucinated (context says readable syntax) + + """ + try: + # Initialize detector with transformer method + detector = HallucinationDetector( + method="transformer", model_path="KRLabsOrg/lettucedect-base-modernbert-en-v1" + ) + + print(f"Context: {context}") + print(f"Answer: {answer}") + print(f"Question: {question}") + + # Perform hallucination detection + spans = detector.predict( + context=context, answer=answer, question=question, output_format="spans" + ) + + print(f"Spans: 
{spans}") + + # Calculate overall metrics + has_issues = len(spans) > 0 + max_confidence = max([span.get("confidence", 0) for span in spans], default=0) + + # Create structured result + result = { + "has_issues": has_issues, + "confidence": max_confidence, + "issue_count": len(spans), + "spans": spans, + } + + # Yield structured data for the AI agent + yield result + + # Create human-readable summary + if has_issues: + issue_details = [] + for span in spans[:5]: # Show up to 5 examples + text = span.get("text", "unknown") + conf = span.get("confidence", 0) + start = span.get("start", 0) + end = span.get("end", 0) + issue_details.append(f"'{text}' at position {start}-{end} (confidence: {conf:.2f})") + + summary = f"Detected {len(spans)} potential hallucination(s) in the answer. " + summary += f"Most problematic spans: {', '.join(issue_details)}. " + summary += ( + "The AI should revise these unsupported claims or provide additional context." + ) + else: + summary = "No hallucinations detected. The answer appears to be well-supported by the provided context." 
+ + yield summary + + except Exception as e: + error_msg = f"Hallucination detection failed: {e!s}" + yield {"error": True, "message": str(e)} + yield error_msg diff --git a/lettucedetect/integrations/langchain/__init__.py b/lettucedetect/integrations/langchain/__init__.py index 332cdcd..3e78ef4 100644 --- a/lettucedetect/integrations/langchain/__init__.py +++ b/lettucedetect/integrations/langchain/__init__.py @@ -22,8 +22,18 @@ print(f"Issues: {result['has_issues']}") """ -from .callbacks import LettuceDetectCallback, LettuceStreamingCallback, detect_in_chain, stream_with_detection - -__all__ = ["LettuceDetectCallback", "LettuceStreamingCallback", "detect_in_chain", "stream_with_detection"] +from .callbacks import ( + LettuceDetectCallback, + LettuceStreamingCallback, + detect_in_chain, + stream_with_detection, +) + +__all__ = [ + "LettuceDetectCallback", + "LettuceStreamingCallback", + "detect_in_chain", + "stream_with_detection", +] __version__ = "1.0.0" diff --git a/lettucedetect/integrations/langchain/callbacks.py b/lettucedetect/integrations/langchain/callbacks.py index e2f3983..35bb2d9 100644 --- a/lettucedetect/integrations/langchain/callbacks.py +++ b/lettucedetect/integrations/langchain/callbacks.py @@ -1,8 +1,8 @@ """Clean, minimal LangChain callbacks for LettuceDetect integration.""" -from typing import Any, Callable, Dict, List, Optional import threading from queue import Queue +from typing import Any, Callable, Dict, List, Optional from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import LLMResult @@ -165,7 +165,7 @@ def __init__( # Streaming state self.accumulated_text = "" self.token_count = 0 - + # Queue for true streaming of JSON events self.event_queue = Queue() @@ -190,13 +190,11 @@ def on_llm_new_token(self, token: str, **kwargs): """Process new token and run detection periodically.""" self.accumulated_text += token self.token_count += 1 - + # Stream token event immediately - self.event_queue.put({ - "type": 
"token", - "content": token, - "position": len(self.accumulated_text) - }) + self.event_queue.put( + {"type": "token", "content": token, "position": len(self.accumulated_text)} + ) # Run detection every N tokens if ( @@ -225,15 +223,17 @@ def on_llm_new_token(self, token: str, **kwargs): } # Stream detection event immediately - self.event_queue.put({ - "type": "detection", - "has_issues": len(spans) > 0, - "spans": spans, - "confidence": max([s.get("confidence", 0) for s in spans], default=0), - "issue_count": len(spans), - "text_length": len(self.accumulated_text), - "is_incremental": True - }) + self.event_queue.put( + { + "type": "detection", + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "issue_count": len(spans), + "text_length": len(self.accumulated_text), + "is_incremental": True, + } + ) # Call user handler if self.on_detection: @@ -270,17 +270,19 @@ def on_llm_end(self, response, **kwargs): "token_count": len(self.accumulated_text.split()), "is_final": True, } - + # Stream final detection event - self.event_queue.put({ - "type": "detection", - "has_issues": len(spans) > 0, - "spans": spans, - "confidence": max([s.get("confidence", 0) for s in spans], default=0), - "issue_count": len(spans), - "text_length": len(self.accumulated_text), - "is_final": True - }) + self.event_queue.put( + { + "type": "detection", + "has_issues": len(spans) > 0, + "spans": spans, + "confidence": max([s.get("confidence", 0) for s in spans], default=0), + "issue_count": len(spans), + "text_length": len(self.accumulated_text), + "is_final": True, + } + ) if self.on_detection: self.on_detection(final_result) @@ -292,7 +294,7 @@ def on_llm_end(self, response, **kwargs): except Exception as e: if self.verbose: print(f"Final detection error: {e}") - + # Signal completion self.event_queue.put(None) # End signal @@ -300,14 +302,13 @@ def on_chat_model_end(self, response, **kwargs): """Handle chat model end for 
newer LangChain versions.""" self.on_llm_end(response, **kwargs) - def stream_events(self): """Generator that yields JSON events as they arrive. - + Yields events with types: - - "token": Individual tokens as they arrive + - "token": Individual tokens as they arrive - "detection": Hallucination detection results - + This allows developers to: - Stream JSON events to clients in real-time - Handle tokens and detections immediately @@ -356,18 +357,18 @@ def detect_in_chain( def stream_with_detection(chain_or_llm, input_data, context, **callback_kwargs): """Stream JSON events from any LangChain chain/LLM with hallucination detection. - + Works with RetrievalQA, ConversationChain, raw LLMs, or any LangChain component. - + Args: chain_or_llm: Any LangChain chain or LLM input_data: Input for the chain (query string, messages, etc.) context: Context documents for hallucination detection **callback_kwargs: Additional arguments for LettuceStreamingCallback - + Yields: dict: JSON events with "type": "token" or "detection" - + Example: # With RAG chain chain = RetrievalQA.from_llm(llm, retriever) @@ -376,16 +377,17 @@ def stream_with_detection(chain_or_llm, input_data, context, **callback_kwargs): await websocket.send_json(event) elif event["type"] == "detection": print(f"Detection: {event['has_issues']}") + """ callback = LettuceStreamingCallback(context=context, **callback_kwargs) - + # Start chain/LLM in background thread def run_generation(): try: - if hasattr(chain_or_llm, 'invoke'): + if hasattr(chain_or_llm, "invoke"): # Modern LangChain interface chain_or_llm.invoke(input_data, config={"callbacks": [callback]}) - elif hasattr(chain_or_llm, 'run'): + elif hasattr(chain_or_llm, "run"): # Legacy chain interface chain_or_llm.run(input_data, callbacks=[callback]) else: @@ -393,15 +395,12 @@ def run_generation(): chain_or_llm(input_data, callbacks=[callback]) except Exception as e: # Put error event and complete - callback.event_queue.put({ - "type": "error", - "message": 
str(e) - }) + callback.event_queue.put({"type": "error", "message": str(e)}) callback.event_queue.put(None) - + thread = threading.Thread(target=run_generation) thread.start() - + # Stream events as they arrive try: for event in callback.stream_events(): diff --git a/lettucedetect/integrations/langchain/examples/rag_example.py b/lettucedetect/integrations/langchain/examples/rag_example.py index 96c295c..5bfae75 100644 --- a/lettucedetect/integrations/langchain/examples/rag_example.py +++ b/lettucedetect/integrations/langchain/examples/rag_example.py @@ -11,9 +11,7 @@ import os -from langchain.callbacks.base import BaseCallbackHandler from langchain.chains import RetrievalQA -from langchain.schema import HumanMessage # LangChain imports from langchain.text_splitter import CharacterTextSplitter @@ -23,7 +21,6 @@ # LettuceDetect integration from lettucedetect.integrations.langchain.callbacks import ( LettuceDetectCallback, - LettuceStreamingCallback, detect_in_chain, stream_with_detection, ) @@ -104,23 +101,21 @@ def example_rag_streaming_detection(): text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0) docs = text_splitter.create_documents(SAMPLE_DOCUMENTS) vectorstore = Chroma.from_documents(docs, embeddings) - + llm = ChatOpenAI(model="gpt-4o-mini", streaming=True) chain = RetrievalQA.from_chain_type( - llm=llm, - chain_type="stuff", - retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) + llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) ) question = "How does Python relate to ocean exploration and marine biology?" 
context = [doc.page_content for doc in vectorstore.similarity_search(question, k=2)] - + print(f"Q: {question}") print(f"Context: {context[0][:50]}...") print() print("Streaming Events:") print("-" * 18) - + # Use the working streaming approach event_count = 0 for event in stream_with_detection(chain, {"query": question}, context, check_every=8): @@ -128,8 +123,12 @@ def example_rag_streaming_detection(): if event["type"] == "token": print(event["content"], end="", flush=True) elif event["type"] == "detection" and event["has_issues"]: - print(f"\n[Detection {event_count}: {event['issue_count']} issues, confidence: {event['confidence']:.3f}]", end="", flush=True) - + print( + f"\n[Detection {event_count}: {event['issue_count']} issues, confidence: {event['confidence']:.3f}]", + end="", + flush=True, + ) + print("\n") print(f"Total events processed: {event_count}") @@ -140,41 +139,40 @@ def example_simple_json_streaming(): print("-" * 35) print("Shows real-time JSON events - exactly what API developers need!") print() - + # Setup simple RAG chain embeddings = OpenAIEmbeddings() text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0) docs = text_splitter.create_documents(SAMPLE_DOCUMENTS) vectorstore = Chroma.from_documents(docs, embeddings) - + llm = ChatOpenAI(model="gpt-4o-mini", streaming=True) chain = RetrievalQA.from_chain_type( - llm=llm, - chain_type="stuff", - retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) + llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever(search_kwargs={"k": 2}) ) - + question = "How does Python relate to ocean exploration?" context = [doc.page_content for doc in vectorstore.similarity_search(question, k=2)] - + print(f"Q: {question}") print(f"Context: {context[0][:50]}...") print() print("JSON Events Stream:") print("-" * 18) - + # THIS IS THE MAGIC - Stream JSON events in real-time! 
for event in stream_with_detection(chain, {"query": question}, context, check_every=5): # Each event is a JSON-serializable dict import json + print(json.dumps(event)) - + # In your API: # if event["type"] == "token": # await websocket.send_json(event) # elif event["type"] == "detection" and event["has_issues"]: # await websocket.send_json({"alert": "hallucination_detected", "spans": event["spans"]}) - + print() print("Perfect for:") print(" - FastAPI streaming responses") From 4b3070fe710521250cf008775ef96128c7bae2e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sat, 30 Aug 2025 14:36:45 +0200 Subject: [PATCH 10/15] Removed unused comments --- lettucedetect/integrations/elysia/tools.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lettucedetect/integrations/elysia/tools.py b/lettucedetect/integrations/elysia/tools.py index 61e1e86..042555d 100644 --- a/lettucedetect/integrations/elysia/tools.py +++ b/lettucedetect/integrations/elysia/tools.py @@ -64,17 +64,11 @@ async def detect_hallucinations( method="transformer", model_path="KRLabsOrg/lettucedect-base-modernbert-en-v1" ) - print(f"Context: {context}") - print(f"Answer: {answer}") - print(f"Question: {question}") - # Perform hallucination detection spans = detector.predict( context=context, answer=answer, question=question, output_format="spans" ) - print(f"Spans: {spans}") - # Calculate overall metrics has_issues = len(spans) > 0 max_confidence = max([span.get("confidence", 0) for span in spans], default=0) From a30b930e0afc5f6349c8219405e62dfad6cf0161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sun, 31 Aug 2025 00:42:41 +0200 Subject: [PATCH 11/15] Added tinylettuce demo and notebook --- demo/tinylettuce.ipynb | 1020 ++++++++++++++++++++++++++++++++++++++++ docs/TINYLETTUCE.md | 636 +++++++++++++++++++++++++ 2 files changed, 1656 insertions(+) create mode 100644 demo/tinylettuce.ipynb create mode 100644 docs/TINYLETTUCE.md diff --git 
a/demo/tinylettuce.ipynb b/demo/tinylettuce.ipynb new file mode 100644 index 0000000..cd95f7d --- /dev/null +++ b/demo/tinylettuce.ipynb @@ -0,0 +1,1020 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b9ea123a", + "metadata": {}, + "source": [ + "## 🥬 TinyLettuce: Efficient Hallucination Detection Small Models (Using Synthetic Data Generation)\n", + "\n", + "

\n", + " \"TinyLettuce\n", + "
\n", + " Small, task‑specialized encoders trained on synthetic data\n", + "

\n", + "\n", + "\n", + "[![LettuceDetect](https://img.shields.io/badge/LettuceDetect-v0.1.8-green)](https://github.com/your-username/LettuceDetect)\n", + "[![Python](https://img.shields.io/badge/Python-3.11+-blue)](https://python.org)\n", + "[![License](https://img.shields.io/badge/License-MIT-yellow)](https://opensource.org/licenses/MIT)\n", + "\n", + "## 🎯 Overview\n", + "\n", + "**The Problem**: Training robust hallucination detection models requires large datasets of both correct and hallucinated responses. Manually creating such datasets is expensive and time-consuming.\n", + "\n", + "**Our Solution**: LettuceDetect's synthetic data generation pipeline can generate realistic hallucinations from factual content.\n", + "\n", + "### What This Notebook Demonstrates\n", + "\n", + "1. **Answer-based Generation**: Inject specific error types into correct answers\n", + "2. **Batch Processing**: Efficient async generation for large datasets\n", + "3. **Training Integration**: Convert to formats ready for model training\n", + "\n", + "### Key Benefits\n", + "\n", + "- **Cost-effective**: Generate thousands of training samples at a fraction of manual annotation cost\n", + "- **Controllable**: Specify exact error types and intensity levels\n", + "- **Scalable**: Async batch processing for large scale datasets" + ] + }, + { + "cell_type": "markdown", + "id": "0086e655", + "metadata": {}, + "source": [ + "### Setup\n", + "\n", + "Install LettuceDetect:\n", + "```bash\n", + "pip install lettucedetect\n", + "```\n", + "\n", + "Then, install datasets and rich:\n", + "```bash\n", + "pip install datasets\n", + "pip install rich\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "95eac334", + "metadata": {}, + "outputs": [], + "source": [ + "# We recommend setting your OpenAI API key as an environment variable\n", + "# os.environ['OPENAI_API_KEY'] = 'your-api-key-here'" + ] + }, + { + "cell_type": "markdown", + "id": "eedf0e53", + "metadata": {}, + 
"source": [ + "### Generate Synthetic Data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e5fb60b3", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the generator\n", + "from lettucedetect import HallucinationGenerator\n", + "\n", + "# The heart of the synthetic data generation pipeline is the HallucinationGenerator class\n", + "# GPT 5 requires temperature=1.0\n", + "generator = HallucinationGenerator(model=\"gpt-5\", temperature=1.0)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a7f90660", + "metadata": {}, + "outputs": [], + "source": [ + "# The generator can be used with any context-question-answer format\n", + "result = generator.generate(\n", + " context=[\n", + " \"Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily.\"\n", + " ],\n", + " question=\"What is the maximum daily dose of ibuprofen?\",\n", + " answer=\"The maximum daily dose of ibuprofen for adults is 2400mg.\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "8760884c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'The maximum daily dose of ibuprofen for adults is 2400mg.',\n",
+       "    'hallucinated_answer': 'The maximum daily dose of ibuprofen for adults is 3200mg, per a 2016 FDA guideline.',\n",
+       "    'hallucinated_parts': ['3200mg', 'per a 2016 FDA guideline', '2016']\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 2400mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 3200mg, per a 2016 FDA guideline.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'3200mg'\u001b[0m, \u001b[32m'per a 2016 FDA guideline'\u001b[0m, \u001b[32m'2016'\u001b[0m\u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from rich import console\n", + "\n", + "console = console.Console()\n", + "\n", + "console.print(result)" + ] + }, + { + "cell_type": "markdown", + "id": "0b049c56", + "metadata": {}, + "source": [ + "You can easily tune the error types and intensity to your needs.\n", + "\n", + "Currently, the generator supports the following error types:\n", + "- factual = Change facts/entities\n", + "- temporal = Change dates, time periods\n", + "- numerical = Change numbers, quantities\n", + "- relational = Change relationships between entities\n", + "- contextual = Add unrelated context\n", + "- omission = Remove important details\n", + "\n", + "And intensity is a float between 0 and 1, where 0 is hardly noticable and 1 is very obvious" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f09d1a5a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'The maximum daily dose of ibuprofen for adults is 2400mg.',\n",
+       "    'hallucinated_answer': 'The maximum daily dose of ibuprofen for adults is 3200mg.',\n",
+       "    'hallucinated_parts': ['3200mg']\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 2400mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 3200mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'3200mg'\u001b[0m\u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Lets try to generate numerical errors\n", + "result = generator.generate(\n", + " context=[\n", + " \"Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily.\"\n", + " ],\n", + " question=\"What is the maximum daily dose of ibuprofen?\",\n", + " answer=\"The maximum daily dose of ibuprofen for adults is 2400mg.\",\n", + " error_types=[\"numerical\"],\n", + ")\n", + "\n", + "console.print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "1c4add75", + "metadata": {}, + "outputs": [], + "source": [ + "# Lets try with low intensity\n", + "result = generator.generate(\n", + " context=[\n", + " \"Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily.\"\n", + " ],\n", + " question=\"What is the maximum daily dose of ibuprofen?\",\n", + " answer=\"The maximum daily dose of ibuprofen for adults is 2400mg.\",\n", + " error_types=[\"numerical\"],\n", + " intensity=0.1,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "4f612a47", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'The maximum daily dose of ibuprofen for adults is 2400mg.',\n",
+       "    'hallucinated_answer': 'The maximum daily dose of ibuprofen for adults is 2500mg.',\n",
+       "    'hallucinated_parts': ['2500mg']\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 2400mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 2500mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'2500mg'\u001b[0m\u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "console.print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "cd939938", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'The maximum daily dose of ibuprofen for adults is 2400mg.',\n",
+       "    'hallucinated_answer': 'The maximum daily dose of ibuprofen for adults is 3200mg.',\n",
+       "    'hallucinated_parts': ['3200mg']\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 2400mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'The maximum daily dose of ibuprofen for adults is 3200mg.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'3200mg'\u001b[0m\u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Now lets try to generate factual errors\n", + "result = generator.generate(\n", + " context=[\n", + " \"Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily.\"\n", + " ],\n", + " question=\"What is the maximum daily dose of ibuprofen?\",\n", + " answer=\"The maximum daily dose of ibuprofen for adults is 2400mg.\",\n", + " error_types=[\"factual\"],\n", + " intensity=0.4,\n", + ")\n", + "\n", + "console.print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "31e3a3e6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'Apollo 11 landed on the Moon on July 20, 1969.',\n",
+       "    'hallucinated_answer': 'Apollo 11 landed on the Moon on July 21, 1969.',\n",
+       "    'hallucinated_parts': ['July 21, 1969']\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'Apollo 11 landed on the Moon on July 20, 1969.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'Apollo 11 landed on the Moon on July 21, 1969.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'July 21, 1969'\u001b[0m\u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Another example: temporal errors\n", + "result = generator.generate(\n", + " context=[\n", + " \"Apollo 11 was the first crewed mission to land on the Moon, touching down on July 20, 1969. Neil Armstrong and Buzz Aldrin spent about 21 hours on the lunar surface.\"\n", + " ],\n", + " question=\"On what date did Apollo 11 land on the Moon?\",\n", + " answer=\"Apollo 11 landed on the Moon on July 20, 1969.\",\n", + " error_types=[\"temporal\"],\n", + " intensity=0.5,\n", + ")\n", + "\n", + "console.print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "b5f07a91", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SimpleBatchResult(\n",
+       "    results=[\n",
+       "        HallucinationDataGeneratorOutput(\n",
+       "            generated_hlcntn_answer='The maximum daily dose of ibuprofen for adults is 2800 mg as recommended since\n",
+       "2019.',\n",
+       "            generated_non_hlcntn_answer='The maximum daily dose of ibuprofen for adults is 2400mg.',\n",
+       "            hlcntn_part=['2800 mg', 'as recommended since 2019']\n",
+       "        ),\n",
+       "        HallucinationDataGeneratorOutput(\n",
+       "            generated_hlcntn_answer='Apollo 11 landed on the Moon on July 21, 1969.',\n",
+       "            generated_non_hlcntn_answer='Apollo 11 landed on the Moon on July 20, 1969.',\n",
+       "            hlcntn_part=['July 21, 1969']\n",
+       "        )\n",
+       "    ],\n",
+       "    failed_indices=[],\n",
+       "    errors=[],\n",
+       "    total_time=16.986872911453247,\n",
+       "    successful_count=2,\n",
+       "    failed_count=0\n",
+       ")\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;35mSimpleBatchResult\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mresults\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mHallucinationDataGeneratorOutput\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mgenerated_hlcntn_answer\u001b[0m=\u001b[32m'The maximum daily dose of ibuprofen for adults is 2800 mg as recommended since\u001b[0m\n", + "\u001b[32m2019.'\u001b[0m,\n", + " \u001b[33mgenerated_non_hlcntn_answer\u001b[0m=\u001b[32m'The maximum daily dose of ibuprofen for adults is 2400mg.'\u001b[0m,\n", + " \u001b[33mhlcntn_part\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'2800 mg'\u001b[0m, \u001b[32m'as recommended since 2019'\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[1;35mHallucinationDataGeneratorOutput\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mgenerated_hlcntn_answer\u001b[0m=\u001b[32m'Apollo 11 landed on the Moon on July 21, 1969.'\u001b[0m,\n", + " \u001b[33mgenerated_non_hlcntn_answer\u001b[0m=\u001b[32m'Apollo 11 landed on the Moon on July 20, 1969.'\u001b[0m,\n", + " \u001b[33mhlcntn_part\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'July 21, 1969'\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mfailed_indices\u001b[0m=\u001b[1m[\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[33merrors\u001b[0m=\u001b[1m[\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[33mtotal_time\u001b[0m=\u001b[1;36m16\u001b[0m\u001b[1;36m.986872911453247\u001b[0m,\n", + " \u001b[33msuccessful_count\u001b[0m=\u001b[1;36m2\u001b[0m,\n", + " \u001b[33mfailed_count\u001b[0m=\u001b[1;36m0\u001b[0m\n", + "\u001b[1m)\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Hallucinations can be generated in batch as well\n", + "\n", + "\n", + "async def generate_batch(contexts, questions, answers, error_types, intensity):\n", + " generator = HallucinationGenerator(model=\"gpt-5-mini\", temperature=1.0)\n", + " results = await 
generator.generate_batch_async(\n", + " contexts, questions, answers, error_types, intensity\n", + " )\n", + " return results\n", + "\n", + "\n", + "# Lets try to generate a batch of hallucinations\n", + "contexts = [\n", + " \"Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily.\",\n", + " \"Apollo 11 was the first crewed mission to land on the Moon, touching down on July 20, 1969. Neil Armstrong and Buzz Aldrin spent about 21 hours on the lunar surface.\",\n", + "]\n", + "questions = [\n", + " \"What is the maximum daily dose of ibuprofen?\",\n", + " \"On what date did Apollo 11 land on the Moon?\",\n", + "]\n", + "answers = [\n", + " \"The maximum daily dose of ibuprofen for adults is 2400mg.\",\n", + " \"Apollo 11 landed on the Moon on July 20, 1969.\",\n", + "]\n", + "error_types = [\"numerical\", \"temporal\"]\n", + "intensity = 0.5\n", + "\n", + "results = await generate_batch(contexts, questions, answers, error_types, intensity)\n", + "console.print(results)" + ] + }, + { + "cell_type": "markdown", + "id": "e4c97223", + "metadata": {}, + "source": [ + "## The rag-mini-BioASQ dataset\n", + "\n", + "The rag-mini-BioASQ dataset is a rag dataset of biomedical questions and answers together with their corresponding context.\n", + "\n", + "We can use the HuggingFace `datasets` library to load the dataset.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "44185421", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'question': 'What is the applicability of the No Promoter Left Behind method?',\n",
+       "    'answer': 'No Promoter Left Behind (NPLB) is an efficient, organism-independent method for characterizing \n",
+       "promoter architectures directly from experimentally identified genome-wide TSSs, without relying on known promoter \n",
+       "elements.',\n",
+       "    'context': [\n",
+       "        'Promoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some \n",
+       "have a TATA-box, many others do not. Even the \\nones with it can differ in its position relative to the \n",
+       "transcription start site \\n(TSS). No Promoter Left Behind (NPLB) is an efficient, organism-independent \\nmethod for\n",
+       "characterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \n",
+       "relying on known promoter \\nelements. As a test case, we show its application in identifying novel \\narchitectures \n",
+       "in the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \n",
+       "https://github.com/computationalBiology/NPLB/ (Mac OSX/Linux).\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \n",
+       "INFORMATION: Supplementary data are available at Bioinformatics \\nonline.'\n",
+       "    ]\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'question'\u001b[0m: \u001b[32m'What is the applicability of the No Promoter Left Behind method?'\u001b[0m,\n", + " \u001b[32m'answer'\u001b[0m: \u001b[32m'No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent method for characterizing \u001b[0m\n", + "\u001b[32mpromoter architectures directly from experimentally identified genome-wide TSSs, without relying on known promoter \u001b[0m\n", + "\u001b[32melements.'\u001b[0m,\n", + " \u001b[32m'context'\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[32m'Promoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some \u001b[0m\n", + "\u001b[32mhave a TATA-box, many others do not. Even the \\nones with it can differ in its position relative to the \u001b[0m\n", + "\u001b[32mtranscription start site \\n\u001b[0m\u001b[32m(\u001b[0m\u001b[32mTSS\u001b[0m\u001b[32m)\u001b[0m\u001b[32m. No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent \\nmethod for\u001b[0m\n", + "\u001b[32mcharacterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \u001b[0m\n", + "\u001b[32mrelying on known promoter \\nelements. 
As a test case, we show its application in identifying novel \\narchitectures \u001b[0m\n", + "\u001b[32min the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \u001b[0m\n", + "\u001b[32mhttps://github.com/computationalBiology/NPLB/ \u001b[0m\u001b[32m(\u001b[0m\u001b[32mMac OSX/Linux\u001b[0m\u001b[32m)\u001b[0m\u001b[32m.\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \u001b[0m\n", + "\u001b[32mINFORMATION: Supplementary data are available at Bioinformatics \\nonline.'\u001b[0m\n", + " \u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def load_rag_mini_bioasq(split: str = \"train\", filter_min_words: int = 10):\n", + " \"\"\"Load rag-mini-bioasq dataset and prepare for generation.\"\"\"\n", + " try:\n", + " from datasets import load_dataset\n", + " except ImportError:\n", + " raise ImportError(\"datasets package required. Install with: pip install datasets\")\n", + "\n", + " # Load dataset\n", + " qa_dataset = load_dataset(\"enelpol/rag-mini-bioasq\", \"question-answer-passages\")\n", + " corpus_dataset = load_dataset(\"enelpol/rag-mini-bioasq\", \"text-corpus\")\n", + "\n", + " # Create corpus lookup\n", + " corpus_lookup = {item[\"id\"]: item[\"passage\"] for item in corpus_dataset[\"test\"]}\n", + "\n", + " # Process data\n", + " processed_data = []\n", + " for item in qa_dataset[split]:\n", + " passage_ids = item[\"relevant_passage_ids\"]\n", + " context_passages = [corpus_lookup.get(pid, None) for pid in passage_ids]\n", + " context_passages = [p for p in context_passages if p is not None]\n", + "\n", + " # Filter by answer length\n", + " if len(item[\"answer\"].split()) >= filter_min_words:\n", + " processed_data.append(\n", + " {\n", + " \"question\": item[\"question\"],\n", + " \"answer\": item[\"answer\"],\n", + " \"context\": context_passages,\n", + " }\n", + " )\n", + "\n", + " return processed_data\n", + 
"\n", + "\n", + "# Lets load the dataset\n", + "data = load_rag_mini_bioasq()\n", + "\n", + "# Lets take a look at an example sample\n", + "console.print(data[3])" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "db23fb5e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'original_answer': 'No Promoter Left Behind (NPLB) is an efficient, organism-independent method for \n",
+       "characterizing promoter architectures directly from experimentally identified genome-wide TSSs, without relying on \n",
+       "known promoter elements.',\n",
+       "    'hallucinated_answer': 'No Promoter Left Behind (NPLB) is an efficient, organism-specific method for \n",
+       "characterizing promoter architectures from computationally inferred genome-wide TSSs, often leveraging known \n",
+       "promoter elements; it was primarily applied before 2010 and typically analyzes about 8,000 TSSs per dataset.',\n",
+       "    'hallucinated_parts': [\n",
+       "        'organism-specific',\n",
+       "        'from computationally inferred genome-wide TSSs',\n",
+       "        'often leveraging known promoter elements',\n",
+       "        'it was primarily applied before 2010',\n",
+       "        'typically analyzes about 8,000 TSSs per dataset'\n",
+       "    ]\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'original_answer'\u001b[0m: \u001b[32m'No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent method for \u001b[0m\n", + "\u001b[32mcharacterizing promoter architectures directly from experimentally identified genome-wide TSSs, without relying on \u001b[0m\n", + "\u001b[32mknown promoter elements.'\u001b[0m,\n", + " \u001b[32m'hallucinated_answer'\u001b[0m: \u001b[32m'No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-specific method for \u001b[0m\n", + "\u001b[32mcharacterizing promoter architectures from computationally inferred genome-wide TSSs, often leveraging known \u001b[0m\n", + "\u001b[32mpromoter elements; it was primarily applied before 2010 and typically analyzes about 8,000 TSSs per dataset.'\u001b[0m,\n", + " \u001b[32m'hallucinated_parts'\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[32m'organism-specific'\u001b[0m,\n", + " \u001b[32m'from computationally inferred genome-wide TSSs'\u001b[0m,\n", + " \u001b[32m'often leveraging known promoter elements'\u001b[0m,\n", + " \u001b[32m'it was primarily applied before 2010'\u001b[0m,\n", + " \u001b[32m'typically analyzes about 8,000 TSSs per dataset'\u001b[0m\n", + " \u001b[1m]\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# You can easily use the generator to generate hallucinations for the dataset\n", + "result = generator.generate(\n", + " context=data[3][\"context\"],\n", + " question=data[3][\"question\"],\n", + " answer=data[3][\"answer\"],\n", + ")\n", + "\n", + "console.print(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "b3972a0c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[\n",
+       "    {\n",
+       "        'prompt': 'Briefly answer the following question:\\nWhat is the applicability of the No Promoter Left Behind\n",
+       "method?\\nBear in mind that your response should be strictly based on the following 1 passages:\\npassage 1: \n",
+       "Promoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some have a \n",
+       "TATA-box, many others do not. Even the \\nones with it can differ in its position relative to the transcription \n",
+       "start site \\n(TSS). No Promoter Left Behind (NPLB) is an efficient, organism-independent \\nmethod for \n",
+       "characterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \n",
+       "relying on known promoter \\nelements. As a test case, we show its application in identifying novel \\narchitectures \n",
+       "in the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \n",
+       "https://github.com/computationalBiology/NPLB/ (Mac OSX/Linux).\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \n",
+       "INFORMATION: Supplementary data are available at Bioinformatics \\nonline.\\nIn case the passages do not contain the \n",
+       "necessary information to answer the question, please reply with: \"Unable to answer based on given \n",
+       "passages.\"\\noutput:',\n",
+       "        'answer': 'No Promoter Left Behind (NPLB) is an efficient, organism-independent method for characterizing \n",
+       "promoter architectures directly from experimentally identified genome-wide TSSs, without relying on known promoter \n",
+       "elements.',\n",
+       "        'labels': [],\n",
+       "        'split': 'train',\n",
+       "        'task_type': 'qa'\n",
+       "    },\n",
+       "    {\n",
+       "        'prompt': 'Briefly answer the following question:\\nWhat is the applicability of the No Promoter Left Behind\n",
+       "method?\\nBear in mind that your response should be strictly based on the following 1 passages:\\npassage 1: \n",
+       "Promoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some have a \n",
+       "TATA-box, many others do not. Even the \\nones with it can differ in its position relative to the transcription \n",
+       "start site \\n(TSS). No Promoter Left Behind (NPLB) is an efficient, organism-independent \\nmethod for \n",
+       "characterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \n",
+       "relying on known promoter \\nelements. As a test case, we show its application in identifying novel \\narchitectures \n",
+       "in the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \n",
+       "https://github.com/computationalBiology/NPLB/ (Mac OSX/Linux).\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \n",
+       "INFORMATION: Supplementary data are available at Bioinformatics \\nonline.\\nIn case the passages do not contain the \n",
+       "necessary information to answer the question, please reply with: \"Unable to answer based on given \n",
+       "passages.\"\\noutput:',\n",
+       "        'answer': 'No Promoter Left Behind (NPLB) is an efficient, organism-specific method for characterizing \n",
+       "promoter architectures from computationally inferred genome-wide TSSs, often leveraging known promoter elements; it\n",
+       "was primarily applied before 2010 and typically analyzes about 8,000 TSSs per dataset.',\n",
+       "        'labels': [\n",
+       "            {'start': 48, 'end': 65, 'label': 'hallucinated'},\n",
+       "            {'start': 115, 'end': 161, 'label': 'hallucinated'},\n",
+       "            {'start': 163, 'end': 203, 'label': 'hallucinated'},\n",
+       "            {'start': 205, 'end': 241, 'label': 'hallucinated'},\n",
+       "            {'start': 246, 'end': 293, 'label': 'hallucinated'}\n",
+       "        ],\n",
+       "        'split': 'train',\n",
+       "        'task_type': 'qa'\n",
+       "    }\n",
+       "]\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m[\u001b[0m\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[32m'prompt'\u001b[0m: \u001b[32m'Briefly answer the following question:\\nWhat is the applicability of the No Promoter Left Behind\u001b[0m\n", + "\u001b[32mmethod?\\nBear in mind that your response should be strictly based on the following 1 passages:\\npassage 1: \u001b[0m\n", + "\u001b[32mPromoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some have a \u001b[0m\n", + "\u001b[32mTATA-box, many others do not. Even the \\nones with it can differ in its position relative to the transcription \u001b[0m\n", + "\u001b[32mstart site \\n\u001b[0m\u001b[32m(\u001b[0m\u001b[32mTSS\u001b[0m\u001b[32m)\u001b[0m\u001b[32m. No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent \\nmethod for \u001b[0m\n", + "\u001b[32mcharacterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \u001b[0m\n", + "\u001b[32mrelying on known promoter \\nelements. 
As a test case, we show its application in identifying novel \\narchitectures \u001b[0m\n", + "\u001b[32min the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \u001b[0m\n", + "\u001b[32mhttps://github.com/computationalBiology/NPLB/ \u001b[0m\u001b[32m(\u001b[0m\u001b[32mMac OSX/Linux\u001b[0m\u001b[32m)\u001b[0m\u001b[32m.\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \u001b[0m\n", + "\u001b[32mINFORMATION: Supplementary data are available at Bioinformatics \\nonline.\\nIn case the passages do not contain the \u001b[0m\n", + "\u001b[32mnecessary information to answer the question, please reply with: \"Unable to answer based on given \u001b[0m\n", + "\u001b[32mpassages.\"\\noutput:'\u001b[0m,\n", + " \u001b[32m'answer'\u001b[0m: \u001b[32m'No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent method for characterizing \u001b[0m\n", + "\u001b[32mpromoter architectures directly from experimentally identified genome-wide TSSs, without relying on known promoter \u001b[0m\n", + "\u001b[32melements.'\u001b[0m,\n", + " \u001b[32m'labels'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'split'\u001b[0m: \u001b[32m'train'\u001b[0m,\n", + " \u001b[32m'task_type'\u001b[0m: \u001b[32m'qa'\u001b[0m\n", + " \u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[32m'prompt'\u001b[0m: \u001b[32m'Briefly answer the following question:\\nWhat is the applicability of the No Promoter Left Behind\u001b[0m\n", + "\u001b[32mmethod?\\nBear in mind that your response should be strictly based on the following 1 passages:\\npassage 1: \u001b[0m\n", + "\u001b[32mPromoters have diverse regulatory architectures and thus activate genes \\ndifferently. For example, some have a \u001b[0m\n", + "\u001b[32mTATA-box, many others do not. 
Even the \\nones with it can differ in its position relative to the transcription \u001b[0m\n", + "\u001b[32mstart site \\n\u001b[0m\u001b[32m(\u001b[0m\u001b[32mTSS\u001b[0m\u001b[32m)\u001b[0m\u001b[32m. No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-independent \\nmethod for \u001b[0m\n", + "\u001b[32mcharacterizing such diverse architectures directly from \\nexperimentally identified genome-wide TSSs, without \u001b[0m\n", + "\u001b[32mrelying on known promoter \\nelements. As a test case, we show its application in identifying novel \\narchitectures \u001b[0m\n", + "\u001b[32min the fly genome.\\nAVAILABILITY AND IMPLEMENTATION: Web-server at http://nplb.ncl.res.in Standalone \\nalso at \u001b[0m\n", + "\u001b[32mhttps://github.com/computationalBiology/NPLB/ \u001b[0m\u001b[32m(\u001b[0m\u001b[32mMac OSX/Linux\u001b[0m\u001b[32m)\u001b[0m\u001b[32m.\\nCONTACT: l.narlikar@ncl.res.in\\nSUPPLEMENTARY \u001b[0m\n", + "\u001b[32mINFORMATION: Supplementary data are available at Bioinformatics \\nonline.\\nIn case the passages do not contain the \u001b[0m\n", + "\u001b[32mnecessary information to answer the question, please reply with: \"Unable to answer based on given \u001b[0m\n", + "\u001b[32mpassages.\"\\noutput:'\u001b[0m,\n", + " \u001b[32m'answer'\u001b[0m: \u001b[32m'No Promoter Left Behind \u001b[0m\u001b[32m(\u001b[0m\u001b[32mNPLB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is an efficient, organism-specific method for characterizing \u001b[0m\n", + "\u001b[32mpromoter architectures from computationally inferred genome-wide TSSs, often leveraging known promoter elements; it\u001b[0m\n", + "\u001b[32mwas primarily applied before 2010 and typically analyzes about 8,000 TSSs per dataset.'\u001b[0m,\n", + " \u001b[32m'labels'\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[1m{\u001b[0m\u001b[32m'start'\u001b[0m: \u001b[1;36m48\u001b[0m, \u001b[32m'end'\u001b[0m: 
\u001b[1;36m65\u001b[0m, \u001b[32m'label'\u001b[0m: \u001b[32m'hallucinated'\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\u001b[32m'start'\u001b[0m: \u001b[1;36m115\u001b[0m, \u001b[32m'end'\u001b[0m: \u001b[1;36m161\u001b[0m, \u001b[32m'label'\u001b[0m: \u001b[32m'hallucinated'\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\u001b[32m'start'\u001b[0m: \u001b[1;36m163\u001b[0m, \u001b[32m'end'\u001b[0m: \u001b[1;36m203\u001b[0m, \u001b[32m'label'\u001b[0m: \u001b[32m'hallucinated'\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\u001b[32m'start'\u001b[0m: \u001b[1;36m205\u001b[0m, \u001b[32m'end'\u001b[0m: \u001b[1;36m241\u001b[0m, \u001b[32m'label'\u001b[0m: \u001b[32m'hallucinated'\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\u001b[32m'start'\u001b[0m: \u001b[1;36m246\u001b[0m, \u001b[32m'end'\u001b[0m: \u001b[1;36m293\u001b[0m, \u001b[32m'label'\u001b[0m: \u001b[32m'hallucinated'\u001b[0m\u001b[1m}\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[32m'split'\u001b[0m: \u001b[32m'train'\u001b[0m,\n", + " \u001b[32m'task_type'\u001b[0m: \u001b[32m'qa'\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + "\u001b[1m]\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# You can easily convert this to the format LettuceDetect uses for training\n", + "from lettucedetect.detectors.prompt_utils import PromptUtils\n", + "\n", + "train_data = []\n", + "\n", + "# Add the non-hallucinated sample\n", + "train_data.append(\n", + " {\n", + " \"prompt\": PromptUtils.format_context(data[3][\"context\"], data[3][\"question\"], lang=\"en\"),\n", + " \"answer\": result[\"original_answer\"],\n", + " \"labels\": [],\n", + " \"split\": \"train\",\n", + " \"task_type\": \"qa\",\n", + " }\n", + ")\n", + "\n", + "hallucinated_labels = []\n", + "for part in result[\"hallucinated_parts\"]:\n", + " start = result[\"hallucinated_answer\"].find(part)\n", + " if start != -1:\n", + " hallucinated_labels.append(\n", 
+ " {\"start\": start, \"end\": start + len(part), \"label\": \"hallucinated\"}\n", + " )\n", + "# Add the hallucinated sample\n", + "train_data.append(\n", + " {\n", + " \"prompt\": PromptUtils.format_context(data[3][\"context\"], data[3][\"question\"], lang=\"en\"),\n", + " \"answer\": result[\"hallucinated_answer\"],\n", + " \"labels\": hallucinated_labels,\n", + " \"split\": \"train\",\n", + " \"task_type\": \"qa\",\n", + " }\n", + ")\n", + "\n", + "console.print(train_data)" + ] + }, + { + "cell_type": "markdown", + "id": "3cdc1dbd", + "metadata": {}, + "source": [ + "## Save and train\n", + "\n", + "Now you can save the data and train a model. First lets save the data.\n", + "\n", + "```python\n", + "import json\n", + "\n", + "with open('train_data.json', 'w') as f:\n", + " json.dump(train_data, f)\n", + "```\n", + "\n", + "Now you can train a model.\n", + "\n", + "```bash\n", + "python scripts/train.py \\\n", + " --ragtruth-path train_data.json \\\n", + " --model-name jhu-clsp/ettin-encoder-68m \\\n", + " --output-dir output/hallucination_detector \\\n", + " --batch-size 4 \\\n", + " --epochs 6 \\\n", + " --learning-rate 1e-5 \n", + "```\n", + "\n", + "**And that's it!** You have a hallucination detector that you can use to detect hallucinations in your data.\n" + ] + }, + { + "cell_type": "markdown", + "id": "8f275af5", + "metadata": {}, + "source": [ + "For the published models, we have generated **1500** samples from the rag-mini-bioasq dataset (3000 samples together with the non-hallucinated ones). We've used the `gpt-oss-120b` model for the training data generation. We haven't specified direct error types, and used the default intensity of 0.3.\n", + "\n", + "For the test set, we have generated **300** hallucinated samples (600 samples together with the non-hallucinated ones). 
We've used the `gpt-5` model for the generation to ensure the quality of the hallucinations for the test set.\n", + "\n", + "For large scale generation, use our script:\n", + "\n", + "```bash\n", + "python scripts/generate_synthetic_data.py \\\\\n", + " --dataset rag-mini-bioasq \\\\\n", + " --split train \\\\\n", + " --num-samples 100 \\\\\n", + " --model gpt-4o-mini \\\\\n", + " --output data/synthetic_train.json\n", + "```\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "fbc37ff5", + "metadata": {}, + "source": [ + "## End-to-End Workflow\n", + "\n", + "```bash\n", + "# Step 1: Generate synthetic training data\n", + "python scripts/generate_synthetic_data.py \\\n", + " --dataset rag-mini-bioasq \\\n", + " --num-samples 2000 \\\n", + " --model gpt-4o-mini \\\n", + " --batch-size 10 \\\n", + " --output data/synthetic_large.json\n", + "\n", + "# Step 2: Train TinyLettuce model\n", + "python scripts/train.py \\\n", + " --ragtruth-path data/train_combined_large.json \\\n", + " --model-name jhu-clsp/ettin-encoder-17m \\\n", + " --output-dir output/tinylettuce_17m \\\n", + " --batch-size 8 \\\n", + " --epochs 3\n", + "\n", + "# Step 3: Deploy on CPU for real-time inference\n", + "python scripts/start_api.py prod --model output/tinylettuce_17m\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "2b24eab7", + "metadata": {}, + "source": [ + "## Bonus\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a75b564", + "metadata": {}, + "outputs": [], + "source": [ + "# We have implemented a triplet-based hallucination detection model that you can use the same way as the standard lettucecedetect models.\n", + "\n", + "from lettucedetect.models.inference import HallucinationDetector\n", + "from lettucedetect.ragfactchecker import RAGFactChecker\n", + "\n", + "detector = HallucinationDetector(\n", + " method=\"rag_fact_checker\",\n", + ")\n", + "\n", + "fact_checker = RAGFactChecker()" + ] + }, + { + "cell_type": "code", 
+ "execution_count": 35, + "id": "09d6f585", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[['the capital of France', 'is', 'Paris']]\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'the capital of France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'Paris'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Get triplets for a sample\n", + "triplets = fact_checker.generate_triplets(\"The capital of France is Paris.\")\n", + "console.print(triplets)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "49953813", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'answer_triplets': [['France', 'is', 'a country in Europe']],\n",
+       "    'reference_triplets': [['France', 'is', 'a country in Asia']],\n",
+       "    'comparison': {\n",
+       "        'fact_check_results': {0: False},\n",
+       "        'raw_output': FactCheckerOutput(fact_check_prediction_binary={0: False})\n",
+       "    }\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'answer_triplets'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'a country in Europe'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'reference_triplets'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'a country in Asia'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'comparison'\u001b[0m: \u001b[1m{\u001b[0m\n", + " \u001b[32m'fact_check_results'\u001b[0m: \u001b[1m{\u001b[0m\u001b[1;36m0\u001b[0m: \u001b[3;91mFalse\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[32m'raw_output'\u001b[0m: \u001b[1;35mFactCheckerOutput\u001b[0m\u001b[1m(\u001b[0m\u001b[33mfact_check_prediction_binary\u001b[0m=\u001b[1m{\u001b[0m\u001b[1;36m0\u001b[0m: \u001b[3;91mFalse\u001b[0m\u001b[1m}\u001b[0m\u001b[1m)\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "compare = fact_checker.analyze_text_pair(\n", + " \"France is a country in Europe.\", \"France is a country in Asia.\"\n", + ")\n", + "console.print(compare)" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "77bfe6e4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
{\n",
+       "    'spans': [\n",
+       "        {\n",
+       "            'start': 0,\n",
+       "            'end': 31,\n",
+       "            'text': 'The capital of France is Berlin',\n",
+       "            'confidence': 0.9,\n",
+       "            'triplet': ['the capital of France', 'is', 'Berlin']\n",
+       "        }\n",
+       "    ],\n",
+       "    'triplets': {\n",
+       "        'answer': [['the capital of France', 'is', 'Berlin']],\n",
+       "        'context': [['The capital of France', 'is', 'Paris']],\n",
+       "        'hallucinated': [['the capital of France', 'is', 'Berlin']]\n",
+       "    },\n",
+       "    'fact_check_results': {0: False}\n",
+       "}\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1m{\u001b[0m\n", + " \u001b[32m'spans'\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[32m'start'\u001b[0m: \u001b[1;36m0\u001b[0m,\n", + " \u001b[32m'end'\u001b[0m: \u001b[1;36m31\u001b[0m,\n", + " \u001b[32m'text'\u001b[0m: \u001b[32m'The capital of France is Berlin'\u001b[0m,\n", + " \u001b[32m'confidence'\u001b[0m: \u001b[1;36m0.9\u001b[0m,\n", + " \u001b[32m'triplet'\u001b[0m: \u001b[1m[\u001b[0m\u001b[32m'the capital of France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'Berlin'\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[32m'triplets'\u001b[0m: \u001b[1m{\u001b[0m\n", + " \u001b[32m'answer'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'the capital of France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'Berlin'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'context'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'The capital of France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'Paris'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'hallucinated'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m[\u001b[0m\u001b[32m'the capital of France'\u001b[0m, \u001b[32m'is'\u001b[0m, \u001b[32m'Berlin'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m}\u001b[0m,\n", + " \u001b[32m'fact_check_results'\u001b[0m: \u001b[1m{\u001b[0m\u001b[1;36m0\u001b[0m: \u001b[3;91mFalse\u001b[0m\u001b[1m}\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# You can use it for detecting hallucinations in your data\n", + "result = detector.predict(\n", + " context=\"The capital of France is Paris.\",\n", + " question=\"What is the capital of France?\",\n", + " answer=\"The capital of France is Berlin.\",\n", + " output_format=\"detailed\",\n", + ")\n", + "console.print(result)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "143e383a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "lettuce", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/TINYLETTUCE.md b/docs/TINYLETTUCE.md new file mode 100644 index 0000000..fe61bb5 --- /dev/null +++ b/docs/TINYLETTUCE.md @@ -0,0 +1,636 @@ +# TinyLettuce: Efficient Hallucination Detection with 17–68M Encoders + +

+ TinyLettuce Detective +
+ Small, task‑specialized encoders trained on synthetic data +

+ +--- + +We present **TinyLettuce**, our approach to efficient hallucination detection. By training tiny Ettin encoders (17-68M parameters) on synthetic data, we achieve better accuracy than billion-parameter LLM judges while running in real-time on CPU. + +## TL;DR + +- **TinyLettuce‑17M** (17M parameters) reaches **90.87% F1**, outperforming GPT‑5‑mini (83.69%), GPT‑OSS‑120B (83.38%), and Qwen3‑235B (79.84%) +- Runs in **real-time on CPU** with <50ms latency and 1000+ req/s throughput +- **Synthetic data generation** creates training data **100x cheaper** than manual annotation +- Complete **end‑to‑end pipeline** for domain-specific model training - generate data and train in minutes +- All models and code are **MIT licensed** and ready for production deployment + +Specialized training on synthetic data beats raw parameter count. + +--- + +## Quick Links + +- **GitHub**: [github.com/KRLabsOrg/LettuceDetect](https://github.com/KRLabsOrg/LettuceDetect) +- **PyPI**: [pypi.org/project/lettucedetect](https://pypi.org/project/lettucedetect/) +- **Hugging Face Models**: + - [TinyLettuce Collection](https://huggingface.co/collections/KRLabsOrg/tinylettuce-models) (Coming Soon) +- **Demo**: [Synthetic Data Generation Showcase](../demo/synthetic_data_generation_showcase.ipynb) +- **Notebook**: [TinyLettuce end‑to‑end](../demo/tinylettuce.ipynb) + - **Ettin Paper (LightOn)**: https://huggingface.co/papers/2507.11412 + +--- + +## Get Started + +Install the package: + +```bash +pip install lettucedetect +``` + +### Generate Synthetic Training Data + +```python +from lettucedetect import HallucinationGenerator + +# Initialize generator - temperature=1.0 required for GPT-5 models +generator = HallucinationGenerator(model="gpt-5", temperature=1.0) + +# Medical domain example: Generate numerical errors in dosage information +result = generator.generate( + context=[ + "Ibuprofen is an NSAID that reduces inflammation and pain. 
The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily." + ], + question="What is the maximum daily dose of ibuprofen?", + answer="The maximum daily dose of ibuprofen for adults is 2400mg.", + error_types=["numerical"], + intensity=0.4 +) + +print(f"Original: {result['original_answer']}") +print(f"Hallucinated: {result['hallucinated_answer']}") +print(f"Modified parts: {result['hallucinated_parts']}") +# Output: +# Original: The maximum daily dose of ibuprofen for adults is 2400mg. +# Hallucinated: The maximum daily dose of ibuprofen for adults is 3200mg. +# Modified parts: ['3200mg'] +``` + +### Control Error Intensity and Types + +```python +# Low intensity for subtle errors +result_subtle = generator.generate( + context=[ + "Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily." + ], + question="What is the maximum daily dose of ibuprofen?", + answer="The maximum daily dose of ibuprofen for adults is 2400mg.", + error_types=["numerical"], + intensity=0.1 # Very subtle change +) +# Output: "The maximum daily dose of ibuprofen for adults is 2500mg." + +# Temporal errors for historical events +result_temporal = generator.generate( + context=[ + "Apollo 11 was the first crewed mission to land on the Moon, touching down on July 20, 1969. Neil Armstrong and Buzz Aldrin spent about 21 hours on the lunar surface." + ], + question="On what date did Apollo 11 land on the Moon?", + answer="Apollo 11 landed on the Moon on July 20, 1969.", + error_types=["temporal"], + intensity=0.5 +) +# Output: "Apollo 11 landed on the Moon on July 21, 1969." 
+``` + +### Generate from Real Datasets + +```python +from datasets import load_dataset + +def load_rag_mini_bioasq(split="train", filter_min_words=10): + """Load rag-mini-bioasq dataset for generation.""" + qa_dataset = load_dataset("enelpol/rag-mini-bioasq", "question-answer-passages") + corpus_dataset = load_dataset("enelpol/rag-mini-bioasq", "text-corpus") + + # Create corpus lookup + corpus_lookup = {item["id"]: item["passage"] for item in corpus_dataset["test"]} + + processed_data = [] + for item in qa_dataset[split]: + passage_ids = item["relevant_passage_ids"] + context_passages = [corpus_lookup.get(pid, None) for pid in passage_ids] + context_passages = [p for p in context_passages if p is not None] + + if len(item["answer"].split()) >= filter_min_words: + processed_data.append({ + "question": item["question"], + "answer": item["answer"], + "context": context_passages, + }) + return processed_data + +# Load biomedical data and generate hallucinations +data = load_rag_mini_bioasq() +sample = data[3] # Example biomedical question + +result = generator.generate( + context=sample["context"], + question=sample["question"], + answer=sample["answer"] +) + +# Convert to training format (RAGTruth) +from lettucedetect.detectors.prompt_utils import PromptUtils + +train_sample = { + "prompt": PromptUtils.format_context(sample["context"], sample["question"], lang="en"), + "answer": result["hallucinated_answer"], + "labels": [{ + "start": result["hallucinated_answer"].find(part), + "end": result["hallucinated_answer"].find(part) + len(part), + "label": "hallucinated" + } for part in result["hallucinated_parts"]], + "split": "train", + "task_type": "qa" +} +``` + +### Use TinyLettuce for Detection + +```python +from lettucedetect import HallucinationDetector + +# Load tiny but powerful model +detector = HallucinationDetector( + method="transformer", + model_path="KRLabsOrg/tinylettuce-ettin-17m-v1" +) + +# Detect hallucinations in real-time on CPU +spans = 
detector.predict( + context=["Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily."], + question="What is the maximum daily dose of ibuprofen?", + answer="The maximum daily dose of ibuprofen for adults is 3200mg.", + output_format="spans" +) + +print(spans) +# Output: [{"start": 51, "end": 57, "text": "3200mg"}] +``` + +--- + +## Motivation + +RAG systems need hallucination detection, but current solutions force painful trade-offs between accuracy, cost, and speed. + +**Current hallucination detection approaches:** + +1. **Prompt-based detectors** - Use LLM APIs for zero/few-shot detection + - Can be expensive for large-scale production deployments + - Latency issues (2-10s per request) unsuitable for real-time use + - Multiple API calls per detection increase costs + +2. **Fine-tuned LLM detectors** - Large models (Llama-2-13B, Llama-3-8B) fine-tuned for detection + - High accuracy but resource-intensive to train and deploy + - Need GPU clusters, slow inference, high operational costs + +3. **Encoder-based detectors** - BERT-style models for token classification + - Fast and efficient but historically limited by short context (512 tokens) + - Can't handle typical RAG contexts which often exceed this limit + +**LettuceDetect's breakthrough**: We solved the context problem by leveraging ModernBERT's 8K token capacity, achieving better accuracy than fine-tuned LLMs at a fraction of the computational cost. This proved encoder-based detection could work at scale. + +**But we asked: can we go even smaller and faster?** + +**Enter TinyLettuce with Ettin encoders**: These lightweight transformers (17–68M parameters), introduced by LightOn, support 8K token contexts and are optimized for classification. Unlike large generative LLMs, Ettin focuses on efficient representation learning for fast, accurate detection. 
+ +**The key insight**: With the right synthetic training data, a 17M parameter Ettin encoder can outperform 235B parameter giants at hallucination detection while running real-time on CPU. TinyLettuce democratizes hallucination detection by making it accessible, fast, and cost-effective for any deployment. + +## Approach + +We discovered something counterintuitive: **specialized training data matters more than parameter count**. With the right synthetic training data, a 17M parameter model can outperform 235B parameter giants at hallucination detection. + +Our approach challenges conventional wisdom through four steps: + +1. **Generate synthetic data** using RAGFactChecker - no manual annotation needed +2. **Train tiny Ettin encoders** (17M-68M parameters) on this specialized data +3. **Deploy on CPU** for real-time inference at <50ms latency +4. **Scale effortlessly** - no GPU clusters or API limits + +--- + +## Synthetic Hallucination Data + +TinyLettuce leverages **synthetic training data** to achieve high performance. Instead of manually annotating thousands of examples, we use RAGFactChecker to generate training pairs automatically at scale. + +### Production-Scale Generation + +For large datasets, use our generation script: + +```bash +# Generate 10,000 training samples +python scripts/generate_synthetic_data.py \ + --dataset rag-mini-bioasq \ + --num-samples 10000 \ + --model gpt-4o-mini \ + --error-types factual numerical temporal \ + --intensity 0.4 \ + --batch-size 20 \ + --output data/synthetic_10k.json +``` + +### Data Schema (RAGTruth format) + +Minimal entry used for training: + +```json +{ + "prompt": "...", + "answer": "...", + "labels": [{"start": 31, "end": 71, "label": "hallucinated"}], + "split": "train", + "task_type": "qa", + "dataset": "synthetic", + "language": "en" +} +``` + +## TinyLettuce Models (Ettin Encoders) + +Our **TinyLettuce** models prove that architecture and training data matter more than parameter count. 
Built on the **Ettin encoder** (LightOn) — a lightweight, efficient transformer optimized for classification — these models achieve strong accuracy with low latency. + +### Model Family + +| Model | Parameters | Context Length | Key Advantage | +|-------|------------|----------------|---------------| +| **Ettin-17M** | 17 million | 8K tokens | Fastest, smallest memory | +| **Ettin-32M** | 32 million | 8K tokens | Best speed/accuracy balance | +| **Ettin-68M** | 68 million | 8K tokens | Highest accuracy | + +Why Ettin encoders work well: +- 8K token context windows (longer than most inputs) +- Modern transformer design (RoPE, GLU activations) +- Optimized for token classification, not generation +- Efficient CPU inference without GPU overhead + +**Training is straightforward:** +```bash +# 1) Generate 10K synthetic examples (~$50, ~5 hours) in RAGTruth format +python scripts/generate_synthetic_data.py \ + --dataset rag-mini-bioasq \ + --split train \ + --num-samples 10000 \ + --model gpt-4o-mini \ + --output-format ragtruth \ + --output data/synthetic_ragtruth_10k.json + +# 2) (Optional) Combine with original RAGTruth for training +python - << 'PY' +import json +a = json.load(open('data/ragtruth/ragtruth_data.json')) +b = json.load(open('data/synthetic_ragtruth_10k.json')) +json.dump(a + b, open('data/train_combined.json','w')) +PY + +# 3) Train TinyLettuce model (2–4 hours on a single GPU) +python scripts/train.py \ + --model-name jhu-clsp/ettin-encoder-68m \ + --ragtruth-path data/train_combined.json \ + --output-dir output/tinylettuce_68m +``` + +The results speak for themselves. + +### Data & Training Setup (Published Models) + +- Source dataset: `enelpol/rag-mini-bioasq` (question–answer–passages). +- Training generation: 1,500 hallucinated samples (≈3,000 total including non‑hallucinated) using a 120B LLM baseline (gpt‑oss‑120b); default intensity 0.3; no explicit error‑type constraints. 
+- Test generation: 300 hallucinated samples (≈600 total) using GPT‑5 to improve sample quality; held out for evaluation. +- Training recipe: Ettin encoders (17M/32M/68M) fine‑tuned as token classifiers on combined synthetic + RAGTruth (RAGTruth JSON concatenation), then evaluated on synthetic and RAGTruth splits. + +### Training Hyperparameters (Released Models) + +- Optimizer: AdamW; learning rate `1e-5`; weight decay `0.01`. +- Epochs: 3–6 (released checkpoints typically 3 for Ettin‑17M/32M, 3–6 for Ettin‑68M). +- Batch size: 8; max sequence length: 4096 tokens. +- Tokenization: `AutoTokenizer`; label pad `-100`; `DataCollatorForTokenClassification`. + +## Results + +When we trained TinyLettuce on synthetic data and tested it against billion-parameter models, the results shocked us. + +### Synthetic Data Evaluation (example-level) + +Metrics are computed at example level (answer contains any hallucination vs none). Precision/recall/F1 reflect this binary decision; thresholds and post‑processing can affect absolute values. 
+ +*When trained and evaluated on domain-specific synthetic data, tiny models dominate:* + +| Model | Parameters | Precision (%) | Recall (%) | F1 (%) | Hardware | +|-------|------------|---------------|------------|---------|----------| +| **TinyLettuce-17M** | **17M** | 84.56 | 98.21 | **90.87** | **CPU** | +| **TinyLettuce-32M** | **32M** | 80.36 | 99.10 | 88.76 | **CPU** | +| **TinyLettuce-68M** | **68M** | **89.54** | 95.96 | **92.64** | **CPU** | +| GPT-5-mini | ~200B | 71.95 | **100.00** | 83.69 | API/GPU | +| GPT-OSS-120B | 120B | 72.21 | 98.64 | 83.38 | GPU | +| Qwen3-235B | 235B | 66.74 | 99.32 | 79.84 | GPU | + +### RAGTruth Benchmark Evaluation (example-level) + +*Strong performance on standard benchmarks:* + +| Model | Parameters | F1 (%) | +|-------|------------|---------| +| **TinyLettuce-17M** | **17M** | 68.52 | +| **TinyLettuce-32M** | **32M** | 72.15 | +| **TinyLettuce-68M** | **68M** | **74.97** | +| LettuceDetect-base (ModernBERT) | — | 76.07 | +| LettuceDetect-large (ModernBERT) | 395M | **79.22** | +| Llama-2-13B (RAGTruth FT) | 13B | 78.70 | + +### Relative Size vs Performance + +How a 17M model compares to a 235B model: + +| Aspect | TinyLettuce-17M | Qwen3-235B | +|--------|-----------------|------------| +| **Parameters** | 17 million | 235 billion | +| **F1 on Synthetic Data** | **90.87%** | 79.84% | +| **Performance Advantage** | **+11.03%** | baseline | +| **Size Advantage** | **14,000x smaller** | 1x | +| **Inference Hardware** | CPU | GPU cluster | +| **Deployment Cost** | ~$10/month | ~$10,000/month | + +Baselines and judges: we compare against commonly used LLM judges (e.g., GPT‑5‑mini, GPT‑OSS‑120B, Qwen3‑235B) and fine‑tuned encoders/decoders reported in RAGTruth and follow-up work (e.g., Llama‑2‑13B FT). Beyond benchmarks, deployment characteristics often determine real‑world value. + +### Evaluation Protocol + +- Span construction from tokens: threshold 0.5 on token hallucination prob; contiguous tokens merged into spans. 
+- Reported F1 is span‑level unless explicitly noted. +- Example command: + +```bash +python scripts/evaluate.py \ + --model_path output/tinylettuce_68m \ + --data_path data/ragtruth/ragtruth_data.json \ + --evaluation_type span_level +``` + +## Real‑Time CPU Inference + +TinyLettuce's biggest advantage isn't just accuracy — it's accessibility. These models run in real time on standard CPUs, making hallucination detection practical to deploy widely. + +### End-to-End Workflow + +```bash +# Step 1: Generate synthetic training data +python scripts/generate_synthetic_data.py \ + --dataset rag-mini-bioasq \ + --num-samples 50000 \ + --model gpt-4o-mini \ + --batch-size 50 \ + --output data/synthetic_large.json + +# Step 2: Train TinyLettuce model +python - << 'PY' +import json +a = json.load(open('data/ragtruth/ragtruth_data.json')) +b = json.load(open('data/synthetic_large.json')) +json.dump(a + b, open('data/train_combined_large.json','w')) +PY + +python scripts/train.py \ + --ragtruth-path data/train_combined_large.json \ + --model-name jhu-clsp/ettin-encoder-17m \ + --output-dir output/tinylettuce_17m \ + --batch-size 8 \ + --epochs 3 + +# Step 3: Deploy on CPU for real-time inference +python scripts/start_api.py prod --model output/tinylettuce_17m +``` + +### Performance Characteristics + +| Metric | TinyLettuce-17M | TinyLettuce-32M | TinyLettuce-68M | GPT-5-mini API | +|--------|-----------------|-----------------|-----------------|-----------| +| **Latency** | <50ms | <75ms | <100ms | 2-10s | +| **Throughput** | 1000+ req/s | 800 req/s | 500 req/s | 10 req/s | +| **Memory** | 200MB | 350MB | 600MB | N/A | +| **Cost/1M requests** | $0.10 | $0.15 | $0.25 | $1000+ | + +--- + +## Trade‑offs: Choosing a Model Size + +When selecting a TinyLettuce variant, consider these trade-offs: + +### TinyLettuce-17M +- **Best for**: High-throughput, latency-critical applications +- **Pros**: Fastest inference, smallest memory footprint, lowest cost +- **Cons**: Slightly lower 
accuracy on complex cases +- **Use cases**: Real-time RAG validation, edge deployment + +### TinyLettuce-32M +- **Best for**: Balanced production deployments +- **Pros**: Good accuracy/speed balance, reasonable memory usage +- **Cons**: Moderate resource requirements +- **Use cases**: Production RAG pipelines, content moderation + +### TinyLettuce-68M +- **Best for**: Accuracy-critical applications +- **Pros**: Highest detection accuracy, still CPU-efficient +- **Cons**: Higher memory and compute requirements +- **Use cases**: High-stakes content validation, research applications + +--- + +## Converting Synthetic Data to Training Format + +Transform generated data into RAGTruth format for model training: + +```python +from lettucedetect.detectors.prompt_utils import PromptUtils + +def convert_to_ragtruth_format(samples, results, language="en"): + ragtruth_data = [] + + for sample, result in zip(samples, results): + # Format context using LettuceDetect's prompt utils + formatted_prompt = PromptUtils.format_context( + sample['context'], + sample['question'], + lang=language + ) + + # Non-hallucinated sample + ragtruth_data.append({ + "prompt": formatted_prompt, + "answer": result.generated_non_hlcntn_answer, + "labels": [], # No hallucinations + "split": "train", + "task_type": "qa", + "dataset": "synthetic", + "language": language + }) + + # Hallucinated sample with span labels + hallucinated_labels = [] + hallucinated_answer = result.generated_hlcntn_answer + + # Create span labels from hallucinated parts + for part in result.hlcntn_part: + if isinstance(part, str) and part in hallucinated_answer: + start = hallucinated_answer.find(part) + if start != -1: + hallucinated_labels.append({ + "start": start, + "end": start + len(part), + "label": "hallucinated" + }) + + ragtruth_data.append({ + "prompt": formatted_prompt, + "answer": hallucinated_answer, + "labels": hallucinated_labels, + "split": "train", + "task_type": "qa", + "dataset": "synthetic", + "language": 
language + }) + + return ragtruth_data +``` + +--- + +## Key Takeaways + +**Small Specialized > Large Generalist**: TinyLettuce-68M (92.64% F1) outperforms Qwen3-235B (79.84% F1) while being 14,000x smaller. Task-specific training beats raw parameter count. + +**Dramatic Cost Reduction**: Synthetic data generation costs significantly less than manual annotation. CPU inference eliminates expensive API calls and GPU requirements. + +**Real-Time CPU Inference**: TinyLettuce models achieve <50ms latency and 1000+ req/s on standard CPUs, making hallucination detection practical for any deployment. + +**Synthetic Data Breakthrough**: RAGFactChecker-generated synthetic data enables 90%+ F1 scores - higher than what these same models achieve on manually annotated RAGTruth data. + +**Complete Open Pipeline**: End-to-end framework from data generation to model deployment available under MIT license. No expensive GPUs or API calls required. + + +## Bonus: Triplet‑Based RAGFactChecker + +RAGFactChecker exposes a symbolic path for analysis using knowledge triplets. + +Generate triplets from any text: +```python +from lettucedetect.ragfactchecker import RAGFactChecker + +rag = RAGFactChecker(model="gpt-4o-mini") # requires OPENAI_API_KEY +triplets = rag.generate_triplets("Paris is the capital of France.") +print(triplets) # e.g., [["Paris", "is_capital_of", "France"]] +``` + +Triplet‑based hallucination detection against context: +```python +context = [ + "France is a country in Europe. The capital of France is Paris. Population is ~67M." +] +answer = "France's capital is Lyon and population is 67M." 
+ +res = rag.detect_hallucinations(context=context, answer=answer) +print(res["hallucinated_triplets"]) # triplets not supported by context +``` + +Direct triplet comparison and pairwise analysis: +```python +ans_trips = rag.generate_triplets(answer) +ctx_trips = rag.generate_triplets("\n".join(context)) +cmp = rag.compare_triplets(ans_trips, ctx_trips) +pair = rag.analyze_text_pair(answer_text=answer, reference_text=context[0]) +``` + +This complements token/span detectors with interpretable, fact‑level explanations. + +--- + +## Reproducibility & Environment + +- Python ≥ 3.10; tested on Python 3.12. +- Set API key for synthetic generation: `export OPENAI_API_KEY=...`. +- CPU latency context: 8‑core x86; GPU training: 1× A100 80GB. + +--- + +## Domain Fine‑tuning (Step‑by‑Step) + +1) Prepare domain data (`my_domain.json` with `question` and `context` fields). + +2) Generate synthetic training pairs in RAGTruth format: + +```bash +python scripts/generate_synthetic_data.py \ + --custom-data data/my_domain.json \ + --num-samples 5000 \ + --model gpt-4o-mini \ + --error-types factual numerical temporal \ + --intensity 0.3 \ + --output-format ragtruth \ + --output data/my_domain_synth.json +``` + +3) Concatenate with RAGTruth (optional but recommended): + +```bash +python - << 'PY' +import json +a = json.load(open('data/ragtruth/ragtruth_data.json')) +b = json.load(open('data/my_domain_synth.json')) +json.dump(a + b, open('data/train_my_domain.json','w')) +PY +``` + +4) Fine‑tune Ettin: + +```bash +python scripts/train.py \ + --model-name jhu-clsp/ettin-encoder-17m \ + --ragtruth-path data/train_my_domain.json \ + --output-dir output/tinylettuce_17m_my_domain +``` + +5) Evaluate and deploy as above. + +## Limitations & Notes + +- Results labeled “synthetic” reflect evaluation on generated data; real‑world performance depends on domain match. Consider adding a small, manually curated eval set. 
+- Baselines: we report GPT‑5‑mini and open‑source LLM baselines where available; prompt configuration impacts absolute scores. +- Metrics: synthetic and RAGTruth F1 are span‑level unless otherwise noted; thresholds and post‑processing influence outcomes. +- Links marked “Coming Soon” will be updated as assets are published; model cards will include training details and configs. + +--- + +## Citation + +If you find this work useful, please cite it as follows: + +```bibtex +@misc{Kovacs:2025:TinyLettuce, + title={TinyLettuce: Training Efficient Hallucination Detectors with Synthetic Data Generation}, + author={Ádám Kovács and Gábor Recski}, + year={2025}, + eprint={2502.xxxxx}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2502.xxxxx}, +} +``` + +--- + +## References + +[1] [RAGTruth: A Dataset for Hallucination Detection in Retrieval-Augmented Generation](https://aclanthology.org/2024.acl-long.585/) + +[2] [LettuceDetect: A Hallucination Detection Framework for RAG Applications](https://arxiv.org/abs/2502.17125) + +[3] [Ettin: Encoder Models by LightOn (paper)](https://huggingface.co/papers/2507.11412) + +[4] [Ettin Encoder Models (HF models)](https://huggingface.co/jhu-clsp/ettin-encoder-68m) + +[5] [RAGFactChecker](https://github.com/KRLabsOrg/RAGFactChecker) From be9adda280495704d3d6a876dbfe0ca5de06ede3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sun, 31 Aug 2025 01:23:24 +0200 Subject: [PATCH 12/15] Changed Tinylettuce doc --- docs/TINYLETTUCE.md | 427 ++++++++++++-------------------------------- 1 file changed, 111 insertions(+), 316 deletions(-) diff --git a/docs/TINYLETTUCE.md b/docs/TINYLETTUCE.md index fe61bb5..d00a49e 100644 --- a/docs/TINYLETTUCE.md +++ b/docs/TINYLETTUCE.md @@ -34,145 +34,78 @@ Specialized training on synthetic data beats raw parameter count. 
--- -## Get Started +## Quickstart -Install the package: +Install: ```bash pip install lettucedetect ``` -### Generate Synthetic Training Data +### Detect Hallucinations (Real-time CPU) ```python -from lettucedetect import HallucinationGenerator +from lettucedetect.models.inference import HallucinationDetector -# Initialize generator - temperature=1.0 required for GPT-5 models -generator = HallucinationGenerator(model="gpt-5", temperature=1.0) +# Load tiny but powerful model +detector = HallucinationDetector( + method="transformer", + model_path="KRLabsOrg/tinylettuce-ettin-17m-en-v1" +) -# Medical domain example: Generate numerical errors in dosage information -result = generator.generate( +# Detect hallucinations in medical context +spans = detector.predict( context=[ "Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily." ], question="What is the maximum daily dose of ibuprofen?", - answer="The maximum daily dose of ibuprofen for adults is 2400mg.", - error_types=["numerical"], - intensity=0.4 + answer="The maximum daily dose of ibuprofen for adults is 3200mg.", + output_format="spans", ) - -print(f"Original: {result['original_answer']}") -print(f"Hallucinated: {result['hallucinated_answer']}") -print(f"Modified parts: {result['hallucinated_parts']}") -# Output: -# Original: The maximum daily dose of ibuprofen for adults is 2400mg. -# Hallucinated: The maximum daily dose of ibuprofen for adults is 3200mg. 
-# Modified parts: ['3200mg'] +print(spans) +# Output: [{"start": 51, "end": 57, "text": "3200mg"}] ``` -### Control Error Intensity and Types +### Generate Synthetic Training Data + +Create training data automatically with controllable error types: ```python -# Low intensity for subtle errors -result_subtle = generator.generate( +from lettucedetect import HallucinationGenerator + +# Initialize generator (GPT‑5 requires temperature=1.0) +generator = HallucinationGenerator(model="gpt-5-mini", temperature=1.0) + +# Configure generation with error types and intensity +# Generate numerical error +result_medical = generator.generate( context=[ "Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily." ], question="What is the maximum daily dose of ibuprofen?", answer="The maximum daily dose of ibuprofen for adults is 2400mg.", error_types=["numerical"], - intensity=0.1 # Very subtle change + intensity=0.4, ) -# Output: "The maximum daily dose of ibuprofen for adults is 2500mg." +print(f"Original: {result_medical['original_answer']}") +print(f"Hallucinated: {result_medical['hallucinated_answer']}") -# Temporal errors for historical events -result_temporal = generator.generate( +# Configure generation with error types and intensity +# Generate temporal error +result_historical = generator.generate( context=[ - "Apollo 11 was the first crewed mission to land on the Moon, touching down on July 20, 1969. Neil Armstrong and Buzz Aldrin spent about 21 hours on the lunar surface." + "Apollo 11 was the first crewed mission to land on the Moon, touching down on July 20, 1969." ], question="On what date did Apollo 11 land on the Moon?", answer="Apollo 11 landed on the Moon on July 20, 1969.", error_types=["temporal"], - intensity=0.5 + intensity=0.5, ) -# Output: "Apollo 11 landed on the Moon on July 21, 1969." 
+print(f"Original: {result_historical['original_answer']}") +print(f"Hallucinated: {result_historical['hallucinated_answer']}") ``` -### Generate from Real Datasets - -```python -from datasets import load_dataset - -def load_rag_mini_bioasq(split="train", filter_min_words=10): - """Load rag-mini-bioasq dataset for generation.""" - qa_dataset = load_dataset("enelpol/rag-mini-bioasq", "question-answer-passages") - corpus_dataset = load_dataset("enelpol/rag-mini-bioasq", "text-corpus") - - # Create corpus lookup - corpus_lookup = {item["id"]: item["passage"] for item in corpus_dataset["test"]} - - processed_data = [] - for item in qa_dataset[split]: - passage_ids = item["relevant_passage_ids"] - context_passages = [corpus_lookup.get(pid, None) for pid in passage_ids] - context_passages = [p for p in context_passages if p is not None] - - if len(item["answer"].split()) >= filter_min_words: - processed_data.append({ - "question": item["question"], - "answer": item["answer"], - "context": context_passages, - }) - return processed_data - -# Load biomedical data and generate hallucinations -data = load_rag_mini_bioasq() -sample = data[3] # Example biomedical question - -result = generator.generate( - context=sample["context"], - question=sample["question"], - answer=sample["answer"] -) - -# Convert to training format (RAGTruth) -from lettucedetect.detectors.prompt_utils import PromptUtils - -train_sample = { - "prompt": PromptUtils.format_context(sample["context"], sample["question"], lang="en"), - "answer": result["hallucinated_answer"], - "labels": [{ - "start": result["hallucinated_answer"].find(part), - "end": result["hallucinated_answer"].find(part) + len(part), - "label": "hallucinated" - } for part in result["hallucinated_parts"]], - "split": "train", - "task_type": "qa" -} -``` - -### Use TinyLettuce for Detection - -```python -from lettucedetect import HallucinationDetector - -# Load tiny but powerful model -detector = HallucinationDetector( - 
method="transformer", - model_path="KRLabsOrg/tinylettuce-ettin-17m-v1" -) - -# Detect hallucinations in real-time on CPU -spans = detector.predict( - context=["Ibuprofen is an NSAID that reduces inflammation and pain. The typical adult dose is 400-600mg every 6-8 hours, not exceeding 2400mg daily."], - question="What is the maximum daily dose of ibuprofen?", - answer="The maximum daily dose of ibuprofen for adults is 3200mg.", - output_format="spans" -) - -print(spans) -# Output: [{"start": 51, "end": 57, "text": "3200mg"}] -``` +**See the notebook for complete end‑to‑end examples**: `demo/tinylettuce.ipynb` --- @@ -225,15 +158,14 @@ TinyLettuce leverages **synthetic training data** to achieve high performance. I For large datasets, use our generation script: ```bash -# Generate 10,000 training samples +# Generate 2,000 training samples python scripts/generate_synthetic_data.py \ --dataset rag-mini-bioasq \ - --num-samples 10000 \ - --model gpt-4o-mini \ - --error-types factual numerical temporal \ - --intensity 0.4 \ - --batch-size 20 \ - --output data/synthetic_10k.json + --num-samples 2000 \ + --model gpt-5-mini \ + --temperature 1.0 \ + --output-format ragtruth \ + --output data/synthetic_2k.json ``` ### Data Schema (RAGTruth format) @@ -260,50 +192,31 @@ Our **TinyLettuce** models prove that architecture and training data matter more | Model | Parameters | Context Length | Key Advantage | |-------|------------|----------------|---------------| -| **Ettin-17M** | 17 million | 8K tokens | Fastest, smallest memory | -| **Ettin-32M** | 32 million | 8K tokens | Best speed/accuracy balance | -| **Ettin-68M** | 68 million | 8K tokens | Highest accuracy | +| **Ettin-17M** | 17 million | 8K tokens | Edge deployment | +| **Ettin-32M** | 32 million | 8K tokens | Very fast, good accuracy | +| **Ettin-68M** | 68 million | 8K tokens | Higher accuracy, still very fast | Why Ettin encoders work well: - 8K token context windows (longer than most inputs) - Modern transformer 
design (RoPE, GLU activations) - Optimized for token classification, not generation -- Efficient CPU inference without GPU overhead - -**Training is straightforward:** -```bash -# 1) Generate 10K synthetic examples (~$50, ~5 hours) in RAGTruth format -python scripts/generate_synthetic_data.py \ - --dataset rag-mini-bioasq \ - --split train \ - --num-samples 10000 \ - --model gpt-4o-mini \ - --output-format ragtruth \ - --output data/synthetic_ragtruth_10k.json - -# 2) (Optional) Combine with original RAGTruth for training -python - << 'PY' -import json -a = json.load(open('data/ragtruth/ragtruth_data.json')) -b = json.load(open('data/synthetic_ragtruth_10k.json')) -json.dump(a + b, open('data/train_combined.json','w')) -PY - -# 3) Train TinyLettuce model (2–4 hours on a single GPU) -python scripts/train.py \ - --model-name jhu-clsp/ettin-encoder-68m \ - --ragtruth-path data/train_combined.json \ - --output-dir output/tinylettuce_68m -``` +- Efficient CPU inference without GPU overhead (smaller than ModernBERT models) The results speak for themselves. ### Data & Training Setup (Published Models) -- Source dataset: `enelpol/rag-mini-bioasq` (question–answer–passages). -- Training generation: 1,500 hallucinated samples (≈3,000 total including non‑hallucinated) using a 120B LLM baseline (gpt‑oss‑120b); default intensity 0.3; no explicit error‑type constraints. -- Test generation: 300 hallucinated samples (≈600 total) using GPT‑5 to improve sample quality; held out for evaluation. -- Training recipe: Ettin encoders (17M/32M/68M) fine‑tuned as token classifiers on combined synthetic + RAGTruth (RAGTruth JSON concatenation), then evaluated on synthetic and RAGTruth splits. +TinyLettuce models use two complementary training approaches: + +**1. 
General-Purpose Models (RAGTruth + Synthetic):** +- Base: Original RAGTruth dataset for broad hallucination detection capabilities +- Augmentation: 1,500 synthetic hallucinated samples from `enelpol/rag-mini-bioasq` using 120B LLM baseline +- Training recipe: Ettin encoders fine-tuned on combined data for robust performance across domains + +**2. Domain-Specific Models (Synthetic-Only):** +- Pure synthetic data generation for targeted domain applications +- Controllable error types and intensity for specific use cases +- Faster training and deployment for specialized scenarios ### Training Hyperparameters (Released Models) @@ -320,20 +233,21 @@ When we trained TinyLettuce on synthetic data and tested it against billion-para Metrics are computed at example level (answer contains any hallucination vs none). Precision/recall/F1 reflect this binary decision; thresholds and post‑processing can affect absolute values. -*When trained and evaluated on domain-specific synthetic data, tiny models dominate:* +*When trained and evaluated on domain-specific synthetic data, tiny models dominate (LettuceDetect-base shown without synthetic training):* | Model | Parameters | Precision (%) | Recall (%) | F1 (%) | Hardware | |-------|------------|---------------|------------|---------|----------| | **TinyLettuce-17M** | **17M** | 84.56 | 98.21 | **90.87** | **CPU** | | **TinyLettuce-32M** | **32M** | 80.36 | 99.10 | 88.76 | **CPU** | | **TinyLettuce-68M** | **68M** | **89.54** | 95.96 | **92.64** | **CPU** | +| LettuceDetect-base (ModernBERT) | 139M | 79.06 | 98.21 | 87.60 | GPU | | GPT-5-mini | ~200B | 71.95 | **100.00** | 83.69 | API/GPU | | GPT-OSS-120B | 120B | 72.21 | 98.64 | 83.38 | GPU | | Qwen3-235B | 235B | 66.74 | 99.32 | 79.84 | GPU | ### RAGTruth Benchmark Evaluation (example-level) -*Strong performance on standard benchmarks:* +*Strong performance on standard benchmarks (Ettin models trained on RAGTruth + synthetic data):* | Model | Parameters | F1 (%) | 
|-------|------------|---------| @@ -344,32 +258,21 @@ Metrics are computed at example level (answer contains any hallucination vs none | LettuceDetect-large (ModernBERT) | 395M | **79.22** | | Llama-2-13B (RAGTruth FT) | 13B | 78.70 | -### Relative Size vs Performance - -How a 17M model compares to a 235B model: +TinyLettuce Ettin models demonstrate impressive performance given their compact size. These models are trained on both RAGTruth and synthetic data, achieving strong results across both evaluation sets. While ModernBERT models achieve slightly higher accuracy, TinyLettuce offers 6-23x parameter reduction with competitive results, making them ideal for resource-constrained deployments. -| Aspect | TinyLettuce-17M | Qwen3-235B | -|--------|-----------------|------------| -| **Parameters** | 17 million | 235 billion | -| **F1 on Synthetic Data** | **90.87%** | 79.84% | -| **Performance Advantage** | **+11.03%** | baseline | -| **Size Advantage** | **14,000x smaller** | 1x | -| **Inference Hardware** | CPU | GPU cluster | -| **Deployment Cost** | ~$10/month | ~$10,000/month | - -Baselines and judges: we compare against commonly used LLM judges (e.g., GPT‑5‑mini, GPT‑OSS‑120B, Qwen3‑235B) and fine‑tuned encoders/decoders reported in RAGTruth and follow-up work (e.g., Llama‑2‑13B FT). Beyond benchmarks, deployment characteristics often determine real‑world value. +Baselines and judges: we compare against commonly used LLM judges (e.g., GPT‑5‑mini, GPT‑OSS‑120B, Qwen3‑235B) and fine‑tuned encoders/decoders reported in RAGTruth and follow‑up work (e.g., Llama‑2‑13B FT). Beyond benchmarks, deployment characteristics often determine real‑world value. ### Evaluation Protocol - Span construction from tokens: threshold 0.5 on token hallucination prob; contiguous tokens merged into spans. -- Reported F1 is span‑level unless explicitly noted. +- Reported F1 is example‑level unless explicitly noted. 
- Example command: ```bash python scripts/evaluate.py \ --model_path output/tinylettuce_68m \ --data_path data/ragtruth/ragtruth_data.json \ - --evaluation_type span_level + --evaluation_type example_level ``` ## Real‑Time CPU Inference @@ -406,97 +309,6 @@ python scripts/train.py \ python scripts/start_api.py prod --model output/tinylettuce_17m ``` -### Performance Characteristics - -| Metric | TinyLettuce-17M | TinyLettuce-32M | TinyLettuce-68M | GPT-5-mini API | -|--------|-----------------|-----------------|-----------------|-----------| -| **Latency** | <50ms | <75ms | <100ms | 2-10s | -| **Throughput** | 1000+ req/s | 800 req/s | 500 req/s | 10 req/s | -| **Memory** | 200MB | 350MB | 600MB | N/A | -| **Cost/1M requests** | $0.10 | $0.15 | $0.25 | $1000+ | - ---- - -## Trade‑offs: Choosing a Model Size - -When selecting a TinyLettuce variant, consider these trade-offs: - -### TinyLettuce-17M -- **Best for**: High-throughput, latency-critical applications -- **Pros**: Fastest inference, smallest memory footprint, lowest cost -- **Cons**: Slightly lower accuracy on complex cases -- **Use cases**: Real-time RAG validation, edge deployment - -### TinyLettuce-32M -- **Best for**: Balanced production deployments -- **Pros**: Good accuracy/speed balance, reasonable memory usage -- **Cons**: Moderate resource requirements -- **Use cases**: Production RAG pipelines, content moderation - -### TinyLettuce-68M -- **Best for**: Accuracy-critical applications -- **Pros**: Highest detection accuracy, still CPU-efficient -- **Cons**: Higher memory and compute requirements -- **Use cases**: High-stakes content validation, research applications - ---- - -## Converting Synthetic Data to Training Format - -Transform generated data into RAGTruth format for model training: - -```python -from lettucedetect.detectors.prompt_utils import PromptUtils - -def convert_to_ragtruth_format(samples, results, language="en"): - ragtruth_data = [] - - for sample, result in zip(samples, results): 
- # Format context using LettuceDetect's prompt utils - formatted_prompt = PromptUtils.format_context( - sample['context'], - sample['question'], - lang=language - ) - - # Non-hallucinated sample - ragtruth_data.append({ - "prompt": formatted_prompt, - "answer": result.generated_non_hlcntn_answer, - "labels": [], # No hallucinations - "split": "train", - "task_type": "qa", - "dataset": "synthetic", - "language": language - }) - - # Hallucinated sample with span labels - hallucinated_labels = [] - hallucinated_answer = result.generated_hlcntn_answer - - # Create span labels from hallucinated parts - for part in result.hlcntn_part: - if isinstance(part, str) and part in hallucinated_answer: - start = hallucinated_answer.find(part) - if start != -1: - hallucinated_labels.append({ - "start": start, - "end": start + len(part), - "label": "hallucinated" - }) - - ragtruth_data.append({ - "prompt": formatted_prompt, - "answer": hallucinated_answer, - "labels": hallucinated_labels, - "split": "train", - "task_type": "qa", - "dataset": "synthetic", - "language": language - }) - - return ragtruth_data -``` --- @@ -515,93 +327,76 @@ def convert_to_ragtruth_format(samples, results, language="en"): ## Bonus: Triplet‑Based RAGFactChecker -RAGFactChecker exposes a symbolic path for analysis using knowledge triplets. +We have implemented a triplet-based hallucination detection model that you can use the same way as the standard lettucecedetect models. 
Generate triplets from any text: ```python +from lettucedetect.models.inference import HallucinationDetector from lettucedetect.ragfactchecker import RAGFactChecker -rag = RAGFactChecker(model="gpt-4o-mini") # requires OPENAI_API_KEY +detector = HallucinationDetector( + method="rag_fact_checker", +) + +rag = RAGFactChecker(model="gpt-5-mini") # requires OPENAI_API_KEY triplets = rag.generate_triplets("Paris is the capital of France.") print(triplets) # e.g., [["Paris", "is_capital_of", "France"]] ``` -Triplet‑based hallucination detection against context: +Compare triplets against each other: ```python -context = [ - "France is a country in Europe. The capital of France is Paris. Population is ~67M." -] -answer = "France's capital is Lyon and population is 67M." - -res = rag.detect_hallucinations(context=context, answer=answer) -print(res["hallucinated_triplets"]) # triplets not supported by context +compare = rag.analyze_text_pair( + "France is a country in Europe.", "France is a country in Asia."
+) +print(compare) +#{ +# 'answer_triplets': [['France', 'is', 'a country in Europe']], +# 'reference_triplets': [['France', 'is', 'a country in Asia']], +# 'comparison': { +# 'fact_check_results': {0: False}, +# 'raw_output': FactCheckerOutput(fact_check_prediction_binary={0: False}) +# } +#} ``` -Direct triplet comparison and pairwise analysis: +Use it for detecting hallucinations in your data: ```python -ans_trips = rag.generate_triplets(answer) -ctx_trips = rag.generate_triplets("\n".join(context)) -cmp = rag.compare_triplets(ans_trips, ctx_trips) -pair = rag.analyze_text_pair(answer_text=answer, reference_text=context[0]) +# You can use it for detecting hallucinations in your data +result = detector.predict( + context="The capital of France is Paris.", + question="What is the capital of France?", + answer="The capital of France is Berlin.", + output_format="detailed", +) +print(result) +#{ +# 'spans': [ +# { +# 'start': 0, +# 'end': 31, +# 'text': 'The capital of France is Berlin', +# 'confidence': 0.9, +# 'triplet': ['the capital of France', 'is', 'Berlin'] +# } +# ], +# 'triplets': { +# 'answer': [['the capital of France', 'is', 'Berlin']], +# 'context': [['The capital of France', 'is', 'Paris']], +# 'hallucinated': [['the capital of France', 'is', 'Berlin']] +# }, +# 'fact_check_results': {0: False} +#} ``` This complements token/span detectors with interpretable, fact‑level explanations. --- -## Reproducibility & Environment - -- Python ≥ 3.10; tested on Python 3.12. -- Set API key for synthetic generation: `export OPENAI_API_KEY=...`. -- CPU latency context: 8‑core x86; GPU training: 1× A100 80GB. - ---- - -## Domain Fine‑tuning (Step‑by‑Step) - -1) Prepare domain data (`my_domain.json` with `question` and `context` fields). 
- -2) Generate synthetic training pairs in RAGTruth format: - -```bash -python scripts/generate_synthetic_data.py \ - --custom-data data/my_domain.json \ - --num-samples 5000 \ - --model gpt-4o-mini \ - --error-types factual numerical temporal \ - --intensity 0.3 \ - --output-format ragtruth \ - --output data/my_domain_synth.json -``` - -3) Concatenate with RAGTruth (optional but recommended): - -```bash -python - << 'PY' -import json -a = json.load(open('data/ragtruth/ragtruth_data.json')) -b = json.load(open('data/my_domain_synth.json')) -json.dump(a + b, open('data/train_my_domain.json','w')) -PY -``` - -4) Fine‑tune Ettin: - -```bash -python scripts/train.py \ - --model-name jhu-clsp/ettin-encoder-17m \ - --ragtruth-path data/train_my_domain.json \ - --output-dir output/tinylettuce_17m_my_domain -``` - -5) Evaluate and deploy as above. - ## Limitations & Notes - Results labeled “synthetic” reflect evaluation on generated data; real‑world performance depends on domain match. Consider adding a small, manually curated eval set. - Baselines: we report GPT‑5‑mini and open‑source LLM baselines where available; prompt configuration impacts absolute scores. - Metrics: synthetic and RAGTruth F1 are span‑level unless otherwise noted; thresholds and post‑processing influence outcomes. -- Links marked “Coming Soon” will be updated as assets are published; model cards will include training details and configs. 
--- From fb567ff38081d4f2d490854b19da94c673c31a53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sun, 31 Aug 2025 15:00:25 +0200 Subject: [PATCH 13/15] Readme changes --- README.md | 9 +++++---- .../integrations/langchain/examples/streamlit_app.py | 10 +++------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index bda443a..ab39c2a 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,8 @@ Our models are inspired from the [Luna](https://aclanthology.org/2025.coling-ind ## 🚀 Latest Updates -- **May 18, 2025** - Released version **0.1.7**: Multilingual support (thanks to EuroBERT) for 7 languages: English, German, French, Spanish, Italian, Polish, and Chinese! +- **August 31, 2025** - Released version **0.1.8**: Added TinyLettuce Ettin models for 17M, 32M, and 68M variants, Hallucination generation pipeline and added RAGFactChecker for triplet-based hallucination detection. +- May 18, 2025 - Released version **0.1.7**: Multilingual support (thanks to EuroBERT) for 7 languages: English, German, French, Spanish, Italian, Polish, and Chinese! 
- Up to **17 F1 points improvement** over baseline LLM judges like GPT-4.1-mini across different languages - **EuroBERT models**: We've trained base/210M (faster) and large/610M (more accurate) variants - You can now also use **LLM baselines** for hallucination detection (see below) @@ -60,8 +61,8 @@ pip install lettucedetect -U Check out our models published to Huggingface: **English Models**: -- Base: [KRLabsOrg/lettucedetect-base-modernbert-en-v1](https://huggingface.co/KRLabsOrg/lettucedetect-base-modernbert-en-v1) -- Large: [KRLabsOrg/lettucedetect-large-modernbert-en-v1](https://huggingface.co/KRLabsOrg/lettucedetect-large-modernbert-en-v1) +- Base: [KRLabsOrg/lettucedect-base-modernbert-en-v1](https://huggingface.co/KRLabsOrg/lettucedect-base-modernbert-en-v1) +- Large: [KRLabsOrg/lettucedect-large-modernbert-en-v1](https://huggingface.co/KRLabsOrg/lettucedect-large-modernbert-en-v1) **Multilingual Models**: We've trained 210m and 610m variants of EuroBERT, see our HuggingFace collection: [HF models](https://huggingface.co/collections/KRLabsOrg/multilingual-hallucination-detection-682a2549c18ecd32689231ce) @@ -266,7 +267,7 @@ positional arguments: options: -h, --help show this help message and exit --model MODEL Path or huggingface URL to the model. The default value is - "KRLabsOrg/lettucedetect-base-modernbert-en-v1". + "KRLabsOrg/lettucedect-base-modernbert-en-v1". --method {transformer} Hallucination detection method. The default value is "transformer". 
diff --git a/lettucedetect/integrations/langchain/examples/streamlit_app.py b/lettucedetect/integrations/langchain/examples/streamlit_app.py index 8ce5766..2513a5f 100644 --- a/lettucedetect/integrations/langchain/examples/streamlit_app.py +++ b/lettucedetect/integrations/langchain/examples/streamlit_app.py @@ -166,7 +166,7 @@ def get_llm(): @st.cache_resource def get_detector(): - model_path = "output/hallucination_detection_ettin_17m" + model_path = "KRLabsOrg/tinylettuce-ettin-17m-en" if os.path.exists(model_path): return HallucinationDetector(method="transformer", model_path=model_path) else: @@ -200,12 +200,8 @@ def handle_detection(result): # Create callbacks detection_callback = LettuceStreamingCallback( - method="transformer" - if os.path.exists("output/hallucination_detection_ettin_17m") - else "rag_fact_checker", - model_path="output/hallucination_detection_ettin_17m" - if os.path.exists("output/hallucination_detection_ettin_17m") - else None, + method="transformer", + model_path="KRLabsOrg/tinylettuce-ettin-17m-en", context=[context], question=question, check_every=10, From 620d05c4e638591c18284a306d39b7da33059f69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sun, 31 Aug 2025 16:56:01 +0200 Subject: [PATCH 14/15] Added documentation --- README.md | 3 ++ docs/TINYLETTUCE.md | 123 ++++++++++++++++++-------------------------- 2 files changed, 52 insertions(+), 74 deletions(-) diff --git a/README.md b/README.md index ab39c2a..4d413ab 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ Our models are inspired from the [Luna](https://aclanthology.org/2025.coling-ind ## 🚀 Latest Updates - **August 31, 2025** - Released version **0.1.8**: Added TinyLettuce Ettin models for 17M, 32M, and 68M variants, Hallucination generation pipeline and added RAGFactChecker for triplet-based hallucination detection. + - See [TinyLettuce Blog Post](https://huggingface.co/KRLabsOrg/tinylettuce-68b42a66b8b6aaa4bf287bf4) for more details. 
+ - Our collection on Hugging Face: [TinyLettuce](https://huggingface.co/collections/KRLabsOrg/tinylettuce-68b42a66b8b6aaa4bf287bf4) + - See the documentation: [TinyLettuce Documentation](docs/TINYLETTUCE.md) for more details. - May 18, 2025 - Released version **0.1.7**: Multilingual support (thanks to EuroBERT) for 7 languages: English, German, French, Spanish, Italian, Polish, and Chinese! - Up to **17 F1 points improvement** over baseline LLM judges like GPT-4.1-mini across different languages - **EuroBERT models**: We've trained base/210M (faster) and large/610M (more accurate) variants diff --git a/docs/TINYLETTUCE.md b/docs/TINYLETTUCE.md index d00a49e..55fafb7 100644 --- a/docs/TINYLETTUCE.md +++ b/docs/TINYLETTUCE.md @@ -1,4 +1,4 @@ -# TinyLettuce: Efficient Hallucination Detection with 17–68M Encoders +# 🥬 TinyLettuce: Efficient Hallucination Detection with 17–68M Encoders

TinyLettuce Detective @@ -8,18 +8,17 @@ --- -We present **TinyLettuce**, our approach to efficient hallucination detection. By training tiny Ettin encoders (17-68M parameters) on synthetic data, we achieve better accuracy than billion-parameter LLM judges while running in real-time on CPU. +We present **TinyLettuce**, our approach to efficient hallucination detection. By training tiny Ettin encoders (17-68M parameters), we achieve better accuracy than billion-parameter LLM judges while running in real-time on CPU. ## TL;DR -- **TinyLettuce‑17M** (17M parameters) reaches **90.87% F1**, outperforming GPT‑5‑mini (83.69%), GPT‑OSS‑120B (83.38%), and Qwen3‑235B (79.84%) -- Runs in **real-time on CPU** with <50ms latency and 1000+ req/s throughput -- **Synthetic data generation** creates training data **100x cheaper** than manual annotation +- We're releasing a pipeline for generating synthetic training data for hallucination detection and training tiny Ettin encoders on it. +- **TinyLettuce‑17M** (17M parameters) reaches **90.87% F1** 🎯 on synthetic test data, outperforming GPT‑5‑mini (83.69%), GPT‑OSS‑120B (83.38%), and Qwen3‑235B (79.84%) +- Runs in **real-time on CPU** with low latency and large throughput +- **Synthetic data generation** creates training data **significantly cheaper** than manual annotation - Complete **end‑to‑end pipeline** for domain-specific model training - generate data and train in minutes - All models and code are **MIT licensed** and ready for production deployment -Specialized training on synthetic data beats raw parameter count. - --- ## Quick Links @@ -27,10 +26,9 @@ Specialized training on synthetic data beats raw parameter count. 
- **GitHub**: [github.com/KRLabsOrg/LettuceDetect](https://github.com/KRLabsOrg/LettuceDetect) - **PyPI**: [pypi.org/project/lettucedetect](https://pypi.org/project/lettucedetect/) - **Hugging Face Models**: - - [TinyLettuce Collection](https://huggingface.co/collections/KRLabsOrg/tinylettuce-models) (Coming Soon) -- **Demo**: [Synthetic Data Generation Showcase](../demo/synthetic_data_generation_showcase.ipynb) -- **Notebook**: [TinyLettuce end‑to‑end](../demo/tinylettuce.ipynb) - - **Ettin Paper (LightOn)**: https://huggingface.co/papers/2507.11412 + - [TinyLettuce Collection](https://huggingface.co/collections/KRLabsOrg/tinylettuce-68b42a66b8b6aaa4bf287bf4) +- **Notebook/Demo**: [TinyLettuce end‑to‑end](https://github.com/KRLabsOrg/LettuceDetect/blob/main/demo/tinylettuce.ipynb) +- **Ettin Paper (LightOn)**: https://huggingface.co/papers/2507.11412 --- @@ -44,6 +42,8 @@ pip install lettucedetect ### Detect Hallucinations (Real-time CPU) +Take one of our pre-trained models and use it for detecting hallucinations in your data: + ```python from lettucedetect.models.inference import HallucinationDetector @@ -68,7 +68,7 @@ print(spans) ### Generate Synthetic Training Data -Create training data automatically with controllable error types: +With **lettucedetect**, you can create training data automatically with controllable error types using the HallucinationGenerator class. Generate domain-specific training data with just a few lines of code while controlling error types and intensity. 
```python from lettucedetect import HallucinationGenerator @@ -76,7 +76,6 @@ from lettucedetect import HallucinationGenerator # Initialize generator (GPT‑5 requires temperature=1.0) generator = HallucinationGenerator(model="gpt-5-mini", temperature=1.0) -# Configure generation with error types and intensity # Generate numerical error result_medical = generator.generate( context=[ @@ -90,7 +89,6 @@ result_medical = generator.generate( print(f"Original: {result_medical['original_answer']}") print(f"Hallucinated: {result_medical['hallucinated_answer']}") -# Configure generation with error types and intensity # Generate temporal error result_historical = generator.generate( context=[ @@ -105,13 +103,13 @@ print(f"Original: {result_historical['original_answer']}") print(f"Hallucinated: {result_historical['hallucinated_answer']}") ``` -**See the notebook for complete end‑to‑end examples**: `demo/tinylettuce.ipynb` +**See the notebook for complete end‑to‑end examples**: [TinyLettuce notebook](https://github.com/KRLabsOrg/LettuceDetect/blob/main/demo/tinylettuce.ipynb) --- ## Motivation -RAG systems need hallucination detection, but current solutions force painful trade-offs between accuracy, cost, and speed. +RAG systems require hallucination detection, but current solutions have painful trade-offs between accuracy, cost, and speed. **Current hallucination detection approaches:** @@ -128,42 +126,37 @@ RAG systems need hallucination detection, but current solutions force painful tr - Fast and efficient but historically limited by short context (512 tokens) - Can't handle typical RAG contexts which often exceed this limit -**LettuceDetect's breakthrough**: We solved the context problem by leveraging ModernBERT's 8K token capacity, achieving better accuracy than fine-tuned LLMs at a fraction of the computational cost. This proved encoder-based detection could work at scale. 
+**LettuceDetect's novel approach**: We solved the context problem by leveraging ModernBERT's 8K token capacity, achieving better accuracy than fine-tuned LLMs at a fraction of the computational cost. This shows that encoder-based detection could work at scale. -**But we asked: can we go even smaller and faster?** +**Can we go even smaller and faster?** -**Enter TinyLettuce with Ettin encoders**: These lightweight transformers (17–68M parameters), introduced by LightOn, support 8K token contexts and are optimized for classification. Unlike large generative LLMs, Ettin focuses on efficient representation learning for fast, accurate detection. +**Enter TinyLettuce with Ettin encoders**: Ettin encoders released by LightOn (see the [HF collection](https://huggingface.co/collections/jhu-clsp/encoders-vs-decoders-the-ettin-suite-686303e16142257eed8e6aeb) and [paper](https://huggingface.co/papers/2507.11412)) are small, long‑context encoders with modern architectures. These lightweight transformers (17–68M parameters) support long contexts and are optimized for classification and retrieval, focusing on efficient representation learning for fast, accurate detection. -**The key insight**: With the right synthetic training data, a 17M parameter Ettin encoder can outperform 235B parameter giants at hallucination detection while running real-time on CPU. TinyLettuce democratizes hallucination detection by making it accessible, fast, and cost-effective for any deployment. +**The key insight**: With the right synthetic training data, a 17M parameter Ettin encoder can outperform 235B parameter LLMs at hallucination detection while running real-time on CPU. TinyLettuce makes it easy to use small models for hallucination detection by making it accessible, fast, and cost-effective for any deployment. ## Approach -We discovered something counterintuitive: **specialized training data matters more than parameter count**. 
With the right synthetic training data, a 17M parameter model can outperform 235B parameter giants at hallucination detection. +**Specialized training data can matter more than parameter count**: -Our approach challenges conventional wisdom through four steps: - -1. **Generate synthetic data** using RAGFactChecker - no manual annotation needed +1. **Generate synthetic data** using LettuceDetect's HallucinationGenerator class - no manual annotation needed 2. **Train tiny Ettin encoders** (17M-68M parameters) on this specialized data -3. **Deploy on CPU** for real-time inference at <50ms latency -4. **Scale effortlessly** - no GPU clusters or API limits - ---- +3. **Deploy on CPU** for real-time inference with low latency and high throughput +4. **Scale effortlessly** - no GPU clusters or API limits (it's just a trained model) ## Synthetic Hallucination Data -TinyLettuce leverages **synthetic training data** to achieve high performance. Instead of manually annotating thousands of examples, we use RAGFactChecker to generate training pairs automatically at scale. +You can use LettuceDetect's HallucinationGenerator class to generate training pairs automatically at scale. ### Production-Scale Generation For large datasets, use our generation script: ```bash -# Generate 2,000 training samples +# Generate 2,000 training samples (1,000 hallucinated + 1,000 non-hallucinated) python scripts/generate_synthetic_data.py \ --dataset rag-mini-bioasq \ --num-samples 2000 \ - --model gpt-5-mini \ - --temperature 1.0 \ + --model gpt-oss-120b \ --output-format ragtruth \ --output data/synthetic_2k.json ``` @@ -186,7 +179,7 @@ Minimal entry used for training: ## TinyLettuce Models (Ettin Encoders) -Our **TinyLettuce** models prove that architecture and training data matter more than parameter count. Built on the **Ettin encoder** (LightOn) — a lightweight, efficient transformer optimized for classification — these models achieve strong accuracy with low latency. 
+Built on the **Ettin encoder** (LightOn) — a lightweight, efficient transformer optimized for classification — these models achieve strong accuracy with low latency. ### Model Family @@ -202,37 +195,38 @@ Why Ettin encoders work well: - Optimized for token classification, not generation - Efficient CPU inference without GPU overhead (smaller than ModernBERT models) -The results speak for themselves. - ### Data & Training Setup (Published Models) -TinyLettuce models use two complementary training approaches: +We show two training approaches for TinyLettuce models: **1. General-Purpose Models (RAGTruth + Synthetic):** - Base: Original RAGTruth dataset for broad hallucination detection capabilities -- Augmentation: 1,500 synthetic hallucinated samples from `enelpol/rag-mini-bioasq` using 120B LLM baseline -- Training recipe: Ettin encoders fine-tuned on combined data for robust performance across domains +- Synthetic augmentation: 3,000 total samples (1,500 hallucinated + 1,500 non-hallucinated) from `enelpol/rag-mini-bioasq` generated using GPT-OSS-120b +- Training recipe: Ettin encoders fine-tuned on combined RAGTruth + synthetic data for robust performance across domains **2. Domain-Specific Models (Synthetic-Only):** -- Pure synthetic data generation for targeted domain applications +- Pure synthetic data generation for targeted domain applications - Controllable error types and intensity for specific use cases - Faster training and deployment for specialized scenarios +- Trained on 3,000 synthetic samples (1,500 hallucinated + 1,500 non-hallucinated) ### Training Hyperparameters (Released Models) - Optimizer: AdamW; learning rate `1e-5`; weight decay `0.01`. -- Epochs: 3–6 (released checkpoints typically 3 for Ettin‑17M/32M, 3–6 for Ettin‑68M). -- Batch size: 8; max sequence length: 4096 tokens. +- Epochs: 5 +- Batch size: 16; max sequence length: 4096 tokens. - Tokenization: `AutoTokenizer`; label pad `-100`; `DataCollatorForTokenClassification`. 
## Results -When we trained TinyLettuce on synthetic data and tested it against billion-parameter models, the results shocked us. +We trained several variants of Ettin encoders on synthetic data and tested them against larger scale LLM judges and fine-tuned encoders. ### Synthetic Data Evaluation (example-level) Metrics are computed at example level (answer contains any hallucination vs none). Precision/recall/F1 reflect this binary decision; thresholds and post‑processing can affect absolute values. +**Test Set**: 600 synthetic examples (300 hallucinated + 300 non-hallucinated) generated with GPT-5-mini for fair evaluation. + *When trained and evaluated on domain-specific synthetic data, tiny models dominate (LettuceDetect-base shown without synthetic training):* | Model | Parameters | Precision (%) | Recall (%) | F1 (%) | Hardware | @@ -240,7 +234,7 @@ Metrics are computed at example level (answer contains any hallucination vs none | **TinyLettuce-17M** | **17M** | 84.56 | 98.21 | **90.87** | **CPU** | | **TinyLettuce-32M** | **32M** | 80.36 | 99.10 | 88.76 | **CPU** | | **TinyLettuce-68M** | **68M** | **89.54** | 95.96 | **92.64** | **CPU** | -| LettuceDetect-base (ModernBERT) | 139M | 79.06 | 98.21 | 87.60 | GPU | +| LettuceDetect-base (ModernBERT) | 150M | 79.06 | 98.21 | 87.60 | GPU | | GPT-5-mini | ~200B | 71.95 | **100.00** | 83.69 | API/GPU | | GPT-OSS-120B | 120B | 72.21 | 98.64 | 83.38 | GPU | | Qwen3-235B | 235B | 66.74 | 99.32 | 79.84 | GPU | @@ -254,7 +248,7 @@ Metrics are computed at example level (answer contains any hallucination vs none | **TinyLettuce-17M** | **17M** | 68.52 | | **TinyLettuce-32M** | **32M** | 72.15 | | **TinyLettuce-68M** | **68M** | **74.97** | -| LettuceDetect-base (ModernBERT) | — | 76.07 | +| LettuceDetect-base (ModernBERT) | 150M | 76.07 | | LettuceDetect-large (ModernBERT) | 395M | **79.22** | | Llama-2-13B (RAGTruth FT) | 13B | 78.70 | @@ -262,7 +256,7 @@ TinyLettuce Ettin models demonstrate impressive performance given 
their compact Baselines and judges: we compare against commonly used LLM judges (e.g., GPT‑5‑mini, GPT‑OSS‑120B, Qwen3‑235B) and fine‑tuned encoders/decoders reported in RAGTruth and follow‑up work (e.g., Llama‑2‑13B FT). Beyond benchmarks, deployment characteristics often determine real‑world value. -### Evaluation Protocol +### Evaluation Method - Span construction from tokens: threshold 0.5 on token hallucination prob; contiguous tokens merged into spans. - Reported F1 is example‑level unless explicitly noted. @@ -277,7 +271,7 @@ python scripts/evaluate.py \ ## Real‑Time CPU Inference -TinyLettuce's biggest advantage isn't just accuracy — it's accessibility. These models run in real time on standard CPUs, making hallucination detection practical to deploy widely. +TinyLettuce's biggest advantage isn't just accuracy — it's accessibility ⚡. These models run in real time on standard CPUs, making hallucination detection practical to deploy widely. ### End-to-End Workflow @@ -286,18 +280,12 @@ TinyLettuce's biggest advantage isn't just accuracy — it's accessibility. Thes python scripts/generate_synthetic_data.py \ --dataset rag-mini-bioasq \ --num-samples 50000 \ - --model gpt-4o-mini \ + --model gpt-oss-120b \ --batch-size 50 \ + --output-format ragtruth \ --output data/synthetic_large.json # Step 2: Train TinyLettuce model -python - << 'PY' -import json -a = json.load(open('data/ragtruth/ragtruth_data.json')) -b = json.load(open('data/synthetic_large.json')) -json.dump(a + b, open('data/train_combined_large.json','w')) -PY - python scripts/train.py \ --ragtruth-path data/train_combined_large.json \ --model-name jhu-clsp/ettin-encoder-17m \ @@ -312,22 +300,9 @@ python scripts/start_api.py prod --model output/tinylettuce_17m --- -## Key Takeaways - -**Small Specialized > Large Generalist**: TinyLettuce-68M (92.64% F1) outperforms Qwen3-235B (79.84% F1) while being 14,000x smaller. Task-specific training beats raw parameter count. 
- -**Dramatic Cost Reduction**: Synthetic data generation costs significantly less than manual annotation. CPU inference eliminates expensive API calls and GPU requirements. - -**Real-Time CPU Inference**: TinyLettuce models achieve <50ms latency and 1000+ req/s on standard CPUs, making hallucination detection practical for any deployment. - -**Synthetic Data Breakthrough**: RAGFactChecker-generated synthetic data enables 90%+ F1 scores - higher than what these same models achieve on manually annotated RAGTruth data. - -**Complete Open Pipeline**: End-to-end framework from data generation to model deployment available under MIT license. No expensive GPUs or API calls required. - - ## Bonus: Triplet‑Based RAGFactChecker -We have implemented a triplet-based hallucination detection model that you can use the same way as the standard lettucecedetect models. +We have implemented a triplet-based hallucination detection model that you can use the same way as the standard lettucedetect models. Generate triplets from any text: ```python @@ -388,7 +363,7 @@ print(result) #} ``` -This complements token/span detectors with interpretable, fact‑level explanations. +This complements token/span detectors with interpretable, fact-level explanations. --- @@ -396,7 +371,7 @@ This complements token/span detectors with interpretable, fact‑level explanati - Results labeled “synthetic” reflect evaluation on generated data; real‑world performance depends on domain match. Consider adding a small, manually curated eval set. - Baselines: we report GPT‑5‑mini and open‑source LLM baselines where available; prompt configuration impacts absolute scores. -- Metrics: synthetic and RAGTruth F1 are span‑level unless otherwise noted; thresholds and post‑processing influence outcomes. +- Metrics: synthetic and RAGTruth F1 are example-level unless otherwise noted; thresholds and post‑processing influence outcomes. 
--- @@ -405,14 +380,14 @@ This complements token/span detectors with interpretable, fact‑level explanati If you find this work useful, please cite it as follows: ```bibtex -@misc{Kovacs:2025:TinyLettuce, - title={TinyLettuce: Training Efficient Hallucination Detectors with Synthetic Data Generation}, +@misc{Kovacs:2025, + title={LettuceDetect: A Hallucination Detection Framework for RAG Applications}, author={Ádám Kovács and Gábor Recski}, year={2025}, - eprint={2502.xxxxx}, + eprint={2502.17125}, archivePrefix={arXiv}, primaryClass={cs.CL}, - url={https://arxiv.org/abs/2502.xxxxx}, + url={https://arxiv.org/abs/2502.17125}, } ``` From 864dd32e21a43ba3807518528dbb7ad02d4ace5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kov=C3=A1cs=20=C3=81d=C3=A1m?= Date: Sun, 31 Aug 2025 18:01:58 +0200 Subject: [PATCH 15/15] Release v0.1.8 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ce255a4..0d62884 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "lettucedetect" -version = "0.1.7" +version = "0.1.8" description = "Lettucedetect is a framework for detecting hallucinations in RAG applications." readme = {file = "README.md", content-type = "text/markdown"} requires-python = ">=3.10"