From 8822570cd75c07b2dd1c21e4ebdbf33e47514cad Mon Sep 17 00:00:00 2001 From: Rakshya Panta Date: Thu, 28 Aug 2025 04:46:20 +0545 Subject: [PATCH] add code to examples folder --- examples/README.md | 91 ++++++ examples/graph_class/distill_students_gc.py | 110 +++++++ examples/graph_class/eval_verifier_gc.py | 268 ++++++++++++++++++ examples/graph_class/fine_tune_pirate_gc.py | 203 +++++++++++++ .../graph_class/fingerprint_generator_gc.py | 246 ++++++++++++++++ .../generate_univerifier_dataset_gc.py | 114 ++++++++ examples/graph_class/gsage_gc.py | 59 ++++ examples/graph_class/train_gc.py | 171 +++++++++++ examples/graph_class/train_unrelated_gc.py | 154 ++++++++++ examples/link_pred/distill_students_lp.py | 146 ++++++++++ examples/link_pred/eval_verifier_lp.py | 218 ++++++++++++++ examples/link_pred/fine_tune_pirate_lp.py | 231 +++++++++++++++ .../link_pred/fingerprint_generator_lp.py | 265 +++++++++++++++++ examples/link_pred/gcn_lp.py | 101 +++++++ .../generate_univerifier_dataset_lp.py | 127 +++++++++ examples/link_pred/train_lp.py | 183 ++++++++++++ examples/link_pred/train_unrelated_lp.py | 199 +++++++++++++ examples/node_class/distill_students_nc.py | 88 ++++++ examples/node_class/eval_verifier_nc.py | 257 +++++++++++++++++ examples/node_class/fine_tune_pirate_nc.py | 116 ++++++++ .../node_class/fingerprint_generator_nc.py | 262 +++++++++++++++++ examples/node_class/gcn_nc.py | 85 ++++++ .../generate_univerifier_dataset_nc.py | 108 +++++++ examples/node_class/make_suspect_nc.py | 74 +++++ examples/node_class/score_suspect_nc.py | 87 ++++++ examples/node_class/train_nc.py | 79 ++++++ examples/node_class/train_univerifier_nc.py | 98 +++++++ examples/node_class/train_unrelated_nc.py | 88 ++++++ examples/plots/citeseer_lp_aruc.png | Bin 0 -> 73936 bytes examples/plots/cora_nc_aruc.csv | 202 +++++++++++++ examples/plots/cora_nc_aruc.png | Bin 0 -> 65023 bytes examples/plots/enzymes_gc_aruc.png | Bin 0 -> 76698 bytes examples/plots/note.txt | 4 + 
examples/requirements.txt | 50 ++++ examples/train_univerifier.py | 98 +++++++ 35 files changed, 4582 insertions(+) create mode 100644 examples/README.md create mode 100644 examples/graph_class/distill_students_gc.py create mode 100644 examples/graph_class/eval_verifier_gc.py create mode 100644 examples/graph_class/fine_tune_pirate_gc.py create mode 100644 examples/graph_class/fingerprint_generator_gc.py create mode 100644 examples/graph_class/generate_univerifier_dataset_gc.py create mode 100644 examples/graph_class/gsage_gc.py create mode 100644 examples/graph_class/train_gc.py create mode 100644 examples/graph_class/train_unrelated_gc.py create mode 100644 examples/link_pred/distill_students_lp.py create mode 100644 examples/link_pred/eval_verifier_lp.py create mode 100644 examples/link_pred/fine_tune_pirate_lp.py create mode 100644 examples/link_pred/fingerprint_generator_lp.py create mode 100644 examples/link_pred/gcn_lp.py create mode 100644 examples/link_pred/generate_univerifier_dataset_lp.py create mode 100644 examples/link_pred/train_lp.py create mode 100644 examples/link_pred/train_unrelated_lp.py create mode 100644 examples/node_class/distill_students_nc.py create mode 100644 examples/node_class/eval_verifier_nc.py create mode 100644 examples/node_class/fine_tune_pirate_nc.py create mode 100644 examples/node_class/fingerprint_generator_nc.py create mode 100644 examples/node_class/gcn_nc.py create mode 100644 examples/node_class/generate_univerifier_dataset_nc.py create mode 100644 examples/node_class/make_suspect_nc.py create mode 100644 examples/node_class/score_suspect_nc.py create mode 100644 examples/node_class/train_nc.py create mode 100644 examples/node_class/train_univerifier_nc.py create mode 100644 examples/node_class/train_unrelated_nc.py create mode 100644 examples/plots/citeseer_lp_aruc.png create mode 100644 examples/plots/cora_nc_aruc.csv create mode 100644 examples/plots/cora_nc_aruc.png create mode 100644 
examples/plots/enzymes_gc_aruc.png create mode 100644 examples/plots/note.txt create mode 100644 examples/requirements.txt create mode 100644 examples/train_univerifier.py diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..78d7ba3 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,91 @@ +#Fingerprinting Graph Neural Networks + +Steps + +1. Create virtual env. Activate it. +2. Install requirements + ```bash + pip install -r requirements.txt + ``` +3. Create folders + + ```bash + mkdir -p data models fingerprints plots + ``` + +### GNN task types: Node Classification, Link Prediction, Graph Classification + +For Node Classification (NC): \ +  Folder name: node_class/ \ +  Filename Suffix: \*\_nc.\* + +For Link Prediction (LP): \ +  Folder name: link_pred/ \ +  Filename Suffix: \*\_lp.\* + +For Graph Classification (GC): \ +  Folder name: graph_class/ \ +  Filename Suffix: \*\_gc.\* + + Example: \ +  `bash + python node_class/train_nc.py ` \ +  `bash + python link_pred/train_lp.py ` \ +  `bash + python graph_class/train_gc.py ` + +### For node classification task on Cora dataset (GCN arch) + +```bash +python node_class/train_nc.py +``` + +```bash +python node_class/fine_tune_pirate_nc.py +``` + +```bash +python node_class/distill_students_nc.py +``` + +```bash +python node_class/train_unrelated_nc.py +``` + +```bash +python node_class/fingerprint_generator_nc.py +``` + +```bash +python node_class/generate_univerifier_dataset_nc.py +``` + +```bash +python train_univerifier.py --dataset fingerprints/univerifier_dataset_nc.pt --fingerprints_path fingerprints/fingerprints_nc.pt --out fingerprints/univerifier_nc.pt +``` + +```bash +python node_class/eval_verifier_nc.py +``` + +Follow similar approach as Node Classification for Link Prediction on Citeseer dataset (GCN arch) and Graph Classification on ENZYMES dataset (Graphsage arch). 
+ +Change argument paths for LP and GC for training univerifier + +```bash +python train_univerifier.py --dataset fingerprints/univerifier_dataset_lp.pt --fingerprints_path fingerprints/fingerprints_lp.pt --out fingerprints/univerifier_lp.pt +``` + +```bash +python train_univerifier.py --dataset fingerprints/univerifier_dataset_gc.pt --fingerprints_path fingerprints/fingerprints_gc.pt --out fingerprints/univerifier_gc.pt +``` + +To evaluate suspect GNNs for NC tasks + ```bash + python node_class/make_suspect_nc.py + ``` + ```bash + python node_class/score_suspect_nc.py --suspect_pt models/suspects/neg_nc_seed9999.pt --suspect_meta models/suspects/neg_nc_seed9999.json + ``` + diff --git a/examples/graph_class/distill_students_gc.py b/examples/graph_class/distill_students_gc.py new file mode 100644 index 0000000..b6632ce --- /dev/null +++ b/examples/graph_class/distill_students_gc.py @@ -0,0 +1,110 @@ +# Positive (pirated) models for GRAPH CLASSIFICATION on ENZYMES via DISTILLATION. +# Teacher: trained GC model loaded from target_model_gc.pt +# Students: GraphSAGE via get_model + +import argparse, json, random, torch +from pathlib import Path + +import torch.nn.functional as F +from torch_geometric.datasets import TUDataset +from torch_geometric.loader import DataLoader +from torch_geometric.transforms import NormalizeFeatures + +from gsage_gc import get_model + + +def set_seed(s: int): + random.seed(s); torch.manual_seed(s); torch.cuda.manual_seed_all(s) + + +def kd_loss(student_logits, teacher_logits): + return F.mse_loss(student_logits, teacher_logits) + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--meta_path', default='models/target_meta_gc.json') + ap.add_argument('--target_path', default='models/target_model_gc.pt') + ap.add_argument('--archs', default='gsage') + ap.add_argument('--epochs', type=int, default=10) + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--wd', type=float, default=5e-4) + 
ap.add_argument('--seed', type=int, default=0) + ap.add_argument('--count_per_arch', type=int, default=100) + ap.add_argument('--out_dir', type=str, default='models/positives') + ap.add_argument('--batch_size', type=int, default=64) + ap.add_argument('--student_hidden', type=int, default=64) + ap.add_argument('--student_layers', type=int, default=3) + ap.add_argument('--student_dropout', type=float, default=0.5) + args = ap.parse_args() + + set_seed(args.seed) + Path(args.out_dir).mkdir(parents=True, exist_ok=True) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + dataset = TUDataset(root='data/ENZYMES', name='ENZYMES', + use_node_attr=True, transform=NormalizeFeatures()) + in_dim = dataset.num_features + num_classes = dataset.num_classes + loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) + + # Teacher GC model + with open(args.meta_path, 'r') as f: + meta = json.load(f) + arch_t = meta.get('arch', 'gsage') + hidden_t = meta.get('hidden', 64) + layers_t = meta.get('layers', 3) + drop_t = meta.get('dropout', 0.5) + + teacher = get_model(arch_t, in_dim, hidden_t, num_classes, + num_layers=layers_t, dropout=drop_t, pool="mean").to(device) + teacher.load_state_dict(torch.load(args.target_path, map_location='cpu')) + teacher.eval() + + archs = [a.strip() for a in args.archs.split(',') if a.strip()] + saved = [] + + for arch in archs: + for i in range(args.count_per_arch): + # fresh student + student = get_model(arch, in_dim, args.student_hidden, num_classes, + num_layers=args.student_layers, + dropout=args.student_dropout, pool="mean").to(device) + opt = torch.optim.Adam(student.parameters(), lr=args.lr, weight_decay=args.wd) + + for _ in range(args.epochs): + student.train() + for batch in loader: + batch = batch.to(device) + with torch.no_grad(): + t_logits = teacher(batch.x, batch.edge_index, batch=batch.batch) # [B, C] + s_logits = student(batch.x, batch.edge_index, batch=batch.batch) # [B, C] + loss = 
kd_loss(s_logits, t_logits) + opt.zero_grad(); loss.backward(); opt.step() + + # save student + out_pt = f'{args.out_dir}/distill_gc_{arch}_{i:03d}.pt' + torch.save(student.state_dict(), out_pt) + with open(out_pt.replace('.pt', '.json'), 'w') as f: + json.dump({ + "task": "graph_classification", + "dataset": "ENZYMES", + "arch": arch, + "hidden": args.student_hidden, + "layers": args.student_layers, + "dropout": args.student_dropout, + "pos_kind": "distill", + "teacher_arch": arch_t, + "teacher_hidden": hidden_t, + "teacher_layers": layers_t, + "teacher_dropout": drop_t + }, f, indent=2) + + saved.append(out_pt) + print(f"[distill-gc] saved {out_pt}") + + print(f"Saved {len(saved)} distilled GC positives.") + + +if __name__ == '__main__': + main() diff --git a/examples/graph_class/eval_verifier_gc.py b/examples/graph_class/eval_verifier_gc.py new file mode 100644 index 0000000..1632771 --- /dev/null +++ b/examples/graph_class/eval_verifier_gc.py @@ -0,0 +1,268 @@ +""" +Evaluate a trained Univerifier on GRAPH CLASSIFICATION (ENZYMES) positives ({target ∪ F+}) +and negatives (F−) using saved GC fingerprints. Produces Robustness/Uniqueness, ARUC, Mean Test Accuracy, KL Divergence. 
+""" + +import argparse, glob, json, os, torch +import numpy as np +import matplotlib.pyplot as plt +from pathlib import Path + +from torch_geometric.datasets import TUDataset +from torch_geometric.transforms import NormalizeFeatures +from torch_geometric.utils import dense_to_sparse, to_undirected +import torch.nn.functional as F + +from gsage_gc import get_model # GraphSAGE GC with pooling + +import torch.nn as nn + + +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid(), + ) + + def forward(self, x): + return self.net(x) + + +def list_paths_from_globs(globs_str): + globs = [g.strip() for g in globs_str.split(",") if g.strip()] + paths = [] + for g in globs: + paths.extend(glob.glob(g)) + return sorted(paths) + + +def load_model_from_pt(pt_path, in_dim, num_classes): + meta_path = pt_path.replace(".pt", ".json") + j = json.load(open(meta_path, "r")) + m = get_model( + j.get("arch", "gsage"), + in_dim, + j.get("hidden", 64), + num_classes, + num_layers=j.get("layers", 3), + dropout=j.get("dropout", 0.5), + pool="mean", + ) + m.load_state_dict(torch.load(pt_path, map_location="cpu")) + m.eval() + return m + + +# GC fingerprint forward: model -> graph logits +@torch.no_grad() +def forward_on_fp(model, fp): + X = fp["X"] + A = fp["A"] + n = X.size(0) + + A_bin = (A > 0.5).float() + A_sym = torch.maximum(A_bin, A_bin.t()) + edge_index = dense_to_sparse(A_sym)[0] + if edge_index.numel() == 0: + idx = torch.arange(n, dtype=torch.long) + edge_index = torch.stack([idx, (idx + 1) % n], dim=0) + edge_index = to_undirected(edge_index) + + batch = X.new_zeros(n, dtype=torch.long) + logits = model(X, edge_index, batch=batch) + return logits.squeeze(0) + + +@torch.no_grad() +def concat_for_model(model, fps): + parts = [forward_on_fp(model, fp) for fp in fps] + 
return torch.cat(parts, dim=0) + +def softmax_logits(x): + return F.softmax(x, dim=-1) + +def sym_kl(p, q, eps=1e-8): + p = p.clamp(min=eps); q = q.clamp(min=eps) + kl1 = (p * (p.log() - q.log())).sum(dim=-1) + kl2 = (q * (q.log() - p.log())).sum(dim=-1) + return 0.5 * (kl1 + kl2) + +@torch.no_grad() +def model_gc_kl_to_target(suspect, target, fps): + """ + Average symmetric KL over fingerprints (graph-level). + """ + vals = [] + for fp in fps: + t = softmax_logits(forward_on_fp(target, fp)).unsqueeze(0) # [1,C] + s = softmax_logits(forward_on_fp(suspect, fp)).unsqueeze(0) # [1,C] + d = sym_kl(s, t) # [1] + vals.append(float(d.item())) + return float(np.mean(vals)) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--fingerprints_path', type=str, default='fingerprints/fingerprints_gc.pt') + ap.add_argument('--verifier_path', type=str, default='fingerprints/univerifier_gc.pt') + ap.add_argument('--target_path', type=str, default='models/target_model_gc.pt') + ap.add_argument('--target_meta', type=str, default='models/target_meta_gc.json') + ap.add_argument('--positives_glob', type=str, + default='models/positives/gc_ftpr_*.pt,models/positives/distill_gc_*.pt') + ap.add_argument('--negatives_glob', type=str, default='models/negatives/negative_gc_*.pt') + ap.add_argument('--out_plot', type=str, default='plots/enzymes_gc_aruc.png') + ap.add_argument('--out_plot_kl', type=str, default='plots/enzymes_gc_kl.png') + + ap.add_argument('--save_csv', type=str, default='', + help='Optional: path to save thresholds/robustness/uniqueness CSV') + args = ap.parse_args() + + # Dataset dims + ds = TUDataset(root="data/ENZYMES", name="ENZYMES", + use_node_attr=True, transform=NormalizeFeatures()) + in_dim = ds.num_features + num_classes = ds.num_classes + + # Load fingerprints (list of tiny graph specs) + pack = torch.load(args.fingerprints_path, map_location="cpu") + fps = pack["fingerprints"] + ver_in_dim_saved = int(pack.get("ver_in_dim", 0)) + + # Load models 
(target + positives + negatives) + tmeta = json.load(open(args.target_meta, "r")) + target = get_model( + tmeta.get("arch", "gsage"), in_dim, tmeta.get("hidden", 64), num_classes, + num_layers=tmeta.get("layers", 3), dropout=tmeta.get("dropout", 0.5), pool="mean" + ) + target.load_state_dict(torch.load(args.target_path, map_location="cpu")) + target.eval() + + pos_paths = list_paths_from_globs(args.positives_glob) + neg_paths = sorted(glob.glob(args.negatives_glob)) + + models_pos = [target] + [load_model_from_pt(p, in_dim, num_classes) for p in pos_paths] + models_neg = [load_model_from_pt(n, in_dim, num_classes) for n in neg_paths] + + # Infer verifier input dim from a probe concat + z0 = concat_for_model(models_pos[0], fps) + D = z0.numel() + if ver_in_dim_saved and ver_in_dim_saved != D: + raise RuntimeError(f"Verifier input mismatch: D={D} vs ver_in_dim_saved={ver_in_dim_saved}") + + # Load verifier + V = FPVerifier(D) + ver_path = Path(args.verifier_path) + if ver_path.exists(): + V.load_state_dict(torch.load(str(ver_path), map_location='cpu')) + print(f"Loaded verifier from {ver_path}") + elif "verifier" in pack: + V.load_state_dict(pack["verifier"]) + print("Loaded verifier from fingerprints pack.") + else: + raise FileNotFoundError( + f"No verifier found at {args.verifier_path} and no 'verifier' key in {args.fingerprints_path}" + ) + V.eval() + + # Collect scores + with torch.no_grad(): + pos_scores = [] + for m in models_pos: + z = concat_for_model(m, fps).unsqueeze(0) + pos_scores.append(float(V(z))) + neg_scores = [] + for m in models_neg: + z = concat_for_model(m, fps).unsqueeze(0) + neg_scores.append(float(V(z))) + + pos_scores = np.array(pos_scores) + neg_scores = np.array(neg_scores) + + ts = np.linspace(0.0, 1.0, 201) + robustness = np.array([(pos_scores >= t).mean() for t in ts]) # TPR on positives + uniqueness = np.array([(neg_scores < t).mean() for t in ts]) # TNR on negatives + overlap = np.minimum(robustness, uniqueness) + # Accuracy at each 
threshold + Npos, Nneg = len(pos_scores), len(neg_scores) + acc_curve = np.array([((pos_scores >= t).sum() + (neg_scores < t).sum()) / (Npos + Nneg) + for t in ts]) + mean_test_acc = float(acc_curve.mean()) + + aruc = np.trapz(overlap, ts) + + # Best threshold (maximize min(robustness, uniqueness)) + idx_best = int(np.argmax(overlap)) + t_best = float(ts[idx_best]) + rob_best = float(robustness[idx_best]) + uniq_best = float(uniqueness[idx_best]) + acc_best = 0.5 * (rob_best + uniq_best) + + print(f"Mean Test Accuracy (avg over thresholds) = {mean_test_acc:.4f}") + print(f"Models: +{len(models_pos)} | -{len(models_neg)} | D={D}") + print(f"ARUC = {aruc:.4f}") + print(f"Best threshold = {t_best:.3f} | Robustness={rob_best:.3f} | Uniqueness={uniq_best:.3f} | Acc={acc_best:.3f}") + + if args.save_csv: + import csv + Path(os.path.dirname(args.save_csv)).mkdir(parents=True, exist_ok=True) + with open(args.save_csv, 'w', newline='') as f: + w = csv.writer(f) + w.writerow(['threshold', 'robustness', 'uniqueness', 'min_curve', 'accuracy']) + for t, r, u, s, a in zip(ts, robustness, uniqueness, overlap, acc_curve): + w.writerow([f"{t:.5f}", f"{r:.6f}", f"{u:.6f}", f"{s:.6f}", f"{a:.6f}"]) + print(f"Saved CSV to {args.save_csv}") + + # ARUC Plot + os.makedirs(os.path.dirname(args.out_plot), exist_ok=True) + fig, ax = plt.subplots(figsize=(7.5, 4.8), dpi=160) + ax.set_title(f"ENZYMES graph classification • ARUC={aruc:.3f}", fontsize=14) + ax.grid(True, which='both', linestyle=':', linewidth=0.8, alpha=0.6) + ax.plot(ts, robustness, color="#ff0000", linewidth=2.0, label="Robustness (TPR)") + ax.plot(ts, uniqueness, color="#0000ff", linestyle="--", linewidth=2.0, label="Uniqueness (TNR)") + overlap = np.minimum(robustness, uniqueness) + ax.fill_between(ts, overlap, color="#bbbbbb", alpha=0.25, label="Overlap (ARUC region)") + + # best-threshold vertical line + # ax.axvline(t_best, color="0.4", linewidth=2.0, alpha=0.6) + + ax.set_xlabel("Threshold (τ)", fontsize=12) + 
ax.set_ylabel("Score", fontsize=12) + ax.set_xlim(0.0, 1.0) + ax.set_ylim(0.0, 1.0) + ax.tick_params(labelsize=11) + + leg = ax.legend(loc="lower left", frameon=True, framealpha=0.85, + facecolor="white", edgecolor="0.8") + + plt.tight_layout() + plt.savefig(args.out_plot, bbox_inches="tight") + print(f"Saved plot to {args.out_plot}") + + # KL divergence Plot + pos_divs = [model_gc_kl_to_target(m, target, fps) for m in models_pos[1:]] # exclude target itself + neg_divs = [model_gc_kl_to_target(m, target, fps) for m in models_neg] + pos_divs = np.array(pos_divs); neg_divs = np.array(neg_divs) + print(f"[KL][GC] F+ mean±std = {pos_divs.mean():.4f}±{pos_divs.std():.4f} | " + f"F- mean±std = {neg_divs.mean():.4f}±{neg_divs.std():.4f}") + + os.makedirs(os.path.dirname(args.out_plot_kl), exist_ok=True) + plt.figure(figsize=(4.8, 3.2), dpi=160) + bins = 30 + plt.hist(pos_divs, bins=bins, density=True, alpha=0.35, color="r", label="Surrogate GNN") + plt.hist(neg_divs, bins=bins, density=True, alpha=0.35, color="b", label="Irrelevant GNN") + plt.title("Graph Classification") + plt.xlabel("KL Divergence"); plt.ylabel("Density") + plt.legend() + plt.tight_layout() + plt.savefig(args.out_plot_kl, bbox_inches="tight") + print(f"Saved KL plot to {args.out_plot_kl}") + +if __name__ == "__main__": + main() diff --git a/examples/graph_class/fine_tune_pirate_gc.py b/examples/graph_class/fine_tune_pirate_gc.py new file mode 100644 index 0000000..6fef77c --- /dev/null +++ b/examples/graph_class/fine_tune_pirate_gc.py @@ -0,0 +1,203 @@ +# Create positive (pirated) GC models on ENZYMES by fine-tuning / partial-retraining +# a trained target GraphSAGE GC model. 
+ +import argparse, json, random, copy +from pathlib import Path + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch_geometric.datasets import TUDataset +from torch_geometric.loader import DataLoader +from torch_geometric.transforms import NormalizeFeatures + +from gsage_gc import get_model + + +def set_seed(seed: int): + random.seed(seed); torch.manual_seed(seed); torch.cuda.manual_seed_all(seed) + + +def split_indices(n, val_ratio=0.1, test_ratio=0.1, seed=0): + g = torch.Generator().manual_seed(seed) + perm = torch.randperm(n, generator=g) + n_val = int(round(val_ratio * n)) + n_test = int(round(test_ratio * n)) + n_train = n - n_val - n_test + idx_tr = perm[:n_train].tolist() + idx_va = perm[n_train:n_train+n_val].tolist() + idx_te = perm[n_train+n_val:].tolist() + return idx_tr, idx_va, idx_te + + +def train_one_epoch(model, loader, optimizer, device): + model.train() + total_loss, total_graphs = 0.0, 0 + for batch in loader: + batch = batch.to(device) + optimizer.zero_grad() + out = model(batch.x, batch.edge_index, batch=batch.batch) + loss = F.cross_entropy(out, batch.y) + loss.backward(); optimizer.step() + total_loss += float(loss.item()) * batch.num_graphs + total_graphs += batch.num_graphs + return total_loss / max(1, total_graphs) + + +@torch.no_grad() +def evaluate(model, loader, device): + model.eval() + total, correct, total_loss = 0, 0, 0.0 + for batch in loader: + batch = batch.to(device) + out = model(batch.x, batch.edge_index, batch=batch.batch) + loss = F.cross_entropy(out, batch.y) + pred = out.argmax(dim=-1) + correct += int((pred == batch.y).sum()) + total += batch.num_graphs + total_loss += float(loss.item()) * batch.num_graphs + acc = correct / max(1, total) + return acc, (total_loss / max(1, total)) + + +def reinit_classifier(model: nn.Module): + if not hasattr(model, "cls"): + return + m = model.cls + if hasattr(m, "reset_parameters"): + try: + m.reset_parameters(); return + except Exception: + pass + for 
mod in m.modules(): + if isinstance(mod, nn.Linear): + nn.init.xavier_uniform_(mod.weight) + if mod.bias is not None: + nn.init.zeros_(mod.bias) + + +def reinit_all(model: nn.Module): + for mod in model.modules(): + if hasattr(mod, "reset_parameters"): + try: + mod.reset_parameters() + except Exception: + pass + + +def freeze_all(model: nn.Module): + for p in model.parameters(): + p.requires_grad = False + + +def unfreeze_classifier(model: nn.Module): + if hasattr(model, "cls"): + for p in model.cls.parameters(): + p.requires_grad = True + + +def unfreeze_all(model: nn.Module): + for p in model.parameters(): + p.requires_grad = True + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--target_path', type=str, default='models/target_model_gc.pt') + ap.add_argument('--meta_path', type=str, default='models/target_meta_gc.json') + ap.add_argument('--epochs', type=int, default=10) # paper uses ~10 for FT/PR + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--wd', type=float, default=5e-4) + ap.add_argument('--seed', type=int, default=0) + ap.add_argument('--num_variants', type=int, default=100) # round-robin across 4 kinds + ap.add_argument('--batch_size', type=int, default=64) + ap.add_argument('--val_ratio', type=float, default=0.1) + ap.add_argument('--test_ratio', type=float, default=0.1) + ap.add_argument('--out_dir', type=str, default='models/positives') + args = ap.parse_args() + + set_seed(args.seed) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + dataset = TUDataset(root='data/ENZYMES', name='ENZYMES', + use_node_attr=True, transform=NormalizeFeatures()) + in_dim = dataset.num_features + num_classes = dataset.num_classes + n = len(dataset) + idx_tr, idx_va, idx_te = split_indices(n, args.val_ratio, args.test_ratio, seed=args.seed) + train_loader = DataLoader(dataset[idx_tr], batch_size=args.batch_size, shuffle=True) + val_loader = DataLoader(dataset[idx_va], batch_size=args.batch_size, 
shuffle=False) + test_loader = DataLoader(dataset[idx_te], batch_size=args.batch_size, shuffle=False) + + with open(args.meta_path, 'r') as f: + meta = json.load(f) + arch = meta.get("arch", "gsage") + hidden = meta.get("hidden", 64) + layers = meta.get("layers", 3) + dropout= meta.get("dropout", 0.5) + + target = get_model(arch, in_dim, hidden, num_classes, + num_layers=layers, dropout=dropout, pool="mean").to(device) + target.load_state_dict(torch.load(args.target_path, map_location='cpu')) + target.eval() + + kinds = ["ft_last", "ft_all", "pr_last", "pr_all"] + Path(args.out_dir).mkdir(parents=True, exist_ok=True) + + saved = [] + for i in range(args.num_variants): + kind = kinds[i % 4] + + model = get_model(arch, in_dim, hidden, num_classes, + num_layers=layers, dropout=dropout, pool="mean").to(device) + model.load_state_dict(copy.deepcopy(target.state_dict())) + + if kind == "pr_last": + reinit_classifier(model) + elif kind == "pr_all": + reinit_all(model) + + if kind in ("ft_last", "pr_last"): + freeze_all(model); unfreeze_classifier(model) + else: + unfreeze_all(model) + + optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), + lr=args.lr, weight_decay=args.wd) + + best_val, best_state = -1.0, None + for _ in range(args.epochs): + _ = train_one_epoch(model, train_loader, optimizer, device) + val_acc, _ = evaluate(model, val_loader, device) + if val_acc > best_val: + best_val = val_acc + best_state = {k: v.detach().cpu().clone() for k, v in model.state_dict().items()} + + if best_state is not None: + model.load_state_dict(best_state) + + test_acc, _ = evaluate(model, test_loader, device) + + out_path = f"{args.out_dir}/gc_ftpr_{i:03d}.pt" + meta_out = { + "task": "graph_classification", + "dataset": "ENZYMES", + "arch": arch, + "hidden": hidden, + "layers": layers, + "dropout": dropout, + "pos_kind": kind, + "val_acc": float(best_val), + "test_acc": float(test_acc), + } + torch.save(model.state_dict(), out_path) + with 
open(out_path.replace('.pt', '.json'), 'w') as f: + json.dump(meta_out, f, indent=2) + saved.append(out_path) + print(f"[{kind}] saved {out_path} val_acc={best_val:.4f} test_acc={test_acc:.4f}") + + print(f"Total GC FT/PR positives saved: {len(saved)}") + + +if __name__ == '__main__': + main() diff --git a/examples/graph_class/fingerprint_generator_gc.py b/examples/graph_class/fingerprint_generator_gc.py new file mode 100644 index 0000000..6a193e7 --- /dev/null +++ b/examples/graph_class/fingerprint_generator_gc.py @@ -0,0 +1,246 @@ +# Fingerprint generation for GRAPH CLASSIFICATION on ENZYMES. + +import argparse, glob, json, random, time, torch +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path +from typing import List +from torch_geometric.datasets import TUDataset +from torch_geometric.transforms import NormalizeFeatures +from torch_geometric.utils import dense_to_sparse, to_undirected + +from gsage_gc import get_model + + +def set_seed(s: int): + random.seed(s); torch.manual_seed(s) + + +def load_meta(path): + with open(path, 'r') as f: + return json.load(f) + + +def list_paths_from_globs(globs: List[str]) -> List[str]: + out = [] + for g in globs: + out.extend(glob.glob(g)) + return sorted(out) + + +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid() + ) + + def forward(self, x): + return self.net(x) + + +def load_model_from_pt(pt_path: str, in_dim: int, num_classes: int): + meta = json.load(open(pt_path.replace('.pt', '.json'), 'r')) + m = get_model(meta["arch"], in_dim, meta["hidden"], num_classes, + num_layers=meta.get("layers", 3), dropout=meta.get("dropout", 0.5), pool="mean") + m.load_state_dict(torch.load(pt_path, map_location='cpu')) + m.eval() + return m, meta + + +@torch.no_grad() +def 
def forward_on_fp(model, fp):
    """Evaluate the GC model on one synthetic fingerprint graph.

    Kept differentiable w.r.t. fp["X"] so feature ascent can backprop
    through the model's response.
    """
    X = fp["X"]
    A = fp["A"]
    n = X.size(0)

    # Binarize & symmetrize the soft adjacency, then convert to edge_index.
    A_bin = (A > 0.5).float()
    A_sym = torch.maximum(A_bin, A_bin.t())
    edge_index = dense_to_sparse(A_sym)[0]
    if edge_index.numel() == 0:
        # Degenerate fingerprint with no surviving edges: fall back to a ring.
        ring = torch.arange(n, dtype=torch.long)
        edge_index = torch.stack([ring, (ring + 1) % n], dim=0)
        edge_index = to_undirected(edge_index)

    # Single-graph batch vector (all zeros).
    batch = X.new_zeros(n, dtype=torch.long)
    return model(X, edge_index, batch=batch).squeeze(0)


def concat_for_model(model, fingerprints):
    """Concatenate the model's logits over all fingerprints into one vector."""
    return torch.cat([forward_on_fp(model, fp) for fp in fingerprints], dim=-1)


def compute_loss(models_pos, models_neg, fingerprints, V):
    """Verifier objective L = E[log V(z+)] + E[log(1 - V(z-))] (to be maximized)."""
    z_pos = [concat_for_model(m, fingerprints) for m in models_pos]
    z_neg = [concat_for_model(m, fingerprints) for m in models_neg]
    if not z_pos or not z_neg:
        raise RuntimeError("Need both positive and negative models.")
    Zp = torch.stack(z_pos)
    Zn = torch.stack(z_neg)

    # Clamp away from {0, 1} so the logs stay finite.
    yp = V(Zp).clamp(1e-6, 1 - 1e-6)
    yn = V(Zn).clamp(1e-6, 1 - 1e-6)
    L = torch.log(yp).mean() + torch.log(1 - yn).mean()
    return L, Zp, Zn


def feature_ascent_step(models_pos, models_neg, fingerprints, V, alpha=0.01):
    """One gradient-ascent step on the node features X of every fingerprint."""
    # Ascent on X only; A is updated separately by discrete edge flips.
    for fp in fingerprints:
        fp["X"].requires_grad_(True)
        fp["A"].requires_grad_(False)

    L, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)
    grads = torch.autograd.grad(
        L, [fp["X"] for fp in fingerprints],
        retain_graph=False, create_graph=False, allow_unused=True
    )
    with torch.no_grad():
        for fp, g in zip(fingerprints, grads):
            if g is None:
                g = torch.zeros_like(fp["X"])
            fp["X"].add_(alpha * g)
            fp["X"].clamp_(-5.0, 5.0)


def edge_flip_candidates(A: torch.Tensor, budget: int):
    """Return up to `budget` upper-triangular (i, j) pairs whose entries are closest to 0.5."""
    n = A.size(0)
    tri_i, tri_j = torch.triu_indices(n, n, offset=1)
    scores = torch.abs(0.5 - A[tri_i, tri_j])
    order = torch.argsort(scores)
    picks = order[:min(budget, len(order))]
    return tri_i[picks], tri_j[picks]


def edge_flip_step(models_pos, models_neg, fingerprints, V, flip_k=8):
    """Greedy discrete update of each fingerprint's adjacency.

    For every fingerprint, trial-flip the `flip_k * 4` most uncertain candidate
    edges one at a time and commit the `flip_k` flips with the largest gain.
    """
    for fp in fingerprints:
        A = fp["A"]
        cand_i, cand_j = edge_flip_candidates(A, flip_k * 4)

        with torch.no_grad():
            base_L, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)

        gains = []
        for i, j in zip(cand_i.tolist(), cand_j.tolist()):
            with torch.no_grad():
                old = float(A[i, j])
                new = 1.0 - old
                A[i, j] = new; A[j, i] = new
                L_try, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)
                gains.append((float(L_try - base_L), i, j, old))
                A[i, j] = old; A[j, i] = old  # undo the trial flip

        gains.sort(key=lambda t: t[0], reverse=True)
        with torch.no_grad():
            for _, i, j, old in gains[:flip_k]:
                A[i, j] = 1.0 - old; A[j, i] = 1.0 - old
            A.clamp_(0.0, 1.0)


def train_verifier_step(models_pos, models_neg, fingerprints, V, opt):
    """One Adam step on the verifier (minimizes -L); returns (L, balanced acc)."""
    L, Zp, Zn = compute_loss(models_pos, models_neg, fingerprints, V)
    loss = -L  # the optimizer minimizes, so negate the objective
    opt.zero_grad(); loss.backward(); opt.step()
    with torch.no_grad():
        tpr = (V(Zp) >= 0.5).float().mean().item()
        tnr = (V(Zn) < 0.5).float().mean().item()
    return float(L.item()), 0.5 * (tpr + tnr)


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--target_path', default='models/target_model_gc.pt')
    ap.add_argument('--target_meta', default='models/target_meta_gc.json')
    ap.add_argument('--positives_glob',
                    default='models/positives/gc_ftpr_*.pt,models/positives/distill_gc_*.pt')
    ap.add_argument('--negatives_glob', default='models/negatives/negative_gc_*.pt')
    ap.add_argument('--out', default='fingerprints/fingerprints_gc.pt')

    ap.add_argument('--P', type=int, default=64)        # number of fingerprints (graphs)
    ap.add_argument('--n', type=int, default=32)        # nodes per fingerprint graph
    ap.add_argument('--iters', type=int, default=1000)  # alternations
    ap.add_argument('--e1', type=int, default=1)        # fingerprint updates per alternation
    ap.add_argument('--e2', type=int, default=1)        # verifier updates per alternation
    ap.add_argument('--alpha_x', type=float, default=0.01)
    ap.add_argument('--flip_k', type=int, default=8)    # edge flips per fp per step
    ap.add_argument('--verifier_lr', type=float, default=1e-3)
    ap.add_argument('--seed', type=int, default=0)
    args = ap.parse_args()

    t0 = time.time()
    set_seed(args.seed)
    Path('fingerprints').mkdir(parents=True, exist_ok=True)

    # Dataset dims are needed to rebuild the saved models.
    ds = TUDataset(root='data/ENZYMES', name='ENZYMES',
                   use_node_attr=True, transform=NormalizeFeatures())
    in_dim = ds.num_features
    num_classes = ds.num_classes

    meta_t = load_meta(args.target_meta)
    target = get_model(meta_t.get("arch", "gsage"), in_dim, meta_t.get("hidden", 64),
                       num_classes, num_layers=meta_t.get("layers", 3),
                       dropout=meta_t.get("dropout", 0.5), pool="mean")
    target.load_state_dict(torch.load(args.target_path, map_location='cpu'))
    target.eval()

    pos_paths = list_paths_from_globs([g.strip() for g in args.positives_glob.split(',') if g.strip()])
    neg_paths = sorted(glob.glob(args.negatives_glob))

    models_pos = [target] + [load_model_from_pt(p, in_dim, num_classes)[0] for p in pos_paths]
    models_neg = [load_model_from_pt(npath, in_dim, num_classes)[0] for npath in neg_paths]

    print(f"[loaded] positives={len(models_pos)} (incl. target) | negatives={len(models_neg)}")

    # Initialize fingerprints: small random X; symmetric A near 0.5, zero diagonal.
    fingerprints = []
    for _ in range(args.P):
        X = torch.randn(args.n, in_dim) * 0.1
        A = torch.rand(args.n, args.n) * 0.2 + 0.4
        A = torch.triu(A, diagonal=1)
        A = A + A.t()
        torch.diagonal(A).zero_()
        fingerprints.append({"X": X, "A": A})

    # Univerifier input = concatenated logits over all fingerprints.
    ver_in_dim = args.P * num_classes
    V = FPVerifier(ver_in_dim)
    optV = torch.optim.Adam(V.parameters(), lr=args.verifier_lr)

    # Alternate: fingerprint updates (flag == 0) vs verifier updates (flag == 1).
    flag = 0
    diag_acc = None
    for it in range(1, args.iters + 1):
        if flag == 0:
            for _ in range(args.e1):
                feature_ascent_step(models_pos, models_neg, fingerprints, V, alpha=args.alpha_x)
                edge_flip_step(models_pos, models_neg, fingerprints, V, flip_k=args.flip_k)
            flag = 1
        else:
            for _ in range(args.e2):
                _, diag_acc = train_verifier_step(models_pos, models_neg, fingerprints, V, optV)
            flag = 0
        if it % 10 == 0 and diag_acc is not None:
            print(f"[Iter {it}] verifier acc={diag_acc:.3f}")

    # Save detached fingerprint copies plus the trained verifier.
    clean_fps = [{"X": fp["X"].detach().clone(), "A": fp["A"].detach().clone()} for fp in fingerprints]
    torch.save(
        {"fingerprints": clean_fps, "verifier": V.state_dict(), "ver_in_dim": ver_in_dim},
        args.out
    )
    print(f"Saved {args.out}")
    print("Time taken (min): ", (time.time() - t0) / 60.0)


if __name__ == '__main__':
    main()
+""" + +import argparse, glob, json, torch +from pathlib import Path +from torch_geometric.datasets import TUDataset +from torch_geometric.utils import dense_to_sparse, to_undirected +from torch_geometric.transforms import NormalizeFeatures + +from gsage_gc import get_model + + +def list_paths_from_globs(globs_str): + globs = [g.strip() for g in globs_str.split(",") if g.strip()] + paths = [] + for g in globs: + paths.extend(glob.glob(g)) + return sorted(paths) + + +def load_model_from_pt(pt_path, in_dim, num_classes): + meta_path = pt_path.replace(".pt", ".json") + j = json.load(open(meta_path, "r")) + m = get_model( + j["arch"], in_dim, j["hidden"], num_classes, + num_layers=j.get("layers", 3), dropout=j.get("dropout", 0.5), pool="mean" + ) + m.load_state_dict(torch.load(pt_path, map_location="cpu")) + m.eval() + return m + + +@torch.no_grad() +def forward_on_fp(model, fp): + X = fp["X"] + A = fp["A"] + n = X.size(0) + + A_bin = (A > 0.5).float() + A_sym = torch.maximum(A_bin, A_bin.t()) + edge_index = dense_to_sparse(A_sym)[0] + if edge_index.numel() == 0: + idx = torch.arange(n, dtype=torch.long) + edge_index = torch.stack([idx, (idx + 1) % n], dim=0) + edge_index = to_undirected(edge_index) + + batch = X.new_zeros(n, dtype=torch.long) + logits = model(X, edge_index, batch=batch) + return logits.squeeze(0) + + +@torch.no_grad() +def concat_for_model(model, fps): + parts = [forward_on_fp(model, fp) for fp in fps] + return torch.cat(parts, dim=0) + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--fingerprints_path", type=str, default="fingerprints/fingerprints_gc.pt") + ap.add_argument("--target_path", type=str, default="models/target_model_gc.pt") + ap.add_argument("--target_meta", type=str, default="models/target_meta_gc.json") + ap.add_argument("--positives_glob", type=str, + default="models/positives/gc_ftpr_*.pt,models/positives/distill_gc_*.pt") + ap.add_argument("--negatives_glob", type=str, default="models/negatives/negative_gc_*.pt") 
+ ap.add_argument("--out", type=str, default="fingerprints/univerifier_dataset_gc.pt") + args = ap.parse_args() + + ds = TUDataset(root="data/ENZYMES", name="ENZYMES", use_node_attr=True, transform=NormalizeFeatures()) + in_dim = ds.num_features + num_classes = ds.num_classes + + pack = torch.load(args.fingerprints_path, map_location="cpu") + fps = pack["fingerprints"] + ver_in_dim_saved = pack.get("ver_in_dim", None) + + tmeta = json.load(open(args.target_meta, "r")) + target = get_model( + tmeta.get("arch", "gsage"), in_dim, tmeta.get("hidden", 64), num_classes, + num_layers=tmeta.get("layers", 3), dropout=tmeta.get("dropout", 0.5), pool="mean" + ) + target.load_state_dict(torch.load(args.target_path, map_location="cpu")) + target.eval() + + pos_paths = list_paths_from_globs(args.positives_glob) + neg_paths = sorted(glob.glob(args.negatives_glob)) + + models = [target] + [load_model_from_pt(p, in_dim, num_classes) for p in pos_paths] + \ + [load_model_from_pt(n, in_dim, num_classes) for n in neg_paths] + labels = [1.0] * (1 + len(pos_paths)) + [0.0] * len(neg_paths) + + # Build feature matrix X and labels y + with torch.no_grad(): + z0 = concat_for_model(models[0], fps) + D = z0.numel() + if ver_in_dim_saved is not None and D != int(ver_in_dim_saved): + raise RuntimeError( + f"Verifier input mismatch: dataset dim {D} vs saved ver_in_dim {ver_in_dim_saved}" + ) + + X_rows = [z0] + [concat_for_model(m, fps) for m in models[1:]] + X = torch.stack(X_rows, dim=0).float() + y = torch.tensor(labels, dtype=torch.float32) + + Path(Path(args.out).parent).mkdir(parents=True, exist_ok=True) + torch.save({"X": X, "y": y}, args.out) + print(f"Saved {args.out} with {X.shape[0]} rows; dim={X.shape[1]}") + print(f"Positives: {int(sum(labels))} | Negatives: {len(labels) - int(sum(labels))}") + + +if __name__ == "__main__": + main() diff --git a/examples/graph_class/gsage_gc.py b/examples/graph_class/gsage_gc.py new file mode 100644 index 0000000..c9bb957 --- /dev/null +++ 
b/examples/graph_class/gsage_gc.py @@ -0,0 +1,59 @@ +# Graph classification (GC) model for ENZYMES using GraphSAGE. + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch_geometric.nn import SAGEConv, global_mean_pool + + +class GraphSAGE_GC(nn.Module): + def __init__(self, in_dim: int, hidden: int, num_classes: int, + num_layers: int = 3, dropout: float = 0.5, pool: str = "mean"): + super().__init__() + assert num_layers >= 1 + self.dropout = dropout + self.pool = pool + + convs = [SAGEConv(in_dim, hidden)] + for _ in range(num_layers - 1): + convs.append(SAGEConv(hidden, hidden)) + self.convs = nn.ModuleList(convs) + + self.cls = nn.Linear(hidden, num_classes) + self.reset_parameters() + + def reset_parameters(self): + for m in self.convs: + if hasattr(m, "reset_parameters"): + m.reset_parameters() + nn.init.xavier_uniform_(self.cls.weight) + if self.cls.bias is not None: + nn.init.zeros_(self.cls.bias) + + def _pool(self, x, batch): + if self.pool == "mean": + return global_mean_pool(x, batch) + return global_mean_pool(x, batch) # extend to "sum"/"max" if needed + + def forward(self, x, edge_index, batch=None): + if batch is None: + batch = x.new_zeros(x.size(0), dtype=torch.long) + + for i, conv in enumerate(self.convs): + x = conv(x, edge_index) + if i != len(self.convs) - 1: + x = F.relu(x) + x = F.dropout(x, p=self.dropout, training=self.training) + + g = self._pool(x, batch) + out = self.cls(g) + return out + + +def get_model(arch: str, in_dim: int, hidden: int, num_classes: int, + num_layers: int = 3, dropout: float = 0.5, pool: str = "mean"): + a = arch.lower().strip() + if a in ("graphsage", "sage", "gsage"): + return GraphSAGE_GC(in_dim, hidden, num_classes, + num_layers=num_layers, dropout=dropout, pool=pool) + raise ValueError(f"Unsupported arch for graph classification: {arch}") diff --git a/examples/graph_class/train_gc.py b/examples/graph_class/train_gc.py new file mode 100644 index 0000000..24d995c --- /dev/null +++ 
# Graph classification on ENZYMES using GraphSAGE.

import argparse
import json
import os
import random
from collections import defaultdict

import torch
import torch.nn.functional as F
from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader
from torch_geometric.transforms import NormalizeFeatures

from gsage_gc import get_model


def set_seed(seed: int):
    """Seed python and torch RNGs for reproducibility."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def split_indices_stratified(y, val_ratio=0.1, test_ratio=0.1, seed=0):
    """Per-class shuffled train/val/test split over graph indices."""
    g = torch.Generator().manual_seed(seed)
    by_cls = defaultdict(list)
    for i, yi in enumerate(y.tolist()):
        by_cls[int(yi)].append(i)
    tr, va, te = [], [], []
    for idxs in by_cls.values():
        idxs = torch.tensor(idxs)[torch.randperm(len(idxs), generator=g)].tolist()
        n = len(idxs)
        n_val = int(round(val_ratio * n))
        n_test = int(round(test_ratio * n))
        n_train = n - n_val - n_test
        tr += idxs[:n_train]
        va += idxs[n_train:n_train + n_val]
        te += idxs[n_train + n_val:]
    return tr, va, te


def train_one_epoch(model, loader, optimizer, device):
    """One optimization pass; returns the graph-weighted mean training loss."""
    model.train()
    total_loss, total_graphs = 0.0, 0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index, batch=batch.batch)
        loss = F.cross_entropy(out, batch.y)
        loss.backward()
        optimizer.step()
        total_loss += float(loss.item()) * batch.num_graphs
        total_graphs += batch.num_graphs
    return total_loss / max(1, total_graphs)


@torch.no_grad()
def evaluate(model, loader, device):
    """Return (accuracy, mean loss) over a loader."""
    model.eval()
    correct, total, total_loss = 0, 0, 0.0
    for batch in loader:
        batch = batch.to(device)
        out = model(batch.x, batch.edge_index, batch=batch.batch)
        loss = F.cross_entropy(out, batch.y)
        correct += int((out.argmax(dim=-1) == batch.y).sum())
        total += batch.num_graphs
        total_loss += float(loss.item()) * batch.num_graphs
    return correct / max(1, total), total_loss / max(1, total)


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--arch", default="gsage", choices=["gsage", "graphsage", "sage"])
    ap.add_argument("--hidden", type=int, default=64)
    ap.add_argument("--layers", type=int, default=3)
    ap.add_argument("--dropout", type=float, default=0.5)
    ap.add_argument("--lr", type=float, default=1e-3)
    ap.add_argument("--epochs", type=int, default=200)
    ap.add_argument("--weight_decay", type=float, default=5e-4)
    ap.add_argument("--batch_size", type=int, default=64)
    ap.add_argument("--val_ratio", type=float, default=0.1)
    ap.add_argument("--test_ratio", type=float, default=0.1)
    ap.add_argument("--seed", type=int, default=0)
    ap.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu")
    args = ap.parse_args()

    set_seed(args.seed)
    device = torch.device(args.device)

    # ENZYMES graph-classification dataset.
    dataset = TUDataset(root="data/ENZYMES", name="ENZYMES",
                        use_node_attr=True, transform=NormalizeFeatures())
    in_dim = dataset.num_features
    num_classes = dataset.num_classes

    # Stratified split over the graph labels.
    y_all = torch.tensor([data.y.item() for data in dataset])
    tr_idx, va_idx, te_idx = split_indices_stratified(y_all, args.val_ratio,
                                                      args.test_ratio, args.seed)
    train_loader = DataLoader(dataset[tr_idx], batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(dataset[va_idx], batch_size=args.batch_size, shuffle=False)
    test_loader = DataLoader(dataset[te_idx], batch_size=args.batch_size, shuffle=False)

    model = get_model(
        args.arch, in_dim=in_dim, hidden=args.hidden, num_classes=num_classes,
        num_layers=args.layers, dropout=args.dropout, pool="mean",
    ).to(device)
    opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    os.makedirs("models", exist_ok=True)
    best_val_acc, best_state = 0.0, None

    for epoch in range(1, args.epochs + 1):
        train_loss = train_one_epoch(model, train_loader, opt, device)
        val_acc, val_loss = evaluate(model, val_loader, device)

        # Keep a CPU copy of the best-validation weights.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_state = {k: v.detach().cpu().clone() for k, v in model.state_dict().items()}

        if epoch % 10 == 0 or epoch == args.epochs:
            print(f"Epoch {epoch:03d} | train loss {train_loss:.4f} | "
                  f"val acc {val_acc:.4f} | val loss {val_loss:.4f}")

    if best_state is not None:
        model.load_state_dict(best_state)

    test_acc, test_loss = evaluate(model, test_loader, device)
    print(f"Best Val Acc: {best_val_acc:.4f} | Test Acc: {test_acc:.4f} | Test Loss: {test_loss:.4f}")

    # Persist the target GC model and its metadata (GC-specific filenames).
    torch.save(model.state_dict(), "models/target_model_gc.pt")
    with open("models/target_meta_gc.json", "w") as f:
        json.dump(
            {
                "task": "graph_classification",
                "dataset": "ENZYMES",
                "arch": args.arch,
                "hidden": args.hidden,
                "layers": args.layers,
                "dropout": args.dropout,
                "batch_size": args.batch_size,
                "metrics": {"val_acc": float(best_val_acc), "test_acc": float(test_acc)},
            },
            f,
            indent=2,
        )


if __name__ == "__main__":
    main()
import argparse
import json
import os
import random
from pathlib import Path

import torch
import torch.nn.functional as F
from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader
from torch_geometric.transforms import NormalizeFeatures

from gsage_gc import get_model


def set_seed(seed: int):
    """Seed python and torch RNGs."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def split_indices(n, val_ratio=0.1, test_ratio=0.1, seed=0):
    """Random (non-stratified) train/val/test index split of n items."""
    g = torch.Generator().manual_seed(seed)
    perm = torch.randperm(n, generator=g)
    n_val = int(round(val_ratio * n))
    n_test = int(round(test_ratio * n))
    n_train = n - n_val - n_test
    return (perm[:n_train].tolist(),
            perm[n_train:n_train + n_val].tolist(),
            perm[n_train + n_val:].tolist())


def train_one_epoch(model, loader, optimizer, device):
    """One optimization pass; returns the graph-weighted mean loss."""
    model.train()
    total_loss, total_graphs = 0.0, 0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index, batch=batch.batch)
        loss = F.cross_entropy(out, batch.y)
        loss.backward()
        optimizer.step()
        total_loss += float(loss.item()) * batch.num_graphs
        total_graphs += batch.num_graphs
    return total_loss / max(1, total_graphs)


@torch.no_grad()
def evaluate(model, loader, device):
    """Return (accuracy, mean loss) over a loader."""
    model.eval()
    total, correct, total_loss = 0, 0, 0.0
    for batch in loader:
        batch = batch.to(device)
        out = model(batch.x, batch.edge_index, batch=batch.batch)
        loss = F.cross_entropy(out, batch.y)
        correct += int((out.argmax(dim=-1) == batch.y).sum())
        total += batch.num_graphs
        total_loss += float(loss.item()) * batch.num_graphs
    return correct / max(1, total), total_loss / max(1, total)


def main():
    ap = argparse.ArgumentParser(description="Train unrelated GC (negative) models on ENZYMES")
    ap.add_argument('--count', type=int, default=150)
    ap.add_argument('--archs', type=str, default='gsage')
    ap.add_argument('--epochs', type=int, default=200)
    ap.add_argument('--lr', type=float, default=1e-3)
    ap.add_argument('--wd', type=float, default=5e-4)
    ap.add_argument('--hidden', type=int, default=64)
    ap.add_argument('--layers', type=int, default=3)
    ap.add_argument('--dropout', type=float, default=0.5)
    ap.add_argument('--batch_size', type=int, default=64)
    ap.add_argument('--val_ratio', type=float, default=0.1)
    ap.add_argument('--test_ratio', type=float, default=0.1)
    ap.add_argument('--seed', type=int, default=123)
    ap.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
    ap.add_argument('--start_index', type=int, default=50)
    args = ap.parse_args()

    device = torch.device(args.device)
    Path("models/negatives").mkdir(parents=True, exist_ok=True)

    dataset_full = TUDataset(root='data/ENZYMES', name='ENZYMES',
                             use_node_attr=True, transform=NormalizeFeatures())
    in_dim = dataset_full.num_features
    num_classes = dataset_full.num_classes

    arch_list = [a.strip() for a in args.archs.split(',') if a.strip()]
    saved = []

    for i in range(args.count):
        idx = args.start_index + i
        # A distinct seed per negative model: independent init and split.
        seed_i = args.seed + idx
        set_seed(seed_i)

        tr_idx, va_idx, te_idx = split_indices(len(dataset_full), args.val_ratio,
                                               args.test_ratio, seed=seed_i)
        train_loader = DataLoader(dataset_full[tr_idx], batch_size=args.batch_size, shuffle=True)
        val_loader = DataLoader(dataset_full[va_idx], batch_size=args.batch_size, shuffle=False)
        test_loader = DataLoader(dataset_full[te_idx], batch_size=args.batch_size, shuffle=False)

        arch = arch_list[idx % len(arch_list)]
        model = get_model(arch, in_dim, args.hidden, num_classes,
                          num_layers=args.layers, dropout=args.dropout, pool="mean").to(device)
        opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)

        best_val, best_state = -1.0, None
        for ep in range(1, args.epochs + 1):
            _ = train_one_epoch(model, train_loader, opt, device)
            val_acc, _ = evaluate(model, val_loader, device)
            if val_acc > best_val:
                best_val = val_acc
                best_state = {k: v.detach().cpu().clone() for k, v in model.state_dict().items()}
            if ep % 20 == 0 or ep == args.epochs:
                print(f"[neg {idx:03d} | {arch}] epoch {ep:03d} | val acc {val_acc:.4f}")

        if best_state is not None:
            model.load_state_dict(best_state)

        test_acc, test_loss = evaluate(model, test_loader, device)

        out_path = f"models/negatives/negative_gc_{idx:03d}.pt"
        torch.save(model.state_dict(), out_path)
        meta = {
            "task": "graph_classification",
            "dataset": "ENZYMES",
            "arch": arch,
            "hidden": args.hidden,
            "layers": args.layers,
            "dropout": args.dropout,
            "seed": seed_i,
            "val_acc": float(best_val),
            "test_acc": float(test_acc),
            "test_loss": float(test_loss),
        }
        with open(out_path.replace('.pt', '.json'), 'w') as f:
            json.dump(meta, f, indent=2)

        saved.append(out_path)
        print(f"Saved NEGATIVE {idx:03d} arch={arch} best_val_acc={best_val:.4f} "
              f"test_acc={test_acc:.4f} -> {out_path}")


if __name__ == "__main__":
    main()
def sample_node_subset(num_nodes: int, low: float = 0.5, high: float = 0.8):
    """Sample a sorted random subset of node ids covering low..high of the graph."""
    k = max(2, int(random.uniform(low, high) * num_nodes))
    idx = torch.randperm(num_nodes)[:k]
    return idx.sort().values


@torch.no_grad()
def teacher_edge_logits(teacher_enc, teacher_dec, x, edge_index, pos_edge, neg_edge, device):
    """Teacher's (detached) logits for the positive and negative probe edges."""
    teacher_enc.eval()
    z_t = teacher_enc(x.to(device), edge_index.to(device))
    t_pos = teacher_dec(z_t, pos_edge.to(device))
    t_neg = teacher_dec(z_t, neg_edge.to(device))
    return t_pos.detach(), t_neg.detach()


def kd_loss(student_logits, teacher_logits, kind: str = "mse"):
    """Distillation loss between student and teacher edge logits."""
    if kind == "mse":
        return F.mse_loss(student_logits, teacher_logits)
    if kind == "bce_soft":
        # Soft targets: teacher probabilities, with no gradient into the teacher.
        with torch.no_grad():
            soft = torch.sigmoid(teacher_logits)
        return F.binary_cross_entropy_with_logits(student_logits, soft)
    raise ValueError(f"Unknown distill loss kind: {kind}")


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--meta_path', default='models/target_meta_lp.json')
    ap.add_argument('--target_path', default='models/target_model_lp.pt')
    ap.add_argument('--archs', default='gat,sage')
    ap.add_argument('--epochs', type=int, default=10)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--wd', type=float, default=5e-4)
    ap.add_argument('--seed', type=int, default=0)
    ap.add_argument('--count_per_arch', type=int, default=50)
    ap.add_argument('--out_dir', type=str, default='models/positives')
    ap.add_argument('--student_hidden', type=int, default=64)
    ap.add_argument('--student_layers', type=int, default=3)
    ap.add_argument('--distill_loss', choices=['mse', 'bce_soft'], default='mse')
    ap.add_argument('--sub_low', type=float, default=0.5)   # subgraph ratio lower bound
    ap.add_argument('--sub_high', type=float, default=0.8)  # subgraph ratio upper bound
    args = ap.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    set_seed(args.seed)
    Path(args.out_dir).mkdir(parents=True, exist_ok=True)

    with open(args.meta_path, 'r') as f:
        meta = json.load(f)
    arch_t = meta.get('arch', 'gcn')
    hidden_t = meta.get('hidden', 64)
    layers_t = meta.get('layers', 3)

    dataset = Planetoid(root='data', name='CiteSeer')
    data = dataset[0]

    teacher_enc = get_encoder(arch_t, dataset.num_node_features, hidden_t,
                              num_layers=layers_t, dropout=0.5)
    teacher_enc.load_state_dict(torch.load(args.target_path, map_location='cpu'))
    teacher_enc.to(device).eval()
    t_dec = DotProductDecoder().to(device)

    archs = [a.strip() for a in args.archs.split(',') if a.strip()]
    saved = []

    for arch in archs:
        for i in range(args.count_per_arch):
            student = get_encoder(arch, dataset.num_node_features, args.student_hidden,
                                  num_layers=args.student_layers, dropout=0.5).to(device)
            s_dec = DotProductDecoder().to(device)
            opt = torch.optim.Adam(student.parameters(), lr=args.lr, weight_decay=args.wd)

            for _ in range(args.epochs):
                student.train()
                opt.zero_grad()

                # Sample a subgraph (50–80% of the nodes by default).
                idx = sample_node_subset(data.num_nodes, args.sub_low, args.sub_high)
                e_idx, _ = subgraph(idx, data.edge_index, relabel_nodes=True)
                if e_idx.numel() == 0 or e_idx.size(1) == 0:
                    continue

                x_sub = data.x[idx]

                # Positives = subgraph edges; negatives = sampled non-edges.
                pos_edge = e_idx
                neg_edge = negative_sampling(
                    edge_index=pos_edge,
                    num_nodes=x_sub.size(0),
                    num_neg_samples=pos_edge.size(1),
                    method='sparse',
                )

                t_pos, t_neg = teacher_edge_logits(
                    teacher_enc, t_dec, x_sub, e_idx, pos_edge, neg_edge, device
                )

                z_s = student(x_sub.to(device), e_idx.to(device))
                s_pos = s_dec(z_s, pos_edge.to(device))
                s_neg = s_dec(z_s, neg_edge.to(device))

                loss = kd_loss(torch.cat([s_pos, s_neg], dim=0),
                               torch.cat([t_pos, t_neg], dim=0),
                               kind=args.distill_loss)
                loss.backward()
                opt.step()

            out_pt = f'{args.out_dir}/distill_lp_{arch}_{i:03d}.pt'
            torch.save(student.state_dict(), out_pt)
            with open(out_pt.replace('.pt', '.json'), 'w') as f:
                json.dump({
                    "task": "link_prediction",
                    "dataset": "CiteSeer",
                    "arch": arch,
                    "hidden": args.student_hidden,
                    "layers": args.student_layers,
                    "pos_kind": "distill",
                    "teacher_arch": arch_t,
                    "teacher_hidden": hidden_t,
                    "teacher_layers": layers_t,
                    "distill_loss": args.distill_loss
                }, f, indent=2)

            saved.append(out_pt)
            print(f"[distill] saved {out_pt}")

    print(f"Saved {len(saved)} distilled LP positives.")


if __name__ == '__main__':
    main()
def load_encoder_from_pt(pt_path: str, in_dim: int):
    """Rebuild an LP encoder from a checkpoint and its sibling .json metadata."""
    meta_path = pt_path.replace(".pt", ".json")
    j = json.load(open(meta_path, "r"))
    enc = get_lp_encoder(j["arch"], in_dim, j["hidden"], layers=j.get("layers", 3))
    enc.load_state_dict(torch.load(pt_path, map_location="cpu"))
    enc.eval()
    return enc


@torch.no_grad()
def forward_on_fp(encoder, decoder, fp):
    """Score the fingerprint's probe edges with encoder+decoder; returns edge logits."""
    X = fp["X"]
    A = fp["A"]
    n = X.size(0)

    # Binarize & symmetrize the soft adjacency; build an undirected edge_index.
    A_bin = (A > 0.5).float()
    A_sym = torch.maximum(A_bin, A_bin.t())
    edge_index = dense_to_sparse(A_sym)[0]
    if edge_index.numel() == 0:
        # No edges survived binarization: fall back to a ring graph.
        idx = torch.arange(n, dtype=torch.long)
        edge_index = torch.stack([idx, (idx + 1) % n], dim=0)
        edge_index = to_undirected(edge_index)

    z = encoder(X, edge_index)

    # Probe edges: consecutive pairs (a ring) over the fingerprint's selected nodes.
    sel = fp["node_idx"]
    if sel.numel() == 1:
        u = sel
        v = torch.tensor([(sel.item() + 1) % n], dtype=torch.long)
    else:
        u = sel
        v = torch.roll(sel, shifts=-1, dims=0)
    probe_edge = torch.stack([u, v], dim=0)

    return decoder(z, probe_edge)


@torch.no_grad()
def concat_for_model(encoder, decoder, fps):
    """Concatenate probe-edge logits over all fingerprints into one feature vector."""
    parts = [forward_on_fp(encoder, decoder, fp) for fp in fps]
    return torch.cat(parts, dim=0)


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--fingerprints_path', type=str, default='fingerprints/fingerprints_lp.pt')
    ap.add_argument('--verifier_path', type=str, default='fingerprints/univerifier_lp.pt')
    ap.add_argument('--target_path', type=str, default='models/target_model_lp.pt')
    ap.add_argument('--target_meta', type=str, default='models/target_meta_lp.json')
    ap.add_argument('--positives_glob', type=str,
                    default='models/positives/lp_ftpr_*.pt,models/positives/distill_lp_*.pt')
    ap.add_argument('--negatives_glob', type=str, default='models/negatives/negative_lp_*.pt')
    ap.add_argument('--out_plot', type=str, default='plots/citeseer_lp_aruc.png')
    ap.add_argument('--save_csv', type=str, default='',
                    help='Optional: path to save thresholds/robustness/uniqueness CSV')
    args = ap.parse_args()

    ds = Planetoid(root="data", name="CiteSeer")
    in_dim = ds.num_features

    # Load fingerprints (each carries a node_idx probe set).
    pack = torch.load(args.fingerprints_path, map_location="cpu")
    fps = pack["fingerprints"]
    ver_in_dim_saved = int(pack.get("ver_in_dim", 0))

    decoder = DotProductDecoder()

    tmeta = json.load(open(args.target_meta, "r"))
    target_enc = get_lp_encoder(tmeta["arch"], in_dim, tmeta["hidden"],
                                layers=tmeta.get("layers", 3))
    target_enc.load_state_dict(torch.load(args.target_path, map_location="cpu"))
    target_enc.eval()

    pos_paths = list_paths_from_globs(args.positives_glob)
    neg_paths = sorted(glob.glob(args.negatives_glob))

    models_pos = [target_enc] + [load_encoder_from_pt(p, in_dim) for p in pos_paths]
    models_neg = [load_encoder_from_pt(n, in_dim) for n in neg_paths]

    z0 = concat_for_model(models_pos[0], decoder, fps)
    D = z0.numel()
    if ver_in_dim_saved and ver_in_dim_saved != D:
        raise RuntimeError(f"Verifier input mismatch: D={D} vs ver_in_dim_saved={ver_in_dim_saved}")

    # Load the verifier from its own file, falling back to the fingerprints pack.
    V = FPVerifier(D)
    ver_path = Path(args.verifier_path)
    if ver_path.exists():
        V.load_state_dict(torch.load(str(ver_path), map_location='cpu'))
        print(f"Loaded verifier from {ver_path}")
    elif "verifier" in pack:
        V.load_state_dict(pack["verifier"])
        print("Loaded verifier from fingerprints pack.")
    else:
        raise FileNotFoundError(
            f"No verifier found at {args.verifier_path} and no 'verifier' key in {args.fingerprints_path}"
        )
    V.eval()

    with torch.no_grad():
        pos_scores = []
        for enc in models_pos:
            z = concat_for_model(enc, decoder, fps).unsqueeze(0)  # [1, D]
            pos_scores.append(float(V(z)))
        neg_scores = []
        for enc in models_neg:
            z = concat_for_model(enc, decoder, fps).unsqueeze(0)
            neg_scores.append(float(V(z)))

    pos_scores = np.array(pos_scores)
    neg_scores = np.array(neg_scores)

    ts = np.linspace(0.0, 1.0, 201)
    robustness = np.array([(pos_scores >= t).mean() for t in ts])  # TPR on positives
    uniqueness = np.array([(neg_scores < t).mean() for t in ts])   # TNR on negatives
    overlap = np.minimum(robustness, uniqueness)

    # Accuracy at each threshold.
    Npos, Nneg = len(pos_scores), len(neg_scores)
    acc_curve = np.array([((pos_scores >= t).sum() + (neg_scores < t).sum()) / (Npos + Nneg)
                          for t in ts])
    mean_test_acc = float(acc_curve.mean())

    aruc = np.trapz(overlap, ts)

    # Best threshold: maximize min(robustness, uniqueness).
    idx_best = int(np.argmax(overlap))
    t_best = float(ts[idx_best])
    rob_best = float(robustness[idx_best])
    uniq_best = float(uniqueness[idx_best])
    acc_best = 0.5 * (rob_best + uniq_best)

    print(f"Mean Test Accuracy (avg over thresholds) = {mean_test_acc:.4f}")
    print(f"Models: +{len(models_pos)} | -{len(models_neg)} | D={D}")
    print(f"ARUC = {aruc:.4f}")
    print(f"Best threshold = {t_best:.3f} | Robustness={rob_best:.3f} | Uniqueness={uniq_best:.3f} | Acc={acc_best:.3f}")

    if args.save_csv:
        import csv
        Path(os.path.dirname(args.save_csv)).mkdir(parents=True, exist_ok=True)
        with open(args.save_csv, 'w', newline='') as f:
            w = csv.writer(f)
            w.writerow(['threshold', 'robustness', 'uniqueness', 'min_curve', 'accuracy'])
            # BUG FIX: this loop previously zipped over an undefined name `shade`,
            # raising NameError whenever --save_csv was given. The min-curve
            # column is the precomputed `overlap` = min(robustness, uniqueness).
            for t, r, u, s, a in zip(ts, robustness, uniqueness, overlap, acc_curve):
                w.writerow([f"{t:.5f}", f"{r:.6f}", f"{u:.6f}", f"{s:.6f}", f"{a:.6f}"])
        print(f"Saved CSV to {args.save_csv}")

    # Plot robustness/uniqueness curves with the ARUC region shaded.
    os.makedirs(os.path.dirname(args.out_plot), exist_ok=True)
    fig, ax = plt.subplots(figsize=(7.5, 4.8), dpi=160)
    ax.set_title(f"CiteSeer link-prediction • ARUC={aruc:.3f}", fontsize=14)
    ax.grid(True, which='both', linestyle=':', linewidth=0.8, alpha=0.6)
    ax.plot(ts, robustness, color="#ff0000", linewidth=2.0, label="Robustness (TPR)")
    ax.plot(ts, uniqueness, color="#0000ff", linestyle="--", linewidth=2.0, label="Uniqueness (TNR)")
    ax.fill_between(ts, overlap, color="#bbbbbb", alpha=0.25, label="Overlap (ARUC region)")

    # best-threshold vertical line (left disabled intentionally)
    # ax.axvline(t_best, color="0.4", linewidth=2.0, alpha=0.6)

    ax.set_xlabel("Threshold (τ)", fontsize=12)
    ax.set_ylabel("Score", fontsize=12)
    ax.set_xlim(0.0, 1.0)
    ax.set_ylim(0.0, 1.0)
    ax.tick_params(labelsize=11)

    ax.legend(loc="lower left", frameon=True, framealpha=0.85,
              facecolor="white", edgecolor="0.8")

    plt.tight_layout()
    plt.savefig(args.out_plot, bbox_inches="tight")
    print(f"Saved plot to {args.out_plot}")


if __name__ == "__main__":
    main()
f"{split}_neg_edge_index", "neg_edge_index"): + if hasattr(d, name): + neg = getattr(d, name) + break + else: + if hasattr(d, "edge_label_index") and hasattr(d, "edge_label"): + eli, el = d.edge_label_index, d.edge_label + neg = eli[:, el == 0] + else: + neg = None + + return pos, neg + + +def train_epoch_lp(encoder, decoder, data, optimizer, device): + encoder.train(); optimizer.zero_grad() + z = encoder(data.x.to(device), data.edge_index.to(device)) + + pos_e, neg_e = get_pos_neg_edges(data, "train") + if neg_e is None: + neg_e = negative_sampling( + edge_index=data.edge_index.to(device), + num_nodes=data.num_nodes, + num_neg_samples=pos_e.size(1), + method="sparse", + ) + + pos_logits = decoder(z, pos_e.to(device)) + neg_logits = decoder(z, neg_e.to(device)) + logits = torch.cat([pos_logits, neg_logits], dim=0) + labels = torch.cat( + [torch.ones(pos_logits.size(0), device=device), + torch.zeros(neg_logits.size(0), device=device)], + dim=0, + ) + loss = F.binary_cross_entropy_with_logits(logits, labels) + loss.backward(); optimizer.step() + return float(loss.item()) + + +@torch.no_grad() +def eval_split_auc_ap(encoder, decoder, data, split: str, device): + encoder.eval() + pos_e, neg_e = get_pos_neg_edges(data, split) + + z = encoder(data.x.to(device), data.edge_index.to(device)) + pos_logits = decoder(z, pos_e.to(device)) + if neg_e is None: + neg_e = negative_sampling( + edge_index=data.edge_index.to(device), + num_nodes=data.num_nodes, + num_neg_samples=pos_e.size(1), + method="sparse", + ) + neg_logits = decoder(z, neg_e.to(device)) + + logits = torch.cat([pos_logits, neg_logits], dim=0).cpu() + labels = torch.cat([torch.ones(pos_logits.size(0)), + torch.zeros(neg_logits.size(0))], dim=0) + probs = torch.sigmoid(logits) + auc = roc_auc_score(labels.numpy(), probs.numpy()) + ap = average_precision_score(labels.numpy(), probs.numpy()) + return float(auc), float(ap) + + +def freeze_all(encoder): + for p in encoder.parameters(): + p.requires_grad = False + + 
+def unfreeze_all(encoder): + for p in encoder.parameters(): + p.requires_grad = True + + +def unfreeze_last_gnn_layer(encoder): + if hasattr(encoder, "convs") and len(encoder.convs) > 0: + for p in encoder.convs[-1].parameters(): + p.requires_grad = True + + +def reinit_last_gnn_layer(encoder): + if hasattr(encoder, "convs") and len(encoder.convs) > 0: + m = encoder.convs[-1] + if hasattr(m, "reset_parameters"): + try: + m.reset_parameters() + except Exception: + pass + else: + for p in m.parameters(): + if p.dim() > 1: + torch.nn.init.xavier_uniform_(p) + else: + torch.nn.init.zeros_(p) + + +def reinit_all_gnn_layers(encoder): + for m in encoder.modules(): + if hasattr(m, "reset_parameters"): + try: + m.reset_parameters() + except Exception: + pass + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--target_path', type=str, default='models/target_model_lp.pt') + ap.add_argument('--meta_path', type=str, default='models/target_meta_lp.json') + ap.add_argument('--epochs', type=int, default=10) # 10 for FT/PR + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--wd', type=float, default=5e-4) + ap.add_argument('--seed', type=int, default=0) + ap.add_argument('--num_variants', type=int, default=100) + ap.add_argument('--out_dir', type=str, default='models/positives') + args = ap.parse_args() + + set_seed(args.seed) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + # Load meta about the target LP encoder + with open(args.meta_path, 'r') as f: + meta = json.load(f) + arch = meta.get("arch", "gcn") + hidden = meta.get("hidden", 64) + layers = meta.get("layers", 3) + + # Dataset & edge-level split for LP (CiteSeer) + dataset = Planetoid(root='data', name='CiteSeer') + base_data = dataset[0] + splitter = RandomLinkSplit(num_val=0.05, num_test=0.10, is_undirected=True, add_negative_train_samples=True) + train_data, val_data, test_data = splitter(base_data) + train_data, val_data, test_data = 
train_data.to(device), val_data.to(device), test_data.to(device) + + target = get_encoder(arch, dataset.num_node_features, hidden, num_layers=layers, dropout=0.5) + target.load_state_dict(torch.load(args.target_path, map_location='cpu')) + target.to(device) + decoder = DotProductDecoder().to(device) + + saved = [] + kinds = ["ft_last", "ft_all", "pr_last", "pr_all"] + + for i in range(args.num_variants): + kind = kinds[i % 4] + + enc = get_encoder(arch, dataset.num_node_features, hidden, num_layers=layers, dropout=0.5) + enc.load_state_dict(copy.deepcopy(target.state_dict())) + enc.to(device) + + if kind == "pr_last": + reinit_last_gnn_layer(enc) + elif kind == "pr_all": + reinit_all_gnn_layers(enc) + + if kind in ("ft_last", "pr_last"): + freeze_all(enc); unfreeze_last_gnn_layer(enc) + else: + unfreeze_all(enc) + + opt = torch.optim.Adam(filter(lambda p: p.requires_grad, enc.parameters()), + lr=args.lr, weight_decay=args.wd) + + best_val_auc, best_state = -1.0, None + for _ in range(args.epochs): + _ = train_epoch_lp(enc, decoder, train_data, opt, device) + val_auc, val_ap = eval_split_auc_ap(enc, decoder, val_data, "val", device) + if val_auc > best_val_auc: + best_val_auc = val_auc + best_state = {k: v.detach().cpu().clone() for k, v in enc.state_dict().items()} + + enc.load_state_dict(best_state) + + out_path = f"{args.out_dir}/lp_ftpr_{i:03d}.pt" + meta_out = { + "task": "link_prediction", + "dataset": "CiteSeer", + "arch": arch, + "hidden": hidden, + "layers": layers, + "pos_kind": kind, + "val_auc": float(best_val_auc), + } + save_model(enc.state_dict(), out_path, meta_out) + saved.append(out_path) + print(f"[ftpr:{kind}] Saved {out_path} val_AUC={best_val_auc:.4f}") + + print(f"Total LP FT/PR positives saved: {len(saved)}") + + +if __name__ == '__main__': + main() diff --git a/examples/link_pred/fingerprint_generator_lp.py b/examples/link_pred/fingerprint_generator_lp.py new file mode 100644 index 0000000..9403c0f --- /dev/null +++ 
b/examples/link_pred/fingerprint_generator_lp.py @@ -0,0 +1,265 @@ +# Fingerprint generation & Univerifier training for LINK PREDICTION on CiteSeer. +# - loads LP encoders + dot-product decoder +# - feature vector per model = concatenated EDGE logits over P fingerprints (each contributes m logits) + + +import argparse, glob, json, math, random, time, torch +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path +from typing import List +from torch_geometric.datasets import Planetoid +from torch_geometric.utils import dense_to_sparse, to_undirected +from gcn_lp import get_encoder, DotProductDecoder + +def set_seed(s): + random.seed(s); torch.manual_seed(s) + +def load_meta(path): + with open(path, 'r') as f: + return json.load(f) + +def list_paths_from_globs(globs: List[str]) -> List[str]: + out = [] + for g in globs: + out.extend(glob.glob(g)) + return sorted(out) + +class FPVerifier(nn.Module): + # Arch: [128, 64, 32] + LeakyReLU, sigmoid output + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid() + ) + + def forward(self, x): + return self.net(x) + +def get_lp_encoder(arch: str, in_dim: int, hidden: int, layers: int = 3): + return get_encoder(arch, in_dim, hidden, num_layers=layers, dropout=0.5) + +def load_encoder_from_pt(pt_path: str, in_dim: int): + meta = json.load(open(pt_path.replace('.pt', '.json'), 'r')) + enc = get_lp_encoder(meta["arch"], in_dim, meta["hidden"], layers=meta.get("layers", 3)) + enc.load_state_dict(torch.load(pt_path, map_location='cpu')) + enc.eval() + return enc, meta + +# LP fingerprint forward: encoder -> embeddings -> decoder over probe edges +def forward_on_fp(encoder, decoder, fp): + X = fp["X"] + A = fp["A"] + n = X.size(0) + + A_bin = (A > 0.5).float() + A_sym = torch.maximum(A_bin, A_bin.t()) + edge_index = 
dense_to_sparse(A_sym)[0]
+    # Guard: a fingerprint whose thresholded adjacency is empty would give the
+    # encoder no edges, so fall back to a ring graph over all n nodes.
+    if edge_index.numel() == 0:
+        idx = torch.arange(n, dtype=torch.long)
+        edge_index = torch.stack([idx, (idx + 1) % n], dim=0)
+    edge_index = to_undirected(edge_index)
+
+    # node embeddings
+    z = encoder(X, edge_index)
+
+    # Probe edges: pair each selected node with the next selected node
+    # (cyclically), so m selected nodes yield m probed edges.
+    sel = fp["node_idx"]
+    if sel.numel() == 1:
+        u = sel
+        v = torch.tensor([(sel.item() + 1) % n], dtype=torch.long)
+    else:
+        u = sel
+        v = torch.roll(sel, shifts=-1, dims=0)
+    probe_edge = torch.stack([u, v], dim=0)
+
+    logits = decoder(z, probe_edge)
+    return logits
+
+def concat_for_model(encoder, decoder, fingerprints):
+    # Feature vector for one model: the edge logits of every fingerprint,
+    # concatenated in fingerprint order (length = P * m).
+    vecs = [forward_on_fp(encoder, decoder, fp) for fp in fingerprints]
+    return torch.cat(vecs, dim=-1)
+
+def compute_loss(encoders_pos, encoders_neg, fingerprints, V, decoder):
+    # Verifier objective L = E[log V(z_pos)] + E[log(1 - V(z_neg))].
+    # Returns (L, Zp, Zn) where Zp/Zn are the stacked per-model feature
+    # vectors for positives/negatives. Raises if either set is empty.
+    z_pos = [concat_for_model(e, decoder, fingerprints) for e in encoders_pos]
+    z_neg = [concat_for_model(e, decoder, fingerprints) for e in encoders_neg]
+    if not z_pos or not z_neg:
+        raise RuntimeError("Need both positive and negative models.")
+    Zp = torch.stack(z_pos)
+    Zn = torch.stack(z_neg)
+
+    # Clamp keeps log() away from 0 and 1 for numerical stability.
+    yp = V(Zp).clamp(1e-6, 1-1e-6)
+    yn = V(Zn).clamp(1e-6, 1-1e-6)
+    L = torch.log(yp).mean() + torch.log(1 - yn).mean()
+    return L, Zp, Zn
+
+def feature_ascent_step(encoders_pos, encoders_neg, fingerprints, V, decoder, alpha=0.01):
+    # One gradient-ascent step on the fingerprint FEATURES (X) only; the
+    # adjacency A is updated separately by discrete edge flips.
+    # ascent on X only
+    for fp in fingerprints:
+        fp["X"].requires_grad_(True)
+        fp["A"].requires_grad_(False)
+
+    L, _, _ = compute_loss(encoders_pos, encoders_neg, fingerprints, V, decoder)
+    # allow_unused: a fingerprint can be disconnected from L (g is None);
+    # such fingerprints simply receive a zero update below.
+    grads = torch.autograd.grad(
+        L, [fp["X"] for fp in fingerprints],
+        retain_graph=False, create_graph=False, allow_unused=True
+    )
+    with torch.no_grad():
+        for fp, g in zip(fingerprints, grads):
+            if g is None:
+                g = torch.zeros_like(fp["X"])
+            fp["X"].add_(alpha * g)
+            # Keep features in a bounded box so the ascent cannot diverge.
+            fp["X"].clamp_(-5.0, 5.0)
+    # NOTE(review): fp["X"].requires_grad stays True after this step — later
+    # no_grad callers are unaffected, but confirm this is intended.
+
+def edge_flip_candidates(A: torch.Tensor, budget: int):
+    # Return up to `budget` upper-triangular index pairs whose entries are
+    # closest to 0.5, i.e. the most "undecided" edges — cheapest to flip.
+    n = A.size(0)
+    tri_i, tri_j = torch.triu_indices(n, n, offset=1)
+    scores = torch.abs(0.5 - A[tri_i, tri_j])
+    order = torch.argsort(scores)
+    picks = 
order[:min(budget, len(order))]
+    return tri_i[picks], tri_j[picks]
+
+def edge_flip_step(encoders_pos, encoders_neg, fingerprints, V, decoder, flip_k=8):
+    # Greedy discrete update of each fingerprint's adjacency: score candidate
+    # edge toggles one at a time against the verifier loss, then apply the
+    # flip_k toggles with the largest individual gains.
+    # NOTE: gains are measured per single flip, so interactions between the
+    # applied flips are ignored (greedy approximation).
+    for fp_idx, fp in enumerate(fingerprints):
+        A = fp["A"]
+        i_idx, j_idx = edge_flip_candidates(A, flip_k * 4)  # candidate pool
+        # Baseline loss with the current adjacency of THIS fingerprint
+        # (recomputed per fingerprint because earlier flips change it).
+        with torch.no_grad():
+            base_L, _, _ = compute_loss(encoders_pos, encoders_neg, fingerprints, V, decoder)
+
+        gains = []
+        for i, j in zip(i_idx.tolist(), j_idx.tolist()):
+            with torch.no_grad():
+                old = float(A[i, j])
+                new = 1.0 - old
+                # toggle in place
+                A[i, j] = new; A[j, i] = new
+                L_try, _, _ = compute_loss(encoders_pos, encoders_neg, fingerprints, V, decoder)
+                gain = float(L_try - base_L)
+                gains.append((gain, i, j, old))
+                # revert
+                A[i, j] = old; A[j, i] = old
+
+        # Apply the best flip_k toggles for real.
+        gains.sort(key=lambda x: x[0], reverse=True)
+        with torch.no_grad():
+            for g, i, j, old in gains[:flip_k]:
+                new = 1.0 - old
+                A[i, j] = new; A[j, i] = new
+            A.clamp_(0.0, 1.0)
+
+def train_verifier_step(encoders_pos, encoders_neg, fingerprints, V, decoder, opt):
+    # One optimizer step on the Univerifier V: gradient ascent on L,
+    # implemented as descent on -L. Returns (L, balanced accuracy), where
+    # accuracy is the mean of the positive-at-threshold-0.5 rate and the
+    # negative rate — a diagnostic only, not used for optimization.
+    # maximize L wrt V (via minimizing -L)
+    L, Zp, Zn = compute_loss(encoders_pos, encoders_neg, fingerprints, V, decoder)
+    loss = -L
+    opt.zero_grad()
+    loss.backward()
+    opt.step()
+    with torch.no_grad():
+        yp = (V(Zp) >= 0.5).float().mean().item()
+        yn = (V(Zn) < 0.5).float().mean().item()
+        acc = 0.5 * (yp + yn)
+    return float(L.item()), acc
+
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument('--target_path', default='models/target_model_lp.pt')
+    ap.add_argument('--target_meta', default='models/target_meta_lp.json')
+    # Comma-separated glob patterns; both FT/PR and distilled positives.
+    ap.add_argument('--positives_glob', default='models/positives/lp_ftpr_*.pt,models/positives/distill_lp_*.pt')
+    ap.add_argument('--negatives_glob', default='models/negatives/negative_lp_*.pt')
+
+    ap.add_argument('--P', type=int, default=64)  # number of fingerprints
+    ap.add_argument('--n', type=int, default=32)  # nodes per fingerprint
+    ap.add_argument('--iters', type=int, default=1000)  # alternations
+    
ap.add_argument('--verifier_lr', type=float, default=1e-3) + ap.add_argument('--e1', type=int, default=1) # fingerprint update epochs per alternation + ap.add_argument('--e2', type=int, default=1) # verifier update epochs per alternation + ap.add_argument('--alpha_x', type=float, default=0.01) # feature ascent step + ap.add_argument('--flip_k', type=int, default=8) # edges flipped per fp per step + ap.add_argument('--seed', type=int, default=0) + ap.add_argument('--m', type=int, default=4) # probed edges per fingerprint (via node_idx size) + args = ap.parse_args() + + start_time = time.time() + set_seed(args.seed) + Path('fingerprints').mkdir(parents=True, exist_ok=True) + + ds = Planetoid(root='data', name='CiteSeer') + in_dim = ds.num_features + + meta_t = load_meta(args.target_meta) + target_enc = get_lp_encoder(meta_t["arch"], in_dim, meta_t["hidden"], layers=meta_t.get("layers", 3)) + target_enc.load_state_dict(torch.load(args.target_path, map_location='cpu')) + target_enc.eval() + + pos_globs = [g.strip() for g in args.positives_glob.split(',') if g.strip()] + pos_paths = list_paths_from_globs(pos_globs) + neg_paths = sorted(glob.glob(args.negatives_glob)) + + enc_pos = [target_enc] + [load_encoder_from_pt(p, in_dim)[0] for p in pos_paths] + enc_neg = [load_encoder_from_pt(npath, in_dim)[0] for npath in neg_paths] + decoder = DotProductDecoder() + + print(f"[loaded] positives={len(enc_pos)} (incl. 
target) | negatives={len(enc_neg)}") + + if args.m > args.n: + raise ValueError(f"--m ({args.m}) must be <= --n ({args.n})") + + fingerprints = [] + for _ in range(args.P): + X = torch.randn(args.n, in_dim) * 0.1 + A = torch.rand(args.n, args.n) * 0.2 + 0.4 + A = torch.triu(A, diagonal=1) + A = A + A.t() + torch.diagonal(A).zero_() + idx = torch.randperm(args.n)[:args.m] + fingerprints.append({"X": X, "A": A, "node_idx": idx}) + + # Univerifier + ver_in_dim = args.P * args.m + V = FPVerifier(ver_in_dim) + optV = torch.optim.Adam(V.parameters(), lr=args.verifier_lr) + + flag = 0 + for it in range(1, args.iters + 1): + if flag == 0: + # Update fingerprints (features + edges) + for _ in range(args.e1): + feature_ascent_step(enc_pos, enc_neg, fingerprints, V, decoder, alpha=args.alpha_x) + edge_flip_step(enc_pos, enc_neg, fingerprints, V, decoder, flip_k=args.flip_k) + flag = 1 + else: + # Update verifier + diag_acc = None + for _ in range(args.e2): + Lval, acc = train_verifier_step(enc_pos, enc_neg, fingerprints, V, decoder, optV) + diag_acc = acc + flag = 0 + + if it % 10 == 0 and 'diag_acc' in locals() and diag_acc is not None: + print(f"[Iter {it}] verifier acc={diag_acc:.3f} (diagnostic)") + + clean_fps = [] + for fp in fingerprints: + clean_fps.append({ + "X": fp["X"].detach().clone(), + "A": fp["A"].detach().clone(), + "node_idx": fp["node_idx"].detach().clone(), + }) + torch.save( + {"fingerprints": clean_fps, "verifier": V.state_dict(), "ver_in_dim": ver_in_dim}, + "fingerprints/fingerprints_lp.pt" + ) + + print("Saved fingerprints/fingerprints_lp.pt") + end_time = time.time() + print("Time taken: ", (end_time - start_time)/60) + + +if __name__ == '__main__': + main() diff --git a/examples/link_pred/gcn_lp.py b/examples/link_pred/gcn_lp.py new file mode 100644 index 0000000..f3018dc --- /dev/null +++ b/examples/link_pred/gcn_lp.py @@ -0,0 +1,101 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch_geometric.nn import GCNConv, 
SAGEConv, GATConv + +class MLPEncoder(nn.Module): + def __init__(self, in_dim: int, hidden: int, num_layers: int = 3, dropout: float = 0.5): + super().__init__() + layers = [] + h = hidden + if num_layers <= 1: + layers.append(nn.Linear(in_dim, h)) + else: + layers.append(nn.Linear(in_dim, h)) + for _ in range(num_layers - 2): + layers.append(nn.ReLU()) + layers.append(nn.Dropout(dropout)) + layers.append(nn.Linear(h, h)) + layers.append(nn.ReLU()) + layers.append(nn.Dropout(dropout)) + layers.append(nn.Linear(h, h)) + self.net = nn.Sequential(*layers) + self.dropout = dropout + + def forward(self, x, edge_index): + return self.net(x) + +class GCN(nn.Module): + def __init__(self, in_dim, hidden, num_layers=3, dropout=0.5): + super().__init__() + self.convs = nn.ModuleList() + self.convs.append(GCNConv(in_dim, hidden)) + for _ in range(num_layers - 1): + self.convs.append(GCNConv(hidden, hidden)) + self.dropout = dropout + + def forward(self, x, edge_index): + for i, conv in enumerate(self.convs): + x = conv(x, edge_index) + if i != len(self.convs) - 1: + x = F.relu(x) + x = F.dropout(x, p=self.dropout, training=self.training) + return x # node embeddings + + +class GraphSAGE(nn.Module): + def __init__(self, in_dim, hidden, num_layers=3, dropout=0.5): + super().__init__() + self.convs = nn.ModuleList() + self.convs.append(SAGEConv(in_dim, hidden)) + for _ in range(num_layers - 1): + self.convs.append(SAGEConv(hidden, hidden)) + self.dropout = dropout + + def forward(self, x, edge_index): + for i, conv in enumerate(self.convs): + x = conv(x, edge_index) + if i != len(self.convs) - 1: + x = F.relu(x) + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + +class GAT(nn.Module): + def __init__(self, in_dim, hidden, num_layers=3, heads=2, dropout=0.5): + super().__init__() + self.convs = nn.ModuleList() + self.convs.append(GATConv(in_dim, hidden, heads=heads, concat=False)) + for _ in range(num_layers - 1): + self.convs.append(GATConv(hidden, hidden, 
heads=heads, concat=False)) + self.dropout = dropout + + def forward(self, x, edge_index): + for i, conv in enumerate(self.convs): + x = conv(x, edge_index) + if i != len(self.convs) - 1: + x = F.elu(x) + x = F.dropout(x, p=self.dropout, training=self.training) + return x + + +def get_encoder(arch: str, in_dim: int, hidden: int, num_layers: int = 3, dropout: float = 0.5): + arch = arch.lower() + if arch == "gcn": + return GCN(in_dim, hidden, num_layers=num_layers, dropout=dropout) + if arch in ("sage", "graphsage"): + return GraphSAGE(in_dim, hidden, num_layers=num_layers, dropout=dropout) + if arch == "gat": + return GAT(in_dim, hidden, num_layers=num_layers, dropout=dropout) + raise ValueError(f"Unknown arch: {arch}") + + +# Decoder for link prediction +class DotProductDecoder(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, z, edge_index): + # z: node embeddings [N, d] + src, dst = edge_index + return (z[src] * z[dst]).sum(dim=-1) # logits for edges diff --git a/examples/link_pred/generate_univerifier_dataset_lp.py b/examples/link_pred/generate_univerifier_dataset_lp.py new file mode 100644 index 0000000..12fc43e --- /dev/null +++ b/examples/link_pred/generate_univerifier_dataset_lp.py @@ -0,0 +1,127 @@ +""" +Build a Univerifier dataset from saved LP fingerprints. +Label 1 for positives ({target ∪ F+}) and 0 for negatives (F−). 
+Outputs a .pt with: + - X: [N_models, D] where D = P * m (m = probed edges per fingerprint) + - y: [N_models] float tensor with 1.0 (positive) or 0.0 (negative) +""" + +import argparse, glob, json, torch +from pathlib import Path +from torch_geometric.datasets import Planetoid +from torch_geometric.utils import dense_to_sparse, to_undirected + +from gcn_lp import get_encoder, DotProductDecoder + + +def list_paths_from_globs(globs_str): + globs = [g.strip() for g in globs_str.split(",") if g.strip()] + paths = [] + for g in globs: + paths.extend(glob.glob(g)) + return sorted(paths) + + +def get_lp_encoder(arch: str, in_dim: int, hidden: int, layers: int = 3): + return get_encoder(arch, in_dim, hidden, num_layers=layers, dropout=0.5) + + +def load_encoder_from_pt(pt_path: str, in_dim: int): + meta_path = pt_path.replace(".pt", ".json") + j = json.load(open(meta_path, "r")) + enc = get_lp_encoder(j["arch"], in_dim, j["hidden"], layers=j.get("layers", 3)) + enc.load_state_dict(torch.load(pt_path, map_location="cpu")) + enc.eval() + return enc + + +# LP fingerprint forward: encoder -> embeddings -> dot-product over probe edges +@torch.no_grad() +def forward_on_fp(encoder, decoder, fp): + X = fp["X"] + A = fp["A"] + n = X.size(0) + + # Binarize & symmetrize adjacency, build edge_index + A_bin = (A > 0.5).float() + A_sym = torch.maximum(A_bin, A_bin.t()) + edge_index = dense_to_sparse(A_sym)[0] + if edge_index.numel() == 0: + idx = torch.arange(n, dtype=torch.long) + edge_index = torch.stack([idx, (idx + 1) % n], dim=0) + edge_index = to_undirected(edge_index) + + z = encoder(X, edge_index) + sel = fp["node_idx"] + if sel.numel() == 1: + u = sel + v = torch.tensor([(sel.item() + 1) % n], dtype=torch.long) + else: + u = sel + v = torch.roll(sel, shifts=-1, dims=0) + probe_edge = torch.stack([u, v], dim=0) + + logits = decoder(z, probe_edge) + return logits + + +@torch.no_grad() +def concat_for_model(encoder, decoder, fps): + parts = [forward_on_fp(encoder, decoder, fp) 
for fp in fps] + return torch.cat(parts, dim=0) + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--fingerprints_path", type=str, default="fingerprints/fingerprints_lp.pt") + ap.add_argument("--target_path", type=str, default="models/target_model_lp.pt") + ap.add_argument("--target_meta", type=str, default="models/target_meta_lp.json") + ap.add_argument("--positives_glob", type=str, + default="models/positives/lp_ftpr_*.pt,models/positives/distill_lp_*.pt") + ap.add_argument("--negatives_glob", type=str, default="models/negatives/negative_lp_*.pt") + ap.add_argument("--out", type=str, default="fingerprints/univerifier_dataset_lp.pt") + args = ap.parse_args() + + ds = Planetoid(root="data", name="CiteSeer") + in_dim = ds.num_features + + pack = torch.load(args.fingerprints_path, map_location="cpu") + fps = pack["fingerprints"] + ver_in_dim_saved = pack.get("ver_in_dim", None) + + decoder = DotProductDecoder() + + tmeta = json.load(open(args.target_meta, "r")) + target_enc = get_lp_encoder(tmeta["arch"], in_dim, tmeta["hidden"], layers=tmeta.get("layers", 3)) + target_enc.load_state_dict(torch.load(args.target_path, map_location="cpu")) + target_enc.eval() + + # Positives & negatives + pos_paths = list_paths_from_globs(args.positives_glob) + neg_paths = sorted(glob.glob(args.negatives_glob)) + + encoders = [target_enc] + [load_encoder_from_pt(p, in_dim) for p in pos_paths] + \ + [load_encoder_from_pt(n, in_dim) for n in neg_paths] + labels = [1.0] * (1 + len(pos_paths)) + [0.0] * len(neg_paths) + + # Build feature matrix X and labels y + with torch.no_grad(): + z0 = concat_for_model(encoders[0], decoder, fps) + D = z0.numel() + if ver_in_dim_saved is not None and D != int(ver_in_dim_saved): + raise RuntimeError( + f"Verifier input mismatch: dataset dim {D} vs saved ver_in_dim {ver_in_dim_saved}" + ) + + X_rows = [z0] + [concat_for_model(enc, decoder, fps) for enc in encoders[1:]] + X = torch.stack(X_rows, dim=0).float() # [N, D] + y = 
torch.tensor(labels, dtype=torch.float32) # [N] + + Path(Path(args.out).parent).mkdir(parents=True, exist_ok=True) + torch.save({"X": X, "y": y}, args.out) + print(f"Saved {args.out} with {X.shape[0]} rows; dim={X.shape[1]}") + print(f"Positives: {int(sum(labels))} | Negatives: {len(labels) - int(sum(labels))}") + + +if __name__ == "__main__": + main() diff --git a/examples/link_pred/train_lp.py b/examples/link_pred/train_lp.py new file mode 100644 index 0000000..0ce62ee --- /dev/null +++ b/examples/link_pred/train_lp.py @@ -0,0 +1,183 @@ +import argparse +import json +import os +import random + +import torch +import torch.nn.functional as F +from sklearn.metrics import roc_auc_score, average_precision_score +from torch_geometric.datasets import Planetoid +from torch_geometric.transforms import RandomLinkSplit +from torch_geometric.utils import negative_sampling + +from gcn_lp import get_encoder, DotProductDecoder + + +def set_seed(seed: int): + random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def get_pos_neg_edges(d, split: str): + # positive + if hasattr(d, f"{split}_pos_edge_label_index"): + pos = getattr(d, f"{split}_pos_edge_label_index") + elif hasattr(d, "pos_edge_label_index"): + pos = d.pos_edge_label_index + elif hasattr(d, "edge_label_index") and hasattr(d, "edge_label"): + eli, el = d.edge_label_index, d.edge_label + pos = eli[:, el == 1] + elif split == "train" and hasattr(d, "edge_index"): + pos = d.edge_index + else: + raise AttributeError(f"No positive edge indices found for split='{split}'") + + # negative (may be absent for some versions/splits) + if hasattr(d, f"{split}_neg_edge_label_index"): + neg = getattr(d, f"{split}_neg_edge_label_index") + elif hasattr(d, "neg_edge_label_index"): + neg = d.neg_edge_label_index + elif hasattr(d, "edge_label_index") and hasattr(d, "edge_label"): + eli, el = d.edge_label_index, d.edge_label + neg = eli[:, el == 0] + else: + neg = None + return pos, neg + + +def 
train_step(encoder, decoder, data, device): + z = encoder(data.x.to(device), data.edge_index.to(device)) + + pos_edge, neg_edge = get_pos_neg_edges(data, "train") + if neg_edge is None: + neg_edge = negative_sampling( + edge_index=data.edge_index.to(device), + num_nodes=data.num_nodes, + num_neg_samples=pos_edge.size(1), + method="sparse", + ) + + pos_logits = decoder(z, pos_edge.to(device)) + neg_logits = decoder(z, neg_edge.to(device)) + + logits = torch.cat([pos_logits, neg_logits], dim=0) + labels = torch.cat( + [torch.ones(pos_logits.size(0), device=device), + torch.zeros(neg_logits.size(0), device=device)], + dim=0, + ) + return F.binary_cross_entropy_with_logits(logits, labels) + + +@torch.no_grad() +def evaluate(encoder, decoder, data, split: str, device): + pos_edge, neg_edge = get_pos_neg_edges(data, split) + + z = encoder(data.x.to(device), data.edge_index.to(device)) + + pos_logits = decoder(z, pos_edge.to(device)) + if neg_edge is None: + neg_edge = negative_sampling( + edge_index=data.edge_index.to(device), + num_nodes=data.num_nodes, + num_neg_samples=pos_edge.size(1), + method="sparse", + ) + neg_logits = decoder(z, neg_edge.to(device)) + + logits = torch.cat([pos_logits, neg_logits], dim=0).cpu() + labels = torch.cat( + [torch.ones(pos_logits.size(0)), + torch.zeros(neg_logits.size(0))], + dim=0, + ) + probs = torch.sigmoid(logits) + auc = roc_auc_score(labels.numpy(), probs.numpy()) + ap = average_precision_score(labels.numpy(), probs.numpy()) + return auc, ap + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--arch", default="gcn", choices=["gcn", "graphsage", "sage", "gat"]) + ap.add_argument("--hidden", type=int, default=64) + ap.add_argument("--layers", type=int, default=3) + ap.add_argument("--dropout", type=float, default=0.5) + ap.add_argument("--lr", type=float, default=1e-3) + ap.add_argument("--epochs", type=int, default=200) + ap.add_argument("--weight_decay", type=float, default=5e-4) + ap.add_argument("--seed", 
type=int, default=0) + ap.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu") + ap.add_argument("--val_ratio", type=float, default=0.05) + ap.add_argument("--test_ratio", type=float, default=0.10) + args = ap.parse_args() + + set_seed(args.seed) + device = torch.device(args.device) + + dataset = Planetoid(root="data", name="CiteSeer") + data = dataset[0] + + splitter = RandomLinkSplit( + num_val=args.val_ratio, + num_test=args.test_ratio, + is_undirected=True, + add_negative_train_samples=True, + ) + train_data, val_data, test_data = splitter(data) + train_data, val_data, test_data = train_data.to(device), val_data.to(device), test_data.to(device) + + encoder = get_encoder( + args.arch, + dataset.num_node_features, + hidden=args.hidden, + num_layers=args.layers, + dropout=args.dropout, + ).to(device) + decoder = DotProductDecoder().to(device) + + opt = torch.optim.Adam(encoder.parameters(), lr=args.lr, weight_decay=args.weight_decay) + + os.makedirs("models", exist_ok=True) + best_val_auc, best_state = 0.0, None + + for epoch in range(1, args.epochs + 1): + encoder.train() + opt.zero_grad() + loss = train_step(encoder, decoder, train_data, device) + loss.backward() + opt.step() + + if epoch % 20 == 0 or epoch == args.epochs: + encoder.eval() + val_auc, val_ap = evaluate(encoder, decoder, val_data, "val", device) + if val_auc > best_val_auc: + best_val_auc = val_auc + best_state = {k: v.detach().cpu().clone() for k, v in encoder.state_dict().items()} + print(f"Epoch {epoch:03d} | loss {loss.item():.4f} | val AUC {val_auc:.4f} | val AP {val_ap:.4f}") + + if best_state is not None: + encoder.load_state_dict(best_state) + + test_auc, test_ap = evaluate(encoder, decoder, test_data, "test", device) + print(f"Best Val AUC: {best_val_auc:.4f} | Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f}") + + torch.save(encoder.state_dict(), "models/target_model_lp.pt") + with open("models/target_meta_lp.json", "w") as f: + json.dump( + { + "task": 
"link_prediction", + "dataset": "CiteSeer", + "arch": args.arch, + "hidden": args.hidden, + "layers": args.layers, + "metrics": {"AUC": float(test_auc), "AP": float(test_ap)}, + }, + f, + indent=2, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/link_pred/train_unrelated_lp.py b/examples/link_pred/train_unrelated_lp.py new file mode 100644 index 0000000..d7216ae --- /dev/null +++ b/examples/link_pred/train_unrelated_lp.py @@ -0,0 +1,199 @@ +# Train NEGATIVE LINK-PREDICTION models on CiteSeer from scratch. + +import argparse +import json +import os +import random +from pathlib import Path + +import torch +import torch.nn as nn +import torch.nn.functional as F +from sklearn.metrics import roc_auc_score, average_precision_score +from torch_geometric.datasets import Planetoid +from torch_geometric.transforms import RandomLinkSplit +from torch_geometric.utils import negative_sampling + +from gcn_lp import get_encoder, DotProductDecoder + + +def set_seed(seed: int): + random.seed(seed); torch.manual_seed(seed); torch.cuda.manual_seed_all(seed) + + +def get_pos_neg_edges(d, split: str): + # positives + for name in (f"{split}_pos_edge_label_index", "pos_edge_label_index", + f"{split}_pos_edge_index", "pos_edge_index"): + if hasattr(d, name): + pos = getattr(d, name) + break + else: + if hasattr(d, "edge_label_index") and hasattr(d, "edge_label"): + eli, el = d.edge_label_index, d.edge_label + pos = eli[:, el == 1] + elif split == "train" and hasattr(d, "edge_index"): + pos = d.edge_index + else: + raise AttributeError(f"No positive edges found for split='{split}'") + + # negatives + for name in (f"{split}_neg_edge_label_index", "neg_edge_label_index", + f"{split}_neg_edge_index", "neg_edge_index"): + if hasattr(d, name): + neg = getattr(d, name) + break + else: + if hasattr(d, "edge_label_index") and hasattr(d, "edge_label"): + eli, el = d.edge_label_index, d.edge_label + neg = eli[:, el == 0] + else: + neg = None + + return pos, neg + + +def 
def get_lp_encoder(arch: str, in_dim: int, hidden: int, layers: int, dropout: float):
    """Validate the architecture name and build a link-prediction encoder."""
    name = arch.lower().strip()
    if name not in ("gcn", "sage", "graphsage", "gat"):
        raise ValueError(f"Unknown arch: {arch}")
    return get_encoder(name, in_dim, hidden, num_layers=layers, dropout=dropout)


def train_step(encoder, decoder, data, device):
    """One full-batch BCE-with-logits step over positive plus negative edges.

    Negatives missing from the split are sampled on the fly, one per positive.
    Returns the scalar loss tensor (caller backpropagates).
    """
    z = encoder(data.x.to(device), data.edge_index.to(device))

    pos_edge, neg_edge = get_pos_neg_edges(data, "train")
    if neg_edge is None:
        neg_edge = negative_sampling(
            edge_index=data.edge_index.to(device),
            num_nodes=data.num_nodes,
            num_neg_samples=pos_edge.size(1),
            method="sparse",
        )

    pos_logits = decoder(z, pos_edge.to(device))
    neg_logits = decoder(z, neg_edge.to(device))
    scores = torch.cat([pos_logits, neg_logits], dim=0)
    targets = torch.cat(
        [torch.ones_like(pos_logits), torch.zeros_like(neg_logits)], dim=0
    )
    return F.binary_cross_entropy_with_logits(scores, targets)


@torch.no_grad()
def evaluate(encoder, decoder, data, split: str, device):
    """Score one split and return ``(AUC, AP)`` as Python floats.

    Like `train_step`, samples negatives if the split provides none.
    """
    pos_edge, neg_edge = get_pos_neg_edges(data, split)

    z = encoder(data.x.to(device), data.edge_index.to(device))
    pos_logits = decoder(z, pos_edge.to(device))
    if neg_edge is None:
        neg_edge = negative_sampling(
            edge_index=data.edge_index.to(device),
            num_nodes=data.num_nodes,
            num_neg_samples=pos_edge.size(1),
            method="sparse",
        )
    neg_logits = decoder(z, neg_edge.to(device))

    probs = torch.sigmoid(torch.cat([pos_logits, neg_logits], dim=0).cpu()).numpy()
    labels = torch.cat(
        [torch.ones(pos_logits.size(0)), torch.zeros(neg_logits.size(0))], dim=0
    ).numpy()
    return float(roc_auc_score(labels, probs)), float(average_precision_score(labels, probs))
def main():
    """Train `--count` unrelated (negative) LP models on CiteSeer and save them.

    Each model i gets its own architecture (round-robin over ``--archs``) and
    its own seed, and is checkpointed to ``models/negatives/negative_lp_NNN.pt``
    with a sibling ``.json`` metadata file.
    """
    ap = argparse.ArgumentParser(description="Train unrelated LP (negative) models on CiteSeer")
    ap.add_argument('--count', type=int, default=100)
    ap.add_argument('--archs', type=str, default='gcn,sage,gat')
    ap.add_argument('--epochs', type=int, default=200)
    ap.add_argument('--lr', type=float, default=1e-3)
    ap.add_argument('--wd', type=float, default=5e-4)
    ap.add_argument('--hidden', type=int, default=64)
    ap.add_argument('--layers', type=int, default=3)
    ap.add_argument('--dropout', type=float, default=0.5)
    ap.add_argument('--seed', type=int, default=123)
    ap.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
    ap.add_argument('--val_ratio', type=float, default=0.05)
    ap.add_argument('--test_ratio', type=float, default=0.10)
    ap.add_argument('--start_index', type=int, default=0)
    args = ap.parse_args()

    device = torch.device(args.device)
    os.makedirs("models/negatives", exist_ok=True)

    # Dataset & edge-level split
    dataset = Planetoid(root='data', name='CiteSeer')
    splitter = RandomLinkSplit(
        num_val=args.val_ratio,
        num_test=args.test_ratio,
        is_undirected=True,
        add_negative_train_samples=True,
    )
    train_data, val_data, test_data = splitter(dataset[0])
    train_data, val_data, test_data = (
        train_data.to(device), val_data.to(device), test_data.to(device))

    arch_list = [a.strip() for a in args.archs.split(',') if a.strip()]
    saved = []

    for i in range(args.count):
        idx = args.start_index + i
        seed_i = args.seed + idx
        # BUG FIX: `arch` was assigned from `idx` and then immediately
        # overwritten with an `i`-based index, silently ignoring
        # --start_index; and `seed_i` was computed but never applied, so
        # per-model runs were not reproducible.
        set_seed(seed_i)
        arch = arch_list[idx % len(arch_list)]

        encoder = get_lp_encoder(arch, dataset.num_node_features, args.hidden,
                                 args.layers, args.dropout).to(device)
        decoder = DotProductDecoder().to(device)

        opt = torch.optim.Adam(encoder.parameters(), lr=args.lr, weight_decay=args.wd)

        best_val_auc, best_state = -1.0, None
        for ep in range(1, args.epochs + 1):
            encoder.train()
            opt.zero_grad()
            loss = train_step(encoder, decoder, train_data, device)
            loss.backward()
            opt.step()

            # Validate sparingly; keep the best encoder state on CPU.
            if ep % 20 == 0 or ep == args.epochs:
                encoder.eval()
                val_auc, val_ap = evaluate(encoder, decoder, val_data, "val", device)
                if val_auc > best_val_auc:
                    best_val_auc = val_auc
                    best_state = {k: v.detach().cpu().clone()
                                  for k, v in encoder.state_dict().items()}
                print(f"[neg {i:03d} | {arch}] epoch {ep:03d} | loss {loss.item():.4f} | "
                      f"val AUC {val_auc:.4f} | val AP {val_ap:.4f}")

        if best_state is not None:
            encoder.load_state_dict(best_state)

        test_auc, test_ap = evaluate(encoder, decoder, test_data, "test", device)

        out_path = f"models/negatives/negative_lp_{idx:03d}.pt"
        torch.save(encoder.state_dict(), out_path)
        meta = {
            "task": "link_prediction",
            "dataset": "CiteSeer",
            "arch": arch,
            "hidden": args.hidden,
            "layers": args.layers,
            "dropout": args.dropout,
            "seed": seed_i,
            "best_val_auc": float(best_val_auc),
            "test_auc": float(test_auc),
            "test_ap": float(test_ap),
        }
        with open(out_path.replace('.pt', '.json'), 'w') as f:
            json.dump(meta, f, indent=2)

        saved.append(out_path)
        print(f"Saved NEGATIVE {i:03d} arch={arch} best_val_AUC={best_val_auc:.4f} "
              f"test AUC={test_auc:.4f} AP={test_ap:.4f} -> {out_path}")


if __name__ == "__main__":
    main()
def set_seed(s):
    """Seed the Python and torch RNGs (CPU and all CUDA devices)."""
    random.seed(s)
    torch.manual_seed(s)
    torch.cuda.manual_seed_all(s)


def make_masks_like(train_seed=0):
    """Load Cora and attach deterministic 70/10/20 random split masks.

    Returns (dataset, data) with ``train_mask``/``val_mask``/``test_mask`` set.
    """
    ds = Planetoid(root='data/cora', name='Cora')
    data = ds[0]
    g = torch.Generator().manual_seed(train_seed)
    order = torch.randperm(data.num_nodes, generator=g)
    n_tr = int(0.7 * data.num_nodes)
    n_va = int(0.1 * data.num_nodes)
    tr, va, te = order[:n_tr], order[n_tr:n_tr + n_va], order[n_tr + n_va:]
    for name, sel in (("train_mask", tr), ("val_mask", va), ("test_mask", te)):
        mask = torch.zeros(data.num_nodes, dtype=torch.bool)
        mask[sel] = True
        setattr(data, name, mask)
    return ds, data


@torch.no_grad()
def teacher_logits_on_nodes(model, x, edge_index, nodes):
    """Teacher logits computed on the full graph, restricted to `nodes`."""
    model.eval()
    return model(x, edge_index)[nodes]


def sample_node_subgraph(num_nodes, low=0.5, high=0.8):
    """Sample a random sorted node subset covering `low`..`high` of the graph."""
    k = int(random.uniform(low, high) * num_nodes)
    return torch.randperm(num_nodes)[:k].sort().values


def kd_loss(student_logits, teacher_logits):
    """Distillation loss: MSE between raw student and teacher logits."""
    return F.mse_loss(student_logits, teacher_logits)


def main():
    """Distill the NC target into `--count_per_arch` students per architecture."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--meta_path', default='models/target_meta_nc.json')
    ap.add_argument('--target_path', default='models/target_model_nc.pt')
    ap.add_argument('--archs', default='gat,sage')
    ap.add_argument('--epochs', type=int, default=10)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--wd', type=float, default=5e-4)
    ap.add_argument('--seed', type=int, default=0)
    ap.add_argument('--count_per_arch', type=int, default=50)
    ap.add_argument('--out_dir', type=str, default='models/positives')
    args = ap.parse_args()

    # BUG FIX: --seed was accepted but set_seed was never called, so subgraph
    # sampling and student initialisation were not reproducible.
    set_seed(args.seed)

    Path(args.out_dir).mkdir(parents=True, exist_ok=True)

    with open(args.meta_path, 'r') as f:
        meta = json.load(f)
    ds, data = make_masks_like(train_seed=args.seed)
    in_dim, num_classes = ds.num_features, ds.num_classes

    teacher = get_model(meta['arch'], in_dim, meta['hidden'], num_classes)
    teacher.load_state_dict(torch.load(args.target_path, map_location='cpu'))
    teacher.eval()

    archs = [a.strip() for a in args.archs.split(',') if a.strip()]
    saved = []
    for arch in archs:
        for i in range(args.count_per_arch):
            student = get_model(arch, in_dim, 64, num_classes)
            opt = torch.optim.Adam(student.parameters(), lr=args.lr, weight_decay=args.wd)

            for _ in range(args.epochs):
                student.train()
                opt.zero_grad()
                # Random induced subgraph: the teacher sees the full graph,
                # the student only the subgraph, and logits are matched on
                # the shared nodes.
                idx = sample_node_subgraph(data.num_nodes, 0.5, 0.8)
                e_idx, _ = subgraph(idx, data.edge_index, relabel_nodes=True)
                x_sub = data.x[idx]
                with torch.no_grad():
                    t_logits = teacher_logits_on_nodes(teacher, data.x, data.edge_index, idx)
                s_logits = student(x_sub, e_idx)
                loss = kd_loss(s_logits, t_logits)
                loss.backward()
                opt.step()

            out_pt = f'{args.out_dir}/distill_nc_{arch}_{i:03d}.pt'
            torch.save(student.state_dict(), out_pt)
            with open(out_pt.replace('.pt', '.json'), 'w') as f:
                json.dump({"arch": arch, "hidden": 64, "num_classes": num_classes,
                           "pos_kind": "distill"}, f)
            saved.append(out_pt)
            print(f"[distill-nc] saved {out_pt}")
    print(f"Saved {len(saved)} distilled positives.")


if __name__ == '__main__':
    main()
+""" + +import argparse, glob, json, math, torch, os +import numpy as np +import matplotlib.pyplot as plt +from torch_geometric.datasets import Planetoid +from torch_geometric.utils import dense_to_sparse +from gcn_nc import get_model +import torch.nn.functional as F + +import torch.nn as nn +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid(), + ) + def forward(self, x): + return self.net(x) + + +@torch.no_grad() +def forward_on_fp(model, fp): + + X = fp["X"] + A = fp["A"] + idx = fp["node_idx"] + + A_bin = (A > 0.5).float() + A_sym = torch.triu(A_bin, diagonal=1) + A_sym = A_sym + A_sym.t() + edge_index = dense_to_sparse(A_sym)[0] + + if edge_index.numel() == 0: + n = X.size(0) + edge_index = torch.arange(n, dtype=torch.long).repeat(2, 1) + + logits = model(X, edge_index) + sel = logits[idx, :] + return sel.reshape(-1) + + +@torch.no_grad() +def concat_for_model(model, fps): + parts = [forward_on_fp(model, fp) for fp in fps] + return torch.cat(parts, dim=0) + + +def list_paths_from_globs(globs_str): + globs = [g.strip() for g in globs_str.split(",") if g.strip()] + paths = [] + for g in globs: + paths.extend(glob.glob(g)) + return sorted(paths) + + +def load_model_from_pt(pt_path, in_dim): + meta_path = pt_path.replace(".pt", ".json") + j = json.load(open(meta_path, "r")) + m = get_model(j["arch"], in_dim, j["hidden"], j["num_classes"]) + m.load_state_dict(torch.load(pt_path, map_location="cpu")) + m.eval() + return m + +# KL divergence helpers +def softmax_logits(x): + return F.softmax(x, dim=-1) + +@torch.no_grad() +def forward_nc_logits(model, fp): + X, A, idx = fp["X"], fp["A"], fp["node_idx"] + A_bin = (A > 0.5).float() + A_sym = torch.triu(A_bin, diagonal=1); A_sym = A_sym + A_sym.t() + edge_index = dense_to_sparse(A_sym)[0] + if 
@torch.no_grad()
def forward_nc_logits(model, fp):
    """Run `model` on one fingerprint and return raw logits for its probe nodes."""
    X, A, idx = fp["X"], fp["A"], fp["node_idx"]
    A_bin = (A > 0.5).float()
    A_sym = torch.triu(A_bin, diagonal=1)
    A_sym = A_sym + A_sym.t()
    edge_index = dense_to_sparse(A_sym)[0]
    if edge_index.numel() == 0:
        # No edges survived binarisation: use self-loops so conv layers still run.
        n = X.size(0)
        edge_index = torch.arange(n, dtype=torch.long).repeat(2, 1)
    logits = model(X, edge_index)
    return logits[idx, :]


def sym_kl(p, q, eps=1e-8):
    """Symmetric KL divergence between probability rows `p` and `q`.

    Both inputs are clamped at `eps` before taking logs for numerical safety.
    Returns one value per row (reduction over the last dimension only).
    """
    p = p.clamp(min=eps)
    q = q.clamp(min=eps)
    kl_pq = (p * (p.log() - q.log())).sum(dim=-1)
    kl_qp = (q * (q.log() - p.log())).sum(dim=-1)
    return 0.5 * (kl_pq + kl_qp)


@torch.no_grad()
def model_nc_kl_to_target(suspect, target, fps):
    """Mean symmetric KL between suspect and target predictions over all fingerprints."""
    vals = []
    for fp in fps:
        t = softmax_logits(forward_nc_logits(target, fp))
        s = softmax_logits(forward_nc_logits(suspect, fp))
        vals.append(sym_kl(s, t).mean().item())
    return float(np.mean(vals))


def main():
    """Evaluate the trained Univerifier on Cora NC models.

    Sweeps the decision threshold to report Robustness (TPR on positives),
    Uniqueness (TNR on negatives), ARUC, and mean test accuracy; also plots a
    KL-divergence histogram of suspects against the target.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--fingerprints_path', type=str, default='fingerprints/fingerprints_nc.pt')
    ap.add_argument('--verifier_path', type=str, default='fingerprints/univerifier_nc.pt')
    ap.add_argument('--target_path', type=str, default='models/target_model_nc.pt')
    ap.add_argument('--target_meta', type=str, default='models/target_meta_nc.json')
    ap.add_argument('--positives_glob', type=str,
                    default='models/positives/nc_ftpr_*.pt,models/positives/distill_nc_*.pt')
    ap.add_argument('--negatives_glob', type=str, default='models/negatives/negative_nc_*.pt')
    ap.add_argument('--out_plot', type=str, default='plots/cora_nc_aruc.png')
    ap.add_argument('--out_plot_kl', type=str, default='plots/cora_nc_kl.png')
    ap.add_argument('--save_csv', type=str, default='',
                    help='Optional: path to save thresholds/robustness/uniqueness CSV')
    args = ap.parse_args()

    ds = Planetoid(root="data/cora", name="Cora")
    in_dim = ds.num_features

    # Load fingerprints (with node_idx)
    pack = torch.load(args.fingerprints_path, map_location="cpu")
    fps = pack["fingerprints"]
    ver_in_dim_saved = int(pack.get("ver_in_dim", 0))

    # Load models (target + positives + negatives)
    tmeta = json.load(open(args.target_meta, "r"))
    target = get_model(tmeta["arch"], in_dim, tmeta["hidden"], tmeta["num_classes"])
    target.load_state_dict(torch.load(args.target_path, map_location="cpu"))
    target.eval()

    pos_paths = list_paths_from_globs(args.positives_glob)
    neg_paths = sorted(glob.glob(args.negatives_glob))

    models_pos = [target] + [load_model_from_pt(p, in_dim) for p in pos_paths]
    models_neg = [load_model_from_pt(n, in_dim) for n in neg_paths]

    # Probe once to infer the verifier input dim and sanity-check it.
    D = concat_for_model(models_pos[0], fps).numel()
    if ver_in_dim_saved and ver_in_dim_saved != D:
        raise RuntimeError(f"Verifier input mismatch: D={D} vs ver_in_dim_saved={ver_in_dim_saved}")

    V = FPVerifier(D)
    V.load_state_dict(torch.load(args.verifier_path, map_location='cpu'))
    V.eval()

    with torch.no_grad():
        pos_scores = np.array(
            [float(V(concat_for_model(m, fps).unsqueeze(0))) for m in models_pos])
        neg_scores = np.array(
            [float(V(concat_for_model(m, fps).unsqueeze(0))) for m in models_neg])

    # Sweep thresholds
    ts = np.linspace(0.0, 1.0, 201)
    robustness = np.array([(pos_scores >= t).mean() for t in ts])  # TPR on positives
    uniqueness = np.array([(neg_scores < t).mean() for t in ts])   # TNR on negatives
    overlap = np.minimum(robustness, uniqueness)

    # Mean Test Accuracy at each threshold
    Npos, Nneg = len(pos_scores), len(neg_scores)
    acc_curve = np.array([((pos_scores >= t).sum() + (neg_scores < t).sum()) / (Npos + Nneg)
                          for t in ts])
    mean_test_acc = float(acc_curve.mean())

    aruc = np.trapz(overlap, ts)

    idx_best = int(np.argmax(overlap))
    t_best = float(ts[idx_best])
    rob_best = float(robustness[idx_best])
    uniq_best = float(uniqueness[idx_best])
    acc_best = 0.5 * (rob_best + uniq_best)

    print(f"Mean Test Accuracy (avg over thresholds) = {mean_test_acc:.4f}")
    print(f"Models: +{len(models_pos)} | -{len(models_neg)} | D={D}")
    print(f"ARUC = {aruc:.4f}")
    print(f"Best threshold = {t_best:.3f} | Robustness={rob_best:.3f} | Uniqueness={uniq_best:.3f} | Acc={acc_best:.3f}")

    if args.save_csv:
        import csv
        with open(args.save_csv, 'w', newline='') as f:
            w = csv.writer(f)
            w.writerow(['threshold', 'robustness', 'uniqueness', 'min_curve', 'accuracy'])
            for t, r, u, s, a in zip(ts, robustness, uniqueness, overlap, acc_curve):
                w.writerow([f"{t:.5f}", f"{r:.6f}", f"{u:.6f}", f"{s:.6f}", f"{a:.6f}"])
        print(f"Saved CSV to {args.save_csv}")

    # ARUC Plot
    os.makedirs(os.path.dirname(args.out_plot), exist_ok=True)
    fig, ax = plt.subplots(figsize=(7.5, 4.8), dpi=160)
    # BUG FIX: the title previously read "CiteSeer link-prediction" (copied
    # from the LP script); this script evaluates Cora node classification.
    ax.set_title(f"Cora node-classification • ARUC={aruc:.3f}", fontsize=14)
    ax.grid(True, which='both', linestyle=':', linewidth=0.8, alpha=0.6)
    ax.plot(ts, robustness, color="#ff0000", linewidth=2.0, label="Robustness (TPR)")
    ax.plot(ts, uniqueness, color="#0000ff", linestyle="--", linewidth=2.0, label="Uniqueness (TNR)")
    ax.fill_between(ts, overlap, color="#bbbbbb", alpha=0.25, label="Overlap (ARUC region)")
    ax.set_xlabel("Threshold (τ)", fontsize=12)
    ax.set_ylabel("Score", fontsize=12)
    ax.set_xlim(0.0, 1.0)
    ax.set_ylim(0.0, 1.0)
    ax.tick_params(labelsize=11)
    ax.legend(loc="lower left", frameon=True, framealpha=0.85,
              facecolor="white", edgecolor="0.8")
    plt.tight_layout()
    plt.savefig(args.out_plot, bbox_inches="tight")
    print(f"Saved plot to {args.out_plot}")

    # KL divergence of each suspect to the target (exclude the target itself).
    pos_divs = np.array([model_nc_kl_to_target(m, target, fps) for m in models_pos[1:]])
    neg_divs = np.array([model_nc_kl_to_target(m, target, fps) for m in models_neg])

    print(f"[KL] F+ mean±std = {pos_divs.mean():.4f}±{pos_divs.std():.4f} | "
          f"F- mean±std = {neg_divs.mean():.4f}±{neg_divs.std():.4f}")

    os.makedirs(os.path.dirname(args.out_plot_kl), exist_ok=True)
    plt.figure(figsize=(4.8, 3.2), dpi=160)
    plt.hist(pos_divs, bins=30, density=True, alpha=0.35, color="r", label="Surrogate GNN")
    plt.hist(neg_divs, bins=30, density=True, alpha=0.35, color="b", label="Irrelevant GNN")
    plt.title("Node Classification")
    plt.xlabel("KL Divergence")
    plt.ylabel("Density")
    plt.legend()
    plt.tight_layout()
    plt.savefig(args.out_plot_kl, bbox_inches="tight")
    print(f"Saved KL plot to {args.out_plot_kl}")


if __name__ == "__main__":
    main()
def set_seed(seed):
    """Seed the Python and torch RNGs (CPU and all CUDA devices)."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def make_masks(num_nodes, train_p=0.7, val_p=0.1, seed=0):
    """Deterministic random (train, val, test) boolean masks over `num_nodes`."""
    g = torch.Generator().manual_seed(seed)
    order = torch.randperm(num_nodes, generator=g)
    n_train = int(train_p * num_nodes)
    n_val = int(val_p * num_nodes)
    splits = (order[:n_train], order[n_train:n_train + n_val], order[n_train + n_val:])
    masks = []
    for sel in splits:
        m = torch.zeros(num_nodes, dtype=torch.bool)
        m[sel] = True
        masks.append(m)
    return masks[0], masks[1], masks[2]


def train_epoch(model, data, optimizer, mask):
    """One full-batch cross-entropy step on the nodes selected by `mask`."""
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    loss = F.cross_entropy(out[mask], data.y[mask])
    loss.backward()
    optimizer.step()
    return float(loss.item())


@torch.no_grad()
def eval_mask(model, data, mask):
    """Accuracy of `model` on the nodes selected by `mask`."""
    model.eval()
    pred = model(data.x, data.edge_index).argmax(dim=-1)
    return float((pred[mask] == data.y[mask]).float().mean())


def reinit_last_layer(model):
    """Re-initialise the last nn.Linear found by named_modules traversal.

    NOTE(review): conv layers may contain internal nn.Linear submodules, so
    "last Linear" depends on module traversal order — confirm this matches
    the intended output layer for each architecture.
    """
    last = None
    for _, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            last = module
    if last is not None:
        for p in last.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_uniform_(p)
            else:
                torch.nn.init.zeros_(p)


def reinit_all(model):
    """Re-initialise every learnable module of `model`.

    BUG FIX: the original applied Xavier init to Linear layers and *then*
    called reset_parameters() on every module, which overwrote the Xavier
    weights — the custom init was dead code.  Reset first, then apply the
    explicit Linear init so it actually takes effect.  Also narrowed the
    bare `except:` to `except Exception`.
    """
    for m in model.modules():
        if hasattr(m, 'reset_parameters'):
            try:
                m.reset_parameters()
            except Exception:
                pass  # best-effort: some containers expose no real params
    for m in model.modules():
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                torch.nn.init.zeros_(m.bias)


def save_model(model, path, meta):
    """Save a state dict to `path` and its metadata to the sibling .json file."""
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), str(path))
    with open(str(path).replace('.pt', '.json'), 'w') as f:
        json.dump(meta, f)


def main():
    """Create fine-tuned / partially-retrained pirate variants of the NC target.

    Variant kinds cycle through: 0 fine-tune last layer, 1 fine-tune all,
    2 re-init last + fine-tune last, 3 re-init all + fine-tune all.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--target_path', type=str, default='models/target_model_nc.pt')
    ap.add_argument('--meta_path', type=str, default='models/target_meta_nc.json')
    ap.add_argument('--epochs', type=int, default=10)
    ap.add_argument('--lr', type=float, default=0.01)
    ap.add_argument('--wd', type=float, default=5e-4)
    ap.add_argument('--seed', type=int, default=0)
    ap.add_argument('--num_variants', type=int, default=100)
    ap.add_argument('--out_dir', type=str, default='models/positives')
    args = ap.parse_args()

    set_seed(args.seed)
    with open(args.meta_path, 'r') as f:
        meta = json.load(f)

    dataset = Planetoid(root='data/cora', name='Cora')
    data = dataset[0]
    train_mask, val_mask, test_mask = make_masks(data.num_nodes, 0.7, 0.1, seed=args.seed)
    data.train_mask, data.val_mask, data.test_mask = train_mask, val_mask, test_mask

    target = get_model(meta["arch"], data.num_features, meta["hidden"], meta["num_classes"])
    target.load_state_dict(torch.load(args.target_path, map_location='cpu'))

    saved = []
    for i in range(args.num_variants):
        kind = i % 4  # 0:FT-last, 1:FT-all, 2:PR-last, 3:PR-all
        m = get_model(meta["arch"], data.num_features, meta["hidden"], meta["num_classes"])
        m.load_state_dict(target.state_dict())

        if kind == 2:
            reinit_last_layer(m)
        elif kind == 3:
            reinit_all(m)

        if kind in (0, 2):
            # Freeze everything except the output layer.
            # NOTE(review): name matching ('conv2' / 'mlp.3') is arch-specific;
            # confirm it covers every architecture get_model can return.
            for name, p in m.named_parameters():
                p.requires_grad = ('conv2' in name) or ('mlp.3' in name)
        else:
            for p in m.parameters():
                p.requires_grad = True

        opt = torch.optim.Adam((p for p in m.parameters() if p.requires_grad),
                               lr=args.lr, weight_decay=args.wd)
        best_val, best_state = -1, None
        for _ in range(args.epochs):
            train_epoch(m, data, opt, data.train_mask)
            val = eval_mask(m, data, data.val_mask)
            if val > best_val:
                best_val = val
                best_state = {k: v.cpu().clone() for k, v in m.state_dict().items()}
        m.load_state_dict(best_state)

        out_path = f"{args.out_dir}/nc_ftpr_{i:03d}.pt"
        meta_out = {"arch": meta["arch"], "hidden": meta["hidden"],
                    "num_classes": meta["num_classes"],
                    "pos_kind": ["ft_last", "ft_all", "pr_last", "pr_all"][kind]}
        save_model(m, out_path, meta_out)
        saved.append(out_path)
        print(f"Saved {out_path} val={best_val:.4f}")

    print(f"Total FT/PR positives saved: {len(saved)}")


if __name__ == '__main__':
    main()
def load_meta(path):
    """Load a JSON metadata file."""
    with open(path, 'r') as f:
        return json.load(f)


def get_model(arch: str, in_dim: int, hidden: int, num_classes: int):
    """Delegate model construction to gcn_nc.get_model (function-scope import)."""
    from gcn_nc import get_model as _get
    return _get(arch, in_dim, hidden, num_classes)


def list_paths_from_globs(globs: List[str]) -> List[str]:
    """Expand glob patterns and return one sorted, combined path list."""
    out = []
    for g in globs:
        out.extend(glob.glob(g))
    return sorted(out)


class FPVerifier(nn.Module):
    """Verifier MLP: [in -> 128 -> 64 -> 32 -> 1], LeakyReLU hidden, sigmoid out."""

    def __init__(self, in_dim: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 32),
            nn.LeakyReLU(),
            nn.Linear(32, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.net(x)


def load_model_from_pair(pt_path: str, in_dim: int):
    """Rebuild a model from a .pt checkpoint and its sibling .json metadata."""
    meta = json.load(open(pt_path.replace('.pt', '.json'), 'r'))
    m = get_model(meta["arch"], in_dim, meta["hidden"], meta["num_classes"])
    m.load_state_dict(torch.load(pt_path, map_location='cpu'))
    m.eval()
    return m, meta


def forward_on_fp(model, fp):
    """Model response on one fingerprint: logits of the m probe nodes, flattened.

    BUG FIX: this previously returned ``logits.mean(dim=0)`` (num_classes
    values per fingerprint), while the verifier is constructed with
    ``ver_in_dim = P * m * num_classes`` and the eval/dataset scripts read
    the probe-node logits — the mean-pooled vector had the wrong
    dimensionality and did not match the representation used everywhere
    else.  Also aligned binarisation/symmetrisation and the empty-edge
    fallback with eval_verifier_nc.  Kept differentiable w.r.t. fp["X"]
    for feature ascent.
    """
    X, A, idx = fp["X"], fp["A"], fp["node_idx"]
    A_bin = (A > 0.5).float()
    A_sym = torch.triu(A_bin, diagonal=1)
    A_sym = A_sym + A_sym.t()
    edge_index = dense_to_sparse(A_sym)[0]
    if edge_index.numel() == 0:
        n = X.size(0)
        edge_index = torch.arange(n, dtype=torch.long).repeat(2, 1)
    logits = model(X, edge_index)
    return logits[idx, :].reshape(-1)


def concat_for_model(model, fingerprints):
    """Concatenate one model's responses over all fingerprints into one vector."""
    vecs = [forward_on_fp(model, fp) for fp in fingerprints]
    return torch.cat(vecs, dim=-1)


def compute_loss(models_pos, models_neg, fingerprints, V):
    """Verifier objective L = E[log V(pos)] + E[log(1 - V(neg))] (to maximise).

    Returns (L, Zp, Zn) where Zp/Zn are the stacked response matrices.
    """
    z_pos = [concat_for_model(m, fingerprints) for m in models_pos]
    z_neg = [concat_for_model(m, fingerprints) for m in models_neg]
    if not z_pos or not z_neg:
        raise RuntimeError("Need both positive and negative models.")
    Zp = torch.stack(z_pos)
    Zn = torch.stack(z_neg)

    # Clamp to keep log() finite.
    yp = V(Zp).clamp(1e-6, 1 - 1e-6)
    yn = V(Zn).clamp(1e-6, 1 - 1e-6)

    L = torch.log(yp).mean() + torch.log(1 - yn).mean()
    return L, Zp, Zn


def feature_ascent_step(models_pos, models_neg, fingerprints, V, alpha=0.01):
    """Gradient-ascent step on every fingerprint's node features X."""
    for fp in fingerprints:
        fp["X"].requires_grad_(True)
        fp["A"].requires_grad_(False)

    L, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)
    grads = torch.autograd.grad(
        L, [fp["X"] for fp in fingerprints],
        retain_graph=False, create_graph=False, allow_unused=True
    )
    with torch.no_grad():
        for fp, g in zip(fingerprints, grads):
            if g is None:
                g = torch.zeros_like(fp["X"])
            fp["X"].add_(alpha * g)
            fp["X"].clamp_(-5.0, 5.0)  # keep features in a bounded range


def edge_flip_candidates(A: torch.Tensor, budget: int):
    """Upper-triangle entries closest to the 0.5 decision boundary (cheapest flips)."""
    n = A.size(0)
    tri_i, tri_j = torch.triu_indices(n, n, offset=1)
    scores = torch.abs(0.5 - A[tri_i, tri_j])
    order = torch.argsort(scores)
    picks = order[:min(budget, len(order))]
    return tri_i[picks], tri_j[picks]


def edge_flip_step(models_pos, models_neg, fingerprints, V, flip_k=8):
    """Rank-and-flip edges by gain in L, one fingerprint at a time."""
    for fp in fingerprints:
        A = fp["A"]
        i_idx, j_idx = edge_flip_candidates(A, flip_k * 4)  # candidate pool
        with torch.no_grad():
            base_L, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)

        gains = []
        for i, j in zip(i_idx.tolist(), j_idx.tolist()):
            with torch.no_grad():
                old = float(A[i, j])
                new = 1.0 - old
                # Toggle in place, measure, revert.
                A[i, j] = new
                A[j, i] = new
                L_try, _, _ = compute_loss(models_pos, models_neg, fingerprints, V)
                gains.append((float(L_try - base_L), i, j, old))
                A[i, j] = old
                A[j, i] = old

        # Flip the best k edges for this fingerprint.
        gains.sort(key=lambda x: x[0], reverse=True)
        with torch.no_grad():
            for _, i, j, old in gains[:flip_k]:
                new = 1.0 - old
                A[i, j] = new
                A[j, i] = new
            A.clamp_(0.0, 1.0)
def train_verifier_step(models_pos, models_neg, fingerprints, V, opt):
    """One optimiser step for the verifier (minimises -L).

    Returns (L, balanced accuracy) where accuracy averages the positive and
    negative hit rates at the 0.5 threshold — diagnostic only.
    """
    L, Zp, Zn = compute_loss(models_pos, models_neg, fingerprints, V)
    loss = -L
    opt.zero_grad()
    loss.backward()
    opt.step()
    with torch.no_grad():
        tpr = (V(Zp) >= 0.5).float().mean().item()
        tnr = (V(Zn) < 0.5).float().mean().item()
    return float(L.item()), 0.5 * (tpr + tnr)


def main():
    """Alternating optimisation of fingerprints (X, A) and the verifier V."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--target_path', default='models/target_model_nc.pt')
    ap.add_argument('--target_meta', default='models/target_meta_nc.json')
    ap.add_argument('--positives_glob', default='models/positives/nc_ftpr_*.pt,models/positives/distill_nc_*.pt')
    ap.add_argument('--negatives_glob', default='models/negatives/negative_nc_*.pt')

    # Hyperparams
    ap.add_argument('--P', type=int, default=64)            # number of fingerprints
    ap.add_argument('--n', type=int, default=32)            # nodes per fingerprint
    ap.add_argument('--iters', type=int, default=1000)      # alternating iterations
    ap.add_argument('--verifier_lr', type=float, default=1e-3)
    ap.add_argument('--e1', type=int, default=1)            # fingerprint epochs per alternation
    ap.add_argument('--e2', type=int, default=1)            # verifier epochs per alternation
    ap.add_argument('--alpha_x', type=float, default=0.01)  # feature-ascent step size
    ap.add_argument('--flip_k', type=int, default=4)        # edges flipped per fingerprint
    ap.add_argument('--seed', type=int, default=0)
    ap.add_argument('--m', type=int, default=64)            # sampled probe nodes per fingerprint
    args = ap.parse_args()

    set_seed(args.seed)
    Path('fingerprints').mkdir(parents=True, exist_ok=True)

    # Dataset dims
    ds = Planetoid(root='data/cora', name='Cora')
    in_dim = ds.num_features
    num_classes = ds.num_classes

    # Load {f} and F+ as "positives"; F- separately.
    meta_t = load_meta(args.target_meta)
    target = get_model(meta_t["arch"], in_dim, meta_t["hidden"], meta_t["num_classes"])
    target.load_state_dict(torch.load(args.target_path, map_location='cpu'))
    target.eval()

    pos_globs = [g.strip() for g in args.positives_glob.split(',') if g.strip()]
    pos_paths = list_paths_from_globs(pos_globs)
    neg_paths = sorted(glob.glob(args.negatives_glob))

    models_pos = [target] + [load_model_from_pair(p, in_dim)[0] for p in pos_paths]
    models_neg = [load_model_from_pair(p, in_dim)[0] for p in neg_paths]

    print(f"[loaded] positives={len(models_pos)} (incl. target) | negatives={len(models_neg)}")

    if args.m > args.n:
        raise ValueError(f"--m ({args.m}) must be <= --n ({args.n})")

    # Initialise fingerprints: small random X, symmetric A near 0.5, zero diagonal.
    fingerprints = []
    for _ in range(args.P):
        X = torch.randn(args.n, in_dim) * 0.1
        A = torch.rand(args.n, args.n) * 0.2 + 0.4
        A = torch.triu(A, diagonal=1)
        A = A + A.t()
        torch.diagonal(A).zero_()
        probe = torch.randperm(args.n)[:args.m]
        fingerprints.append({"X": X, "A": A, "node_idx": probe})

    ver_in_dim = args.P * args.m * num_classes
    V = FPVerifier(ver_in_dim)
    optV = torch.optim.Adam(V.parameters(), lr=args.verifier_lr)

    update_fingerprints = True
    diag_acc = None
    for it in range(1, args.iters + 1):
        if update_fingerprints:
            # Fingerprint phase: ascend features, then flip edges, e1 times.
            for _ in range(args.e1):
                feature_ascent_step(models_pos, models_neg, fingerprints, V, alpha=args.alpha_x)
                edge_flip_step(models_pos, models_neg, fingerprints, V, flip_k=args.flip_k)
        else:
            # Verifier phase, e2 times.
            for _ in range(args.e2):
                _, diag_acc = train_verifier_step(models_pos, models_neg, fingerprints, V, optV)
        update_fingerprints = not update_fingerprints

        if it % 10 == 0 and diag_acc is not None:
            print(f"[Iter {it}] verifier acc={diag_acc:.3f} (diagnostic)")

    clean_fps = [
        {
            "X": fp["X"].detach().clone(),
            "A": fp["A"].detach().clone(),
            "node_idx": fp["node_idx"].detach().clone(),
        }
        for fp in fingerprints
    ]
    torch.save(
        {"fingerprints": clean_fps, "verifier": V.state_dict(), "ver_in_dim": ver_in_dim},
        "fingerprints/fingerprints_nc.pt"
    )
    print("Saved fingerprints/fingerprints_nc.pt")
# Common heads
class MLPHead(nn.Module):
    """Two-layer MLP head with ReLU and dropout."""

    def __init__(self, in_dim, out_dim, hidden=64, dropout=0.5):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, out_dim)
        )

    def forward(self, x):
        return self.net(x)


class GCN(nn.Module):
    """Two-layer GCN producing node-class logits."""

    def __init__(self, in_channels, hidden_channels, out_channels, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(in_channels, hidden_channels, cached=False,
                             add_self_loops=True, normalize=True)
        self.conv2 = GCNConv(hidden_channels, out_channels, cached=False,
                             add_self_loops=True, normalize=True)
        self.dropout = dropout

    def forward(self, x, edge_index):
        h = F.relu(self.conv1(x, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.conv2(h, edge_index)  # logits for node classes


class GraphSAGE(nn.Module):
    """Two-layer GraphSAGE producing node-class logits."""

    def __init__(self, in_channels, hidden_channels, out_channels, dropout=0.5):
        super().__init__()
        self.conv1 = SAGEConv(in_channels, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, out_channels)
        self.dropout = dropout

    def forward(self, x, edge_index):
        h = F.relu(self.conv1(x, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.conv2(h, edge_index)


class GAT(nn.Module):
    """Two-layer GAT: multi-head first layer, single averaged head on output."""

    def __init__(self, in_channels, hidden_channels, out_channels, heads=8, dropout=0.6):
        super().__init__()
        self.conv1 = GATConv(in_channels, hidden_channels, heads=heads, dropout=dropout)
        # First layer concatenates heads, so the second consumes hidden*heads.
        self.conv2 = GATConv(hidden_channels * heads, out_channels, heads=1,
                             concat=False, dropout=dropout)
        self.dropout = dropout

    def forward(self, x, edge_index):
        h = F.dropout(x, p=self.dropout, training=self.training)
        h = F.elu(self.conv1(h, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.conv2(h, edge_index)


class NodeMLP(nn.Module):
    """Graph-agnostic baseline: an MLP over node features (edge_index ignored)."""

    def __init__(self, in_channels, hidden_channels, out_channels, dropout=0.5):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(in_channels, hidden_channels),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_channels, out_channels)
        )

    def forward(self, x, edge_index):
        return self.mlp(x)


def get_model(arch: str, in_dim: int, hidden: int, out_dim: int):
    """Factory: build a node-classification model by (case-insensitive) name.

    Raises:
        ValueError: for an unrecognised architecture name.
    """
    registry = {
        'gcn': GCN,
        'sage': GraphSAGE,
        'graphsage': GraphSAGE,
        'gat': GAT,
        'mlp': NodeMLP,
    }
    cls = registry.get(arch.lower())
    if cls is None:
        raise ValueError(f"Unknown arch: {arch}")
    return cls(in_dim, hidden, out_dim)
+Outputs: a .pt file with: + - X: [N_models, D] tensor, where D = P * m * num_classes + - y: [N_models] float tensor with 1.0 (positive) or 0.0 (negative) +""" + +import argparse, glob, json, torch +from torch_geometric.datasets import Planetoid +from torch_geometric.utils import dense_to_sparse +from gcn_nc import get_model + +@torch.no_grad() +def forward_on_fp(model, fp): + X = fp["X"] + A = fp["A"] + idx = fp["node_idx"] + + # Binarize & symmetrize adjacency + A_bin = (A > 0.5).float() + A_sym = torch.triu(A_bin, diagonal=1) + A_sym = A_sym + A_sym.t() + edge_index = dense_to_sparse(A_sym)[0] + + logits = model(X, edge_index) + sel = logits[idx, :] + return sel.reshape(-1) + + +@torch.no_grad() +def concat_for_model(model, fps): + parts = [forward_on_fp(model, fp) for fp in fps] + return torch.cat(parts, dim=0) + + +def list_paths_from_globs(globs_str): + globs = [g.strip() for g in globs_str.split(",") if g.strip()] + paths = [] + for g in globs: + paths.extend(glob.glob(g)) + return sorted(paths) + + +def load_model_from_pt(pt_path, in_dim): + meta_path = pt_path.replace(".pt", ".json") + j = json.load(open(meta_path, "r")) + m = get_model(j["arch"], in_dim, j["hidden"], j["num_classes"]) + m.load_state_dict(torch.load(pt_path, map_location="cpu")) + m.eval() + return m + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--fingerprints_path", type=str, default="fingerprints/fingerprints_nc.pt") + ap.add_argument("--target_path", type=str, default="models/target_model_nc.pt") + ap.add_argument("--target_meta", type=str, default="models/target_meta_nc.json") + ap.add_argument("--positives_glob", type=str, + default="models/positives/nc_ftpr_*.pt,models/positives/distill_nc_*.pt") + ap.add_argument("--negatives_glob", type=str, default="models/negatives/negative_nc_*.pt") + ap.add_argument("--out", type=str, default="fingerprints/univerifier_dataset_nc.pt") + args = ap.parse_args() + + # Dataset dims (for model reconstruction) + ds = 
Planetoid(root="data/cora", name="Cora") + in_dim = ds.num_features + num_classes = ds.num_classes + + pack = torch.load(args.fingerprints_path, map_location="cpu") + fps = pack["fingerprints"] + ver_in_dim_saved = pack.get("ver_in_dim", None) + + + tmeta = json.load(open(args.target_meta, "r")) + target = get_model(tmeta["arch"], in_dim, tmeta["hidden"], tmeta["num_classes"]) + target.load_state_dict(torch.load(args.target_path, map_location="cpu")) + target.eval() + + pos_paths = list_paths_from_globs(args.positives_glob) + neg_paths = sorted(glob.glob(args.negatives_glob)) + + models = [target] + labels = [1.0] + + for p in pos_paths: + models.append(load_model_from_pt(p, in_dim)); labels.append(1.0) + for n in neg_paths: + models.append(load_model_from_pt(n, in_dim)); labels.append(0.0) + + with torch.no_grad(): + z0 = concat_for_model(models[0], fps) + D = z0.numel() + if ver_in_dim_saved is not None and D != int(ver_in_dim_saved): + raise RuntimeError( + f"Verifier input mismatch: dataset dim {D} vs saved ver_in_dim {ver_in_dim_saved}" + ) + + X_rows = [z0] + [concat_for_model(m, fps) for m in models[1:]] + X = torch.stack(X_rows, dim=0).float() + y = torch.tensor(labels, dtype=torch.float32) + + torch.save({"X": X, "y": y}, args.out) + print(f"Saved {args.out} with {X.shape[0]} rows; dim={X.shape[1]} (num_classes={num_classes})") + print(f"Positives: {int(sum(labels))} | Negatives: {len(labels) - int(sum(labels))}") + +if __name__ == "__main__": + main() diff --git a/examples/node_class/make_suspect_nc.py b/examples/node_class/make_suspect_nc.py new file mode 100644 index 0000000..cabcbeb --- /dev/null +++ b/examples/node_class/make_suspect_nc.py @@ -0,0 +1,74 @@ +import argparse, json, random, torch +import torch.nn.functional as F +from pathlib import Path +from torch_geometric.datasets import Planetoid +from gcn_nc import get_model + +def set_seed(s): + random.seed(s); torch.manual_seed(s); torch.cuda.manual_seed_all(s) + +def make_masks(n, train_p=0.7, 
val_p=0.1, seed=0): + g = torch.Generator().manual_seed(seed) + idx = torch.randperm(n, generator=g) + n_tr = int(train_p*n); n_va = int(val_p*n) + tr, va, te = idx[:n_tr], idx[n_tr:n_tr+n_va], idx[n_tr+n_va:] + mtr = torch.zeros(n, dtype=torch.bool); mtr[tr]=True + mva = torch.zeros(n, dtype=torch.bool); mva[va]=True + mte = torch.zeros(n, dtype=torch.bool); mte[te]=True + return mtr, mva, mte + +def train_epoch(model, data, opt, mask): + model.train(); opt.zero_grad() + out = model(data.x, data.edge_index) + loss = F.cross_entropy(out[mask], data.y[mask]); loss.backward(); opt.step() + return float(loss.item()) + +@torch.no_grad() +def eval_mask(model, data, mask): + model.eval(); out = model(data.x, data.edge_index) + pred = out.argmax(dim=-1) + return float((pred[mask]==data.y[mask]).float().mean()) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--arch', default='sage', help='gcn or sage (unrelated to target)') + ap.add_argument('--hidden', type=int, default=64) + ap.add_argument('--epochs', type=int, default=200) + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--wd', type=float, default=5e-4) + ap.add_argument('--seed', type=int, default=9999, help='use a NEW seed unseen by the Univerifier') + ap.add_argument('--out_dir', default='models/suspects') + ap.add_argument('--name', default='neg_nc_seed9999') + args = ap.parse_args() + + set_seed(args.seed) + ds = Planetoid(root='data/cora', name='Cora') + data = ds[0] + mtr, mva, mte = make_masks(data.num_nodes, 0.7, 0.1, seed=args.seed) + data.train_mask, data.val_mask, data.test_mask = mtr, mva, mte + + model = get_model(args.arch, ds.num_features, args.hidden, ds.num_classes) + opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) + + best_val, best_state = -1.0, None + for _ in range(args.epochs): + _ = train_epoch(model, data, opt, data.train_mask) + val = eval_mask(model, data, data.val_mask) + if val > best_val: + best_val = val + best_state = 
{k: v.cpu().clone() for k, v in model.state_dict().items()} + model.load_state_dict(best_state) + + Path(args.out_dir).mkdir(parents=True, exist_ok=True) + pt = f"{args.out_dir}/{args.name}.pt" + meta = { + "arch": args.arch, "hidden": args.hidden, + "in_dim": ds.num_features, "num_classes": ds.num_classes, + "seed": args.seed, "note": "never-seen negative suspect" + } + torch.save(model.state_dict(), pt) + with open(pt.replace('.pt','.json'), 'w') as f: json.dump(meta, f) + print(f"[saved] {pt} (val_acc={best_val:.4f})") + +if __name__ == '__main__': + main() diff --git a/examples/node_class/score_suspect_nc.py b/examples/node_class/score_suspect_nc.py new file mode 100644 index 0000000..ca0005f --- /dev/null +++ b/examples/node_class/score_suspect_nc.py @@ -0,0 +1,87 @@ +import argparse, json, os, torch +import torch.nn as nn +from torch_geometric.utils import dense_to_sparse + +# Univerifier +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), nn.LeakyReLU(), + nn.Linear(128, 64), nn.LeakyReLU(), + nn.Linear(64, 32), nn.LeakyReLU(), + nn.Linear(32, 1), nn.Sigmoid(), + ) + def forward(self, x): return self.net(x) + +def load_json(p): + with open(p, "r") as f: return json.load(f) + +def edge_index_from_A(A: torch.Tensor) -> torch.Tensor: + A_bin = (A > 0.5).float() + A_sym = torch.triu(A_bin, diagonal=1); A_sym = A_sym + A_sym.t() + ei = dense_to_sparse(A_sym)[0] + if ei.numel() == 0: + n = A.size(0); ei = torch.arange(n).repeat(2,1) + return ei + +@torch.no_grad() +def build_z_nc(model: nn.Module, fps): + parts = [] + for fp in fps: + X, A, idx = fp["X"], fp["A"], fp["node_idx"] + ei = edge_index_from_A(A) + logits = model(X, ei) + parts.append(logits[idx, :].reshape(-1)) + return torch.cat(parts, dim=0) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--fingerprints_path", default="fingerprints/fingerprints_nc.pt") + ap.add_argument("--verifier_path", 
default="fingerprints/univerifier_nc.pt", + help="If missing, load 'verifier' from fingerprints pack.") + ap.add_argument("--suspect_pt", required=True) + ap.add_argument("--suspect_meta", required=False, default="") + ap.add_argument("--threshold", type=float, default=0.5) + ap.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu") + ap.add_argument("--in_dim", type=int, default=1433) + ap.add_argument("--num_classes", type=int, default=7) + args = ap.parse_args() + + device = torch.device(args.device) + pack = torch.load(args.fingerprints_path, map_location="cpu") + fps = pack["fingerprints"]; ver_in_dim = int(pack.get("ver_in_dim", 0)) + + # Build suspect NC model + from gcn_nc import get_model + meta = load_json(args.suspect_meta) if args.suspect_meta else {} + arch = meta.get("arch", "gcn") + hidden = int(meta.get("hidden", 64)) + in_dim = int(meta.get("in_dim", args.in_dim)) + num_classes = int(meta.get("num_classes", args.num_classes)) + model = get_model(arch, in_dim, hidden, num_classes).to(device) + model.load_state_dict(torch.load(args.suspect_pt, map_location="cpu")) + model.eval() + + z = build_z_nc(model, fps) + D = z.numel() + if ver_in_dim and ver_in_dim != D: + raise RuntimeError(f"Dim mismatch: verifier expects {ver_in_dim}, got {D}.") + + # Load verifier + V = FPVerifier(D).to(device) + if os.path.isfile(args.verifier_path): + V.load_state_dict(torch.load(args.verifier_path, map_location="cpu")) + src = args.verifier_path + else: + if "verifier" not in pack: raise FileNotFoundError("No verifier found.") + V.load_state_dict(pack["verifier"]); src = f"{args.fingerprints_path}:[verifier]" + V.eval() + + with torch.no_grad(): + s = float(V(z.view(1, -1).to(device)).item()) + verdict = "OWNED (positive)" if s >= args.threshold else "NOT-OWNED (negative)" + print(f"Score={s:.6f} | τ={args.threshold:.3f} -> {verdict}") + +if __name__ == "__main__": + main() diff --git a/examples/node_class/train_nc.py 
b/examples/node_class/train_nc.py new file mode 100644 index 0000000..74529ed --- /dev/null +++ b/examples/node_class/train_nc.py @@ -0,0 +1,79 @@ +import argparse, torch, random +import torch.nn.functional as F +from torch_geometric.datasets import Planetoid +from torch_geometric.utils import add_self_loops +from torch_geometric.loader import NeighborLoader +from gcn_nc import get_model + +def set_seed(seed): + random.seed(seed); + torch.manual_seed(seed); + torch.cuda.manual_seed_all(seed) + +def make_masks(num_nodes, train_p=0.7, val_p=0.1, seed=0): + g = torch.Generator().manual_seed(seed) + idx = torch.randperm(num_nodes, generator=g) + n_train = int(train_p * num_nodes) + n_val = int(val_p * num_nodes) + train_idx = idx[:n_train]; val_idx = idx[n_train:n_train+n_val]; test_idx = idx[n_train+n_val:] + train_mask = torch.zeros(num_nodes, dtype=torch.bool); train_mask[train_idx]=True + val_mask = torch.zeros(num_nodes, dtype=torch.bool); val_mask[val_idx]=True + test_mask = torch.zeros(num_nodes, dtype=torch.bool); test_mask[test_idx]=True + return train_mask, val_mask, test_mask + +def train_epoch(model, data, optimizer, train_mask): + model.train() + optimizer.zero_grad() + out = model(data.x, data.edge_index) + loss = F.cross_entropy(out[train_mask], data.y[train_mask]) + loss.backward() + optimizer.step() + return float(loss.item()) + +@torch.no_grad() +def eval_masks(model, data, mask): + model.eval() + out = model(data.x, data.edge_index) + pred = out.argmax(dim=-1) + correct = int((pred[mask] == data.y[mask]).sum()) + total = int(mask.sum()) + return correct/total if total>0 else 0.0 + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--arch', type=str, default='gcn') + ap.add_argument('--hidden', type=int, default=64) + ap.add_argument('--epochs', type=int, default=200) + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--weight_decay', type=float, default=5e-4) + ap.add_argument('--seed', type=int, default=0) + args = 
ap.parse_args() + + set_seed(args.seed) + dataset = Planetoid(root='data/cora', name='Cora') + data = dataset[0] + + train_mask, val_mask, test_mask = make_masks(data.num_nodes, 0.7, 0.1, seed=args.seed) + data.train_mask, data.val_mask, data.test_mask = train_mask, val_mask, test_mask + + model = get_model(args.arch, data.num_features, args.hidden, dataset.num_classes) + optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) + + best_val, best_state = -1, None + for epoch in range(1, args.epochs+1): + loss = train_epoch(model, data, optimizer, data.train_mask) + val_acc = eval_masks(model, data, data.val_mask) + if val_acc > best_val: + best_val, best_state = val_acc, {k:v.cpu().clone() for k,v in model.state_dict().items()} + if epoch % 20 == 0 or epoch == args.epochs: + print(f"Epoch {epoch:03d} | loss {loss:.4f} | val {val_acc:.4f}") + + model.load_state_dict(best_state) + test_acc = eval_masks(model, data, data.test_mask) + print(f"Best Val Acc: {best_val:.4f} | Test Acc: {test_acc:.4f}") + torch.save(model.state_dict(), 'models/target_model_nc.pt') + with open('models/target_meta_nc.json','w') as f: + f.write(f'{{"arch":"{args.arch}","hidden":{args.hidden},"num_classes":{dataset.num_classes}}}') + +if __name__ == '__main__': + main() diff --git a/examples/node_class/train_univerifier_nc.py b/examples/node_class/train_univerifier_nc.py new file mode 100644 index 0000000..989bb66 --- /dev/null +++ b/examples/node_class/train_univerifier_nc.py @@ -0,0 +1,98 @@ +""" +Trains the Univerifier on features built from fingerprints (MLP: [128,64,32] + LeakyReLU). +Loads X,y from generate_univerifier_dataset.py and saves weights + a tiny meta JSON. 
+""" + +import argparse, json, torch, time +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path + +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid(), + ) + def forward(self, x): + return self.net(x) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--dataset', type=str, default='fingerprints/univerifier_dataset_nc.pt') + ap.add_argument('--epochs', type=int, default=200) + ap.add_argument('--lr', type=float, default=1e-3) + ap.add_argument('--weight_decay', type=float, default=0.0) + ap.add_argument('--val_split', type=float, default=0.2) + ap.add_argument('--fingerprints_path', type=str, default='fingerprints/fingerprints_nc.pt') + ap.add_argument('--out', type=str, default='fingerprints/univerifier_nc.pt') + args = ap.parse_args() + + # Load dataset + pack = torch.load(args.dataset, map_location='cpu') + X = pack['X'].float().detach() + y = pack['y'].float().view(-1, 1).detach() + N, D = X.shape + + try: + fp_pack = torch.load(args.fingerprints_path, map_location='cpu') + ver_in_dim = int(fp_pack.get('ver_in_dim', D)) + if ver_in_dim != D: + raise RuntimeError(f'Input dim mismatch: dataset dim {D} vs ver_in_dim {ver_in_dim}') + except FileNotFoundError: + pass + + # Train/val split + n_val = max(1, int(args.val_split * N)) + perm = torch.randperm(N) + idx_tr, idx_val = perm[:-n_val], perm[-n_val:] + X_tr, y_tr = X[idx_tr], y[idx_tr] + X_val, y_val = X[idx_val], y[idx_val] + + # Model/optim + V = FPVerifier(D) + opt = torch.optim.Adam(V.parameters(), lr=args.lr, weight_decay=args.weight_decay) + + best_acc, best_state = 0.0, None + for ep in range(1, args.epochs + 1): + V.train(); opt.zero_grad() + p = V(X_tr) + loss = F.binary_cross_entropy(p, y_tr) + loss.backward(); opt.step() + + with 
torch.no_grad(): + V.eval() + pv = V(X_val) + val_loss = F.binary_cross_entropy(pv, y_val) + val_acc = ((pv >= 0.5).float() == y_val).float().mean().item() + + if val_acc > best_acc: + best_acc = val_acc + best_state = {k: v.cpu().clone() for k, v in V.state_dict().items()} + + if ep % 20 == 0 or ep == args.epochs: + print(f'Epoch {ep:03d} | train_bce {loss.item():.4f} ' + f'| val_bce {val_loss.item():.4f} | val_acc {val_acc:.4f}') + + # Save best + if best_state is None: + best_state = V.state_dict() + Path('fingerprints').mkdir(exist_ok=True, parents=True) + torch.save(best_state, args.out) + with open(args.out.replace('.pt', '_meta.json'), 'w') as f: + json.dump({'in_dim': D, 'hidden': [128, 64, 32], 'act': 'LeakyReLU'}, f) + print(f'Saved {args.out} | Best Val Acc {best_acc:.4f} | Input dim D={D}') + +if __name__ == '__main__': + start_time = time.time() + main() + end_time = time.time() + print("time taken: ", (end_time-start_time)/60 ) + diff --git a/examples/node_class/train_unrelated_nc.py b/examples/node_class/train_unrelated_nc.py new file mode 100644 index 0000000..157b72b --- /dev/null +++ b/examples/node_class/train_unrelated_nc.py @@ -0,0 +1,88 @@ + +""" +Negative models: different random seeds and/or architectures trained from scratch on the same train split. 
+""" +import argparse, torch, random, json +from pathlib import Path +import torch.nn.functional as F +from torch_geometric.datasets import Planetoid +from gcn_nc import get_model + +def set_seed(seed): + random.seed(seed); torch.manual_seed(seed); torch.cuda.manual_seed_all(seed) + +def make_masks(num_nodes, train_p=0.7, val_p=0.1, seed=0): + g = torch.Generator().manual_seed(seed) + idx = torch.randperm(num_nodes, generator=g) + n_train = int(train_p * num_nodes) + n_val = int(val_p * num_nodes) + train_idx = idx[:n_train]; val_idx = idx[n_train:n_train+n_val]; test_idx = idx[n_train+n_val:] + train_mask = torch.zeros(num_nodes, dtype=torch.bool); train_mask[train_idx]=True + val_mask = torch.zeros(num_nodes, dtype=torch.bool); val_mask[val_idx]=True + test_mask = torch.zeros(num_nodes, dtype=torch.bool); test_mask[test_idx]=True + return train_mask, val_mask, test_mask + +def train_epoch(model, data, optimizer, mask): + model.train(); optimizer.zero_grad() + out = model(data.x, data.edge_index) + loss = F.cross_entropy(out[mask], data.y[mask]) + loss.backward(); optimizer.step() + return float(loss.item()) + +@torch.no_grad() +def eval_mask(model, data, mask): + model.eval(); out = model(data.x, data.edge_index) + pred = out.argmax(dim=-1) + return float((pred[mask]==data.y[mask]).float().mean()) + +def save_model(model, path, meta): + path = Path(path) + path.parent.mkdir(parents=True, exist_ok=True) # <-- ensure folder exists + torch.save(model.state_dict(), str(path)) + with open(str(path).replace('.pt', '.json'), 'w') as f: + json.dump(meta, f) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--count', type=int, default=50) + ap.add_argument('--archs', type=str, default='gcn,sage') + ap.add_argument('--epochs', type=int, default=200) + ap.add_argument('--lr', type=float, default=0.01) + ap.add_argument('--wd', type=float, default=5e-4) + ap.add_argument('--seed', type=int, default=123) + ap.add_argument('--out_dir', type=str, 
default='models/negatives') # <-- where to save + args = ap.parse_args() + + dataset = Planetoid(root='data/cora', name='Cora') + data = dataset[0] + + saved = [] + arch_list = args.archs.split(',') + + for i in range(args.count): + seed_i = args.seed + i + set_seed(seed_i) + train_mask, val_mask, test_mask = make_masks(data.num_nodes, 0.7, 0.1, seed=seed_i) + data.train_mask, data.val_mask, data.test_mask = train_mask, val_mask, test_mask + + arch = arch_list[i % len(arch_list)] + model = get_model(arch, data.num_features, 64, dataset.num_classes) + opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) + + best_val, best_state = -1, None + for ep in range(args.epochs): + loss = train_epoch(model, data, opt, data.train_mask) + val = eval_mask(model, data, data.val_mask) + if val > best_val: + best_val, best_state = val, {k:v.cpu().clone() for k,v in model.state_dict().items()} + model.load_state_dict(best_state) + + out_path = Path(args.out_dir) / f"negative_nc_{i:03d}.pt" + meta = {"arch": arch, "hidden": 64, "num_classes": dataset.num_classes, "seed": seed_i} + save_model(model, out_path, meta) + + saved.append(str(out_path)) + print(f"Saved negative {i} arch={arch} val={best_val:.4f} -> {out_path}") + +if __name__ == '__main__': + main() diff --git a/examples/plots/citeseer_lp_aruc.png b/examples/plots/citeseer_lp_aruc.png new file mode 100644 index 0000000000000000000000000000000000000000..f7f2e741ffe4e0682762313b7bf82aef72148c52 GIT binary patch literal 73936 zcmc$`gp3;!Vez^iwd&6 z^!9f5k`fVd`M=)~cJp))F~h%<2Hu6-UER!!fZ%S{)fb`0>j(q^ApwDg@{^~2*_$(D z8MjBrNq5CDj9Sqo#!~9HJEC~wVklz5KZMggdGhW4r!S#w{QSHeY%y_h&k2=jPU63u z@aZ_xE4PJvr_a`nbIy5j+iUow%s>q7xnUuLkMw+aQqVeJ@yzV(fO51`t@#t>|1LT< zny{&dH2+=j=LB&v|6SMO2zfdFyC`@`Km6}L?}xjg|GWPw)b{`17lbdJAJ0XjA%^t0 zEX6_5aZ97KdrejL{qGkxcyzPxBE%+lDuT~XpERJ{9YR`{P2E+jeeq(RGJiX*d}p3b zm!IR0ooASUk3;cqKJ1GbtIT~g6{ zk6LDzV5L7RI)aSaX-t=6=s1A9;piu$8yy)Q0>dq*w=}j8r!c$i)OY;+`APNNy_a*X zvMfOd24~n_5o7k4k_cZV-pfoN54Kx9n5(W 
zLCTl+aBXev*KNn+zrR0wd?#Vxbx9lS{P~ViU@%Xfs3&2t1R4L{ryCc)D7Uodr@6h8 z8?bGVo1&S@&$mi>N5ELQH%%yYKSl&~Hneec%iwx7Esa6uY%t!%d8YAo88&SVJ@oaN zz{YA`h_#da{z|{Vtm=hNfpv?2C+6j!ahs6ZI@g)0#V-EdMEQ#^8Wx9NfZ2k!2G50D z2JIaV%Is_FPeD-fl6Aa(gGeUI^6m}SIn{({g}tK|$nO02PRtGlDe`=aqvY@hrHHpb zwaZjZ@%s2!%5r(rcGb%2YQWiP!2aygNyEx^hHs7Uj#l*%==Od`i z6Io=PMyF~VhY?4JQEM1WZ$NoVkadfhHh0SF%ZqdCnej9si&8ioUcjOT8_1P)ZdohL zl5rYOHL-S-4LtcqJ$RiaGLE;5=()18e`-$7yQZC1My)hKK5LU|dmYP=^TYb3WG($N zb5ZC_rCrzV{-FF~mRsw0bP5mfDK;yUrj2?A*#&s!IqP{!F$t9z7o&%_m}ST=Lf@j z6Ll^Y#ij3SoyR``vuVAvgImor0Y`0O_xC;i{$dxjsH2iU+l{EwwKhR7obR?%e=Mj# zUEDiAJv5SIjD$>CdUW3TJ5qdmeXOh`(7NX}*-fT4QB*`^ zaLa{)lClig{;jnhi$8l@2C^j=56_)4313e_)9=jrt`6js@#y6Wdf~S)NDEfj5)GGg zoJ?=%(6vV&!h5q`B`FL^WhGlO$_4uEZGgT1{#rl*Gx24;R!AUz8(4gg9>vLKShYP! z-+8>eR#i=n!7ZR)beuPj9STf%NU2g3l|fa(QnjkUP(nsV#sbx6Jvm(dh**=)R+Vvu zb<4?~kY)V?UYC^p1Ox)HfWKP0md0}^yM(U%yP8D8jWY8rxrS%6QoJg)1z#9&_R z%LY<3r2cNj?j(DLQ?w7(07vs1%IPLLQ1V$%yCqF zTAx+l?QS>iEC3;3};kB);YZYSeF5^~$ui5WBAo` zA74yttU>FNWiW2pZza>geW4>3Or-CFE%?wppMr#38rDA>+J{Qy)#&J^LAztKPwegO z{hLK>SRBx1-J2B=RI%_M>z-ADO4Q4Z3RK6t5PF%e%!AEpc$o!K7u3kn6-)eNc~A3C z0LXB$LPV01UQj<*t6`4f4nJ=@bi`Be<*UmddqDLOP0jSKva$aXq-L?wz}!^VI2J6Y;S6n%W>>M^aG{dei0)5q5S zYg|ypYXPi?;F)qOX&GS8scUKN`}?TEs5}L`J%sRxRg>IRxuDZ|V6l~XliFI(TU_Vc z!g?Gc=`i##7>p3au_{rz18k3Q`uy(3^B}WpqC3s|3D}5t;)zdAztTe_a_r`}E4x^{ zn#zbLA-o@x*w-1wU8(%=OP1}G)V=tKBBdQj07#Q(=Usc$js5jtXiSdPaG_4`^w^$1 zs8C(`h3*EOAO}ZF=7SfC9*rkJFuB#53I^Z|EQRGEYu#JU+-?VglpL!knWuxydggQyuYvS zz=8&QUoC-G>4hxJ#DMpDC;dlvvGF>%-vdXE1@^}T7d!|&pPOaIyaV4Mz@yhWQhechp$R>Xr`50 zHIW8gw>CCT6J=-r=$8dl)Cng}o1nwgVO*%Vl3-T?Gvlv1W(oHXC)LlQNE;bRS9@;j z$$XsFwPpm_nqq4O-LB_SfeyV*XWoJy#UYtRoe*6voS%Mma4ab+b4|pV2WV3hvB&tB2k*oSucM^NghRCFFmbja7p;;k?;_PIvXD<1pSFaFq{%c9O zA$TXh0ze!ZFxO=U7@x42LB`&b$*m(_{*F-csD&q+L!)v zOUJ`01bz&aWMQ5|zEh4G!N8Kqg)r)@f-sCi(jY;=(PzkGVCR@5Hy^j2u9tMGY(l?$ zMnb}A1AgX0lzg_PMwicf@rwWw2CB%;V6vn3RXAV2O9RjCuBc5Irm3(%4!Gfk1&B0V z{>=N%1tI6k1u$o2IZ$C>x+8bx 
zgPV=g5}K&eMV-e?Q<9SrTlYkrAVU276cdYrnPD99bnlMGYxXY&LaXF`0sJJKCyuY= z2SK(AFMRqQdvH!lCDXh>U96W>7=sPYzCX-TOykyhZ%WM~W9lT0zW9c+u4)E87>g`V za`D2tWM#^tG58t;LoZ}*dU=6&OUGzrE&1ag?f}1>Tsc?4R8&5DJ3n#SZ}A<}{x0l2 zAy?3E55>@%%u@BTvyqS?b>obR9j;}lLP0?RUu4`Crg`@VrSLJ3A$I8GkFnXVn7aOD zAFfB)*18Iu@EpvMUC$N;ZuJ#l0XO%_oHI1b;%kEdg|3X_${ErTX8RZuMtwr&r&h*N)-IPz#`&PKA> z`#5sw_ugVxe6-cowu9cJxeeqn`@Qqj>=)g0Kw2iyY|Y2Tqlolr>)|3)+Xny&^TXU= z6!T*Wl>%UOEr2vS$I-z(@BjR^NU*VAKH+mV9TuMpG-@tu9w<+^)3vI6%@_m%L1!-u z(yh&z9wfiuhfX$lIe0u+2qRu0RZck40@eVisKRha0P&j9qf3V^r?6Np;5|AbykCfLI>QRhuzo5A@+Y(=qC+TJ1;F28rEeH=lEHfhLa!+dKu~Dtlv&1>nR{KY&u{?Q) z6qcI+O0C#(vJl^M-V~tA^-@S)5FuV7OcO~>&(6caZ}4pbPKO^A0P)+he!B0t1@0&o z1b67f;nT9e|J02N>VCkjQQ*tTcLAPW1&Wxl{WXNyPX3p_7H%yCRCylaPrKwc6{7|a zChk6Mp@f|n#H%b6Ytd(5>+-e-A84{v&`tQPo^Dk5-dk9VwQBO=Yi|lZKOA2eSF>N? zcaBoCQUADCWv_|odY)ITWxa}}I0jz&lrPzOEa^VzCnV3Mr1{Sq@S8!on!=?$4w4;c zeO%I~H8AHmv!rkVZ+*qeV4lqxl8Rk$qmnd*%B0pSd$i{cXv%(P5G*(-htHIT-tkFe zPc-D5l+$({%$408{nH+OUiGdBa{VivD+5?7lj^e|Y;z)-viDcJN<2NR*$w8(6yK{Q zpT<5qR53g}oa%D#@yicXkw=BQzHD+vhi9<$ghgVI;p0}B1PlZMAuc$n?`z#a(ZjH`*gt|~svI}L$Vol0>1%G4f7RD~e>cMPDqwR~O286jfRz^)(nb(FQ-&l_g zU24A!XG6%caTDfMS*K&IA9XFN*uTLp`BjEIoI|?hff+Mp|2k01kr$Icon`PNuR}9P z+c>XGpr#KuCvW?+ozJnH!0(wuN$6C{%xe)YoF9aq+%Jy2UyHJYpBe)-Y;VKEnkBR4 zn+%vJ57k}j=Ci&22JIi9!%Qx${$bQ)s0&J}zP6Z4toK&c9^oyX^^!Mbuui1)vnW%&=O=z9Yrl9&GomSx+-a689Ev9%;zx1jq%NWqgy? zvx!;*x3rc2L|#coWQM<{;MvNP)s6>0J-!||mB~1ZA zcQ@@72%oMX`4O?n#W%H4?gEZ%7IC3mS{zXVwj#oWcWTeX4Hs@ek4t7Fq~)6sBBV#Z zOi`>_mV{wB#a>P5uZYo(cy?8`weSpkr>DaVXPsC4n)`BPEzFu_?KcT20#7LW@jh6;rG|^WF4okyqG--lIl9EV^hVL9`;X!p4(1QNF9r4=*v|&Q$d|{iCl_E?I!; zXC5W^8SHIQHmrF%aU|AFwPmarQ`;RJ7*S$l_? 
zeBHP(8sWXzh%n*@TaVse02~Rr-BoF|9c7$;%(AgwDQRD^hT9)u?%K2_|C!$9k)=Q- ziLgUI{H$x=B$!CdS)`X|Ei*l8^JD&1E!8O+tfL(zgTJS|kpAveboN!i#(io0bg?T@U;{$o-(@qwjPco> zf074&SL2`k@vdd+DeIL+5-ynBz>OMfVx(OXnVg)oc3E_u@7(h<`}RBxTVNyf-<g#gKPS9qyDCKc`$ZiPJ02272OhW%pal^x(6sV0>@&seF#=o zP6gh<^L&jIOIHV0QX$AhX^=EJBlW$l5B5I7(k%ofBx0d2cgdxt71fc|Z_g#cpJ|{> zmwP4T@;`}l#}Mgbm;a!7@&tWjV(6Jd%+lEO9%!V623DQ`Du&g~tmL{$J^P>=3XV7l zrFRhZEA^)*-4N8n`P!cII#=hw3|wUSi0WpoQc}-i2I!AQQ0{YPk%ZTwGU!=ek=~9J zg~Y(vbiwj}h?CXf1FFG2(Y=F*J?^X~$no22WJe2gVY{fybG_Gf(NrXek#&-#5ftk$YJU%H!;iLnNjF?1%c&-QP>7+5LKI)S09F7WdFc=@IdcBwK= zt@Rrv%teOp!*=3vGFDwS9T0o@LbMd1m^v4z>rVYUV%BSWD%01QB1jBkrFrC zy_lQ|(iZiVGN1jnx*^nMXBG7{orz*g<9CS3>%yfVRqj&vSBX5ehy%sb&q02f+0+P8 z%FhrG`c*{2PVEI}JAR3bxamcE)Dy~tykl_3aLV}bURNmAiILVI=1Sq^)00213A8gx z%g^WB0Ay;}Ru@;-^4BN3f5xgE2BpgK#NDj-g(&3K9IAub#50XPcCT6ywbLFp{^AO1l{qXf&#O}g$2Y0q=|^^<`>s2 zdDfyU&88=ad1QT}(r&NS3I~WC@fGPl2xMXq{wNg|lDjz=9Ms}}m_6n;@$LKj>}<=x zbFV>Jhr{&m5o8QICf3z+!TA2II^es@ULAm8E9vR!PY<$91?io$#9ZQ#HUK%-ODq7O zPAA^~c*b|emFN7~l-r!~l;!LmPlb)f6?fe`1R$#5aZnqx+pLymt9^Oy;zPTibT%RQ zEo6E_1t7=N_+bD80ruMRD{SCEeRd!43J_vl9UZ26Ia14et7-+BsR&G_HusgBr{!{g zFjIU`4bURvk$GY+?|WSUmA0(bX_S9Bj)n zc*jlpc^g6c`E<_fjWIJD*z%ncW73;Urqf6G9n{>G51_mX&z1uN=?Q?#w_H&*eEKn0 zh?k5S{Funs)6=sQ5K%iZ19|eOzVnN@%k#Y9)v#>&TLve#q;xcJN^os;P#p(IjLqy0 zxYOM%GHZEUulOD3q0*JO|ICc+Y8EamqO=hzV9HI=dz`hc0ms{#X{u&`cvS#t>VD;u zAm%caKjQ7b*U8Y!T*oQ=1y zZ?`~&9FM|NRZi<6CEZksT#|A;iE^i341Xax(F92!4a-?RBlExDq3KqIXF|2sOMW8G zchl)&eMMz{WgXM%$(-%Bch+Vv;cCVY`81-0v=&Q2oF44C zf9P|1v{iB@l*hi)iZ35@f%ow=0opveSS~$hq|JXJmM8nv)(jK`mH}*^ZO`{xBUbiW z{qIHhfILZeyafzN9T#{G6t~`2$dP+jv>+994a#j?)*bM<#dQ?$1k7X(rr{%>lI3i9 z^31lQ3emVoOP+YUpZ2h3AvqS_WcF%_MwAXUys zzY*SmY770k0)(+SUx%;~*u3VY!C9w9dHULSlTuRy%NWxrTwms+F6f?FaX-7wgQdOh za~psL_AI7|Y38uN5(<94%dYQDqIS%a$phDVXBS zF2%=16+yyj+vrOEN8AqM8cbu$Ul?u+xuP>E07aTx<&?>UmIt 
z4Yyi`ixgB*uZ0+rIRRz9XIC6XRi%6SS%w>D%R{Kl_g7%Y09*f$J~MI*XUmcCft_jx7$6G;7SAll1s0wI*t2I2D>E5^Fo!D?p0(XyCBeQ&Z}5Kig4rOv?&D|ja85pSX2!$oUHxzXdr&$Xf<1+=!EpEO8ua&;a!oouOqNH@BrQe^(vCH$FkbxzVT;nISu=^$Viz)&l0Fxk;J%vNs;6$disINj$xP!RcUCX{(Ysm^b;V zPB-JhlLkG#BeRc+BpXLtn3?Ut@h(0I;))vi%d_FN!*uriRw`BxrU5P&CRu;)%!*OK zv9;6D)AMbTcgsylZl81-`SR!Y9*2?=p#N`p%7ZPtJ?h;-F!TmJrQHr-%H!ScE1f|mY6VD0NXFpz)%edpHZST%T?h1)oUzhU!9><^VBB^s zTXmSYXr44=x4wwuAfSPinWwx61!q^49`)drVeOO5i4MLyx4CZId;H}@45&1DqPJuK z75AjU_F_$V4Qk68Pv`Vn!4q$UP@>C?|- z6~M}r=bhw9e#qU`c)-YaDwt>hwmdtuHe|^>vZ`W&@B&_R2|`;C9h$Ui1TN*YP(5v0 zeD&s!WuRpOrRb#AkjQ4cJTwICjb;E-nS=i>wOCn;pl^An)^aNKTL)~3zGad$T)Xt7 z>Z!ax`8D|+XM54o#Ky0W5ksozi(}=gRql;?ti@+By?BK*}oT^{#EzFzn6hRye{pjErOW+) zr;lUkGjAE=^T*NDtj59{BgKn8j9nY~Y%rLikx_56d?vI4fcw_S)c;y(ehdT2Bo#dj zS$Hh_oS;X`%*?C@s;C%_abc=RncNFx7hUKFXIY!yI~aJMaa{|NZ*(q%zIGeVt0pmIJbR&;Rt^Y155XCN*g7_uwW4 zWY{@hAcpO-S1!`BVnq}ZVjDuSD%L$$C4#?fZH?O0+tm9r7Ik(OxO|$CaTkU9=MJ=% zT}027XH~&ddUdCaJ7s#{qGgn#WE9s_Iw|N<#UUp*1ZC{ApG0l>4B)GR$P!|by6l>) zo`jK1PSUt%O9``o50AU^2Mbfz6aOwb29Zu2rX>B~Jr6XJ2uVxx3*Fd8{(?VOeOB^} zh+F5{c7q!K0NGA3`r8w`Oo`B#<}Nwqd=@-4+-)LSUEsz&0{n3a>RgGMumZJx+Bhgr z=6`IA+6li_taIbBv+-#o87uLu-{SP;-d^z+Lz2et1c>yd{v81CKX2t2R#agl)0*6+wYA7Fa^lj?(=+SZg}X5WHDKQqB+j)xJNly95 z$Y4oOYEDvWNiK8ByE`a9RlAIw3w16hbVV`OZ;4^NR1}tb+EPt4yDc`-Ze8tmxF(M- zL6#VM~4}Nydt$Pe_xaK-R?+WmY#OCL+Sp6h#f0+|CUR5 zX0oK-y2d?!y5Ry+uV^7-VVOsp$!wBRe+BpcY}q3V9q za@v(jWN+^CS!)1;{Qnk?f=;rAC7{nZ5uMtfRgh7CdY*oV^zWFCebcdU?Tr985onPxv`1!%I@9MS&BsRo{bY84f!#CbI}0na**4On=>H@ z$syyQcRfeFB5lu12+vOmjS2~X9x?Y`b54%KiRhRSyGwSgj;$@FIxV7zNXWd_Q{K|~ z*meFF`&_VZIt9b_mveIt6iBPdqVDO;V?)PDu*Fdt)jA(~#$$=v!^7|^*^`e*)9_aF zMF+zYv|Lb4QZacXL^ucWiaA>GPbD@1o9A5KKnk?Dr~n3XWnssMQ2+=n0x+@*AlK1A zJ`zAOOL>0*i~b#yzkCa{l(7SSKY&Rq2S9dOL>DNHGx`AI=CX57Pu`_BFTYC2^17hS zO_@Ww-IK*q>VGn2BWKYO)}p%Uqt$jGHn!|>sQ~!V#KGD+hBGB4{Z8BZwAp!UMeCHY zfQH+l|Bo=HN9EV_>F#oui2kxGtT93b2QejqFg*|5W@V;dk6W>je0W1#3sALqz(vr; 
z$xBJy^YHM{G)A=S)=!r^4j0PFM?HlV8T}l**w}AXoZ4HglhM3}#fCZ#ha%^E&}T^e7Mp+4zgtvwO&{!Q$VQ~!nrkRx zO?9wlll;ON|bl{RQA*E?04Nk=}y6JCE-6^6cR+D!NJZ>_;4xev)LaW^Po^rOtce?cj|O z^66GP+iRGk_o6P84P6{dPdE=VM%+^mm*S|9DO|s_*>gWC5)KvK*gZdPE6SUa`)%28 zz(3wg`$Oisv}8)2>Cbo7=i2l?!X!EL(E45+hS68g>wiB9w*Qd|p+%3Iq$PaTPvClU zaI0DW0}>$#=EZM@rSM=b3bE+y(u!7!gG~^&oh}xreFq9()w}y3K8D%&g7j(XJ$?<^ zS*U4h(?@&5T?-+QhJ`VBvCd`vSg6uT@^FcdC(>UgjF2~H7(=K;%0n~`UHN%w@5D)M z>6^2qt*)nnDKW0iA>0#;1m4BS`w*G=`@Q_3hAqEPGVBCrE=U)9A{yngH- z)9r5fnqChmw*+Kw`(}Mlc3%Kl>LcXb-$Ea{VO(aR=#&%Ey5;bL(QTxWi*5m;YG%;j z3BAf>5}WdL<@pyEo0DynBK75K2d5itO4JRlTSr@UKZZxY0{H{?6`**)Fn;L}#O*<`j z)=-G!CD@b?-CfG-4gemR(2Dyh*457%@};Q-qA+>&6c(){0)+Jg0w{sM)N|gB?m?}p zH{bY^7t7B6eD6aTzMS7~a%rdIx(g)+vZ?fN3W(aRCAA&f%(YX2ZS-1ELOM<2Z_!+B znFGz>xwIZC==p#U>$`3HyS;Yc74<1`|n> z51`yKHvF2&54&GQV~qe3T*vsqHPtZYxpC=B0VK>PtVX|qp&*yHLe1V+pe)(bigH)^hY#x>ZFH-@xt52&WpLN-lAIa* zbZdm~HBv{@Q$Ewxr)~?$WrH~M;38=|-mbkvM1ACDV%2nQxCn1tm>d+-eQc_jpD>^S z3I!7+By$>y+`mu6UaZA1kDPn;otCfu)H#lxIW1brbl(+<3eQ>Ry*D<)So_i7(= z0WW>oN&7a)n2Gxn)x~n8x)uPwRl4I6>XvKecSBtUY>TgJ&Q5+K%y>-R-FL7Pyv*ak z=gK}OW#30o-a@Z>XDBF!w{0Tit=nJy=YJIMrZo{HWh=hDOK1rzz+=*$T{gW?dcI~j zj)YI?U!0!)kW;2JuOtqtjf&|W{IpCZFoL#gKCvycc%f}#Nl0}MBA_7WmV0@OvLpM| z@aR{Q_yc>Yq>R62M~JCEA%wT+frlwfgaitt32C7v?@y=xP4Ntd_Ww0z0ez%pwD3Rw z`@8LvO0r65lE>n^Iw~$3Tj^8*q85Mld7%6$QkIkgg0!EO2H|V~^+XlP%Q?9R$L#XA zZT$IueNw?`y}PGSq}wKfA}$nCR+a+9(Ka3N$#rgxzF7|qEw2!17)G$6MLfPw{*NZ6 zu5I_vb#k5e1U7+$rIu?JyRk9qLiP$E1p@>4m<}7bRQ^I>Xr>wF1q8Q058)@)V1lDf z_}8NKBX)VXR;mdf{X#D>t@GH#&j;NT)Wu8!Px#*b3+@&rGu@4y*!wdVEx3)?y4ANX z&j;!6rgWIOZQ!hZGVvR(4V}ma-5%#&3pYX%5`~$@x;fLgY5E;<%1Z%)PxRV%4~A(u zJ`u#gLFkxfKEU8Mf*NS8%c2LD@2g!aF)5i#B(9!?k)15vp3ig$&6SlcS>b;~g4<&x z{kL!}rvK z)v5H_C?=+x5xd7PpP6_M+WVCvjp9xyeg_$pm4b?mIa63^OhTr*p=$f7RukU7Y{k2d zk84B??+=!^wSSMOyW1p?JX7tMavC(}!}tvndy2VDvu5yhV1c~v>O24iG1_?jce@z% zVFk)xECktCAPc3~9oAdlF|j`gz(JpXzxEspnIDjzG1=nc;O{Cns%wL5uY>wNdbmsx z98WLGWWA?r^;wPO3AO7Qnak^2S5;OH>m)!BKG1ll-tr2kNX_?P{rje!au^$lRDNzs 
z5#(nxZr`PP1&4K6|7$Q`-V`c6OJEHnc{!p!>Iz8Wh~77Eh)>>8(Tsd6zhm(2F7A=K zW>U|u`n|;n>Br?x&n)^2UbHut)Y82LP4b?+osNzeiw-CYLUoSu|K9S z&he>$&28Z{L&q_wUYGpVrs2>SgZ#EOtTm{zeIs+G!cJ{e{Mc@y_`q9xP>vF4batpM zi=pcNlJ^8m{Ue5^4zw1o60Son^E|+US712yN4x>Bh`F}p22%Wr1NZbv?^Zijn!5j6 zh#=}+7rku7X4QpbI_!q>7f>LNJXT$XPliJ#tW-&<7JCH3G9QJ-3lBANDPKPxWWyOh z@OuC>4s9bFb`?K_gdE=wX!0fmaAQ!8_T%-hx+#c~rVIZ$`-soKATZI_Hpuo#t;2!Xk6c5kF;?GHbLdNC$*5D6OH*0G#F_FW|S$+kYeM_H`*p#+8aYLiO z>KSZmUO#h-`U%hupM$F^s=0dL({_sn{^abe+J&k=QQm1Lc~>9RT1HVtSN_}JtMpnP z!YttM*?fv#=wjDkS%vY9iH70xEr^(e9_{@ZkTkvD5`1W*U`e;y-gDa#Zv#w}+;#2j z?vEbI%Bm>PB6|@a37&p)1h7Ut(#TOvM^v)=;(i zP)?QewPt?kJMBAScBJn8qUOjL?+Xy8JfVo$HJJ+u_rLmUn6%^DmBb~1D2Bu_d-6@<%cic}hbc)PQdb zMfX3(^x;Qd%}rZuQdON4M^ta$KfZH6&JYBb1TQ+nq>_G1OiYka)r%+sjU zf?<5D@R(H(<&X4QSFlI>Y%Ny3RvAst#u0SHj#JqN0!J?B z6%A?EH3?M?GfS_PZp*IkvEAzHeL(gaF2WSvqEnk0X+kRL+O{eCckN4D0fz?!SDauf*=^RU>Bl$H8`rj* z2GcSRwvgW&2CbSx8C?ndkBF^doyNpgJ5M36WO4Z}id{92-#jixK8D88#UGxDfq$d` zdFv`BLqC1uD9ciSmOwtDCo0W5Gd_ta?s)jjs*kCYD&nwr)oh2R~{ z@)D%?vwJ`gP(J!H6{2rGaiNF5*nRX**+(SUua4?z#l&3u(3Z{@9upJP&6vCr+$*Gj zbgf$-%1vm&jwCSGeGDc%d9lt=(|NtGgW8>yTtiI-InMk<=qzCjI;YXvM|3`CsJJx+ zW#~xrG}q8rjVgjIR`pKX@PO2S&7b$QZ2h9Kq+&o}YYO+pvLSBxG~=$tFAdrWZ{yqu zb(0g&KdIdrmeQP@W_a1#d2wvR;NT3*GM)9s*=@fAgZYw*v#O@sU!pwx2zTaLObw0% zCF6fF1ntH4Zm_gJ)o!=@ieV@GqR(m%^d!-3?-8H(bpiiVR)#u{QJrA5=Ho>|xpkiI z6y7q(2%~u!K9dpuXPGqbS|Tc%mWC!$>gMZq15t`-(cj|HWm_LTvVL%UlouS@XnAoi%W;joo{ z8j6V!AoPGToP|Kp6ri;9Up5E zmD+$q{+9Ehg6#;s(XW{gUnmNn9ttQZhpH}SV(L^@zCMe=9T;(HXUAMOkuOJUhhtS9 z3Ew0%DNf#s^sp>1!zLe>jmNmu1r8DO=wAV{kV1Y{GVto z8}<~9T1UGd+cT>i+6D&3RIIu2zd!H8vza=Kev}G!3}%GYPgcL^NnhK0%xUA!qfNjP zJRv5_41nbr^-(jKCmV4oSBtqs$QKE=iz+*+hsZVH$sYnd9yl9TkTyN-t(~1=&^%K+ zPXGM;)LZ5<(AqshkAsiAVeu#Xc^9jJ`>@Whr>dj?qgU7%Yy0|ast%x5yl*_N=Mn<$ z`ab~(wCETMmpm>0^lt#FXPN6H%9;D0=Y{wiyUEDQvu0*y3Ri6W`u?VAJMR18IKxM+ zbdLUwAdB^JwXri?jCm71IqeIj<&dEoS|QG9+);pJ5HNzWpOz#qic#(=R22hCa;l{KX^}Xu45&3p516ag%)j z4*QvlM|LMoM4#D61a@4X$E}7rmsj#nY69mgbmhfd=2&8NbNJJmMEIEGf&+z5kEt#H 
z>?Wnb!%iHBZym2>JvbL1ZPFvXNnEH+=rk^D-TCic;g}mKx3VBtSSug}lzyoqrSNuH7&QcLZ68ZuJsXGCWT@T90k7n2nVQ#Al}?I0>r@DRwz4w;l>AYNpgeeGejwY z!&xDC25a@Mwy?FBgal*yCfNXv-M^pjSk4xz#B)$FrX_OgNXX31aUfk)4jNyx^!G4$ z%oErG5{+)YmD{gx?7QnF3WK@1qbXO|Fv9N|kv^%!^Cc4l{D}hxb^z$%NbPNWy>W%A zlx$h~^EgFXj&p_fmOID?-GIOWVd`?8L{c!xJ1E0Z=TZLzGOQZo;wkOjHtCT2q->*{ z(g_ozNFpB*6KDhosLHCU>)ZsL9808aciG|o+xFoc9D{HCS(ERLum)eR&k8IH4zhTT zw)#1IC9aHhr*rOXAOE1FdbDHDPY23zh>B>mmQ4|AoRVQs_I_W#VSQBS?`R1@&?#M&yB{xlK=rIIGbT%vvQS36n##D$ zsYFgbP~sK{v`J43GW-%uN)ze)f69llGQ_*Y1OrZ=hRRAhVd}2W1!w)iHr`j$emVNp zTUDz-lm0CzC8BAYUxCi6zg@HAdxW~*3aF#h^iS~Tv9d7GBgnmu=VI?-Vt(jffp!@o zU|~3PT&9Qxtv4}pK;y%Cz8#=OswGZcTx^IuALSoBcCO5Tr=~7r8?%|vwvr5?aWUU) zcjsk*=R|*fhQnJ+RE?Cym%{dM)dtJb{!CB-Hf(|TR4en}5%QNK-PPK>ATlX$UU&SK z;7h>5iuy^#OQb9q9~b9761<|VY(056hYpFS7pifkfU}c)rf%84`QTNC8$RiUHyT}D z;XG-`qBe=FF#R2VPgHYIaJ(5tGYji_eIxlck9PfXnr4rFBc(9!58wdTVmM{r*C{>r zFJG>kRkTD+d9AR0E%nIFZCvNKSb2)6yLibAXTQRB;#cR!;^MBFVhkPtezwz9y5IL@ zO`dD1s;-XT@Q>Ntyh{)(w{;B2|2?m~%QO0EGd*)0X_;L*cmFi_o$bjW2?c$TIvVgz zq4DzEp{%DA#k8^40ymJySqA;j^8c z$Y9;AlEPi^C@ekI6nyBll2aA zjPEi}9)iH2oc0ZHi#K+A z!vh!Z>(9FHwPvVX_D`aW;FE5e#wasMM`cvB@&`pyt)(>T(^I++%7!^U0C+&&RsE#V zg2fIE4gIr_lksFcTwIPbbf3O$DIL#IH{-bV%iiD7lgo^>aFdOoxI>Fzqch;Gb~x?q z>MEI48(HaBzx#;P05}7+F69j2)HZx}dAbqg!~?h9yIL`zwJQt_8PXx88$6?G+tg|y z`1HVczKsyd&f#+UYX?Q-hH}B*iY@u8*ch#)QvBb9SBnR&)9MhUsuS zsGLC3fETE9$yFzU9M?#QNZEoGRp|hQaQIo_TdEHMzVqEQb34)M4*x7~CGpMg!;&p` zZUBBDb|mf&kN^iZ%xn2OeCMDXLx&xn%1;wwBFNytLBW`$1*Zhz24d4{QuPBDRSjVM zn6K9FuK*K3$duVXejxDMtQ^{aj!BZ{#f($~f!3EO7hVgoD$WbOMoN{mM{h0)1oIbd zDg{tz!O{c?2Zhu5jZf}{b<{0b3cxxV*l6?}RExfu4NLd|uACZMqWcQ(Mw0M**68y<753^*!JI1@5UM9pS9C)d%RwOTVPWlIcnT4Ul;ZiDUu zEJ;xyt&L;pf3K zjd3CujMEqp6+^~X$?mG5Q(0NAdXU%#eDbuZ4#XOg-8p~T;DfRX`$mqh{zz59u#=IG z7=V1RWlGhBKxLG)1-MdNoawhz^|Fu)oSIn-uX~WXT;b^viO;6Hbza8H@1Io6zC4RZ zkJ05WF|2+#ThGpZ+ZRdsLPNvDI^^V=(Dl?z(23YB5`zCe2w>euo@JHK&ichFmI0@~ zZtaem3~X?4l=>ad2-dtp^Znrq%A&g;44L zrH`jW)ba$(QW6ybxMe{qX7dlvQeu7>AB@}Fcbr$q&>RT#zC5p++|%YRdk<`<7c6?u 
zfs6-8uZVQAQVX)cy+{|_w)FTgzRocv65|T|kpZBuxPeNJ#xeh!7Q+%C_pJ%6^buMI z=)`jaFXQ_sZ^fI^f^?pQZE3k{Yrh5L2{7WmTIfavna{QiQrEtHwrTtbnJu@pynJOD zMMg#fM-#8-NR2A)%#vh?36V)<9j2?_9uimf_V)g@BmH7pqN4>@I4o6yCLVUkBW&>} zX^~ZS0+KXu%>ucBxp-(#m&c&;`qpfVMZ+sITOT$KPd)CzMx6cXe9AX-q3)g@gNw9A zF>EKByu1xM{HuuweU@0kXIA-x#)D*tVoHlSF97vy!;+%u-`hGOAF= z{(JLPz2URm9Bx+$29R@~B7%R&0ZfB{J7JW^u*|kIu18Th7w(THA|;T&v^i z-4&_V+2gG8H)~vPH1CF&O)$cBfx+_D*pmO#=>b>l>D10L_CFuibFk}8t2V6@)FPLd zWFS|hfw!6&879@fq$$Fm@%pTbxp zpK_G<>4D>N74TOe03gddf0McD4mC0bE?!w#j-08b?Cy~|z63Bj;O;VpuYL)+q|$!x zeeaF{H`$l}$JJGaRkd|px|J?LkwXiTN()MtbSfz&BHgVhdJqH-Dbg(hQj#K|s2o~B z=>|olq#M3@(EGjjeb2A^JbJf#uf6tMbB;O2nBNq#c-h$G>ohd`8;=!&j;~zUSR&PH z({vul!u?!ZzFx1Ea8GI(Yq*M3jVWAh+K(CKg3&-9SfUZLvqHpI0o74H+86DnrYEs& zI>3o!CN8#EX*im^HrcRlI$;NRbzgqPHb5Bo3_`cWW)Cs|kXcES^imVUevz%27E|TQ zN_8%X2Ib(8Ck8Cx>iA3cr+<(6G}9X4<)tY^L0~(uNqD~%EfgKocrQn8 zTu-5ZqY0TJLCvZ4OZNL5E`WL|$@k`RX>2gxXNhrWu$&cClEhvS|Rs7tKy zeZoD$z6L)3$+z1$W5U_Ge`7SJe?rNK{`C#hBMdp1G8wLVV>P-QYIxc4dAX|AuEb{ZQdo8wER}B(DJ1zPq-m(eUDSEOQ3#GQ=sryUS&iMRwA2z+r z>aivQXZGq}aPvMQ@A>%i%dU#%>CV==w}$cKPap!`|FW;OnBs!av6FQDnnPF?SZ8=W zTd?Ki@Z%~YlAsK`Bw;nBN{T-+RN0J1{i&dB2{ROc86V3tbtgRBEv?WKcJyEw>)9UJ z4EnOkxWkA_ohE5&hpwN8Tp>TTpD~Gfo7dgK|GiXm<8t_WGR*lw8X$s6wp1!w-mKun z3c9Z4&ee2ucPlA9S~41seexuCa-~k7?9>?=tKM8@&v{+EwCi`}6tXxa&q%~^X1zb_ zBVO8c9GvO1vgTq@!|ys^cJ~93AinS*gBT=%vT~!D$c+Iz$J1 zMfKae@88QM4Vds%FXn$xAUzkrN54Imv{G%Ze|~YKMyu}b=ge!rm*cq=9?Bi_G#vTU zcrHgiJKk^d;62_Y^xd`X@dkXOdARDTyL&8Cl17w=#uZ!2Uj-bqiQ%$t)P-~_ z;%W3cSTr|$)`35nSugc2 zUPp)2c2g(T-12_nknZvE}0`0U(BBe2Shg?wGleTr<8|Mv1A?J+V^z5(A?VjjRS8<@wJP^ zy|y@yFE10GWL=PqkBiGOi#nLR0`++V#cu=2%3R+BE@0E8~Hw6AuZ&8pf=oMdja^t4QiDh+Ki80|onT3oD> z0U9G)VYZxRhcp`Ur0sbW+r`?S*+ip(9#$$(1tq)}pG`$@Q#){^?rTA2iCAmK{s2w8 z*W>YYQM-I)H@;khAyqB>4Q%tGq;lSh=4_z`$1<3aQcVyDIZdN)Pg z+%V|e<9aWrmgzg+Dl#372Cijpcv2_wKV2=nK44~q1ZB7wUNjQfZYSwY)@D|Je(f{s z9Pe@+;WH_tB0Gq_VftwwsTuC|O1wQQsG#t!%nDcV7T)CV96z2b{i$xOg5|DSd43_)7&<$fTd6y{Q;2<1gS?nT0L>0!i#bU3MBFb79;8Wn$ 
zzqe_%H$%?ztH9)qN)zU+Pm%Qc#ZCR)mFS$G#%TtGuCk`FZ||*Q3kUvsZc>XtHG{^A)oe3WG9oH3c&z~_B6sYaN5VgwO ze8fuDf0~kjPbWfh5;GzL|OJ3te?{n>K1N0 zg06FEz+=R@S##$Eyr2MQ0`A-)M-o)^?EVMf#^DkUA9=qH3ZQYW%Y&t!_7c-0RLv~A zYJ$YDVr-GftNjgrf?^KSdwWlr?sxFxJ|e@8I(N!@L51ri;$xETx3s6(gkw*tHx;R{ zu&aRPk;~RL0t*({LJ;Hg%0R+Thz~G~q34 z)b{Tdm5B=c-NXGiQ;s`mUn-?;C-`iV&b zN;=T@{fOX~`}EQP{al1FF`g!A#JDvo0VnjD3lk;f?jvk&+%jXn8)Em-Wf-v+A5CD$ z0d{BlQo}2ETher2gjNwJiUc|bP!f$>AM5Y{3<7>ai$A*1U{GhQ-WhWz)?)_6=G0fD zt1|PSg6B{4!03vyCIc3Qn(um>i`hleFwM~uut@~%z5cW0f${MVM+-8P_C7u3zB4|$ zTffpon0T@8r7aEnNvI#cMa;@_FBZ8QrzH~(wo$CCtk~e6bPdRhtM3R|P7pJn>G6P- zkKij*x+o?pN;jA0bIyNz`KiExDYKBUaBt~88%d2@&sKR4jdYRW5S?<@bJ|^nmy! znSE&yzzAPKdR*ZbNw=rHiM-~+};TGp2>TD=!5o?1O5=B^Oft7rwPL4-g6?C7bKexbO*rj4!nS* z+mOKP>$x&sddQv)Oi&d~P4Buca1VKD3j9#nhrhX1v+u>pEQ?Dg;*_e>i-3y<%zqhv{CcM2h&DN(VBhx1UQ*~=C1Y+NHx zZ>0<~}^=?XgzLMUu><60qQvk}6u5mLi zbMZIs+}e@X*H>xOP`LJ`VYXNz_sN0p+K_$ouWwg1qt?CF=Q{S|mvA*3Z-4*sxx)O27g%ukcBVvUU%PgFwv4&q1a@;#2>VTt>Lg2Y_ z`atZ~{m8IC1rL(w+w{zqiUr))sIz^OrrqoHcpCa7Fbql82k(t?Fk_kCm-PHe@raxy z3Y-mc;L~D|@-FU)4nqdAQDg5&GG49SpCSaD&{eos1G8bVqmMoof$ORs45wN;p8fKOlOnwTb`A89XDrK?Uvg0Zk)jH`r>TWaJlv4K1bG( zGFiWV5Kzt&IoHqgNqS-h@8UZj?kxzkdt7XNDsAu1(;7Ro&hIn|<|BQ+p1Tv}XEDcBbgv$&?By3%g1ZF&?!@Y$X?2oU z)CzvDfKA$r5YP6?OU%``rr<7EGIB324^QrxpO5Y`U}Bz7|1RO~!zb0qRUyk*Alv44 zOE&SW?8Hz;;G$N6=2XVxbD^z`M`v!2osrlcd;FFq7`f=ugfHr>pZK~*w)Is;@P+aC zxqv;%iMVxfBPVXZMy>_s{YyRDJBMvihpHAE3DMuVq6jl>c!a#h6-5( z&yRJ_dOa7$k6EES(CEX5(7UT76M|r4ORGq$qKUB7wo+&7}O z&yChg9IdPM97Cn+KfwAD&~i}xYH>W!l6#aUy)|gvJ^@aNo@a8@6lnOG4hbSz$ffsw z)_u$}4V4p?6%pxMJy@UnMOdo~{RodayXgpl3Wh*9g;9a?8{ z9=1sdnpu96SF3hDMMZ(ea6^RQb5Ea_+If`aFbOx-${ThL)ooTpb-WizwNy&sZ4R z?2YAx)hUK5H@4{l&9Jttd#>f%6NEDRom&yz08$DGy>k=`TLS1;Ru!nPRVMPnSR5>C z@HSfVuctr@!~)|b>fiFUh{J+H5fOuuKQO{Jh(krX4TH;wH8(c)(jXM&4kUhh$cLw z4V~2LY@F5wMf6!SkFLvV&!3wnZh#Xq9sO$k)FUR?k!y;Y!X#UZLphy|HNLC2Y*Q%N zl(>iRv%Ei3FotU|oL7I*(T4565woZ%ip0dF0XyPNX6s`X95&_l+C)n+0lu&f$o?~bUQDEjt(+V>ggb1H^xTr0 
zq`jc8{VPpnjQR_hZ1S1bmVJ!?MSb$eS72vs3~N<*sP_5S_aqDcBMaSOjFpaMk{KAQ zC0va!op<&xx}l}8XI+loPcG32%NaE0H7rIqb0oPk2*4+5BrHNH8zeam7A*Q>++i9T z?DxTd>a04324yiNC<}`3WZm0=Ahf*=R?<|yv8+Bu7D1Qk_>430kY5$>3N(_lzedMe z1%v;EeyB!;OL9;tNK$teD|_NUT3BB+!F{6gw&gK7uR1rANWSN9)z0KN=o{_=^D*vE zJtR~{LK`C$EZrM&K#4zKVe#wE!V8_|3ft$GE~Y+gfdx4~4-3-EsGUVs9?u%Q#m5iq z7x0mkulD^8Y|fczT=k^HM(6hr?@F(!G}3itQ&ei5zC@?PsT+34rSPmvrG8!W1LzH* zuNf$As-S`g-APNz}vb~2D-=N^&;BZ1kvKcHvMnuTJh)K={ z6b~rTXdg=JGvb2eeFbx_vFFxE3)8r5UPw^-JwD`mXk*I?g$89+(3^GtLfR@n5$)1+ z!;7R2S(q|f1G?`zziuvYVA1x<|CS457F5WMN-cXhtQyhjDdrHnFxmIbh~Dy-K0yXL zWHv$sQ0&5%BWgLwQ+HQe1qWU&HJImTF7@IMRwx!FhP~Ft6uWcAzy<==F+!q{*AR>U zhb;FNn(Y;}kxMNr%le!>o1~=c5_Hf>jQVd>oiwt-!@G?RHt9dVIt##cGm7oTPp!e15P1h0>pwNPOi-;ziVDmvi6?o>!VG%W`{|CT6I@ERRP|C8HHs?x^lR zeK!ZbD`$ajI2lTc6%f?dbS!E${2F0nYs(7J+@YPgz1bfflOB z?2RE=;)SJ2vF%q4SbAMf@iwIUJsZ~?MvntpAjNeMEk!7zPia3i{h4>0fpRzcBTi=u zVdPm-W?A#Sy>2G5x3yfE!#w- zo@8hFF7)o#S6xS1!&+=E8EWa6!-1MWgOg@7SgPB3JaSYg$MT{CH9A^$_(`1lun6N3 zG+b{NOGW@M*cQvK(qw^?g0VmSNfB8St`}pC!<5A$VluJB8X?Pj@ zO{0m#Ub9HL5)ra1sjZZZ$%^*nc>mA;fZ9XDKDet>pauS)MBBT5TW1J`~F2@3L)(I>(_^ zz%WkQ_Dk=F@MbO9??4C*{C>Q@(xW(5zqcoN-n{^{@qa!?R`s|3D#u3wv*Zj|MQ>i) zCl`KjlL%&hc7-Wskw-O$pHHE-_vG@J<+H4~@!c$HNZ@mGmD2>JmEx6p8@~v%zkJ#NWIL~Cx)*kI)vN}2afACC_ve8jf z8ag_;>BBUsn~a#8j70tiv^-%_1KNM#;4yD<{S`{T$*YB%ZqUCaa#@fab0Y+ips?MkEQN)2aYRm$K~bVGnU(;f z_5r62RwN}=A=mv3OVq1+caGn%Z#$mQUR2xCyLJs5ZX5DR`SuR$cY%ni!4P!G2L+mO zPij*GpZ2B|nGZk{njQ~k%FjmhY}MSREDMwXX9Z|8Rg)D`P;>(LrET-ggzr8hhI=ZM zqM($vdkyF6QEd9@+`I(y?Spk-23H@m1eOuSaoX}QY6HzII(op2@+v}WP>Wh)`NgJ;CYy`6DSm}MA7Tb^Gqn5@f^hfFWN23xpBVaEf0Uv5JZzq7?rg;U=Q-^ z5bn%kLxtm&5S3?w*r>;+>&BI$-a;qYVffq2cegR}G+qo_7W?!w&>?_hmDdMxe5GA37mK{ZW$A)YTzVt}w0k z6|}^sEci1eUlWk1I|yp?9xx}>cy^wUKQi!~vcgp`(a?QtD1gmv_~9~E|Ecr30zyJM z-(P)s z<^GU;UJZ)IS*x#hB80`bmcsYI?WWFpE<3}LbQE%f5z{2^p1)`t#chg~n(-RIo(MA9 ztFQNp`2B&}?}bjw?oy@FDbgF&mw1Pk;Y(7ufrM2iw*GYf&c64Ak72=SL0gi~O-)mz z&#A+5{{2Rk*z#~SGx!5i9^#EmqhV6YT*f+#q*xV9apE#vU8RdWlkk}E99mEKD@Gga 
z*>+cHv6Y1_xR+faReFB^=v4#N&W`m6S%88K!wJC=avfYH^X%8L9*xvGbzv>v2_ZSZ zDDL%gF&+#M@43y~4N(v2=;)AlcIJnX5vyPIh<%R_NoqIyrGfVzFL~9AVcWh6mK|KbyZXhLwAVqzAFG;C&zF0q_yK`LXwejtY@P}*i zxnjD*lSljaBLudB`z7}UaAP;wBVwBM?%pk4wtITMH}v3Fr$u`BSF2;zoQnM1^8x*Q zHg8vz=6UnzzKZnV<(-8C(0~p>@)X!G?&V0N{;w~%gSB5ay>!J*pF9$~d)qBwpOn80 zKMmeC@@HyjV-U(?5{oLgEI(&1gYb+f-g3QqQ9dOKyPT*F`$>$t_b0Gs!Z1snOK#YH zd@7dMZ5aE0icKju%dQKdUutd^S53({7Ea9-ufBEC*VI-iyMfwi#ziI|Qlg4-vMulQ z7axqMQy$3#-9xc)oNXz3Q?2!ZJuDtL4`9uws}=#Y4ZlbsIm9*z3CZWipwso9;jIG} zBP6$~fo2Fz@`T~*`&(&>hICp-7yRwu;@`uxK#ri#Zp_xU-Uw94JE)l6%jdc^uP&ISYQvl1P8s z2R@%s9q{i*MPTESP%t_)nca71obk0rtTFtNeA1|C*ms|=b2L(DS53O%F^2<^$<-h3 zQCzvAT$GHm&;ov<55E2?$iUTGnX1$`@4JCJ&En$XG~>doOLz_hcxVw^Z_T_m7pk=t z7o5IBUHI1~C5+1Pdpp|nH4z<)@Luuu7-6yb(&`qK@2%hA+@2?`Pr7pUwFDc-rp?49 z!m_sk9TVfXuq!H^LTa3y-T}3>`7S^Aks=>Izb4~=1|~M#=aOH&H4#LgKhxSVCVHp! zw8G)hn?6ZLcVP|LBcj)L8Ji8MQ0HcV%EmZ(50#|^CD%9Lp~=LNmmBZ5 z4-*~K>p%pujyt509g`3n8w#iG7+$9F!&GFb-2neYsMf9iVv^c@XVUDPj?>aZ3G|tW z?25kzAXD8p%Cm*<3e>67D;$XN=E{XuCYjlhDkNk<(|Rd-XJ>7Nj}^OiieU0S;oT5w z)bhz1B?>EJINbZ0489fo3f3HulB~UuHj!2_ZiH>74+czT(X6r zkwKG$Og&&F3DZ`+b={F#(N{|1SA-VR!DL*ic#r_bJ?F%a#GOvek&Y_ekJHzOn^D`y zoJUVrvee}ZPA%9|aOe=h?L-MPr($!j5k)H_2V>tPU51=>H^z1c|u%AH9TI9q#~w&-1#3$qVD zd=Rb?G2ug+zoE-dW$s}@RC0GLic@ppB=80VOCfMoznDE`|Hj}aoS-wcM-u++vkW(O=TJ}FNJ?qFTOrC9zM6Ww1CZC+uuFvUKyB2mIg)2n3a4lG&rkZ$*^ryK~5+U%bzRg;QTe57vsv+YFbpl{uevUH$mZQBa4$3Jz{?hQ^NoE4W22lU&Wd58(+q#y<~(b3a+{Z-v#a zev&h@5dxcfrMlwI@|_d4{+P&iXq zQpxJu2}L&WGm$coKRc`lLIK9UUBihFC`r!L^9^FgQeM1#9Fo1tbSOydLP2z9c&fNt%*Y_2bw=77N8zr)Lx^43J)X$ z!rb#yU@Qd0AL$|Vr>E5&tt>T@ZCpJFS&K(wCX~PzQ7!*{6$Qk#7@_+LvzPz=lN%F{ z5h1(yZ8f7feIX*tv~S?s1P$ZQpZCVZbTYpc>M8F!7bKCv5Iavz7sVE|uX&I`sD`ae z-`fCwFoL6bsZU&jDT$$1?w5VMavbxuG{b{4J|=k;u=L4YA;y!^V6W{X0@2jbfy=4I zAMz%FcLr?45>BcEM0T0pV4aA&Y1*ZpG)JjqyKVxM1*XFN5sUIgkYBukmnv;?W0uex&=CSQF!6o%e4R46 z!r@-g!a|WDKIip{Z;ziul6fy6$d5s`BoC4GNq0zxVI&kDRDrv=*;;l|^0Ll9&m-qdTUCt`Iy$A-`m7WU2=fy%<*IgIx} zR{|FTgSO@o7pc-Bz1B$QvG?n^LNYSuCdmKa(Q=vOii$iUvw1FKPa3RREWHf2RW+OQ 
z#CM%)5{_t}smODtoshimsnFH12`DHI31_})=+$j-{`pzjHb`7eRaJ)$40Ha*xzF?R z{p|@T5kFz!{#S}q-;Caj-~LEq`SpdR*i*D1rVK~QMdCeoe*5hSC>&Nd2U=ky*XiYk z%Bu@N7=s^UdYzw#4ZqowPJB|!S4KUX8dqQ0;UW;viVcel7L>Gy`Z(Lw;}a9}fm?MI zGMX3^>OPG4VP|JQk3a-kcb(YG>FUN}AaeG$aIvGA(G@g7te1;Ocufn#u zOUw~86gmZ(XKfkAn}pR9PCs0!(Z!uw-EAnn` zXfQFR8FhWocPH*6&cazAVA2s!M~ROJ!bh?9hXgM%v|q-OhYs3BJL9rtJp^fKo?*u;*!0i_E$}2b6OlD z4cy~#-FUGMw(s}DNU+PnFK@@!PPo?(0b%2n*+*1(YSPD(}w#u~p|8}>B0=Qfvu zeq%$WomZLOE^OP*I2z=GTFxK&+UurDjjIhBfD@VX-<=$0!SclX?HSd>eO|9j(_ce< zbdm1tIdnF)=0%3r9N5C5;V9#oIJF7o$mvXMK_v$BT*E0`a;%%xzCLdZOGv)dd$A+f z{2IjDy*Ei|DE%V;Zo=OS@?irO(Z$>2^3yMLkm5o08w4FmBZ;w`ns|x)`1L;IYEnGX z#$JGW{VfY|778-%@s{F|keFIv=&`G--DcIAXG!-qJ6h9x=eStFJ_|hDu}dVle=5R1ugjIa#rRQ8 zM`TG90U5ewXSMEyd79aY*CN^#>iW_fVYLz-6m^b30!ZWGBFvAiJXPg@tHP-tM)J;u zH!S{7(JI`G4HWT*Ipg}-obv5GHKz!VcdSJk_b%Y5dsD$aNf@pd9)eM>DgI$xs z>Lnt(Zs`aV+xlzg#HqGo@>JRwNQnL%rh(L_gV3Y(mn(jBhh5J8V6<*fbdQoPwIUB0 z8Fd28#7mIyRSrdNYg%%AuV_>$OIw3*~$j#5y4Njwc`KYwY!{)doe?ixz`?0tba3lRhq;YVPkCWUI7gT zl-HQoRXI8B=c=DDafPGq8UR^jSLc}{Vjh$GrEy?kMs7j{%R!{r_+OaG|N97X^6hJT zi^iVwClb=VH_9aZ6?#YaL1dzlf3+%#j5ACT-FA~q;h|_-Zh~Oh%x|g2A4hSisl|L4 zROFBATO)CYO#Cw>2v=k3Y>1yczqESbO=bCWf~9uv&g6gv?vR{t3{hH8sSRf9m)vFm zjd{m8nY$k^*34$ztRcJPa7z)Rfa=10`v}BES~|qmm=C3_EeWEb@R*$ufMYB$dTuKD zn)Y^4_+}apiNwFQDZ0tV#x_mz-bZ)mu@^cL3}W4)<0qvs@_owutm+4?^vGwn9TaYW z=Pu_elUoj70?t(PV#cMc8sE^ywQ@8^gtcd=Tzf%=B!~+3x9m!uQ^&$E%$$EepcP9+ zxna?x)IvSCt(7LXy`MS--xs%*&?|kd+fu0KdKYhCS%1PxPCmLVI)Cq9*za!7^vCXF zBOv)27}4JOmKx)^wP*rtVXuGo_o;ntV%;7t$zg}`uZ0g5B|KtQzZr4g8R@S%q^E)Q zIqvloob|a8l5eqD80;6>TaT^ZsKS_}&N}y*)v|@)1fb^4;Fz6AX2GQWe|LF{%xqn@ zx`5&*v5E6`Ul1R>fA-kvK5L~i#+WCF+Ql8nSKMInCo(Q; z_B&kr&iM0lmI{VW4L==jboUnb10MewCH6?B$LK2orY=CxMJ9u!h3?0^^?%v2!H6p)n7+(hKtMZjYXKx><7bhn=Z~iVHjn&}YL^ zR3uY-7+P}kKEy%-a)Kyko}i#obNqI`7qb@7e**;jpZEt3pjqd`!>^4$AV&ms(9v9W z{Nb6f*+PC_ui%p78Q_wEsa-afls6fJBu(w2uw@_Tz{n*R^M?=#LdRa0X@kMWf4@@_ ze#fV0&;gU!1z!21rjuhnKno6|>&KACpANCFJQ}&4aawwxF9tQ`p8|a&rRZ+ZEbp$h 
z;GQfD%6n~1yU@!~LEPqw$wGga>Kz3|x6>jys?aP2kXHfwmN0gaA@IdF7m^-dyxF_@ zSsue4g&*qlhmWiwpU)Q$FXhtwTRvAZ_FgDkbq%Lcs^~tE6OG!LS0zMSiA7*uC3YA zGj^TnY2l@f?H?5BldGI>4gTdt|C8N;lc)+)|8SMg=bXnUOmnA^*LSGGRQT>c5I@z- zZe54g2IM~`B?Tj~mfkXEyY+gYD8!s>E9|XF;9@N{IUwY*Y)M%YZcJThv=*vOlXR`qg zRd=3?K8<9;h%HeWs{L^q^t@l}{)Shys|M%nDVC^QLKf+o3$iS}%MzJJr4=r`6Zdb_ z5X8pePG}0#fIHdwTmNx`vW?lE7HGtBu5SxXP4$GjlMM#r38ju}dgw-|>rd_|BMkoa z7@yYGbqcRDDWr-|9Xb{1Vlbae6(^G_?Wg;%vPOOLP8HDzA1?@m)I1?$K1m8d_suX|gM@#H0u_K0Fnp6WqzAc7Ojqj_yB(sHgU0wu*Up(7~Gq#xv7jm!Q)% znRxSFgEU-$j6vO!(jCFlm1^QY2HX9{C5#Uw;3B0YAPK(Kla3s%EEsiSJI#Ex_xDsc zqaGkoXzj*mS!lUfGO5>p0zm&+mmTPootY(Yz_4_^zJs6lYKmxJ*T(eD>P)-s(=(S( zgzbOfA{oy5l@1PER|ofS?53kTx27_TMceDx+M zA-ONOoBOQ1z0j(xcNHD^5QAL(J&CMXg!4mhx+r!x2C4YYbIkG1h9t`zB-`5r<{5U8 z@)|;6fq7+$=rHpqkG@E(#hSVkyDkpac~#v>UR@}ipTzUs)7h|iD0tz?syE{DzjniY zlM;j$B#DP`e4eXoi=DyfK*atCf?f!BgL58OiMl2!s zU$-V0M?BP*pH(LR7`o3%FIeU8bqLgS#VA`n039Z|>Z{2L+p7_D&3#(95)$15t)FfP z`3govpA^!RtegZU>vK6ToNf*bxQvm!b|D}2<8s|`ExW0FhG-WHW)T!X7hEgbq|#1B zMdd0C;`>}~tsDqeW-==yx5~Nq!GN0y#G+3J2aUhJG$4gBA{^<#dk(4MZtqJA3v$y& zO*4%IMk-t~7MuW^V*2*dO`&LfaiR&>y#HUh!5+2=>{m;ODZfKoVd0BS=i`+jb3k!> z_^V;2>7$gG2@G4M_i`p?+LGt5ufzaE8XRe`rw zhav<%YJ3Ou4ln{MFg!u%MuUH<*rI&aN6%@zX^TM0a+bjJ_lX9WF5kv#I1qN73j4mI zZRb;TbHH1bKQA|zyTy1@7x#hY67oelHr zd#-FVe~M;V`PS&+?*19(0xu4h%MWeC7}3vp>hv7AYFo!Js(lIum$$-9nkdX%di^hD zuAaX$mx4auax-V4xBm#?5=JOWgCOjie#qXoUA*= z)KczV33xftvpiPKn6oCqTbFK4tV8tWKQwQy0hk4-#+W@<$P#j(Q+Ky7|EaG0onK!I zcVJ(Xfm^<9;@fJcky|h=_vb{DtOjQs%eki|%@PjEzx>+}r8Q*p?AE9@?ElKjXk_@j z77+U|1Rll^AxF#Q8MBQNk<^7>gOSXjOY|^A2^yeG*m!-&<#VKIm)28?^_l)q^ z#a_{fkJ)&T4S}{XGr9AFJ=aK^WGjtk+(AIGI7d1a+S)9%dceYoL-kD9OIoR}@xh8`Ek1^M?8I8+wLwOVpTU+-_T zu8Ny$KB_17ivm*ffLXe_30F@^n)KY(oc-`A7lseLHn=n^f%1xKMM?XtF$2kb;nk_N=YwsI9IfFqZ4q~8d=J`k zae&M0%M6CxDt?Dc?A$yUYIU7}3734p(yVx#*Os+O=nOpmD%VJdWU^vkxdmFXjzy|2 z#K_5Y@o@#Yk5d`pkGy{oqG#*M;^xp}<$Nd4TG6dJ&hkBEi|cTkt8FJi(VQ39+C!g? 
zJ{;M!9?jOvs!pJW9-9Wql*^XO4wcK+t!S;)OD&I#?Vj7$`?(anAuDjvbQOsnsyLsyF1pYzLWQ-Lu@nF(z;hA5~@2STZ37n zjrt|F=3#&WF4+0G`5(cHXygfjKN^s8W~;e4^ho}6PvK|{@XtB(o^p(oIS%j_h@#iI zU=gnS1!I<;HLhIs77&%LP3}Bmzt8HkI3Up&p4b|Euh_JX3NsCwB)Q`qTZAY2OO7%< z`L`5lzY6l`(APHPwH<`%4cO=u`+wDOh~4ptBKp(hOon%YHpM3Je)J6FUZ;6fIT>`Y z+R7kkd2g)-k9;ox3^377#TU3|nu!T1V3sD8XUY^E=^6LL81(>>QLRxqQaa=TNmv)d~ljM(o)+IaSVQ3fXx+MLB#@ znEeN}$C%=M7lwAGH33O!?R!5jE7oG@^odJ86p4Urm@@0k_eu}u5YyPhdVrh?hkYvT zx=fvCYRBDD3vQrN-7j+64Ei<&H&;8&^#>gX@d26Xc|(}P(51C&m??4p!Db;HL`qXJ zMHB;g>HHP#P^J3I$!mpQF`wcU;%_8f|Irm;pE<%xyD?T~F>pd_HtR3;f3;)HDIL<4 zuWZY7nPg|0bk2*C(y-yT96y(ipkQ)cOeH7LBrRAKzXWW= z+}R;g=1dDhoq~9M>lOl*wBXYZJ7?T3lYY}BvdA`wSb zM#l|iJ2UoU(l)Gk22hE(*WLbtDBw|Cvg=7N=}hE$_YzDN{2C@;%tAK&SNlX?{&kPK zE=Y_L)K%%<{>+Y;%xix7y|1pP>DD%!+`PxK z`-_$JOdgD{Y+fl7U)r3<1vBaO-kMSxdyny>y@hq4&`l-hGv2Y`)cEgwOyLgA#;>r{ zf(%0+YvRGc?X#a7uL$0zXde1`*lttRZnXZHU49A@rH5c7LDyrMl$2z2z}f#N6Vpyk z{EPx6?JKh)8>teL%1_@L0eOCb_v|5F@&&`<7kHVA20xUao(Thz@3*+A>nqF=8&BehpN+mLnm*r?h6K+ z#OUz0hJNOL-hnB7UtT-t0k1=T`BmfI9gLknW+WeN;95LhCgvO)b?>8*iP8I%Dy!WhX=OQ?kLv zF^dw?o2+Trv?LpJ<(Cj`YH?Z5`tlXQ1)6HqIVr~S9leE3_H`x??Gfd7jN8n#w0VCF z7%`_d3ug8Rs;ARVA!Pe3;xsdN$jv`F(`}v`%kUAFh<**Zob~iPMvV+jQ`crRQ)v(b ztDoj+Cq{Rkh_6TOot?@_{8bdRwsLWtcEnHUiwwJ-fKpC*{Rf#$*z+La%?tMkPJvqH zzf)KmPpXGE>SozKY{+_jsAG@eUOc9KOXxAztVze{yS zHd+&com_K!0@&ehbmvwYgXk4{dORNf)sFNSpNV)L!&#sZi-{STK%}Fjrp|fOed28j z3P_HY?bzj7TuzU0l<@W_Zo^?URatK(2KJS!Cz1$@h8YgaWA_Klah)5!gi>>@B}`&S zFBO6qy(H#0&fTzeW47VIeV&(n{oObJ3xL-XoNsAGzlB=FUds~t;l~rfOySOB7bXYG z#X0uF-`@m-j{gaM8|RN-y@}cX-I7=8`-{i2jq2yW*q2Y!(Rw1cDYV_QLl5_J-1a<+ zF^X*ouV3;1dliJwdpsI&Pbl2%ms-ISb$)1mLbjxI$3-G7Q$MGbTi@4`z4-R%saS>U zd|v98D4Rzucm4MCVaTuG()v#=lgqohiU!T;vxR=I8%~mbx+Iji z;PCH)>C=2Razsjl?Qe6?c7nOA<<)cN{61Jc*Z5OgAY{w>U)=9hjIdPI^lqGnSqS5A z?}y@)Jhvmlbm{75A9fSIQ&fw6zYjKZl9$7?3Lc9FRrI(XBi3D5~asA z2HlUAB|sqFOQutNRc>;bkhwclfxloJ8PR~j|6tO}XyCdDpP4@a=n77B&zSGHUe|N? 
zJ39Sx(FSbL zD6nP;{0X;x9*n{kI8oSVfqD8rXR1=d_MkNe-FHZX9s*2BdM}wsQHJ#uR7-As*Tr~T z7-Dz90%C=?Wo2}}uOOQ0*&dwhxZl`YVkgyW(Vcjw&%IW>!?6R+3k1-(7V=ToKxMD` zKmW=jTVd9@kM!!a95szclD`KmteDh^j2$tf^nfx>)|DdG3X|Q0Zht=i-Ni@Jd+~*7 zdv)&01I`#uErRElanpl`BSq{<;`z9VWnf@#Ns1DI0gP7ppRboOxys#kR%SQ1Pckl^ z*?+OT>?sFc@nw+xXonJDppDqLxQ;RNM^i(j33|R5tBy?r#)VU0Sw{nY?4hMin*C-4 z7&BY=UC=rj^U3*nu%2f0%9J?p@bY_|=Ek9wu{TC_i!kR;d1T~)ezGt@1ZHUPLtNt1 zb8{K{$3MTQFv~FK*v4_=sT@lD?-J0{hNaS*b5{-T{mjwb5MR7Sj7@sA?K2N0^^6JD zyK}y38X9E!ab)T)gxAj(TacgNiT;~UTvy(nrsP4;!tCV#37Rw{Pb9tMt|h^$Yk=`W z!>--nM=Bh;dl=RL|Mue0%$KdEgS7~ElN#r-tC!o#KmcAn0n-Qy$^?!_gWmHw`_cb7>V4#FvP!N#@L0TFK>F$=6mXzj* zqJYxf-KC^72GSrU(kLYz64L**@!spz-}~WZ3_myqhqLz+E9RPOE;;|t_LoHei^5*; z!I7H$(0qlX9nM)h(Y>$lU;0yGGN3uazBHJZ0jEwwz1Gz+W>_|c1xwF!Cu4nk^~*ar zW<5lCgzCkZKslb>gf3{cCWGE7mgV+xFT`^-bS9j>3`;6O)eDpV zA~fY;y!MTgxTG(T8Pz{rv2S<8bK$FL+hTnPB*_z*A$9$B9i{-%$;+_hlgH#TzqcGe z?=X)EvzxdV9!xa215fT#t{H&=GBfm#g9fz6M*n6Ko=;{8EtV(m1{52cKDop2#wda< z0}X1U4InhI3Zs5s%N!Js*J9=YOqNy6?5`;$BcpOY7J^f90`Zq-`_4pmW_bt7K&Qac zs>pJ$|VH?Fm|ac5Mh%c_#M(q`farOVRD@QXSDE-ZRnF{Xg9_ySszUKE-Xw= z-URn`CV^*xUQ!CNZlEoTtFSO|V<%E8{*y)=+at54^JG+1>66%ZumuM}V!!~J>%`^c z#1s|r<_C&`rlzLujn-a=BD})F58GrN9*AQG2DXI&?GcipVBV1 z>B^_ZIqUYx*ul>X)W%qURR?^9RNmKlWIHz!-XbKp-S5k?GoSM=mO)bg+S!IszBG_bTQ2JdsOxObOsH-QoCc<8P2@DDn6M0gX`ts*qi@Z_&Sai)_v%*Ptr-QSh zJMFY6Jd5L857y*|fOkiS+X83*x`ietyn%gI^<;>`V>uYXDY@Dbc|h>Y*B3=%C&FlK zWCR@XoS)(i$1@XYrbO?OZ_i~(6@k7;@C!;2Eb}Jer)L$Ak(Q4|pI_2%38h3lSoceP zVPF-7bKPAV1>H8~lCu(E_FO&BDc{ z@#nR8An)9-@{)?`ZFqu*HMD|aJU+EY8JV0#aUo2lM;SwqG`)5rVp1nu1yia#Y@x%% zo-Ec#oW&}Y7sR2=ZutYTn^Gzrq^+@qz1^s#K2uJB4kNMe)lGf=X!@-j7% z+2mr&2T)T;Gf`^fDxytx|0^onA`2r=Thu%AFrn$`vyo;spPAv zNa9#e(Zn=xxI5RWcKq$0ddj>z6GGJOrgK&JPbF{M6?lx^ax?9$M0*F;nCGyIj(IMT zPPODs8gi><%+v9hkNyh`i+p*jnRVV@v-E*3wPUgnw2?s_3o~0fYW$`qg%3u;z&VoG z*@c%XWne|9Z+ z1gXzH9LIWk7CXjSsKH(dyx%Re9*P=}%eieoBb~EAuT}`s)0L;76L@+f_T<((+|D=D zwtGuEXZb^BoZku|anv!X^!Sgae=Bagmm`mUF(#(dG1>jJ;GB%$vx$Sv0loX9RqVPR 
zn<@adYDyf@RW|QJxd_W@s1!7`t4|DG#Yv7~l`1k^vpo(A$jNa<#L~XSTEwer)bE_6 zIhzpMigrMswPb0-}$Mcs-$2VXbml4~j|=3%Fq0Et8_6E@x8_ zsVXR3+*kx=!<^zz*LmsYhRW)Tx4m^9dw*VXYn;ORu{kfLyC+}T8aBVj!{ESN3E&)) zpXY1*@ZsxZkigv{-C9X2dl>d?xH|Dhz0RF$8G1aXJ4CHHVJx;Fdt0zH75>Iz2dA++ zupZr3jJNRT9@m_@2$Kx|9kJ>AR_06DK%_2h$rl|DM{)R;Nc1uQB&Xj-)+x{g*WJhVK@0iAMLE8@YAX5bQ9A{KM57SLQ-EG2e=MwKonkF zN|@%fIvO3)c>@gWT!_9ZO2z{_L)vfgg__hJR(-E}eVhK|Il7Oz5+WJkmHxC!R|z9e z3(_i^+(A03U!s9xOn*)5{bC!nTGve^GHRzh!!Y+Abdl>1=WCX?hQA-5Q9B$P5Q|d! z#ZEki7jDy4Z78aTh8^IcdbExsBZ&9uyUU|M2S07em?wwG5t{u@<5x%k^DeO{WCb{XqOeA^@VaDpPo&J$NB@;&l8WHubDk=Az7AU!6?YJQ`CE2;dy z;jvHN+fc`37S=~eUL5Wat(==kZUFI)dtcuANSXj~ji3-T?@M8VD|FopLP^SLdBhKz z+mm0uly8X)T@v^)@mP?(DOmAHig_2hB~!!{OQdw$yuC4^rTxUkKUN)2@C4WLfiNWU zsbql`PfeehhekdI}M%31%_ZQ{z|a z$xySLkkB25dB7rK7Ng{;J+cR!GHF@(xLT)uu-P1n$aK(mZILsD99?zW)673;s zB5M)X@Q{#{KMwFqW0p7WZ?I(Z^65^<>$748xl_~HA5+fn$fe7y>yy1(p-BGg%9T9F zFkfw;{1JYbWtRNK7Sq3$`rwj)E%{eHui7787Sd;>H8eu!CfA@LBPT6fq^Ioy@4=Ko zh?ZT$Hy=K2+IP!FA!Z~om3LjKs2O<8uKxyW-DZtyMzORoI9EFnaYT~+g(5FHd-e3j z0;qDU`FeW`A!97erD=a=?IGQS5y*)@`>r<(C3@cTU{j6F^7DnI;R>dQBPmAR+7KGt z@5{dEd33OiT!8e>P|~aXXBl+QGH7CIdwGyoITd(c^Azu^(hxBQwI@Ve@INFP-?uRt z3OCfHWLZ~?(IlB(JwcsHzI0Rb3CysaoX;y_=t#p+2YoO60b96#tHhx6kY$ ze4YB#w5a(Y1$oeX#H&7%-z|`)5rw~I`9|dh7xkamsBw)xBrm)}hp=JFj3A&vf*gEZ zljyK^YO(&jxb8BG6mW{`;-LBT2UC>*}VTl0=tg$aAu}M|0dUB^K))_S6@m@O0@n= znJd$F5J!<(9r-E0KKbBL-f6w)FilO(-S1sJcRp!Z(+YBxP5U`qr=|Tf4cBMlxHNOr zN|-|{Qs~H#l8*AZ;JUS#)2@EY@WWZlk=L) z+fZhtW$urF;7N~9bM0wCGZLxOnrP-wAin=G86DJ>ZI{Xh4BkG${tN@1dpXwY^H=(E z!isgk?lW6{eksWAryQ&yC)MexR_}R{vF2kwh|vH1c`KyQA~Yoygi-vLd;^BOqPzQG zv9aXMY#?OSK;Q^s9JElnLLntBDBtFH0r|UJtIoZ$Nwn>qU*mJLO9xBa<5t3}FoHn` zEx~yGVr4{PV&U)Cnp|k^EDcj}a_I}o*LWZ4_!Yb6z$@JyDAbAg;#>@5sYyWXTQfX# zwwFiOzy>EnU*aRnKz%cpkKyf}c(rc!TRbd*Kt-VcJ^J-Uk*nF_JwBN%8LqTl@47kv z1wQ;9;)pdU=F`1fh9D*wl>R>iT$u#5CnZJ+uSpJ_bM+QOrKdaM?0=9DK>-2?Ll_|` z|0(K`qqVf$;=GS)_SmaAFF03lQn}%Typv2Uo5H2_MAe9e3Tv#ZLAHCVJmfbdq^0|+ zuwh!t&kncae5Ui7$u-*Fr$&2sfp+aqaz^p1A7HU;6=Bnh)*i|cK$?E@D 
z{UT{=C&qHs=IeVu7|JKM-yILxss-!R(zsW`k!bfJ-`CHtH=jhLPWp-9Z?nP4uUK@% zzU<5Q^EGU$p%W=IW_UwbUuv@OsX+4HT%?w@t&0alg=6mDWu{la1LkX0oQJ5wY;6;w zUZ!KRj_(-aM&YoIUF#sfCQ+rUub)!Ncw? zr=?tNz)r>w?<jlh(K;>Pg*0>t0qQ}KM7W$>s!zdGUioDvg2RZtyb zQKA4_Z91*UVm4G^vtoCp#oKP=a>9)+t(SxZ1T?4eFjK(|-U$1W_J@1QesPxOmD9}) zpw~E~(SaR2)^`~F%MWxny963Q zzMBTF8AJzRp_ObNyADQ1Mz?i1A#V9vZ&Ni#Rk4Q%8iu!l1%W?(ol;#xzo_1Gc_oz` zUo~G`#G27Gk<1WbF8v;}UgT=K#!c`k^hUTz=Mx^+EBZ50-`)behSa#)pAM8;X-OyY z5&;Vf$s=w(T-F8*26WGiPbSyxuS*&8RPu4L#&w|A2f@@1IeuH+hl9p|p}cJv=TsnT zp}^kq!$XvHe`ONQF&Vmi1c#iDv|bg}pM=SRaK~ycP6s6*F$gGwvLKpk?TWXoM{Z~N z#$iU9v`44(;oD7b%=JRMuor z8U-_#P_$BWmANm{C8id5QmaqLHCYYbpLYN80@6qQv}Qi`yR6H6CYLYtaB0epM6 z?L@81Af>XkWU5d=jC+oy4%kY-^~hD}MMiMIXketD`PkZzG`HNzIKwFTMEFx!*_{*7 zpo~L)KTwz|AB<PL$fGcbZp1Uci_CYgU+qUL}nm{a2} zS-o_SlS188c3~sZ<~&NTLHQ&gi8P1Q7@;p3QcToOAFQ?^Z*jwfk{=8JDq{&`fTY-)+ zT;AX|P#_>Uo`vvReAvv_<}#b|5)|Ul%tWZpjnbxM|SgQW;>%q zRhhq&b#mP42%5WvpbYel5DKpH%F0s%2QA5MV%N%axQPY$FULO#7V1E{!+)){<>G3GDgWaA5U3cco7te|*p3c>(C`4vQv;D`AD)U>kaGQaF_G-d&ZwwaxE0*Kj zkKF|fz@DX=7P@cXE)6fxgJ#dqpH>AHw1_0!+8(vrFSLsz89q(jfJXf`#|S(9et{NVZ-`xI0H6E8V?g_cn(M+1s3W|2D@XA49~Gp4U)A@EJCW!a zS&PUkHBxmk|_OD@wOoRXV+$YzhSClAUEWg>uIn{lIzb71HR9pc-~)_AYD7*EPNJyj4rt+L`pm>KGAy{JYO6{`>`2PQA0@! 
z{duM!+vby-F~HZvxDRzn|Rj*T9O5XLZfJ};kA}83ZOXOUkJu@6*L|PHRt=l=%H2u zRo(B9l|=)7J36KdxJ4(|>{a_w$AQ5lo66}+JThGcU=(7!j`qemcG$omhhghxZFuZ0 zUF%&TKS!!PWhE~t*jJ!6V2w6%{VCeE@3n?22B*bqPbuq4_{9qB*%8*n%LsDuB?j_s zsrSOx*lh=0^|Zp3e)GQpYBF<<9hltnY>@?eBg3~;Gpq`Unc z=cEP8th5ZAKEKiHXhunEfFSj6D=}d}SmTQsKAl4O>@tH~^Z`$@ITzcX)R&4xyl)893O7-(ct2hoTI(yQ&^5!lb0iHZM z?tZ5#s{|GgFHLaiwFTMVip&D*54PeoUO&+PVR{h6Hue{}z&of3#6VK+-`3aNQc@;wvoDi~(F^z1|Q;$QKEvLY*f*3b8wo{sU< zK09AqvGyUk>wQX>gQ~CLGIP0#H(IV0E#~HB2VBUg3jZ$!%RL<^Ni8 zM!5gY;787L6=d2-65S~T#Rg;K`<$HoJOK@ye*3(J*nRZ$w{Y~Iz$9nBgUfXi58dFudBVn|~ zsX^?GBS5a!urT~EFNSxg2$KZHkJO9oIEJ7#toUAU;QM-N_32+2bgB?q_D(e4XdC;n zIq+d)ewsEZ7@Lcz#r)Q?F9Y_AnGs0K57L7?K53W^_B1XO)$@PZsB`dVcq7%AuVEY} z_jdkawz+cM6^+hwWbFUE4A?(c!O9LwBma6T%uET$YTga7>miu|^S1qrVmZ`Wb0%RV z`XB-UW}0>-&V(z;(z5o;k>_Q{?{t<7$WkRcjo6Cq-L)Vd0rBs}u2s{SaKd=Qfr_4(K0XsZey#Gvfrq?*Iyllags_JA3CJkb5(3%CY!pGs_O3L!C ziMVlrD`Al$wZSClII%+b#WSx?oILwd>4(GIDN0I8T7j9-hi!-PQI}cv7KJfz zRJzuu+m=o?kQkhP5cC7R2JEm6p}e&nc1(B}U$RI*y&hCXSRYIXvO>>F35)|9u9JrG ze3{wbg6@upUGpdfI_};;JrM=XQGV{*kzo;qYu^^eaH7tyV5%915Y|w)B$CHPPB!eSD z1VX!IQ7~sgdgm1ox;fxHmJsSe9kHe1+wpD*ej>OWM|D#(yjG?*yGvkPiT^tOe#CiU zY3o_5OP4PXH|<)@i6*lYsK0vqyNeRqoTnl?2Xqd#mHhSG7c^wRC z6v{44s+qzo;G|hvot&gl&YZgL*fshu=qe$q?=dGx|NQ`}0*9vlI(md*rVOjef~ukG z76XTii*)@7VPd{Xh{y|z&6S6l_LP_A5*3m>ioaIFd&0^!B<=~*)aQumjhcbiWB z7)93pzmJZh5LG*KsDU0K$(X_tjB33uD*6FZs%ryMlI!gQdIM}Q-SEcnJB(sUCHa`A zIIqS2jT0scG3eeC@V<_)oP60Oz)4lS61`XwRL_zqXbhf5=lAzLeth=pSr~NBYWA#m zhPTGzFbR&gr;<(?W^P>Ju;K6&fh877kynIn65YWhz_2;2hQduuAbtSOR^xA*v#N?7 zVab?M9E`$2g@%UWY%AD+rZkh=m3}!CGw^Q9ub@5r&s{I{-ZsUci_sP`ULSZO_+Gg{ zv(e>kQ5}oP!ESLy_Yw9jIBX~XFhUUJg|<*CQ{l;@2e(V$^Bu@$qcC2W4V9Yx@oH^2 z*#%l2F{t%{`@!WWrwX*9qR*3jJHkX>Pf#|GC;~;tz;^s{l2Z88p$^+Gh>UH9-B!rM zqM`B$vUYqe5)&~JIRIr4uZQVIXw zCnGl4G_d=Eo}Qj>_XG}K?_1@fFz!ji^yFebOtO!^R-;BmP$gU{+ma#~gRIEA(m~Ht zV{7*5IVqXh&cjv*j6p>JzAbUlPqY1NEl?Ewxr8OBltn9_w08_M70iA^+(Io=rdlwA`TBsL*Vb0 z6dSbi>RIOKL77~U?jd&z%5n&aIsO}$P84HEmRrk2OEsTFUUPGPO(4^%oY`d8!4RT8 
zBk;^&pi*pZvQpbZj}eIA8owK)*Nf;dYZ2QXdvKztyP>S72d}q$NwqgcxQ^C0;^26< z&{Wbe%%30So}!ItK~E8wb&8CPj5Y87A^u3*|6F~C$4T!ezl@L$F3oe`*U|tR&bdC! zy(cm$NzZH&!~5Rh{*Kd$iSz%xy4n7A*bxD=&gj9wR$ql-&{_UlSh=x)Zkzu8kj{Ci zbuCX6rZSqca{p9#Cd!$?R*=T#~Qj|$Apk3|X{SalJ zL-%3hAPSn^T?iO7#5L5cD6|0)^(*$@JI>L0Sfa+QU>N4_H|o;~I$g%=TG?=;oaqI! zG3JBzzPFE5Vwp6NINI$nwP73eLPcZI7;34-d<0~{za1#rDEZnduO9wGBJ8jJ>}A%Q z=7uvJ^ln;3x(o_9B7l7Vd1)$0!xJ zmPhI-0+xPM{NL73MTw>g$N%qR^!X?UM%TcJG@Cw;bJ};@bFbs~E&6!Mye-nM@`le2*+zXJdj(#NaXr(Lw^=_3h2Y*|d&zZSaus$EsmKUiq(| zMot{QUFHN2HR0L`~4A+>$m- zYV`zSsZw1YdhKov7jO6H6S43t3c^~LLIvxO+dwKGQi&AAdp&d4&e;>Bsw z=dHE?@@$UFL{UhFhBp^gANR zMs%l&ZWYVKaSW2;ToC`YG7!uY4&QIwx`oy}<%Yx%}>NqK>w&jvk?=X}u5e0hq0b0_s%PmOM(3{NJ5E zen2w`zn!#{AOYlDn0s{&r^5$yAaMNSX2T!Ny^C*`&WbJytD+7XiGS>oodqE;I7o4Q z{heWmSdin9Y<{Nu$<;Y|MMVRq92ACP1x)1n?cg@J;nE1`gHd^M*shtoJ@zr>gXNI2 zC5JV`E9@L98F4{$KPVeB|Gr2KTDUaz1>RAY?TW9gWx`P1TM+`7`uWx-rgRZLsuyv#)`m-8W|nj+9n;crDPRW!|-Q zMx&C|s~mEE3x$GHy}elk!28kK&sg21PR7C6kLbYMM_Tv(@H-I1uDf&S_+u)g@a4Pv z9no4XdeSBSv`08JUKvT?9ciZV<&_10zJ?*dj-sOzMe*oh^I#}v04o*~&m_9H5fmAI zGw6c;sbQF2GkE*n$6o@UC)BETD){YzCkjp}H=7d5Z@}4x3cs5K9NntOn_7REjVV=J z8t@|%4uwn%3~oGx@`8=PHcR+@Hzo=&T5aHA$)@R(;b0=C0WFO#qF7+h5{DFdRz}9a z-V=1k&k8p0_h&U1>h^GTj|9`Bi~4v1E*Ol%{Czb9tyKg=;4$a>kA6g}W8sSHK$Yr` zI|ycQ=PcanCpbR=feT6nnstRJuvld9`pd4_TpoRB>4E=-Fk}_3$lpYZ4L-4a6BLPl z^XlRuU16BW!ay$yfC-_@d8&kQDtR2Sjtk|{OBLJOEm%M90Pp3GXRymVg~Wc~W-6kp zmog*BgiMx($iaW7JttLx7>E1i;fDIR=3Ox{t%hzS%GD0&;X~pSHB+*#P2eD9fGm zezD2|&g{wK2QFp+Z>Ad&9v*$}$_tl6mxFMd7dcio<3KN_NJil>XqC6?mLD7R-=0ns zEP-0AU4^mK&alNNBRU8|z!(0JBBX;0bykIor=nW;F8Ks zq#p|>C_e5$_jCu`|8{t)C7g3nI!-YyU0s||A_?R=*y!!ub%n(pQOLLWZyqIR4XNWG zPBu0^RWy(_22`nmRQQm?j9@M^&YA@}K74YZ_O=5Y9BFWyVcg5G05-cDbNCPW*E=}J zEg9?g?c8*ltYAjjEDlZON}bQlN8z?H7+h%4zBL@>%FTa#z?#uhg3U(qWAOO4#ORXg z{CX$nplkkSlzS+sj1SS;<@zehWf?71uhe13eMxW{GW)UsQ!m)&{s|C#>6y9){Ji6( z1(6)feRnQvYd841u)%WLloF6Umo}RdWp(@gX)AS6>@=)hDP^VMdoK+?6vdDRuOXDdXRo z0`McS$G2e3hle+K&+~cZGvC*H7I@L-tSq^Lp&#eT+LO<8wep$2)k0yRhu^j9DiUQ^ 
zz&GZ!yQB}3<3HNvTKDqKE$_7o$s=Rbs~Z{1e7bpl^ThM`hk5;fO$J&gZx_r}{h~^GYFx= zcH4gW`Tob+*FU~KUut#Uef9y0XI4FwOiW#tqp@nsPMH(vEi%vhFQcI?vssUPwnkIT z`kVu-n(}Apn7iY)nrt|NIPUSN))tjjM9|3oEjIP%h1S#$7Q}}zwksA&2cuLk$XUm& zN8DCwXX5QUUk$8O&XQJR=yBi9n+UjC`BB?xP_IHQ?-C2W>VCxL-{6MUiJS2prh&Dc zbj}fi=RS~xP(JF8->W$^o(I;%IqG;eEGC^6>SwLgY2OX`Wz8|_QbYTi6?M>Ze%0+B zylIkYHjFxnrTsVV&r*COMPbTl47~u_0s}C2)=ZXXiH&DKRcRIauPl0GuxZZO2JwZn z(KGw_WKbEr;zPr}ZC?#Vq}V^qOI2`C7#3FyYRI;U#A=*^(g$)uW3NEbZ3&>~>Bw#J zOouC3df%UF4w?C+Wy2l=jWafPU)*xiDlw^Dvdu7i%(c6QZZ=K%<}B(jj2g0^m9`4H zWIH>YMB@I<<hG1u!@UeH60oT&Z{~8UGqzf&@9HtndRJo zLNA{ekLm_unq{ECp>v#j&qnnrQs-o8A4MvH%a@~aJbNPNW-;x2RNYtGa&ZMO{KDJQ z14-pIl1U|s>UML9KujWr&#inA4CIpe!(%8vyLs6j*fCJr_>k@}0>goC#b0oR&WJWP z-k{9Psi9&~Kf28EaP5D_H)*_Vu{{i%Arw!>+ zCe>dVh`#k;Z)76LRXdtAl;j9WXP&R@pY_)p%ASw^p;6@ks1U}rkE!Q(j|2f$vSh&_ zT=mzDy$?^Kl6|RY`UF9LK`D%84M2Atr&d5Vdc&m!Jb8*Y}aDj zM8xWtizgefgw%JwFP2Qx=PPffPlX*8kEcuE_d@s5-es@kTsyl(3gTq{kC3Cs0!MUk zJZ#q^N0NqOMp~-~>O4rn{`}w1t8!c@=&Lqqi!81H7^M1v6$D~i&&%=TZfcfh$D?EL ze-O}~_{f#qN==yx?U06EdZDP?=U!eB6Upn?({tNtc=Ed*ExD9Zcfz&ndUD(nI9)6@rdm>w5vrM~a7qq5U#6Eq2wG(8- z-g%7yfg9R4rj2fFnrlf)sFklxIp;BhtBylO!TG=cPKs&)i@@$2ZehC&_I(UGU3@z< zQTmFb4 z^w#J@!?&f=xW_~LMnbh{81?f0>QPbsp?Q$k;;SZ;9|=gBJ#mFS2jzXLv5}Ih>KO~6 z{M;+xbH>5&pWy|h6c07F2pP=#JTP>k17Z~GTnuV1Rc&ocyMXVpiV|paIjl~L>Ty#< z9G20wR!JUPew8cFndeZ0@?WqM z>|&%YMz6E_ZR!t9_z!;=?x|DeK%=O2IlLQ7&>T<=iNFF?!XtG+=wbo9)+A^uAit?o z)u#(ROe{z=|Lg}+8$|W_hzq`XuCB9?gOUvDmF4N!KUnc%3d?^DiZks`u<_e<9*!F4 zi9ye4P=4UWvDX@#Um%_XCGdP^=!8G%C-3NT{}!4S278CghU>Q z3d&5P+cNVrQrGN)NdM9fj^Sv8JYsAOb;p%OPlU8`S=U*=9fwXuoDZ5&W)Lb1tcONV z(KX*vAQ2#W1H;b>xAv+nhCc$UC+YFasmlvuDN|)j-t|K>%?NOiYZRWAa@Bv?{Tim2o%8b6*{oUbc-{K3zYcOZR z(+r3_wQNCGRYXDLYw0A!SjVMJ#MEX>N^~^9pe(JB_)R z?(xps^g};yZ-tGXmIs(RqwVVDs)be<^mjy_bvb00EN4h8f?cP|;Z>7L>ddvHA7XC@ z>R@80md2+vF&NEAzApC(vIuj%No?wPd9!ovWr|6xH@jTA!n#XL<=k?* zSawnFQ_D)XsVi)L+c#pDXdwBy8+P;>4Fol4zU+@_rg5@27Xo6~+c0&5NI$@^ zgtaVaPfw_|9IJ7ueD9knUkg*`ITYCMHK4^!>;T1~9oXF+z|CmCVpF5<1oWclCKbH9 
z9R>YN;!QE^7LhLCx-GGulez)j(~Wcwntd(nuaRg;4bVwsP{ho9>`LGncyWcZQ~SXi zERI)cnElY0ZAt32Z@Z_iBiC0i?st>&)l65C4gQ1Nw+icve$3&l9o{VdLP)jTebb)@8{3AZ9oJzSf|}Y z@-SZJ&`;MClG#Pm%=FK1PF=G+N-2KuMTiv|E#mgMA!G^lJkkZ7dp9n~n3ZDU?F&tC zj^3#ky*~VY%``UiavSupTanAmX5guG?wtiILbm+`xqYdr>xAUANlGOqG5d8MG9M!# zVrkcPGv45WdEkc7)^piS$1hkMMhX|m@S73VLLYBEM+*6_ffw+zKJ)W6UA2F))TMMG zZDZ}xDbklg&llIyB$L`0tJ}9OC>qdl&qdI&H>xV$?QMcgbeaYG6kOPFg^1w$o9tGy z7<5%#^7&Kk=%=pUqc4m%^Os~;sy{j`zh0hexuJPg()D2c=pZTKQzqsp5VJ@N=@53^ z(yL9VFvZ2$)D5aywjjAzm|XQ@@$*{cM&f0@3L_<-E15xCZgNtaN z-#_0v0+G+fzf?R_G$I)^4361bqd89`pN2}VSJWTeTwds%O>Zr+soVKb$3eM)Qob)L zn)Qkg)%Ulb^bW$82S@{OCEoA*-#_r~uU@imBC+TQ;4EYwt%@*mQ|A)v=K~9KWcgNv zK7ckRMdjmF2#LO!NlOxQwNQ6Qc~J^!mfqJt63d~GXV8pbsCrXMEVAITv5zaT|8jO9 zC9^M8GVNRX6FmCdK>vuVQcvk}*zQvAP(Tkp2Y8d5+QKPq3U)iS)H#?6%VC=6ViH4Z z73-NjXHA@@YHlCv!_!L|LE|UO?>9!z$FaMaJD|WOGpf|6TCB7hetV?KG0zM#@%C9+ zPMJ+5pJ)gNOnUaNk^1;12T200otLb{?=C4yCN3vBZ~)ID@ZLFO|oc0sL1v z+lqf9VGqajKu~Kcvt3ONyJ%r_zn=^y>tIo4UFG*TD_TXCzLyD#l#%JeFMZ zdmDI3XE$#b;fjkwzVXWfzn+*(zX<>wO~lhyacQNozSo-JnB5ypDycLvDZ@9i^toCh zL?|0%$;otumz{$gb8|w?40JDUW>iguN?8_Bu%8;(Nu>zteJCl@HOes%Sb^iGbdfM& z-!_xQT$-?ZujsvhCa3wLIfv(%%j}?x3ctBv;i0S;iKB814x>si%waiIzAL$%mnzx& zS@}-JljN{u$qZa2s=OpN4%66#54kujlv^8>*#`8NRPN>kMP_?_0_fkS?r_&)SZpD` zq>}~2DjeRE}y_5Ln3z666x06*G^Oyl*hxYKie|MKkG(46&<^7Ra(fKcKbNp{=m z7ji3RqFt9Y937S`XNPig5aF{xz2TBBehbcmk*uz!ocBGmX%LJWI^;ZE9a4UknPV2a zoAqI(<;AF3@-o|YCnhC&8J<2}-#y+cfv8=#xvL)VmloNCDO2er^K)W&CQS5CzpO|{piS8MrwF2)1ar+achps^d=r|7J0=$4)S>~eqAq%$bdn% zF(@;#F}2Bjy7Injp?Z*#&kN%F^w;t7=*0568`E;>DD6u9@Yp2FX_?dJw9K;;&0=LU za~yM<{TH($&TjS+^Si$u6;TnWD$M*k-<3h@8lpTmrOZPJ%$}!nng_txTQ12vlS{2C z8h>(iUTgZH*j3vnOSO)Z0h~i!>pv!gm?|(*-nsC($~{u%QsOj2JCh!G90s9X&Q)Pe zA!e7>RR~!_cr}6!(Z2WM^K+>tCm%XaAYf1Hb{~x2zLH}oUA!#cW*^J;Ibt@Nk=L5FURRM^VebR?H6$ZdQ+=M8 zn8j@FADf+}NKvqRDJEMy>N#Uo^I_VZB88PS)#&Fcgj;<|az0)#L@UPV6Dw23yAikj0zc>gM7F zDq0+Gz@phn3Mr4)Qgm^WpJs0Pi97eXAc*;rR~CVCxh|p6zUw48>oQ9i0|h6(;*pGq z4~PIC&-4NkW`f>Iao)IDZdfR>WGGDeey7S!YnyJSAA3HF|85wS3|%h;PY=-S+JO#h 
zbHymHN&@@ZW=v164KCkCuo={DD{-PO>$!IqQdLX_@(j^2urGXK<7OuuwGTrk>!-z6|Q#OL-qb9A_cQE90T;IEaLyVVELxsQR9sCBiFoKmdv0cjmiD^-vkjauAQ@;b?|rOzcc2&qAD*b&gFBuM+6TJM>6o zf}jD1OWJ$CuxLfcPKeHAplQf5RG3g~*PuvO3JINbx*13*T{hH7l=%F^H zzrw-5XusE!ktgl2eCNQ8ZAn~di~HfSblHj3**;+jN>XHOF_7xFho;o%`jtE)r=NwbF;U0)Iu2gqNEeUt(g6lWV?2 zmy9v^gxBat9VUfXi;pt?+1yq|(f-%AlhVOv`;z7N`-K~B$~<;dMaK#z{yXgcSqbW? zd@tlIXvcQI*dKAD3@WGz~#<>Zgs1awq2O z>&6t^plI>NFpM{|kIN+gVf!~1;2?Fq;3&0nd0;^auB)HJTg`-~ zaim0~{xhkXQpF+wk50zDw$f`v1>X!#u{c;R>?D+DQB+f?iXN-h3iNHH+`x&J^OH#5 zK!PX#prDkE;E6eRLljbIKG~1}RP({#9|~-@d`GOZJz7hyY5*_Kgy=Da36ty}1f(U4 zP#(VJI@9SE>4vC($TYE#bs&{T(VypnNBWI0aa=I)K8~NQm7aHnx5V`mgHlgT-s*C?f(XzEyWU$m^d2<9x(8m6iA!p;!%>TtWC%%=WW| z93>_gW^06k{!o@KN^-9z#p`(URv~oW3H~5xCd>>B~87QbkFeQbVKDJy-vwd zNPD^iI&({15Krj+*q1PV&Yx9U3y8IC(l+YEd0&_1x!G@q7Vb!p3ps6dr|KgB?3Wf-_!ooiVIU2wZLQ z`K3NbGLqCyxa4M7Qn<`ZN_ni_i{4Q|+K5VcB_m z@3N0UqFiTt){~p8i?C~Mt+*b+m ziB=xHpn>i)q1clQXM!S`E(A|}NHJsTBN99#ob#%OD~VgS0nqcJJNDq+ITNT|nQ>)) z;DV-SH;>kET9fCoA4C-#Km8YzDCK3c8xEzETKt7cWL+!N1L_DGp4{Q&26_)0wTuh* ze0Cbb|Fl$*>PqB;j-Qc>-|h3VPxZkRS$!ys8G^W%4IFcm^W5Lakc@SY*EK$oQ&k{Y zNsfzN0fgd=Nv4oTvTcD}NF_TV|hKIH9Nq3VA_g~|x2d%$V=cE*c)Przd%q@wBJw|mkHpI)tK)qIW~ z_x$d(U%SQfXs!O}poT7`?eF^n3BuoczSbmlB zzPiKEcH6)r%qz`8+^#N1Pt zZ)zHi!5sQ4!jMg=_pEe*s-FSV=Amh;4CkoP_%_nAB>N6%nK`qDg?ja4y18qvHImiP z$TswS5hRCRuBdI8Vu>gUU5385pLG7oiiidYK`aq&{3wq-R~~Ge+Oj_F2X_|H`SvUm zVZelqk0XM7(j4w|c?~`71i$C{qa50~hVJ%De#pj^gAA9Rw-b6yti$9aU)Df#T>0B3 zTOj+#!LzXxOad@e!u8enhUK~h-y?^8Z*T7hU&V?{fvqx=`kaw2jKB`j-sCV#AJ7GD zHn+IIvX#2Mq5iL>Ra#S$7Lw^^UX=1lUWYDwBj?$>`1e=ebs#tvzASZuLujo|ll=Nz z!wKBdmY2;Gev%RqQ;MR>Ml5121e*rjhd(CUUa0vovJ=RCf-`J+3=`?C))>D}AV z{kA#kJQ*+p*v7WIG-ZUy?7f=zp}8%t8{hQnc4n@`-Rhe?iD|q^KqmeZ2yebA;(*4W zX(YWg#4c0qG#m|;r|7K2-5Uz1MWuBzmK}=RzHZ6;xpm!rl|~~_q1jpo*nlz2JrqG_ zWba5(Q<{YvDPZ-%Sd4cq)C!8TND3o024^MADOQ(_vOoNmeW+Y&j;(p|vhod4MLY2W z-wo}M7V23hEnMDJR4qTqw5~3hr{BzFcrKg%?b;1VRnb5Qt`j~Fd;M{v$Upl|dFE0J;D#6Mlfc4sr-vy~A|%p<7{cf`MV$WjW&yBt=@u<^;%zWTb!YIk2UIoo?4xDL3Sq-rY4 
z4n^7iw_A!Fr2X>5p>o|WaJYsWm)zUeWJIfx@jils5~E*j0V;!KEAJp+*DoJ+8h#S= zP&e+o<|F4mDcLKizMuV#69I?vOVmcJ7&NKhPB4mWb?1I#q9fcKQ|y%^g<9r%iFjppBs8s$^ z3fO#He$Qd*%v6-NonL{aqW4{yF_kh__a6_2<#}C2Pi;OKavadHDwYaTIbp}XqZ;P8 z`9jhsb5YbYFve-KUv2ow7}6J()krDa%Pz;iV^p2fQnjn=hxgQcL-*oriDVxeX)vhk z{|azcsYv+Jd;Lh|(UnUwD_Hu7SZj>$;BR8^&O`^#^)~5gd{WmNApso->DNV}h%qqcG^iIA=y>3Rdzx#3^Il+QP#+krLuEW zii*O7v5R3wh^!S(N_Mhkjj}e@>^txMsB?b5clqP}?|u8@d{mC+yFAZx-Pe6x*IhhO zDZ+1{^Ssi=8;oZ|-6M}v1VtpI^t1TwDbJd8<%eg$e~~h7JqrYVXIEYpYs{!l@!RkO z9hXIm^H1wyKN*n7bno>Jy&rsGR2wN`UJIwR;SckcYpcHx%ocS_I*r^yx62(t>m=py zna-FtdTuFs+Crt@(CVDeN(p8QglK1xm579w{$K~}+`}VBMzT`+(-Mn%m?g|Z@`A1p+)U@Er9*&_`aR3_ zh+xVV3I%ug3hvDS%(V%67q54n5j%*v%lJ!`=${X*m#$HwKT{2yu1quYUwy4s`miD{ z9bO(>Ruzi33sLw%E&+j;gr?jOhQrx{X)jJ+wH0!1`hjY+Rzv7!=L`KRC$gT0 z0AuL~wIc4*7DjZ3inCaQHWo@Z^4jKD*-&~&l)SG`QU6Hs5|GG!ntV@G_P)=X1c+G{ zlCj5d`{jmo@;$J%W~ZB{MTC>4B-4dH8;rzQMtYEa9mv%LiX&-L^~O<>yI`A4!!-eh_!=e^v%f>%uH3Ky@+h19SXUJmh7 z+RYV6X0Bg?Gq>SkjM;`4$J)E-X(GKrEo{9<9aJ++6sA1b$&R6{?l(Q6^`dZLGw7&f z8W6{3KsWME=XJsK9H7IF^28}OuLF<}wRkj6mqOSku@4`mqNggbl-xS6JD3u5YCK)Q z3L_;zi_)E#poMCe>o186{e-dl*~pN;>4w znOimIv?;A<)%K1Hk9-W?DDf5f6W%+bG&8vtb+=Rgt$8Xl*tOSL_l7bwzGPuf6Mjb2 z6FQz`V>VL!l0F9J3`Kt2CtYjO)^hHfK8tQfQqT$PMpL0UyTsRZSgqC-j19m=II_DB z9E>(z0(4v3yVlXf?sS?Q^{PI)^n7kB7iUB4b-IMXL@eU zZmo-k1QX&GNtmbSnT3Vzn$bEh-nb0c>z*Be ztueJrIY{sgNi&u>{zI1qvkELN!)=qW3qFax2Ptm2lt|XM`YOcFWHN!W1sAHXrVfjo zv(0s~=NoV0VPO-yRX_SP8i0FUCusa%CMSP#a7dHVs0ysCew6^bZ4CDBw1Okgp1^MO z4dgw)t^9uHX8ruFk^FHbWptj|5#%3sj;FE1D^Q1FzpTNXy+ceTDN)*vJlk_h=fKUx zEi_F3E?%sgd5T9EyfQVlz^JFQ#N!)(%M-bgcaK#(&FW%sw;5_F*tSpe zVG{f-)((o%blU>b|F4#P&%p5TQ6xA2dod&M$YwdMpz_-ZCg?)P=^k{{Z09XUApGxa z{r4kGbuDlLgsr{TZ8N8r>c37-r5)BH5vxlqnW`tM988A?o# z`7v)Q?*Ek({C%`^5Uc?B#Lw3iyqRvX1QlORuQ z2uM^3MVRo$OhnR60HrcNd=QhFrQQW+E3w7%KoM2)f0HD5jX|=`#}M+PH}UL@4KS+3 z3>Cc1>H5OWFq#AJkt*)w55CVJob?=&k*K+cJ$_g7j%BR~7U z;8L%Z!ewxvY~N0m1YyPPEtj#$ejV@wF9sJw=h+2a2L+!_O=6AOzH9?e)oJQ)!Q)jw&=K$x`%YJ@BfgUPss&Esbx6T4}kvUk1? 
zbiTu6F}u706P>-D^r;chx1*@ya0AMLKwe{C2S6kpAY!!J-V5pb4h;ohQEumM5o?e! z57>8%_}odC339<-q9}W0UUrAJ@f+Eg7}VzV?0|m1-Tv*n*#5lU=Z-HlZ<6QGZS@e1 z#1t-4m0yizt}VxR_|a~yMX>lzhb41#`%gd|i#{2|J8;NGLpViu%-tUMJ3M6|47IX2 ziGNP1nPmr=M=RX|8YBgBM1sw?w|OX6!X1wCx*W?IRE{&qY2xirzqTeu3F84ghR^&I zrG%13tlrD>4rp34IS8^b8c*gnaxK%yGRhjQ7RnL_ibYH@Wm_lU3R_d_vjTs)BNZ7m z@w-2dz_AeCIyI1lv@GDmx%Lmv?Jb#Exhb9o+*b#?qQe(vJWo90OL|})sVMY4Tex@x zP&=k0eon=RZjajna2=87dcB%wAz!lTS<+kn!M_as#o8f11V4+M>X!OH(N&KGmbAIpt&BTgJE zz(jO-n_)$83>PbUu0K3BwxJsh)0TyD;ygykkrg_F!ObqJ%?0B2>bQ3;z5G$^GVt_r zVh@wrl14aXt-(uQ2Z301Fbh#=Y-$G5JGzns`5 z#G9ESvL3Uq)fpW@dSN*qE9pY=E}dN^+IGD%5HljoL72)X*>xn->FtF>yxm19Fa??k zUk@EV{6ca;hf14rq|0|>ZG7e?8-Sq1zA`_@`W*=_q2+# zzil!Q66?wcS$NtN%VWIArr{L(638&W-~W_E5wPzQ&jrt%)=EwZ<_l!v)yIipXe|B1 z4!i}ah<&qHY$=9YD*RA4YP>iKNXWIVL3JtyBtYh-#t5FJ=|m)io8`COAeNC9R8|B; zC?ln%YvYb*4I=2htu_`bsx;8wo!mmj|2$Hxwoctbi-@Q zNHOJ^a6gj4hLVZ-jL3_l{^^3eZ=Ly;nNt|Jl0MzzcfW%OPY{v*%`4DyMvmn!M56op9`na)j8b+Vk}daMiu6c;jh)f&zo0VdjwcuBzs#& z*KHrkaL(2x%7Pw_J@)=klFDN{E`E~-#xl!Zt|bB=QkBE{YM0oM>^JNh=m(NwnBszEvD5Vz4lt^`^S2{ zk5yXL$-MCcASyb2&ex=h_n3N>7*BZLT%k@DN5-}zQLk|FJXkTtR4qNx@Bs$w6h;|; z5_ImbCyPn?k}($Hp?8m-wNo9DoHL+avE|RI<}7ZYaAMB!IC1Og@WkqFKNRc5z!Nbm ze?&qqWa3l``C?126o}gO?tz~1@SSko$gq@-)u6@ff#+@TUoVxIu66Y9NAyp@{NJx%c0q#L2;=lVA3>;S;*| z$@>k&@pUOVV^fGfB^V7X9aNj@(=dHHAu#ypoDZK3%cQYOy}&S=((+H28KbT0oc#Z= zX2_OA1O?@EHFYg)j2X|T$awWz5_ff^^dd58y z!8V>H7_Lg`Q6sQExY;U?*XI%Z;QIOSsh%sXG)AV&5}G{hvmQc8^oMPLvhDWPwVX$k zJ=rZNIk~5t+0-uehvd_$m*mbGwXftpjn=EZUNcXzO=%+^77w%k{j!S^S-H#q9}PxsGT|KlS~i1s3V5Qnx^Eto_q+ zNt5?FUc)|f!56GNb}rhpm~!s==W3x)1+&p)3ypDloxtVfp%$jOzjT_)mUNEAKDq3# zOvjIxk)-2KT~=$Ae$Er}eD8NN{6po(t;RBXlhHwGB6C2CIi}j%u)gpZykQ%SYfW~d zC>BL5eW;TAN+tzHjD^BOZqhVu8D>Z?XaS%5M}o}A2d0}+@u7;EOoV1-BiW)^7P2!2 zuiS`ZIg!dgBB>jASbdmpwMT7-fIvi`oc5HQ?{IgMTgxk1eBgS{l@8zAt}FXB z_=-B9A1NNPi^3lsq<3eV_CL93X-AGnl~fSQk&QUbZ(0-Qh0;t{9?XG`POD6K4D*WL zU{K>y(7I@51?8>k(~#IWl@YN+H|L~zRKgXs2|*sC$(~I4ZY{!9HrLqUX=3Ztm7lhm zHwHOla#evr4vV|MTiN=!n%d!#+)Uhl 
zY?pp|N-rhUb)vhdTla$-f|{P>>Xa+Gt>U18Fj`9~xrn(?-Wem{f!Q}QtQ=#a?c8A* z(Lownev%ZOD&&+nG%^iWUoIP`XUI60i{AH&U{}#%O#YtF$TluoqdOLHz_@y z4R!QjSkCJ};c*VhQw<_O3BB9n{q{VrC0ygNR|yaj?E?3icy6_%Um_?8_PVB}hK8PW zhUjiS*~4=im$@{&ADQ7zBqw_T?Y(%tl;?5N29vn#h7{0h@}&#`=AWO?XnDEE7dK=rOK((CbdKG81~@1wfU}^vGlgoZ9yb8+Zas! z&$TAfsMNWZZAL3M=uO3o8>DvXUQgW?>~flV#RCieDgE~^gQco9?bWU2w_>w&`|Lms zIPX5^F7P~cSQe0}J)u)>WehTkqV-7J`UFhSP6;dM-HwmnY<0V>nI0cn?>L;tqvZXr zMPGx-uGn)Szv{x;s~{1qcZ;|#e)q+*vkMsS66DUQnLTlIFs%|8DI2Ls4;lO?14EPC zWyAM7z9tRaBh8WflnI=y_efr3ifZ7&IsUis8a2;L5w>X$J~nyTgTq%q(@q#!szjiy z^eJPaDifrZowMlA@I0Me=f9j7Fl1{IH}c+0V&e`2$MbMk90O)@t#^ICfIg(3n=C$f zvM0z^v;4BfoE~I+*Fp5^Pok;)%6>Ssn|fy)Oe@rt`pwmT%9b$W9Nh|D7B z#{HZ@veOjDofl6Ul1x|N?Rulpumbp}Q-FwZqe|TDOfoZYN}3X&5;ZA$xH9$lkWd>{ z;n=Ou)BoC{`p@S5iRcuDg-orKPuiKOK_xQ2^BwxojAm#~P|PUAYZEbfAR3cyzOZ29wh6 zP@Brn>bK>pm41#F%y*TuDX(^`>pvmTND5L})6^|%))Kn6_#mF6iJ>Vb#+CcXjgy8ZIz>U`5=brxBT}^D<-YN5&GV1f+uhPqiBbcRb9H@8|9SfIaThg= znsvWh0_Bfx(h~wcd+z}e=GV*mFwyqQh%-~h8Rq20sb*ml`OD3-%Gg#~iGJpuAgm;C zk!RGD6?LHp_K%duarNtTH1g>@e$%VT#>s!<%w9KBkwgF0z?IAN65CBehVSw+;6j6ykA44Rrs`Qd<+B<_ucTx9Rl~~} zd=XtfMZZ%fVG57ehb>%!iQ0KW@iDz27=>1FS>N5j&A$W_4{~+?hXiq^1*}C%lJf%L z!5Pqf=eIaen^L@rw)pMB$k;TGBrSWR4o-qIl+Mrm{hyKf%#(VHx}U->#Bgy1A%~N2e_tydomzxED1tJ$_Cxe4*H`jZtXN&OR=PZmVwtYWKh4-Q26tJ&D zD}@jR!|fr0@t90$03?JERUW(AC&KM-hm1L?hKg;J(AISQF7-JTV0!4ZAn`AX} zOjAD3t_BR>a3m8qn>F#eQM14lMFF214Z0*E;ujG?3JrvMq(~Vw4}ocYyz#Dkj;U9) zWY4jAWd76efY;PAg-+716jCHV|Ihgg4ME?1p1+|>+sWbeWfiO)BzT2}Km>6HwWW)T z6B7Q0<3Mrw49$I)9gi)OHd5$tu_f7uEvyCSP zq4lg%DceZ)+*KE(Xi%)0wAB`G;2`QYOOmyV7GcRSt&ONei! 
z?>izW6%ji`cv~v^&mSal410YJt#gTT4wDCI^ksRc%_gopBVNqi3+PbIGXJ2bNL|$OTQCUMo)^P z;>Z!Dq%IxF2mru$J-^vpi>k_I)OvFKk$Kur38Pe%aPJM!J>T`2G{5CK+zM5Sc zeh(i(_Vn_GqR;t=wKD+;UI9;zubA_%i5(nd2In(95%P2Nt{fxW;GmfTXD@^?{~I~Y zO|&QMwZvIVUI62EAkj~Hp%s|fL32KrZrzZ*=wZ@4d+aPq%hZuP5yFnimRvwf`G5cW zJWw$QL_31XQV692z|2Uk<#r&_)Rzm1(d6vSA4H>T37-Z9nNvZZUAzvWit=u2*vzax zKVg|)hVf@%a`Q0!ax8`Ys3U9KOpNDvo_?9t;P3w#uLz;71F*5=Mu5Ir6o_k~yBnyV z9CYWmPg;%RF46#!lUv6jBf@mTM>Zl32S}A$E9-YNiIaK2Zj0@wM+ZsJt;@$y!Sj9= zA{KHBV8lqPM$Gv}K?}MMdR5IU4!;IBuS}9*BBInAB3R{ zj^3lwr-qwYhUJKSY2Y?)c%d%Gb3$Ie7frkw%C2C410VXf9bWb-cw*)y)#QHP4qb?~ z`h9CHOf~ft{@kOLf3j~lU;f{Jg--%)3lZHosEvux42XngO62<5N;L3wWi#2g|Fdy| zOEAT9Od+#e3uceF!>F@5q)9-@zP14P z;us-pEv2NcGXn_yM9{%L^hMN^5=zA7D1c-%SLjs*guFB#X|HbpQv`iotn|4PS#-Oc zSNA{`z^a=)JkDJbFxsfr9#T^1GfYe6%(HgyTHd&q1m7;$0*t4q)3yPBjn7}e^Qi>y z106>?+=fITSRPJKPnQ+cMT(plho9)N3O=-t^SzGsPSAP10L{KrGaCt@iV``s;kz}* z5<;dd>#3FFu(ak}$Lt0epk2ES6!#Ly77NCBaviS^;84gXowGs;^YBlYD8HXHSv-E^0Z!sjo zkex>azGJg#ES|gqcsp`5^Ig3-Xb?_h?cHU&inNG&*`vKKK(d@cbA6viL1#2_X=XSY zGJd<(;=HESW+1cP@@Fe8;iXgm=gq!CsQPJ<2AP>B$Y)^~98EC3VAyQ%$ObOYMX-f|_gGNw+Go6oaFE0Ajf#fE&WS?LbIoxBd9Do1#qe z$V%yL3^!Z}bh=NAv`|)y9K~a&KV<3^cH4vd?w2twA;WkJlQ!Y+s4g#5QU^W00cW^4 zLPac>=91N3h$v3`B4VmNuR1H1}Y_`d!gUnsvHWHrP{iVZq# zEn-)CKC5qy-z`W&(e}6u){XZH6P3Dq$b9wCi5j0n1oHZWqm>d-K^%Ci%6-yf*H+v< z=YaIOT^+ANt4N5CkAL28B^2>@=>ai~(qEhQ$*WI!%o`u}fQfihL9E8%iOj1xkT605 zKgpp5Q zP@f{zH=Q~PZqi87!L9fcJ~0+w!611J2JbPua9~e_!fHG0h@p+#MstTeUX_BKJ%Wr*9`j{QMaXS(~@O)G1Ghy%XsbWLXe9l9Tn(#&!hyFc&g6NWmn(cgIV$)#ekY|d-RxqW(3~YrB~R3oL|7Ff#r!Gd9-N>*PputGp>E?F48hs{ewczJ{z6FHDXLCBVXh#kee-+jj^))2nwSDFjD>nw!HDd-x6C&)0dIEoh=-g^kb5^Ifs%ZtNmG9D(g0O?q!#0 zN3vp2vdF&givuAk<$|?}FwJ@uBvL|61S++xB3RsGb0GZrHU_A-gPq~KIvnF~2>t-P z`7ZA+Oc2;xKL|vYwU7i@MuF%M-B=x#w|CYTufv;W(ypqAycBp9^b{p6=-oho`#p5| ztWdg1)ihY@>&!&`LLh7U3;{3AATi-Q(JVgza7(lTJJ+s|S)p|jN{ci}5KD~T1{yJQ zt0JN5h!ix1$4nzYi@AJ%uuI{YrDy=xLh*P1-DVS;%_?xTJuHerRf@xMUf)TU{VANyqZV|Uufh^0V;!WO}gT*GX_hPnI*55YE% 
zS!%ChJJ)IsYg4{4_x(?s)L5&dS8VBffXundvAc5a2oak5n|}0r>o`rx@)u5*91J^Z zeso`)+u@%kx_*og4t>}_2F7h)?b)Wr+dpi&u^dsX8=ndd`iOV(!otEaf49<`s7IW} z+Q12WGd2YUi|#?x;JB~?xZU--*J4&;Bx|d_U<9hz^|7ZU4s)u)d;T1y0j=;DHgnGE z=OAIl;G@4N2bG9Ed_b~g#0n;t2B|n8|FN6taK5^5aZ7(Xt@G%HQlK7tP&v!a1BarX za9D58g5u)zgd99x@1hMy+DeDhe{ItZXoV3S(D?5%X)}Fn-gs!oDaH};X7v{@hM8o~ znowglK^}8D&FNWIKjX(7)|SVQnO5;(OQX*_nRR~NJK($rUchQi{ig-8 zAQ1oI9ovPh0pcZsoQoSAI=)2YCm6j8Lj-EQTfB^0x&l;0U_l4PL+~1pad`<8dyy;W zH!^W44saM4mu%rYx3@;`^zr*ox^gVl(*S`UFa8DqdAB#Lw;x@gb?DKCPIl1S!9S%K(u+gou-a>nMjIcxiiKQW)v~43-KQOpe3VK$r+_ zaSBR^-FXnSH*p7|8T6Wjw)@X*B)F+69B+dQ$t)To{&E5YtBDDMG|8E=-NKP@Al9;@q91QX3CQx$l;t>J5%q{e zg6%GV%(8j9M+IbYwp}?IYx29&1Vvet&}kn9E>%f@X;^TrjJfW*3U5lwd*H;8)?aI7 zl-BvUj{}Uj=_mM}75jhkm8dN^DtIAstobZV0hxhE8{k`SS%0%-wC)9r18x_Ifj7}WHlQDT z%x@~*!ewKn9$Yy7222R;<{QKE8)~E3v@{9^+zD`D-;LtErUTspkzygAr4Bfl*stb7 zQ?7u{J4fMsKMV1LHnNbPx44@QYL#b~0WF;Y-CIaeF_6JO)&Sj?6t2vTcb!YmqV3q+ zurWM5k5IVheE)v9?dY5tf_MBJ*lx8uLm+>89nzy|Xe`1h8n6hMT^?)jw}5_T!Uv`n z8hG!m3`RqT_RB5)jp7wiJIB1i1$p&r5Lkq%WmaSEG&E_|o2Aj;5xPu#K&KaCm945@ zU;k()>iWSSVh+#s*0#^k&}b7j2MPT(3;%w!`st!ib^v-I*VPb`rAbyWM=0NxmtEqM zp2`bW3+c57XlM&eO7lb|kQ7ArOo*Ul9~4_yhtOFVCxCqk*f3CvO`<{Wjmu?VnNAEn zib7wROyC4Phh#ly_EZr>yTD8j7eTLBWJleEhyd^HdA@+6%K{l`h(LYudc}}3WPxcL zf;u>VEo^ofS+ZNpmdY-ekj+GJGE1|YF$7&p?ScQWN8)F*YIP_xU52QRP4 zLZeG+=a(Mp(jm3@8dM}?jTS>4N!6F#S^?p3m1z(t7`=7h`2>wEH{byeWfb&?Zh+rv zoZcDw$j)Wv-N18fc156;a$-D>0vn4!HZwk_iea_^s&rj0Q2&jwZXLuYxO zA`%ddB%XmYKMMAu@UDtPr_JaPcaSz#cdx?}XdefMRCcQrP=yz64e3xyzw>j>d_K=2o@g#HJoVH`{|&l^U&VFR0s$P<^-ay zR)Yo6B!n0{D*LU_=_OWt3yt4wB6^*+)k6@EEy{C3CAhRbCXfwZlh&kK#Rby(Ok- zGJBM&Fqu+%f+8M0ak%13O^FDY3~Bwc9we0c$o&7zdMtT4C12-FbVk9m4oi%fb05Nf#@(_&`R!7wI8fHxVMKdQYbnvWWn^=7FYnkrp%9> zV1b#|1tIcq_TfCb!DCX^5E}%Mwc3bY?}ure)rT(arlAqs4B^}r10vZ@amSwb-Q!5z zMr}=xD9)tBD_qwRzd(rDQMQZ)UkC(LCP8{7ZKQ&7Ne1Z|lQ=K%!wY&s7I!FY81^eqI zQ(Ipn#wiE(xWbhiJa65?uQ(HLnQ@w6%uTm30#dwZoAP6pP0`K5*!;Zyr^B+?t?}sIp6q zamEyFYXG52Eo!i|>B8UG)<%$qSuiV2?N79D^ 
zFTvrG39IEKqk0z=Ie8iy)%^_Guxbv5b2^jE)}R?17m{jAWS8}5w;F;nG(5KFE)0EY zUon9(N?oclS6RhxTmyAB+k`yfQS%L>{+qxh=u{v`ytoYeecwcCP!^PrjfG56G(lrd zgeMk46=uf)Ehr3aKAcC+f79-4uR-Njs<6~>_M-+V1dXhW z%7p>gL$cclXJGfIs-Ufrvl{T(4PoY!SFXV*t<_tO32sSYssMkng}xS+1e`tLG{lAN zKi5<;T`goj@sj*N^Fa$Dk92M~qO7efSlk@|yro4_djMKneopsUTkfasg7T8G^6YA-2Rl&&LvWG?VlSwJ8zfGH< z7=SRXh=qaB6n(jsj&&$FTtS;QrCd$-Fj`xX=4&XsXlM z`_DqxR8XtxM8=D|;B#(g8p|z5@GATrmgyqq>n8}^!QO)ixff~$V3O{T!vR|-flA+R zW={>8XTxu)JyX zjr^7JDU9{+Qb1jI!k|LTt3Cfxaz(&BRlpA(aq;FnfJe<*Hel1skIH9Yr5x7D;>IfB z-Z}{hAOe>@%cDy? z=jgzQ#dz$6{-EA%8(Sk31beI$`rcCIeBG=3W2wPfI5{gr`m0Gms#$Q}AP zVsvWQ`aU=j0P-3(J{w@9p@~xZGZcbGH0WQB{t(*T(JzbtkG_i3SoW10s3G;gJrIjN Oo0^KIa^{6={{I7_{TCMi literal 0 HcmV?d00001 diff --git a/examples/plots/cora_nc_aruc.csv b/examples/plots/cora_nc_aruc.csv new file mode 100644 index 0000000..f1a17ec --- /dev/null +++ b/examples/plots/cora_nc_aruc.csv @@ -0,0 +1,202 @@ +threshold,robustness,uniqueness,min_curve,accuracy +0.00000,1.000000,0.000000,0.000000,0.501247 +0.00500,1.000000,0.845000,0.845000,0.922693 +0.01000,1.000000,0.875000,0.875000,0.937656 +0.01500,1.000000,0.890000,0.890000,0.945137 +0.02000,1.000000,0.890000,0.890000,0.945137 +0.02500,1.000000,0.895000,0.895000,0.947631 +0.03000,1.000000,0.905000,0.905000,0.952618 +0.03500,1.000000,0.920000,0.920000,0.960100 +0.04000,1.000000,0.920000,0.920000,0.960100 +0.04500,1.000000,0.920000,0.920000,0.960100 +0.05000,1.000000,0.920000,0.920000,0.960100 +0.05500,1.000000,0.925000,0.925000,0.962594 +0.06000,1.000000,0.930000,0.930000,0.965087 +0.06500,1.000000,0.930000,0.930000,0.965087 +0.07000,1.000000,0.935000,0.935000,0.967581 +0.07500,1.000000,0.935000,0.935000,0.967581 +0.08000,1.000000,0.935000,0.935000,0.967581 +0.08500,1.000000,0.935000,0.935000,0.967581 +0.09000,1.000000,0.935000,0.935000,0.967581 +0.09500,1.000000,0.935000,0.935000,0.967581 +0.10000,1.000000,0.935000,0.935000,0.967581 +0.10500,1.000000,0.940000,0.940000,0.970075 +0.11000,1.000000,0.940000,0.940000,0.970075 
+0.11500,1.000000,0.945000,0.945000,0.972569 +0.12000,1.000000,0.945000,0.945000,0.972569 +0.12500,1.000000,0.945000,0.945000,0.972569 +0.13000,1.000000,0.945000,0.945000,0.972569 +0.13500,1.000000,0.945000,0.945000,0.972569 +0.14000,1.000000,0.945000,0.945000,0.972569 +0.14500,0.995025,0.945000,0.945000,0.970075 +0.15000,0.995025,0.950000,0.950000,0.972569 +0.15500,0.995025,0.955000,0.955000,0.975062 +0.16000,0.995025,0.955000,0.955000,0.975062 +0.16500,0.995025,0.955000,0.955000,0.975062 +0.17000,0.995025,0.955000,0.955000,0.975062 +0.17500,0.995025,0.955000,0.955000,0.975062 +0.18000,0.995025,0.960000,0.960000,0.977556 +0.18500,0.995025,0.965000,0.965000,0.980050 +0.19000,0.995025,0.965000,0.965000,0.980050 +0.19500,0.995025,0.970000,0.970000,0.982544 +0.20000,0.995025,0.970000,0.970000,0.982544 +0.20500,0.995025,0.970000,0.970000,0.982544 +0.21000,0.995025,0.970000,0.970000,0.982544 +0.21500,0.995025,0.970000,0.970000,0.982544 +0.22000,0.995025,0.975000,0.975000,0.985037 +0.22500,0.995025,0.975000,0.975000,0.985037 +0.23000,0.995025,0.975000,0.975000,0.985037 +0.23500,0.995025,0.975000,0.975000,0.985037 +0.24000,0.995025,0.975000,0.975000,0.985037 +0.24500,0.995025,0.975000,0.975000,0.985037 +0.25000,0.995025,0.975000,0.975000,0.985037 +0.25500,0.995025,0.975000,0.975000,0.985037 +0.26000,0.995025,0.980000,0.980000,0.987531 +0.26500,0.995025,0.980000,0.980000,0.987531 +0.27000,0.995025,0.980000,0.980000,0.987531 +0.27500,0.995025,0.980000,0.980000,0.987531 +0.28000,0.995025,0.980000,0.980000,0.987531 +0.28500,0.995025,0.980000,0.980000,0.987531 +0.29000,0.995025,0.980000,0.980000,0.987531 +0.29500,0.995025,0.980000,0.980000,0.987531 +0.30000,0.995025,0.980000,0.980000,0.987531 +0.30500,0.995025,0.980000,0.980000,0.987531 +0.31000,0.995025,0.980000,0.980000,0.987531 +0.31500,0.995025,0.980000,0.980000,0.987531 +0.32000,0.995025,0.980000,0.980000,0.987531 +0.32500,0.995025,0.980000,0.980000,0.987531 +0.33000,0.995025,0.980000,0.980000,0.987531 
+0.33500,0.995025,0.980000,0.980000,0.987531 +0.34000,0.995025,0.980000,0.980000,0.987531 +0.34500,0.995025,0.980000,0.980000,0.987531 +0.35000,0.995025,0.980000,0.980000,0.987531 +0.35500,0.995025,0.980000,0.980000,0.987531 +0.36000,0.995025,0.980000,0.980000,0.987531 +0.36500,0.995025,0.980000,0.980000,0.987531 +0.37000,0.995025,0.980000,0.980000,0.987531 +0.37500,0.995025,0.980000,0.980000,0.987531 +0.38000,0.995025,0.980000,0.980000,0.987531 +0.38500,0.995025,0.980000,0.980000,0.987531 +0.39000,0.995025,0.980000,0.980000,0.987531 +0.39500,0.995025,0.980000,0.980000,0.987531 +0.40000,0.995025,0.980000,0.980000,0.987531 +0.40500,0.995025,0.980000,0.980000,0.987531 +0.41000,0.995025,0.980000,0.980000,0.987531 +0.41500,0.990050,0.980000,0.980000,0.985037 +0.42000,0.990050,0.980000,0.980000,0.985037 +0.42500,0.990050,0.980000,0.980000,0.985037 +0.43000,0.990050,0.980000,0.980000,0.985037 +0.43500,0.990050,0.980000,0.980000,0.985037 +0.44000,0.990050,0.980000,0.980000,0.985037 +0.44500,0.990050,0.980000,0.980000,0.985037 +0.45000,0.990050,0.980000,0.980000,0.985037 +0.45500,0.990050,0.980000,0.980000,0.985037 +0.46000,0.990050,0.980000,0.980000,0.985037 +0.46500,0.990050,0.985000,0.985000,0.987531 +0.47000,0.990050,0.985000,0.985000,0.987531 +0.47500,0.990050,0.985000,0.985000,0.987531 +0.48000,0.990050,0.985000,0.985000,0.987531 +0.48500,0.990050,0.985000,0.985000,0.987531 +0.49000,0.990050,0.985000,0.985000,0.987531 +0.49500,0.990050,0.985000,0.985000,0.987531 +0.50000,0.990050,0.990000,0.990000,0.990025 +0.50500,0.990050,0.990000,0.990000,0.990025 +0.51000,0.990050,0.990000,0.990000,0.990025 +0.51500,0.990050,0.990000,0.990000,0.990025 +0.52000,0.985075,0.990000,0.985075,0.987531 +0.52500,0.985075,0.990000,0.985075,0.987531 +0.53000,0.985075,0.990000,0.985075,0.987531 +0.53500,0.985075,0.990000,0.985075,0.987531 +0.54000,0.985075,0.990000,0.985075,0.987531 +0.54500,0.985075,0.990000,0.985075,0.987531 +0.55000,0.985075,0.990000,0.985075,0.987531 
+0.55500,0.985075,0.990000,0.985075,0.987531 +0.56000,0.985075,0.990000,0.985075,0.987531 +0.56500,0.980100,0.990000,0.980100,0.985037 +0.57000,0.980100,0.990000,0.980100,0.985037 +0.57500,0.980100,0.990000,0.980100,0.985037 +0.58000,0.980100,0.990000,0.980100,0.985037 +0.58500,0.980100,0.990000,0.980100,0.985037 +0.59000,0.980100,0.990000,0.980100,0.985037 +0.59500,0.980100,0.990000,0.980100,0.985037 +0.60000,0.980100,0.990000,0.980100,0.985037 +0.60500,0.980100,0.990000,0.980100,0.985037 +0.61000,0.980100,0.990000,0.980100,0.985037 +0.61500,0.980100,0.990000,0.980100,0.985037 +0.62000,0.975124,0.990000,0.975124,0.982544 +0.62500,0.975124,0.995000,0.975124,0.985037 +0.63000,0.975124,0.995000,0.975124,0.985037 +0.63500,0.975124,0.995000,0.975124,0.985037 +0.64000,0.975124,0.995000,0.975124,0.985037 +0.64500,0.975124,0.995000,0.975124,0.985037 +0.65000,0.975124,0.995000,0.975124,0.985037 +0.65500,0.975124,0.995000,0.975124,0.985037 +0.66000,0.975124,0.995000,0.975124,0.985037 +0.66500,0.975124,0.995000,0.975124,0.985037 +0.67000,0.975124,0.995000,0.975124,0.985037 +0.67500,0.975124,0.995000,0.975124,0.985037 +0.68000,0.975124,0.995000,0.975124,0.985037 +0.68500,0.975124,0.995000,0.975124,0.985037 +0.69000,0.975124,0.995000,0.975124,0.985037 +0.69500,0.970149,0.995000,0.970149,0.982544 +0.70000,0.970149,0.995000,0.970149,0.982544 +0.70500,0.970149,0.995000,0.970149,0.982544 +0.71000,0.970149,0.995000,0.970149,0.982544 +0.71500,0.965174,0.995000,0.965174,0.980050 +0.72000,0.965174,0.995000,0.965174,0.980050 +0.72500,0.965174,0.995000,0.965174,0.980050 +0.73000,0.965174,0.995000,0.965174,0.980050 +0.73500,0.965174,0.995000,0.965174,0.980050 +0.74000,0.965174,0.995000,0.965174,0.980050 +0.74500,0.965174,0.995000,0.965174,0.980050 +0.75000,0.965174,0.995000,0.965174,0.980050 +0.75500,0.965174,0.995000,0.965174,0.980050 +0.76000,0.965174,0.995000,0.965174,0.980050 +0.76500,0.965174,0.995000,0.965174,0.980050 +0.77000,0.960199,0.995000,0.960199,0.977556 
+0.77500,0.960199,0.995000,0.960199,0.977556 +0.78000,0.955224,0.995000,0.955224,0.975062 +0.78500,0.955224,0.995000,0.955224,0.975062 +0.79000,0.950249,0.995000,0.950249,0.972569 +0.79500,0.950249,0.995000,0.950249,0.972569 +0.80000,0.950249,0.995000,0.950249,0.972569 +0.80500,0.950249,0.995000,0.950249,0.972569 +0.81000,0.945274,0.995000,0.945274,0.970075 +0.81500,0.945274,1.000000,0.945274,0.972569 +0.82000,0.940299,1.000000,0.940299,0.970075 +0.82500,0.940299,1.000000,0.940299,0.970075 +0.83000,0.940299,1.000000,0.940299,0.970075 +0.83500,0.940299,1.000000,0.940299,0.970075 +0.84000,0.935323,1.000000,0.935323,0.967581 +0.84500,0.930348,1.000000,0.930348,0.965087 +0.85000,0.930348,1.000000,0.930348,0.965087 +0.85500,0.930348,1.000000,0.930348,0.965087 +0.86000,0.930348,1.000000,0.930348,0.965087 +0.86500,0.930348,1.000000,0.930348,0.965087 +0.87000,0.930348,1.000000,0.930348,0.965087 +0.87500,0.930348,1.000000,0.930348,0.965087 +0.88000,0.930348,1.000000,0.930348,0.965087 +0.88500,0.930348,1.000000,0.930348,0.965087 +0.89000,0.930348,1.000000,0.930348,0.965087 +0.89500,0.930348,1.000000,0.930348,0.965087 +0.90000,0.910448,1.000000,0.910448,0.955112 +0.90500,0.905473,1.000000,0.905473,0.952618 +0.91000,0.905473,1.000000,0.905473,0.952618 +0.91500,0.900498,1.000000,0.900498,0.950125 +0.92000,0.895522,1.000000,0.895522,0.947631 +0.92500,0.895522,1.000000,0.895522,0.947631 +0.93000,0.895522,1.000000,0.895522,0.947631 +0.93500,0.895522,1.000000,0.895522,0.947631 +0.94000,0.895522,1.000000,0.895522,0.947631 +0.94500,0.895522,1.000000,0.895522,0.947631 +0.95000,0.875622,1.000000,0.875622,0.937656 +0.95500,0.875622,1.000000,0.875622,0.937656 +0.96000,0.875622,1.000000,0.875622,0.937656 +0.96500,0.855721,1.000000,0.855721,0.927681 +0.97000,0.855721,1.000000,0.855721,0.927681 +0.97500,0.820896,1.000000,0.820896,0.910224 +0.98000,0.776119,1.000000,0.776119,0.887781 +0.98500,0.711443,1.000000,0.711443,0.855362 +0.99000,0.616915,1.000000,0.616915,0.807980 
+0.99500,0.477612,1.000000,0.477612,0.738155 +1.00000,0.000000,1.000000,0.000000,0.498753 diff --git a/examples/plots/cora_nc_aruc.png b/examples/plots/cora_nc_aruc.png new file mode 100644 index 0000000000000000000000000000000000000000..9488254f9b34e1c2215628489f4d9785c3b55e0a GIT binary patch literal 65023 zcmd43hd0~r8wMP$R%umhbeOFgMNzwAgraJS2=|CV5y~a~jeGrH` z3j{j1dFdkX8~unD2=D_8ReJ$7@UVyazVfmI>AZrzarJ<@I=#O0*3Qe@$-`YjMC_r6 z)Pp;YQ0Nby*yyd(Ue zMa5CegP@@6S&_Cmwe*F%JN`lMQ|y;L%0f?^aI|{=kj0g@eCU8G+dx@=i0;c;OI38&wp(L-t4{R9xW`5r#s!En~7o*M48uZ zpL84077mb^01?&P{-zdqCWDn{j~M1-yHw!8nl=Ls2xNPg@fj#Kw4i{`|6sylA<;lL z4u87gEAF@Tk}#AX1zpD(gub0lNN(u`L@BE7r z?HW|)5@z#guH0LjFrcHO>)8ZDm!D0%ir|1kg) zhvos{?dkp)|Nd^8_EpN!R@wLOFOK(D`R6e36p4O$14YWoZXQ!?XNJTqS81DOQH(7az^R;%g$efHvn6AEGbBa~`Uc4M->;tPiyR^DWHsoxjIMVtxrNZ-95 zx1>)IIR^f&c`|8JCWOhgWgn}rug?e$SL6c?#9RPGZgQfcHNnR)FEsz)3krCf}0?H9Unz#Ar;!^_d+ z;~3O{O#cj zkW$|J0|5b0XmL?d9ORUsw=-g(7goNCN2AxOxsey$lpEET6OgJa}29(i9WTO2|MGO`?npp7f`S4OphfxFMc zfbK-pXlYP^N|TdsxWE+-F2UkW^z~m)2dtJE!(gx;q?5CARe?^reKZi%Cp}$BLJ6&S zZDS?3+EL?d(hLyTr3>9Dm}$yt1-ecbatbw&3TqS05`RZm*q5YzK^bh%hw0V!op&Fw)q%2 z09ShOym5+0^Gqdy#0`67xn+~t z4acrcR8Jxp43(%pg>yawUHijgVsW>kdFS&{qj|miVnREB2h%~g+fl)fOJE5#kC%Zv zhfjRMMYU4Mfk*QRdOc&f-^!4dQ9X&dK|Py&CPJ?azB%CuEr^^y%4=4cQ}g}+*o)YA z=V{V5k-^7K!>jyBeX>jJ6R)F$ALAiFLdVmZ$(FFFn1$tEg`B+cU#}hOq1@dr1tMjq zKZI;B<2Oe?>>`#B!XPOvomQa-Ch!;}F*^YZnFv6p#NMgeP0RUWoRLuR+U_eL_7W!D zn{;+|aA>z-x&?Fh23}~e4gxGSOB&FWSa-AAQ)onOZ0Zb@7HF6Wkj6KNRxK5u=x!cD!XBs3mm-2_D6zNg37V$TT{btDasI5nv832;`}vvpUoaIB>SzY~GDYZOmDUvr#QZSsFNS*T0q7bK&P6Md8+AVKC=kTr@54i zU1l5?EA&)wpL)~)V6olL_ZKuO`U=q>GA{>MO-3j0dny98Y2gEdVytUcB#cz+zBAvs zNTGA9D_dDz9Try;2{OaOA>{k1SvuJDF*}_R$6@EwAAG5JWg96bDA>~P-v$6pF%1=b z6HvkIA%PXH;KS@W(x|{E>C=}&W;>k)vet$&V=U2f8!)x!c^1n!l$p>RJ^5f(5~sj%I~ z9{FuW?S%! 
zZ>MqVWVgR5SXiTm_UJ4{?VX?1=%60p{g)7)T*F(tUVlEbV1dNDMf}R4psvd%59fl+ zd@(_ZtylLeIwAC%_p#;?!N`i%<1Py#faekAldjdh`rr$Ch!FBoHp~|fBkCgGn^;=9 zB_$_^ANa3TVje*J_eV`8PZVN%n7c2FHV&54WbW)Vc%gT}j3<^Ff3dBfT z{&ElE-1x|(`*4_fjqv_hpw#%hQ|-s(EtmZ-!8_#(M`rJGkn%1p>W?fhP8K9F^~>*S zH^Jib={1QH4#;P`(J|^Jw`;sqN`CJVN1Sd+*!MiNcHxaQ4qP73)%de{TZ63!4cutK zQL%3N2;J4P=EKum%RhJ1W;eQV+2k1 z*he0XE?M359l#r)c9%B`jod|>x86_a!E=eIBzV!AX`iv$$lDwxGf2e((U!gVweNE= z@tTc~!ZN*`yM^l$0m?NJR5r?g-0WQ2V`2@nV>Mf?DKAZLja;!?N7bEP1{1}^k_x>_$FMnT%CX6At?_f9{MW^7UIxWuUwx(nd3_2t zOGhHPj;S{hzB3vVm2*Z)vTp1rKs}JCp#azSDCWyK0;r9Hpg9zBng^*;ZZy6beOrPL z`k?F*n0z=_Hf9yrE6=BE;nm6ORt$Y@^!+U2jqBZ~dxBTXo61M0rZVdA5>@BaWZYPh z#db>umQ;3-x3#sk&$(ns7Nc}kFSJ4;Owv9MJ9CtM`sRTL=#4h7rr<{tgXOxJfK^lP zr9QWMq;bf2e~P=zG}PcRJkq#WHBj3%V8*%n<+O*H zUGZmKmr}N|?N=avF@Kflcp_pUhBZV|(V$k1Y39CF zNv_-uu&VXF6k**k6Y95y(Ird8#@oDJR|%_8#u+QY_nv%l+@;8|JOhEx)7}Stda=%T z68*GUCOB1BiNJ;UK*Mmg<<)1IFaa+?BeSYS#vC;+MstAK>LQWj&6PHqca8%hctTEe!T z0`#^Ah<00t6*XJgDd zgr)w5h{e}<7-PP{vs%w49FPvZl3g9%m`ssU0|t~HGIOhPzmnENq{6e*pjfKhq{7PW z;I^z+UMd|zdu|hYf?Qdk=-JR(-%&|#_vHyfc6DiDh$X|p>tnXCvN{y4`Y}*!du!(3 zRBrfUOHI9I{FlOFxX2V55cEcX5>=?46PhQz_2;lm0LP#VIO?XTw8ke#>G7B6cll11 zkRj3@X)`dQ<*vP=mz`{jWgrb<#Tu8(yA!Q;IKHRzjN4|Id6q zkdpHk{&@LqHp}PYSm&Hzl)?u2pdK9-Uwv#=R$}k0Z^`r-*Q9H`y7%GkX^7`Y;d8(* zL4SqimVI@o2tX+?N-26Y5(;!F2#``-=(*5`xB+lR>+|{&r)IvOqCMM`XLXQhxEe%x zOz>ipMuoH*Ry9%j7S{PTNhgc1uqHqw{h*ySUF7bjy({5Fp=H2PJ~M_@Ey6_D=^sxf zuU7I`>PYD|EOk_?=R^qQRF3`k&O-M>3vni@3C>shn%3hDSCsJxubKbQp!bd7)+wJc zOM9(@j7lcvw+n~_{BNh)Fw5E;4@SVh5wN;O&fqzga~3yedUnu{(NuDhdZvC^fyX0XHNu}cAwqW35?E0sc`|pi~3Eo9I zD?U+q6pXV4F6mhu%iDw3*AD#sfbwxz#Y(UA_9Y!B=;NTR!J(jq8DY-Io&0svQB%8y zj#|ffIo%)jfjRTd081w~N~hs=I^R?9DseBsNU27z3NfVjV_jzI*#3#hfY!&>>)0j5 znHzB9?@%E6w**9WGbQRhyngqiw<>u^fsd{$`m_rfY-Y$`eXC}}Z4jsLAH(n5^Hq$` zlWFy2$SKxLA*FbQKX7FvPM!0%capl!FAhLNWN+5e30_aL7#g#gzWV4d$Y4FM)9&<# z64TcMx?ITVp25`IN4OAnWF^MRCim2RcGSg+Op#39QjO-DlVfbv6O5Nn`#7|;G3DJa zRl>g_wm&t3F*3;tnv0g12T;s?_m$RsguPjdW60M$$XrsFCa58j?mp;Ju1EP^!Z9EQ 
zcxt9vOi)f?=%2$*atJBiQ$rK{-(}RkR*Hz!7=dwb7Wpj#?zS=2P%aoPz8`4M1~c&5 z?Um_1b28!|m^Bn2v6wW*L_zSvy;Rr~pVhDalnBm%j%I?dV9HET5c0NohTiWO+&pvCi_I7|-+_y&lyG0mKRGxgnby^ptvKUX)qWu>?T%uWI< zC)^r}_8WAcmO9q_sS=eEwc9T@{;a*;56Ng>Ni`m(Ul3C)kDkMjAQjAJdv91z9U+}> zQyqBxT}R1ohu9-+c6S@tg;be*aFPb8$B|Jp9f8U5=*pEs>m*L6@MLaveNsK%g<_8w zXK3!ntwhHZYcp!>EvNgH{WGg^j7wcv33KwB?bpab&jC8`*8Wn9>f8@^GUBRe>tRmn z@ybWz8b>Zg9rlJuBH}GhgG|Cxx_VM2bnHw@KUV7=Y4h3V{!u#=b3LZ0Iyn_-ggWZ$ zQ~C#Z`~)4V4}{9UKi22PtxgrByR#nS$W3=Lw_v|Qq!E&XMZ>d6e79-^MX;Mb7!F?^ zsCF3?t9H%R%vAjxos!_g3k)Fc<#f-$GNU_B1+tqnOt6fn?*I;eEssL_>#33Zd@-L4}>#bgX}c0I){?rb-8?nH8AcMYBo$p z6o{+hY(D1K`vY{4F&|BzdBbJZG_v}tM1F-t<4d(eKSo0SJPRxQpvK@d@Xn0j7w{ib z6SgZMW$Rat!LRgACj&QI6w>n*nno zd(wxUgTn@>o1UAZadQ*(3|xxMz59SL%rvsH;%tNg31VE#eg+tGE8YjACf0wZh(K;I ztjhroc}Y=G=co-%Q*+6X<&(#x0#eAV;!tPB% zszWC2bZZ}Y6?zE1rX^m%8`p1IgE@SES>)1m@PIPS9|bJ%lG`}qx8GN_R+^zZU;X1+ z7Z(;5QpIkvu$Tgd8ZCtaBp1W3_dBH@$45)Z$@N^gsW=>?E4R*!qW~;BrcqBt<%86l zzdr_o&&=!y%qf~eLJyGh^YgJ)cHKD}^?uv0OZ1S6*)aH{f1e6HIlBR?%4QlseB%P` zOnF(!0|@1)UyK;|FOvT;Df*V!ujU*1>12Ev(GDrPiKev9*@RI-7+3d8?#+fQExXWx zRCCVJfpP}&5$mI$p}0_EBS%6&b9*m9=ntL@k?#VxQIT8x!3S%~4^XGnPwGOK*Es2c z#z7UJgHi>f{R9OCwQHlw?Qd@6!WG385YUOGm;zxZpAWk0-%M7A1XSX5boQCX0fPb) zR7P5F-tBvCvPp{)igCISDl(B-j%RXl10~xpIou>QNhMvHhBOyH%T30Y3FuQi_lH_ zd?3fCmEPz9*s6=GXP$b-=5~Rn(`Jg}@c9IPVU4yTR3_`nLLMA7J;ooeYmVieOHxb$ zhsoYl*!pokyMO}tmwF>fojHRnf()|r*LG{kTxXWy@cxTmD1A%sUMaU@?}-{3EFdz+ zq$_V#+xS0gPNyhVoj`oM88frWy)8YmF~{F=ytmO0p-f5s zdyCurT@aGk|5faFP}!;fnXSAF?Cft#eA~E(?mA4eGPhH#Ws}2qYEY7Z6?1;9fj4wK z_Y|My?C}}c{eJCx0f4NVoF)QYh=rUek@zUl)^W+#V*jbcYitnY1d>0H++T`-uss+`+g_Z$o~6HJ<`k(ALjq-WD)Lsdjzk5H6j^ zPK<_lSC@0HQNnc(hYhgi9tskFjMMp7*}nHHl6vGXrdh<)iKp?I)W$VrYT+_WQS)bu!{~V9?&0G!e>Gb~5a9akabpkI=NXx{0cyr%ppkT?Mm^FYD$(Om8@hyzsnK7G0 znd72d;@#V8h9-PPJbd+D*2CvI4J=bqxv*kCZjXJ?Xtc&RsXZc71*f7ZS_?hfyx&=#Nn;Ybb*JT<0`0e1)@Yi8)6{gvMNx)Lpd2iq`%%HqCg&p(KdFhbW zo-7R?VYAtUzEOEMY#NN%W z+KH(H-1n&f0Qo05gyIDJz|#+cR9JvyMU+{igJ}_>V52R-K#vMwM~E;h((~i$0At?N 
z$jFGJ-8pBtbFm-kai%G0qhak|ZkJnYfz#eQ1g;dJaN6kTI>O$h+{X8NeG&=N23uBu z>oMTxbNohpxYspT9e$N#u3;(DTk3*d%K^?M8#yQt4E7O9Yj1zD3Z&n|?SVmmltF8Z z{La%7pyJKqSI@dCSLL^U4D7YV)Wyd$N@N25gIQ|9FvtgN*Sx*5s;WLb@w6Vn>PTilEDd~KJy@U}((8LFTVJ>H}Dns6lbkU!-nLv_80fXq* zpp%u^pWGzs#;K}cx>972b3WVvJgtp7bj(B zxA&-w49Qo` zjPgeHa3hD2{F$*tdGhVu5j;jG&Cy4I8XM?-W!9gVcW zqif0L?qb6YlH`5%5pVSpqgqxC6T1;_y%I0z7c<`UE2pMqTf@6gg#2pKG|^bUp)NCp z`dXYWMQ>>Ao&x&b&4v!GAiwzpDtly1b=LX#HFGp4>W25;UB_$=j#76d# z84A#K-8@wM?~PSzSYxH4OohNMfXMJHWd@eZ13szv%O9bM?7jd8<2y2Z@#4Pkj6Z5* zWaO!lQBn{b}4Uo(g&fONY|fvwN=~gY+9C<2tYw7cnxXrHykVccSk28E<#z{33^32 zNqT8PE7-F1jDVG*b$4qAgip_&{65gh&9l4m-)n>8YiSFi{($!DS-2{_qiRGh7MQrU^Q0=T9drv$;k)8l=ufYSfH_Ku9OW#h<$%!11(e=6ES9wQTz z^zNadp|7Nm_=1wThHQm@=1{4Y3zsc8fGLkV{1?u>_F3h7_x}B}utqYBWk%HSW(6(v zx%iYZu&k2D*y&ziOY6GN(a=adzwl4_-4?pj4Dw|&ZmPIurA+2|DGjTa>xu`JdrPe8 zyFB-g7-MV4CLj)Yr2DbNtj)u*g$q2QcZTx61=x)ALT${fFOn3}x2 z{TE=7({H2Lsi_C_^e&tI!Bgj{THM!d^2a7^>M?e3O9qo?1Ho&Ho*q{;9~MGxUScER zc9k*Zq!H|5YQ??%bs>F2X4o1FOY+EA#qHSh$6qIrva54hHZ_>hDc8<;-adF)z`+Pn zpbY-OJ;0UZ`Jz_vzLZoSKW14){>@)pr4Xrd5aTra*w;o>^rnDNl;S~gah9QRWBRzw z7=vnzrLa|LfBmkF%UCj}0=Bs#>Jh(-%ZDnj{HDs)$D09Zs&f1(BDHx%okuqK{HUj! 
zGlQ6!foq8i-P6$szm+e3UY%8#>79f#ay`#&G%7wb*zOk8S$QnujDJb`V^+tf!9C9|$~@R;UG&BjKu z4`mxZ9)i?GtZa;1Hnmeo_38}181&)4aZK=EDql?ST=Y0b@#Kv!hCIFbnk#VoH4(LS z58kFowx$4YIX(UpqI;Qcii3i4vEd@c$W1|{qoXU~5U;!@8)T5znhk`QL8_!2yR6N5 zpj(=3HtVg|Y(kEUNR{Z*w`+4udPR|_`1qfEsJ|?RO1v5-X!8ayyQ#&0v-L?%#WQ8f zA5@olFMvSTkmMHVVrnC!wk889s#M6Kvv((FEq!S27o=|D$vn4y;`r@Z02 zq)S4tqs#=V0&2hK7&%m0c!qVT-IRsYSw`N_QVaX8Z^2Hg6EIMX0e4Mr|EBZ69j%mL z>v!A*Jzr0_h!Xo<>fI|n0vA|(N6oi6Q-tf@Kn@-yeEx)D{xclT_8Ao$syURU{6YJi zvvP)j=WIQFjmy&1fW)&^T`CX=J0PP9^niDE`y+L7$L&(a>(PffO_i=SaUG4TPtfaQ z);`y7`eN`7$npA?gS4lxmec7+al81N{v@|Jr(soN=!wZOcqjc!-17Mh;ol3a4N3)_ zVng>mXQ&)J|XWj$V6uN81GHm_gs@Dx%y(@Ca^6OM6IrC1YO5pJ^_APF!ex2YmX{e5S!9i2@3!tHj?CiV4Dxjd~TRS&|o6z%&aUmxvY)C$`H}0gN5&V_} zigPdTPgq$gxK&TUkp2jmQuDEP520%_WCGp52ak#x{3wO>qi733FHO>_Z|XDNlnm3m zuz)|5*l&$1>_?5u4{n2@j!lyyr9BA}t0A898z(2{OeaVYh4qAn?dhFmOdnE0U9%i* zMMoz3OwIYGjPDxs#hNepH9)q1W{!*-lrhNs$NS}~tg0<>T23)sTJ-9w z0sP7=<<6a^v!J4)VZQ?ujbzj5jXt@}L^OH7!p$A25Ll;;RGbci&Bv)1%qzAeR#Kn5 zsJ(Q@M=Z~SZvQ-?!5hKlJ>P*9W-YTFbh2|_`=+wi%EAT9Yhj(%!Q55=AG>v@L-eA}rR$bA?+V>y->Rtt!XD`4nN9eZ=*auX)8wG{oH=X~C%Z^|ULOx%FE~n~htVNl z*f4vH6Q|Dw0{+noAizdD$s#((!QFW{SK20z;*J8Cnr>u#Z&YQmN#nA^?;yoBt`iO# zTN9C==D8YT>!Tx$JT5|ksb*ysX5}Hg;Zk(KBDu-{+}s~%DcSw{NV@}wms(kSd@F1r z!@Do8$N<&Fn_}@g_yFkZCgkLBr^$t~N^Jg4t4S;!)=T;E z={k4dCIU;M0{NL3^JmqKzrGCc3*Dww(WW)=>Iu%EEoIp!6mFe?Gof;b_or zh*Nqw%az8A^3f5V(a}NF$B|J&5Qp#RBftmKx8!Ik2OJ%(Cdjn(%y$hDbhBFx}x*e!! z|873I)8w}oqTSH>AQt+qvGHFbPgZep4vm$! 
z_p4>SdSHBXJ|}8rzT+O7!4S%R+9$#Beo`K%!h`Dk?yXq@TT{ETY3lA=f4WS+Ma&ne zr4b=II-Wwrib3HqnUo(?(USKUm@pW-uy?_zdK}l*<0LR~yc*@m%i?t|1Vgnk@UfZ6 z3Bt$EjhgQV=(qB^-facoPCtE%qJV%v(c$04{hhWoiVZ@ud@!cQL8 zjAq6Nq)|waxXy$>Nq?ejh?;}{Ug%`W`W()3!xdRi>qODasSlMcj`j@VDs5~a?v2M^ z4JJKAclzX9Y@VVjRwIPdLL2smoF_aWOFCwfvdlut^99>)xH0f)DgN19%j3VC2=jyk*AiwF9Yr~iiQDbh z>wGxt_FuZ+En#{#Vd#0`N9wCeO^1Q-Ry=G)AkMpQP;V`P>{L`#wEi+oaEWpR#IUB0 zPU&dvj-^Sr;~$MuK@`?GHOK@B9vx{*D^FT#GcPi=B{ zcPmXqg%-!9oQPQSjdn$6!*@^uII8ULm6N@E@mm5~vPP^7`U^V)WS%P96nfn%fb-chuU&QUqgwj1AfnTGzP zU;mwrEYQjLnn{>V8(Eo_d)I)x>h?|fxWFYTCCar*BxTFV{Yvhxakot zQPl1JI^R*^#+X3a_eh@;Lv8;d8w@9!L@*<;2R38)jBLhuQaDun8=2WcKTc>&SstUdSy$yQpeV?2pNCvHZ0e^-Jc=bSAdyl^c}0LLz4+8VcF$7zB<%M|ANPL6MO-p z`Af)6?hvv!!|py6%}Bc4G!4Tdn#M7wbjTuk^+X(`aW`0B$m76O0PiN#In4X)H$fkm zXU82!0v@h}rAJHM*iTHtR17nk6y z#&DoUT^n(Y!o&c+e+S5`$nSucx*8I%`lEa+VA8uvOys8veCavDb+D;F$iCzSDiTxIuR=0;`(kp+toq2rk*DofoTFiqVycDk#zbw#|2Lc~B+q z+cVplX8%P}wn{{}`|$1S?g3dhm>ng0uobnR44G8-_xJhyZB5n68J|T*)9afyye)E8 zXyrrJK;K3Qe_jZonDffcbl*U@0ws5`3UK^VIqCk&wdn;&uGb?+tOAM>brEUf3_V}( z0%jw(15(&)HjJ^_pHrS6hxY`~&;=~t{=~ibt8FFuG8?y?PT*7e5l-J^m_%fVbTzd2QviB~`xP5o#O%jp6 zRt|J_3;eUVk3ROOJg{%cV)CBcgEPjU>Qu6j|5j(aVRhW#?7?R zx#IkM)8dj66@xj+SGFm28+GYXYfOKYtw_HwY%%@gKP~V)N@7pnx&2Jx=TMyC_ zt)Ox9lml}T*NDEou#@8AtvK|lj_uzceYYRR>ldCfgKX(nbg3TcD#_|73Ut$@@fg+! 
zd(ova1$=pd86&+l?<&TO6?p<&DKpK49t1KI%1e7YZDb7iE+bZ+VbnJJupt3|Gdu6o z{S8*tb09GDw`OU9YwO86BQlM^v`pXLvsfFeK%Ml0&#rY7xF_RrqBmir8(Ka-li`zd z7@4l?>XMr3vZ;4f-dFWX4>IX=r_jtt%S^%k)y~D}458q>)eimAc=#W|auY_z+tK`ccAcJT*Yf^z9$mb^rHz2rQ-REri@Cn;H^mh1xezCcM?1(g#ohj_G zP%Ay_2)6+W+r>p5m_PQA)?w!edXt-10+loVcUdV=I|P(*FSz;l3f|gzjR-04nKM=NP1DB!x~s|q+7kzfzrhtI&pSO`^}psI^3&lC$o^{qNza0eQh z)Qep89@cRa8rW=Y(ma^1ACjaW3s#?f0_Wrr7N(M2NCeBC?ga=4ub%)20pj^nmA71k z%G>)a#3MRfeWek4U3TYbTuHgJumVl!gFgP)(49vWh=(1D$8=kVbKD`PB~RzM#fz=c z_Q3pb{!o+e^Cs$%FSoY~)AIs4| zeTHcU$k<7PIgyr{TG0ZLzl<(X#GeSVFw?q2Fi|d!h=-FU?x{<6+rW*w67;YgKi9Z=oU~f7!*TAE}=ls=R5tR1|#pswimjwiBlUxcbA{0p3}&Ep8QX}bob{G4bn6Tjwy+~!mAn6;J_cSuk}9`QLaViI2ubt17@10o7S zxegTNxG2#Z0-`AvBPa>hpXW^<^xHI02PpYxj4it6>=v{m-kPngg2A!p=whB}nu#;8;4Dm^#Hjvm}#!X(gr9@Td?9HtBVYq8VQ2?ZG9g z!>?mIw9W6SIICP!I}!!BZ7_T(LHYp7i^+7;wVzJ(%6P<^SX&Acrwp`qXhEQ?>ckYS z>4lW;72-@n`}EtW*8X^2ZVac8xO+xN1lQRm@7R5PzjXfR%aeJk6FtaQ3|+xEIn0n1IE-BtAK&L zv+AMszg7F7dOFg}0rO6>dh=lZpO-lJOhK%J7qA-qks01G zne$E!QE6$c-V;U%mY{fPUefqF|u%q@q=Pc!v&h{8Zkl?NS_@2ea(o&>9m}sx4o|clC8j{DX#@Cf2nG zcnNaF(AYSQ+E7I$S{u=*sjJvv3>=}TWeqc4%|y)l#8ot zVtDvPgBzw677u`_62dJn-D*^Y^N~R4{B@O3?=}Yg@`5Lt?r6)#DSs68%Icq+(101v zz)vUZ9^7oMu_JI`IJIKY3m8V%8U7o(CjG-kA?w8jhNr-IoUj?s4w@n>ssldVv-e-l zc24kfc5)JnU;9=IU(17@#|9k6yS$}7t@oiG$Ws%rp!0Y%0gtSsA--1)Jj6)2x zfoMfrO7Omsva!2X%`Mt|#|!7g4NN!;wFEvU=5k@rb#NN~myC;o^h{+hhU|03tR~gm zzh{q(ZgU1W7OgdVM*-7baeikFpw0PSq$qESwaA}-g_J3VgVixJdY>LKZbQm>*(ANJ z-&d|F+eiep=LYZOz|w#+ty5887{s`{KQ^tsz;w3kpu^{HI<6HpP=bq2i0FL(1RX}o z&wMSl7>HF~mXh}G;TUN|Fp5?Ptu0G^jfv$M=Thw#JLzG{uzO?CK1If|%?4MK{Op$o zs@v&yi}8aNa)1dirpT>M90Up_X?^UzxvR27dtZ*iNTRMcb*%jBOK)Nhl>rGD_nrZg zc(U2*;tK=j+~QA=!CoO;_tfjimE;J>Hyr2R;UGYm!EDQ`syoO!l3vkSE%}~LqeUx`Tj7--{Bpz67o+<+JK`< z)~5k0?Wn` zl4~1Q_Tui&pPTCyc-w?0{D~W$=zmCZzLe!ftA!@ejQUL3 z7lkpc?7XCk6S3qeVY%_ywW+MbE+z5{TMA+D2kPQgcE`^G&GDiCENdB-X%n=9NjkPC zesOl);2Gb4ZCN!nsuTv-ndVSnNBV@41qFBVb8^xXQxbCbCCJM(Q+NLdB`<|b^tg&z z2z*75BU^!>Rbko@?At>L|EQZ<+aDM-?^IC|_`WfkzU%MbBL*%cVb?qEsg<`F+YV%D 
z!|4UU*a*YusfK$8}hK=OZxTM??d)U{=r%7k(Y^^6C(U*Omp>0*H0RmuDZ#(a^nwc&!0bkCDPs9E%ARNb{26z4ew=r!s3`n4-@J?4tf0r# z3O>ALA=mGYhpjIBi_%a8S|-4$Hp?c>>9Cuczptx5CD^5C3`U7Qu@(~@G@{yUP`Ca0 z9$T=;X?~Y}qF*k7>bilJR@9*z*wS`K1Mr5rf${Q5w+N0pea_(TW|~^&QQLsxp zm?b2hn=-4ijhp~IQu!}^<=O_!e$M{R55P$l1#QSW&Ad_Zr3sw95Lex=J38?9l`v;F z@74`G&55MeP9(poNaOuuH(}vkeM?IYA7Jbw2{_XnWmi(hx-h6-qQfE(O9vdZu|NO! z%Kxw#v~f*kT?!B*hOPU_BNibo)YE&g#&zj9K(Z|i{8diIc+u{iG1lIRFH64})cAB5 zBQw(EgZ!fQmaiw`Ps>fqTk61z{WRyOXsLHzm;2E?8f-40HC3jPh@s;DKchI})@RXg zUiqoipF0!`SN<}D;^d7;O5uNZTSE0;2xNmDZADK*ooO|MM00@olk-^o*(U zrt@EI9A92lFAUvLZr9h+yD4flCty*4h4x`}Z&NoR1J3Cp;$Ikh#-;^H; z+fXwC-!$OrEem`V!T&R)?}qD(etY>2iuW<;^xh?VMG2b)K6OLG1S7&FZvpO0>aUd~ zy8CId_wtG~=9A4;vDVfA?J<{IbosF5C^Tkjyw;|#{Em0Hz$h;0J^xwPls;7Ae+2FP zcaC=VQv;U9Z(4s^`}U#ZRYYV@dP>*q-R|d93!Vrm2LsX|0x;|tAT_~U zmtO2FrYDI9KwE%QymgZy)x8%xyOIXGMLe3y%=q-&DH3OE8`>ijv@SKwB>1ghu*nAJ zv2e1h)~8Uv@8Z{W4Za`BO_emq-&2A9oZ*w)aaptm?FNL@d=V;H)6+A~G+SgUPeZ|O zf2HmMR3Y%G8#!X|D53&-myH0ke;%T`C+5ep`mnihl=IOMY~s}uFvCZ z1oad2DgmPM!2;%k_`<4o-qV)GEv+Kp|KRJpEK}MpovS(RE#>pt7gUrmz z2q9&Z?I>lGY(iFcR(52Ta;%6DGAfi2vPbw`uhV#U>ht^h@BMzrd7b-p-}iN0&+GYo zUQ1IkNX@J8>zIf5b#*C6KbsB@a-Dzg! 
zaWZ2B*l=2R%&UfG0FVRL~pfemro6M-+Mj7y(`E% zWVwf7q=3f-=psat>EztU3=9-rOBHP_>Y3<6eWagXSlD4S=eP5ZD@-OlQ7CAA#=i7= zY+Txk&-PTnYB=l{C+hDnfAC{7wBvASIhw2blyZN_9P4K}K~|wk29G;%RDe?E8Mu%C z-0;eNq__Jqz%D=Tc){5OuZq~fQ|47`Yi^Fcz*i&u28HTLkK|M~myYpxePJbM*RndR z%Q?3`R2CnWvAeYl8v7&RVyZDw-_{0(>uzyS?Qlsa4kXpy)6)C4>u8(oP*?+#&F=0> zDb+#GGR;%LSCo5?Bk_btNnx{WNafq7VAa8g5_a~PS)q^0hSa7nRJjG5iSILTPFCje zZ6~2%khr+edF$9< zaHvyEbcg}YR%61E*4EZSph)Y@%RuQT0KtfQ#~148mztVk9T$lcENb5M2~`t?{f<2v zdv-E;h%gkDw-<@_)%tTi$XjrUk-=9dL{mh*5ljlk=%1{Hi7o8JCYWy z@LF2W{f$4^v$%dbWu%R^aBY03t3E!l*I+|iTbmth8+e#le|%xSCzm55Aa2nPdDoDd za`0`Z+*iKo7W!h3O@FP_>0RI6(MrwaH24*_52^`CQz^@<2mbt^Cf3%Tnd*{3B}H#;!j22+s;JQ zYt0pkIkp=|LP@J+C(xyiGd8#~5gDdc8~i&`$~`AWS@)Sy>ThhWYr2o8nJP}Mr|xt& zb_f^=8)J7QA3sIhv^^}#s3R-?#$fN-;_}ev#`4cm%}nmQsG1+Gk(Xxy5=s!>l?<*G zyiN3~$XQ!4vpzl&a{U-Sc^Zul<0EE@BU9ABM>+EOGgl>WwFHFSulG*^+OAB$m&if0 z#%n_u!9jmN+)b_SS|3fv<$(@R^mZ(^h{*SBciLSl@|wzZzV)G%NC)9*xd0`iMKvj2=#jeQN>>Wp{4 zN!jOwAYpSQQ&~#$6c7Xc#;RZgu|AE8w$yF9YaZ`-UywnlSMb3F!Q`S4Y}2D~&cZci zOpM$jjf0`0;_W|^Co0X886U)^YP{nniTldNq72#l$)uyO6z8JUZu;u&W)N0b;4ye| z;pF=b-MqJ+ikEGAOv>Jz!a!qDr~F6>oqtwEMW502$!>v8d6Y-Muf5Egssadphv#H2 z2bJ!kIYK^5^rB`su2I+phaPj-AwP#xnM^y%DbIV@r?ipl*;>E|jYIrq-E(FED7+Df z3W?>cIydG0R5lv9%E|?k$X^3>+;-pWIXNt;_C71?z2)<-OM4cl7$Qh0ZCl=k;5Yp2 zy^2n?@L?>;q-59j?9$5LtoH$9mittR#l-CF-#P1rLW2leOyA9SU~r+AL0wJ9wzjZ0OdY>}j<**!a%yh0 zc?U+)hw`qrc`K+%DEh7)(!;-b#mg8Bh7h?huLwf%kE>4(5$pY)`nthEgAtb_Mz*Kn# zt|>BxF-8b~m7CI!XJse=)C-PLS$Cny>n$-7GRVMiVAvTHOaxc&EOEBCcUFRo@l6X0 zJA2R%h7N6+jNGP|dM0UdL`9G_99i@c`=WZLO6G%C& zx>#&&N~ci#W^++5bYqF8*Yif`Of^kDc^Fo%kS(Lfb1|}df7yWLv#+ryZfIcDk;W>) z7BlIPy7uVpdnc`h16%XV5QgXw^jjypo^MXN`>`Ks1SKi4kN8l{yxO{FY406US}M%7 zYXt;1X=VDgIrFUYF~)*N@zIhx|r~J8!t0J$Wu;+54>MpeW1v>^8Y;5X=r#=A z{BhRIRW`;mR5wN%zvh*x$><|36=BYNR@A5nX75Y2feQTvZ{m+uz=Zt|@bTM!GoO@L z)BRIWhv-GQ{%k{oddZawK*k6)g6nMi^8{&BxH}t}nV!M@(?iR@5#gYpcg` zw!(E&E~%lz;n1ZMFLMwIO>3Vg)EPL=wbj*lS*BX`3g^j7;zX~17fz(Ig-y+AZfj2D zfSE3>H89ibSBOF*M2H36(CS6~bR0#dRKiJ?ylT&Nf?BjLABkUI-)7n6?fl}@;Xc!P 
z4tX2rS9XC-1@hx!_{nQs`HAKblKwxLYYmCkY~lOofX6od<5 z{NiYXzs|XLW0I3g{U^R|Dx;kXp&XF_Hoy78mp7UQ2E0{ni(z0?hkHROJ2ipW%H=+E z`4~`Gx9z}#Py6WUo7WGCa-YHBpH|ClCk3?5fON*w@5=@)s5O0QYv(t>i=KkdmWxc(=NP0Hu z<{9xobVl0U<>C1FM&v&N)wa`+MRcj2S_1#aLhe`AGOXbs8hBh#a28AunZrFgCNk7z zH?$u=etahE3-H%O$y2xzJv8&G*CtZ$uY*kZPMA6kms$iUgrBpQh%$)Ym^P9C!9uGS z;yL#og#Fg51BJ&ep*2ZFgzmQLbx^fl+nDJ$vXuVj>%VlX>8O4j7R)BsM?n2^luiz` zw}s$BD6XCB1S`@EHfI;BYo;qETSTiAFu)RJ2iJ!tbC4nxf@W3q0*32q{LPHzXTYwD zn@I#&Ld17LfzbGd7w?vLvs5VF-A9;=7>Yi0F^!uck&K&Q-cnlMc-72dE(Z#aDJE#m zxMkiuC{Z}@H6y|8aZFeS-C2D92PebAPz(vdAWi6nfje^WDOrZ3gT!quPDeq&jf#eL9S<6KDsdHfW)CfFs z>ghAgHk}35ZCP+~TBn|^Psd8_R@&N>Hs4tEgNr#TX0jx>3-YE^)*N4T8q>c0(v?S_ zO&8hr7PdQs0hf5s?fl`4<*XjETPNqMINvR-&o(G8_e|>M8qif-S)CglldNKn8}u$d z`lVW2`c%%*ca74ARCA7gnYqw8B+$2iq2S55S?ap)@@9t)*MlfvpCgr&R$uT+Lr;&s;tB|;g~4Q^9S&m3bdf^QsEw7d zX!rgpI_5~QVzcaAf8(i}b8c^iT8*01E;nZJD*Gz62Gh=?oRZr%2JDu~51L%b4UPvk zw5P3E5Ua;Twu~O*i4SJ&p3l!8UXTbD+x9WQg@Rc=?^Srhu?kRl_1s>jW=ZhpI`EBF zI$=SopluYlmFUm)vGJ7D%llj#Cj>)lmXZIY{8r4qKjIa5ONtM=tE)e5|CIOg=!f7J zAe!I;Urd|026ZsP+3D%ASTXxqZvCRo#rr+x7R0>Y$&|ArO$+RIdhf;5!;$8<2i*Q1 zgUBn7{JP8AYZCa^oC_Q7HF&XVF;doDK*z?-rzW$iGaj&(+f$#iRd;9WC<%+i+IH;~ z=Cef983pnr7PSLI*FH2d*Um65_hz&%qc9IS-%xYwe;^kV+HuZgQYDfY>>D2(yjH(A z*;gaSHMqhwUM;3G^4oKfp+ms8&K?CBU+CGCQ!zRVuY#HFb^F&rzTGn6mJb2~xPo6S zL^2$d<02C`f9tQSJs?Q9{MK{f_7q6A zc8k%eM^x>ys!V_N;n0#9W=r+dO5mQa8N=l=QXySVxBX{z*ofHwScR0~%(ETBrV4SB zInn3UnmuoSrmkeM>7)nM`JP4FK;7F;J;pa;y5>anOT0%k{jb>;yxf<3>hp(Yy*tl9 z|JITw&Ng)6*6wTb)^17ntmt7rP`xJ7EXx=gRpW%^Yx zXY5*V?MT~QQVe<)Z?Wtd#A@(TBCL=A2d3t^dhC*Pk`Xf#?sDvoZh~spP)rfj&&SsC ztD+XkGjoi{)FIDr1dX?qZfa*W#;oDKSoT0yPmijWR(S7cuy%;L3r(1;PfSkjC+EOQ zRRSP0FMh2Mu}GRI!pTlb&U9Z?%XN$Oaj(_ zp}ua05YUY57=sesq1c3Gd|Z9FT6sVCl8hw#F$qVis0rVMb&elrPV4**ey)eYu^ncU z6Yd~7$(I6d{-w@IEou93&Jnajl+gJrf2MrCIXVWsn0~vSFDI_;qT$F(+o;F9in6Q0o`S6@u!M z+3L!b`<=d(^9>{`jql4H33rqYc_y~{!&aMjN0{pD%^Z8V1Utb>jnqev0QQ+otFBgl z6?B*cgj)a;42jwm1)byIpw7&;>EAv9%76o8{P`t{rw_>A*>yA;?35{ud)~ffhgcTX z*Jm)^ool%5G-wgw=i$WlFUgt2*s5Nn9 
zbz+?(eM)ENJt2RvlA9*+p>5e9b~cpl>CYRf5>xn_ zbv@P@E5uj&zsN^;JS-#q!^Tq}__fAy`OrcdG*{WV76%_otr03+FXbeNN+@<{rB46; zf%w1nD3wF$-qrVS5gPH$Y}dF@AWC4yNu@II#8?6x!SX<;(7E=tv7yleUT)~RYoFbk zFAqF-<&GhD(d@}H#*;pOgPn4DoJqv3t!%1M{<|0>bh3v-t7k)>#JI4ouI^>v!N2L@ zA&N|XI|VhU%nZ6)p51-um63@h{%8okI|LPtgCt7K-lWNa+@y|HiunohtGXyfScZIX zcOfh^M*=StXIuxx2$>b6tm4LetTnZ1TNyMT# zM{w9n#y3MLxNAct9LF9IT!tAm8uMdm{L#a7gRxZpnNx*Y)EekHNEE}yS&6EDGEPp^ zQ#mMK#f69uZI9yLYi&ivBY558Og`o!2U1VkNCurwyxFt*3H2R_u0DT#pA5dFx8GOU zCiD2NAShKLdrOsMNJiK=(I7I z_#7jBo}eHBO%pXU8I}Z-DmM$lL-H^zQK-&`gya0WOj;&!(`ybFGB{H^WUUvPzk8?# z2J$=V*NW_@@2YZNK0}F`75fi`y&Hs^e?hI$UFk}Vdqf&hcIw9&3b}PNGze!ccmMTR ziEA7-k>9Yig#ZjG=iJ}x|9+HK3XVD89@rkq9uhC;_S+G>Nu!51=>;|P5jm;3s-Vvv zIT;Of3Ri7G-Cm)Of2IiRJsHEjowEm*{o2rzi!c7*;z4F5msjAnM4O$eK)g04OBRF^iylt1b~#fwA9~@*hO-a0EH)5j zLbHRE5P@}sf8`lQl%JU{YzRY{2Oq1Ld?4pi4()I9bCxS1dPC%U~4>lTar!czH~>nWwNAD!VRfxXcKC4;fBem8*w$`kWi72c=F<|z}-|xd8D9+*W zG(}e6pEcGa^V?-T?^#`z7;ofcOF0dugiS8O0^b(tVr(-7c3j-F&_l$S>&Fonv)+PreexYCy}!5EwmU5lQsIya|76O#Ep_Wnw-0pgO=8X z>+7fZP?q${%I=pGx9UoJ!m-4X-FPMe!42A+gCmW&#p8kmJ>(NSI}~)$u9;zbW^?9n zqpB$O+rP^;cpe2}-Xtx}0|yiX13lufbKO={dg~P2j3M}IH)N4cV^78hS8})pKqfT& z9>lDKX;^a(Eql@woXaC*ZKtz`6%$V2ze*O&79m{b5n(dA!aZ0!ZNMx+7EDUp zUI3c0zZ0x|aEMh3_m)TN)@lth?*4dLmaNUN_e||wik&^&l$WBh|k2ieyFE)3b{3N#f~ZuqqA|3Gu446cE7M3pwZOX{A_P@9q;2CWzm8 zMMhACK3qI_ z_h=JL+2Jw(W`Bfd8UD;8j-;@2{K!|O$~Tz5*qB(%fD~Bhf>PE-E}Fs zmtyAu5oyudd7!~u5Zq45^pLQ)b1?C$R+b}s?w-tXluX@`oQ!NxdELd5{fmaGYX6M1 zcs$K)YI=G)VRA!c9e;;D0Sm@?;}D*~S=u${;tnUPf3jCpM=Um0%;xJd9bK4}C6|zn zERW`X8S3(zY(EWr3iXb_`3UAqQo6X}MqzU9urSfl&(i&#Z;9B0e;;miU%oH6N{*Y? 
zd?`)5`8k(^4kZ+O%szqO1+aXj(NGp=U8c6j>`>75L9Yq*V z>-e&Gw>sR&xw)M^-+OD5Ft0#$gRa{I|WO$H-IZ+fT z;Pp&Q5(x8j7SqLv(Wk^sHssv)LEEmC0p>2viHZ(f`wi$IQd`tT~&qGZXf&+b_0QL~#J@7}RP1`SeGu;r74 zenvRq6HtlowjXUurNP%OzoM|y=Xf>@MxfBmInwp$C9xezX7YY}S_vb#6&>X4I@(J0 zsg+Oh_k~r+6ZFVI4Weq7H)P*`D4Y-kAxY|( zA@tK7y}{qlX9_IcI?C*aBDPG3sP)dBdqB93?Yx;7+zTMJ2>={c2rhpQ%tLf#5#M>v z)zsE!(E3uFH(df4stJ4$;};5SY-}vRnN_rrOL_^A;udE<-JB3$ysd)fV>FO2F9fpA zjFB4`r$vU}57*ryh=$lN7#xYSGh4+fRD!v0QrqF7xnRP>-H3H+^E0_0b^Z789ea2326z9M&54Y`r?FHU=w>yz9#=jv>XGLj!T!~hu z0xZtXWF7C$aZl#Zglula+{Z&J=+|fJin5bDqLNzsD7c?BbOH*-s3LRw#Nu16)(ELw ze>a$m6*j@+3|*cF`2c~Bi8p3A6}ZcMEe^*m_0u=yJzI*XT4v(zOm|jZ_ayPs)6=v3 zGxH)XhFXCBY~`=^&Jx(z zaC+FrJ7kFW+ls&lBw^`PWEuukt9O-Ezyn`x4tz4xMLVsIDF;j4AohtEAPXsRx<*9M zt7@;Kt=w~5|EfTIrC2Pj{?n@$zf8JdCa3!M2^d7o7Jv=@XAQD0m@Ea_1feLH-EeNQY&zT0D^xht~dF9Qkq~O~a=EDQ0nML}672oRIcSh#BRL7+4n#aWQ zhs5VrCGjR1(l2fC#?jU{JH#ztAB;h|pYC?L%r?6|SiA!DmsQb1j$lgG`j^Xh#qIKt z|J{6L;@X0Xcop-&>A27R3zG^BNy`^I*Sja3_{9&IJu+(_lAbf_ZosAgZsMFwq=}Q! zbMx}u`tWGox8~FrM<$zH^)%mTW$qXSacx{xJm#4PO{J~=A6*;8V6LD;c0>ohZgD}Y zr*N-&!6)U!xA_)a)V8RHNFwnHrFMZmR!z;==uR zLI%ia50CHP8q&dq)2YJx$V0$+le63a*v?JWgn+>#{iCRx-JhLvCI@G(Net;Y=LOeX zJ{<}v=sY^q?>+q>U+gZU&G<=U9(yntzBF1f+j2t+_NZ4T#RM4biY(KOBcpMoz_GOh zymiB0zWsC}O-CcP%H+v~WQ7EKL_*uhmA~j5o*Cjx1*C43+?&%_*7TkeOOO$9g8`;ECHV;pB4&J z(`d^?QFQtnsgPGXol(8_qWK=`k46HE8gnM-EqgoVs@V8wG-5}KZnyQ`GanMaI*dNu zz$ESF^w#rvK4s#GNp0YcJ=;`oGV7E;sWSqLo*)8Ioj_S|96|CEldwphd%KUl*C4iu z@<)13rqDkXztr-;H~5gfPLqdOu{3{xy4-P%pD~>*A~Q8S^A)fBUogRz?r=?Urb$Fvl&*5piAt9a4`2eT|m&?I0Gz|h8C(7bRj4Vw1 z0Omap&#EzuQ$%Wxh{x7X(0M1CjDyq;{O@K+ z7*~Re+}y!S*92?6J0M<#Bb44Nw-&zk>jBN7WfVq`a3P(?_fDM{o()2dT+gNk0kDdMc`cU?0(k(_2kt4NSQ?6|$&6^}%Cx2h)Pju_z z>{Vc9-W}t^Z09{W3dhqs&dOZe{J#2;Oy3w4DS&*dV=%Xm07zna&Fexg%C>Ak22Cv)?SeWWas1%6gR}rNr?d=Rn2`IrU1ta_8jwUpEA^VFJoVMAm-h_ z<--VqkJ?+|Y!m0C!h(vV*B6UCiPXTVT)T#kc^2Q2C2IIxbrSa#!o83O`rRG}a^#n6 zxONY7!KB8&vg~g(8oD}>q#hASZ3QCYfbWa>YdWFD#ZJ-a+OVRwD-mx}ViJC42Rdk- 
zwd~W3P{SkfIGayqqj)2tM8@tk^FU6Kq=>XsZ70lfG!@N&TY2fudZbfFTObpW!%TMMv&ATe);m{9Jt@R3#vw2M_DN5 zVgbHrF8~7qM}3eR5Wawr3uM|4jc$&euVl-ONdP3HNoIZOuxb?l(Ha+ih-TH_VGa!E z-o+-q{0E7324I#9bPqe5@KVNDfHWJS>2GHudMbz! zL{%8*2(Pk_ZEk1wc%rOX1!Kv^8N=*hBf~tDg8-}e5d;=kmo%WCDI4ZOB4}kdC z1MI#vO-uwqJ#ln75&Wx-K<0}Kk4?8xs!l^{I3i2DyoX?kLyXBYi-X2))0-S|+?}Bo ze>qJ3&>L%)>nRS!r^$ZI-z{%e5r*Aht%?Tnll_7Bc>_7e;o;m)SfY2i98XcxOg4%m=g;cFcI_5NHd7WRP7VcI+4e z#M`$f$_^PJA&n79;b zv*N6=H|AYX06l#Dfe3dJw)0>2-dFw5on@<;#Jss*+nvdza<94pch3Uc1dqiW!yNC8 z<4gfojS$fIilr9XF>iQUMB$sh-42g-opqv{i8}bV8 z%GOhbp={=W%D`qGWRRbg#$3)b5zy4438|=%;^E(g!=n1M`8yB)=uDZ|$W0?#vv(sw z@aZsduSK+ROAiMJhcF~pP6HOY?!%~1k=O0{P2IL^s?m8=fv>PD_+5DM;>Eu<)^(1g z4so&>V4g(6lNfe`rJmD(;_LQT7vH4Ieok{H<_M8)<@>rV>dy))G~)E`LZ zSI)cdJ)W;*xV!aTfm!P)SY;>XJ;O2dJ(Lzky?lFusbl14W=F+*y?- zfLa6(z}_6E8$kBUm=ttSIr47FJ$>j$Gs!TAof;CpQqfm1RF9M1*eSFUrg9=#Iu2-` zoz8mtLi=}VqR5VG>+2IVWk{c@LX>6&c;yf{KaM%f4%Z2Lt~t4aDZ4q$#fgUL5~rbB zGuwRl`Yepz*ow)!Xn`T-6%uj_KT2e$qpkg*d2ViwP^U(;Or9x4iAhv3B);5NY@K{Z zPm)gkS-v-HBw@#~mItya>!H{{8T)V^7pw@(O8E<3@n`FkvzLJ^vy}8L*gg-J-=FL) zrbuW&f?|c@x-Sv>`G3*;y$;${fGZq+6^3U==E;ACb-)LV5%Ig6BT5-jQBl{mW~HKV zq*AK%=nH#EYbAca9HJWt>^}m%G|kiQW`ICJunF<6sHpfWka1H`)K7FM5-%J=C83(<;e?t(9!pchr;krHksU(t_sOG;Oj=#um`_;J; zy>=26WQZs-jZynb#W*Jt1bMas-!H5|lsJjh@~be!q*LPH`MWuYI09J_^mGB+8d_gQ z8BLe!SUnA~lNII}iVZ;yJ?(&F^#fl9vzCQtFos!#V+N*hgu!j3590#NmUc8i0X%%DU7kx*w3 z3(@QUDiHjZtyArYdg%gNs#PevD*h2i;&Z??u8CN@U zMf|1;`GC(sPWDl2KnNZyyfHIK{?&fQa_e9>+c(nVdWMDs{WQ{g$S#mP6E3gC@lLL+ zKu-4>pt2(&v5UIb=OFqyk`@vGK~Ljuel9VRgHM2Y?ZGi`W2{w&e1Y&rUfEZENT zJ$^CK_o3g?Ia_g^PDs^|SOpU&j)BI?C=4d{Qx>iQM)sw<-L#Xb;c8*vCLj?1;#=;8 zBizsnY4;;Dbr|>Y%6Z&aLJt+5ruAD;W3|a>fF~-{S zpzR?0r&#LD0YtUjB$k^f11yBWHhlTh(2Xb>b zl5;;z$tF3)nIv_c36-E@Wb|(~jAVV^UYn!q>GUN&CtE+nuT`xXId{IYA>$`t|AioT zG6dnF50x)qnt3mbFt-4woJbg1LQ{2njB z?)Gr<2I2waxC;8LG}xRv8XC2{E?9YFZUw>SG{rU3WZU%Y0jxz87eE?e%v%n#gE@=0 z5Fl3Yb6X;^BN?-Quu`#^owE+*#DL@hSt~P>J9%U0&Yi6m@Tz}37~@SgV>%A-B%Uo3KSOGT&!kz|c{E)h@|;UPUk# 
zr`?TZPLXJY6a|Pt4?*S(cm|+G5}{a0z|$;7aG{6=GYmjUF>)JE17FUX(%#cGJ(FCR zSo5U=M@@Yn0{s-ePJYRGvfy7$P&PgUDi$z_E)`oXUR{AfS@vP72%HYpQJ*cC^C7_r ziBWJdEp@5zd}R}iGyVP39oRGy{YSspLvjg6PItz%E8}fiusbxQv*ab3nkPDS(Z}s0+p`Ay0j~ zXGx&&au;uSWhbvN(V@wVUPGe$|HT4xJ<7~tt|Z&Z+_Hq;5UFR5Rl0XkN^1X*>WDx3 zE1}7XUxqGCldDR)+R;_2siJQi!k&n6$dFFjwwsY15{MG;ytrh_jX!ckn|av61EKCu zEe%wtH6-(Ap}in_p7MbBnYD#+8s{2l;=~AB1X`;xsbzdJhH*6x(DBGHJ+7a@@rb0n zr|CJ&HQ710OwJpH=%$*TbG1S&qJ14 z5~q@$ec&OS`1E6M!MHmtkkx`NeGv5;XupKa9Ma>!uR9(c5=I&!POk@*O}s7exX&&l z=oiSIYs8cvfBa~QsA()!t5Ej;=#!*yI z5rbPA-ntASqQeyioo1y*D;b#M7=CW;1@k7=;?mgIrBxW+ccog8mp2S1iJbNV-ICuG zS>QNZOuZ|(6gXIH-8ll)^O=`V3IjO>#oWhBc5x0nySlpmRe4M!8Elw;H!o)i%zaa+ z+5m-=D4kkts^lE*a6fzERP|Yq>6a?!SvgRAMdnZ#8L^XWUjnZGou-nWD04R@tWd~P zvY*CGcqmW65)T%LA^vao+}1&^a$s#O>DHdWiZ*1Lq@XvvGVNVF*;ZsR1Adih94r_N zihLV>bJ5`3YQRE3;MDRN(~ooq-ZiL2moi9M%jU$v#+I+~h8qmNRQmBH_72W=J>-JZPi9+Jx)smkdNj~#*~>zyhEt$5})pgwD6Kab#pfEUL0o>lse)(-2PrSkQSlrjL5b4=YH=Vwf36>>+Hd3b97uYo)}HghkrVQikwLvK{<;8a9Wyp1<;$22hh(Y zcrS3f08W;nSEs59g$-&9nkFxhb#= zth(L{l=%ZcP8ox#5)~t}=)}NT!>`70IjMHO$wx&%WRspeLBBpOpPHAa&&y|%mX)Q9 zXYujZ(t{u@iV!_)_W_iVf}%2V?iY^_9rcS8q&O+JhlLe-(gY9=0@3|Iev{JDUUsVT z5Sx_#t!ITSiB~u}#QF!**f;!=P*qeNX=NV2skJ6i z6P+3AgjL^vp9e<&<>QFS|3AX68%?qJskFmtuqu+kn&LMqveWc;qK(_s?8I*I_H z1*QWMiVfvY1z8WVmewARp-Bd_2L9xUus^^W(D1#X*0?f%C~G7>H1&aCPdP#e7zzJH z2+-EjI*iPlLd>Nery*&|qGvozphVaPNG8j12+qL5J8iYvqP4|I!X7y!CUXXqnK>9V zL`F|!zBD*Z1XRL}B7A-7oTKO2v!M??<{BMd(&;^z$$vI2DS~Osf*~zhOfJKya1Y53 zzBKHI9%07k-?@^yz$Iu*nAnp>M3t^~Kh9m8;&lNXX`#E+A(%`k43e`f~)Omx4W3aoP1P1n*ym`kBa!2cXP*Ni* zoR%IySWdt76kRPw6sGxrJvpCVPSMdeuyB6GV2OiSw!HDZB!Lv%EJy!lr-5O*29*d3 zE=@L*u;|>@hFmcV`2+Tq*IG8F`m5p3rOteU#SDZ0brwb+F~z=)7F?R478DXv(Wk{` zMg|f{TawHW$d)fQwtH}&outk-3k%}QAD-V%&IcCOK}Xs3QuCsTSVd%1SQ^l&MMZ^z z9yrG20FSW#y=jo$w~x1zbyy>o3V=WrB`PZFn->S1rQ%2z3eTz=Y{+=hFv?J|0Fban6KA$*|nJRbhp z&n+>B5oL^L{#NKKMtNgn<1ZCtJNMzHdW+`C?x;5yBnK8`oEO_U1FW^3FU-`k!hC4FX*Y@`<6akF>Am#B|>TO@{epf)fve2m^bMm%cxP_+fsY3DnWw)bA 
zPyOo0dvDGsV#Ys4>X|Uya68W{cC1WY^c&+Y_EXI_UNo2g*@>?;CevPVk`cl+@wnXD zwwuB;cb)@WAycL;+tLmmzj{Hg!4n81qre@C#EADQ+?N=8Ag;Bc?Lrsn8Ke1Jn|M2i~Zt&CeSjqQ)@8hE*sA;A|hC?7r#jHMi-OqV?!+M7|lRRKjr|XgIyH57|M0yOR*1sHsbHQ z!tsYVey$%>p!@rJs$bhJhEr_oS1+4C$3`>Dj}iA1I=hoa;KqQ8m)PnvU?1I4JVp*qY`2h^v())~0=sdKb=b1?^kRUz zqol77%@?v2xqOqVzcCO@-Q8|=$?!sPRN4=D;d#grh0R{=iaBv3FUFw^vaTk;ri4y2 zxu&l8cy@fbo%bX&XiB=fg=rS(q|g4Uc<@jesdnvLUn;uzs5y?xQ38fbWIcW?9H&Of zFPQ$*c*m%Fw6s6fY2t1BvhGPA(r)(T=kw$y!?{lT9|b-v%L2k@fPSfM3?Vjv^{2>0 zI~HAMUN)!XMtF?$S-pIdD5Zya*Ru!;+Jk56v~1vYM9JbNm^6AHp%2KSu9^dk{#SKF z?jMEiI5ncEm;jiRjVMvb9lE2a<9}?cJis84wys^SIA!Oc`T#o>cjR{uus6?-5HuE+ z7XFbW9KFq+6e8E!dQ$&M?UmlHzS3s1Y#w5K=H)11`|inKNIbpP>>0u2UaijAN)c-I z+oZH1`*njz|0t2Z?AMwN(R4jqh>_e)iEg;F=TIzRh82Q6PR@<+V<)UUxT&7_QvT@4 z;I7>+$QX9rji^_0N{2)?_9z*_fNX+AM0(qFF=uc`G#O;tJE(2vr8{S^^#U-5kzm(d z)%y|GKIW49C}pmK$so%$e0%>-2_5QC(W6CI11%I?ZlV(>q^%dB3DIU~;e?G|oE|tq zi087P6cwwL(XKY4O8yP6S%0Qiw?$b`Qz(k>F@p^IX(XQ>fp!Kp9UHR!mfgCtCDO&; zu}v9`Qp+_cX(D)1C2a5yDmBhEZm&QxQT>}5?=8Y+0g)dD*K?+>dHLKTk{33$Wk63M z6Kb8me(eSZcbHzZSrj;{Aq`;<@Q=tsLjm$D(Iu=YofONpo%Di)2-iWiIzz1=OZ6PM z5ZwH`nqn*i-*$Ja%FBl@Oek)nPy$zPdBRH&3)J1H(cC3zB(XLu^C(}vWPGPs4NR!h zo<4n$u}7&L7kc=wCh2=j9iPAMp~#aF9r))0cWRb)FUG-G3U0(i=CFZGMqH0%FNsO| zd_tA@GQ%v*?-)Vkr=uat>Zb+b0A&9gA{_>T<3df74g0*D6i29b`g6X$eV-; z1=8cT3e{k~Thglq&&?t$D~r5fn_4XD?^6|+gEbHVV$hF40iR!os|UXAqEj(53)r<^ zgS+33zGdMF{!5_m=!Z%tN%i?Y1W}2$PQXF8%&6CqF^DhyNJ-CQVQHu@U{zWzpfgFR z6C;N8nD1k|ggi;%mKIa)Cm&w{5Xb#x1&OS5MWFe~5jzG*eOc0R_tF~DuMz(b(m%HT z++I2jU9E7deXMO}U3~C|ab};XhOPhI^~gmZg=j`XLcV`5)tdY9Re(Jd7m^Pf965gf zQ#C>a`>SMoL^NndwzJKt!)?zFgF^mVWVk=+^;^QM;q$SaBUnce7&j6!ov$7zv&WD{ z{FI420$Tmmyd!fj_oFrcgN3MR!A?mmRoKJ-Cn{N+L@v?M<6HSPLW-!MHAbX?@C@Cf z12=Jy0=STaEW{s@-D|6&7!spDs;VB}_sC5s*B^+mz5`NRIgs)l#yB9(YuFc@BR0H= zmEFD%weL^@bppR6+rU*zQ2P;a0Pw($5p1~kVywYaT=B@4WSL7%-cB3x^lv8@)Sist z+$=2E$nO>UhKQcJvZFbez2Ea^>*^@$=;SVpYk|lVRekm!5_?F}erRbztwW6*Stx@2DKd>AIpu1rT@=Z)hk34Guak8*IP9KCDdOdop1q5Y(Q!~1KyW0>~gmmPN` 
zn?>bv!>u61dKQ~-EYai9*9s@Th}^Qg=7kWVp>u$*mLHUIx%yWd{w9=1=X>69!!@Bv zB_I*WI>7@2?eJBdDuu`VM@@Q7&oqg<5eGmPL3Pg^s9}u(HuOOy0g*iE34ECoFWUVoc0|q_n z%IyPE96u?iO`?RJf5jMZ4OB&VXu{_AXM!jEN4SK)+YE@C3{y_ZNnPKAuU?6Q5d;Nd zHX<^`TiN_6#70f050?}d`{S>cz`Fvyj&1vZjvh94N%hX2#cpL%7k#ku`yWBbA|M)3 zhst;oLpvCwsgKB;gTN3A@(on=Q7Eqdija`HFdj%_F)-!5g~&`tOQtx6x0|>VC@g9f z6PZ9y&-2q`Fex65&cX3QzIOFddAS5WW&rwsQJ|H&r^ zvr%B0b-}B0FZ>iB{1qL;s4OTF06_xX+@s63w5wlIuy1?l{VmAMQ-X*=uC4x~eZXQ+ z-XRk@d=!@cLa%V>p&L_dY!v}gOXniNEP1(UoJa_wjYu@mSpMQM1{ASn#DHOUEkS@m(n%O@ZGz-T_(Te}<6l{WvVRzM4!IieJN)0< z1PBInv@U$3@&ZAWqoZ$p58^+zJ^@N?kV0yzgF+Bwut$Y9!=I#cfR?rr9U90&W}%RN2< zlyTblr>_mi;H3Q_V|&|kx8*@Hm)0b?bF+YR3={S60feI&4en#eETEr*eUPS)l3WCj zi$H5@*i3RgBCel%C#qK4`7fImvbrLv(j>%26e0%um45{MNJ&8N!taW|UyB3ThF!_d zn)svG|072ObP>^G?#Y|-=#dJ-!&BNR*W`1BUJ{{pWt62=37aTNq>k#m>M8~``U zP#NZ!RQ2VwUj-;f73i8UQ)Md>9OP0*mmwzgd9a4;W1J+wC~F63E3q!B;0%O-B@RtI z84o5>5m^v>&~szQRxyMor2-hP1+@AR6{7F`b!c5RLObh_qs$!Af1)?K>a%JPd;+TC z2wgPYJ_E2HSJ7+uvC6%Nw>s6+z^|yR{EE_0HRn-9GVR#j|3}GkN9`F0@ z^^n2Z-TzxOp^PGX7Wzs98SQK5-ek*YLPH0qF3w zQ+tWOZ%-MR_9U;{Bk@-M4qJ${^$ZO|?bWK8Khv9lmOj4i4#~T(w42*EddP@fZ*1Pyk&tb;Jui_9 zRLvaw+SAhn7AsJpl~g5RFqoG61GK{;KoA2eV;5ULSKbb)>?^&NME9%IGS3XGO0AVc zd`orjodhoysT0G-`T8YRIzP)1&T4km%f|fxyj42`t0xQUO*oI;=PjsDs(GmDT?$xl z!~2Y&R|}~@(W)%URcb7QODhWb6!{iiiysbXx~;^k&OJE(@pp7y@vSP=9pA_86<@!e zgqXWsYyzOUkFFoQZNC7%s*%CsGA~Hx@p}<+aU9P$(k?_=nKB7_YFzy z2nyJe(H?F@XEdw~@UF;rZg&aGIau=~w|C$V|H$SecFvW&M)JWQe-_F2?36WwK&|<( zsRC|oy=WhD@5>lIzL!X<;b!PUJxI|_uh(};3)$7|IV(P`%zaSrv)W-@FVL2Cu*lkbxWIxyxj>e>)KW@= zXq6DQV{#TWBOh{WBgtRojIAv&g~7(?SmEEk1XLLxY(HEbvPc!_rRtzQs;sPs=8rF~ zd&gJFc0R;2c2GSdLpAn<(xqwIN^SPU6l6GJL|B4P~{Le`5k~5}fo$=URZncv& zkwM5pUf}dTT97TZdYukTDUe3EVw9YjY%)Kz*Xe| zF$y8K+UEhm`(HwDpJt99bxA*7xImPk)d#AJLXJ$r+^Zmgh+0hT80-c`BBT16_+*r6 zODGU9nN47?Vx9g1Ddy9lAB;quchu#B+NfCeWkc-pR4j$?*SC ziCD=VKmL9nzFsXPWRCs$`^Y~4Zbphis15+?-dYV_@a9^Ug}2hW7QxDt5cZPccX?pt zc5}NecU{8GrnL-MDc^>_?apX1FS>N|pl%XK_CQYf!~H+UQwvpbalL_$ceEeEow 
z00LA~R6x!JvK?`HapzpU`8;5(5KXic;yy3{6RaZBMs86XIr}4!1g48nwV$=bIt*)F z-qFjx6Joe6=>EQ6n&QV9aU;baSB#}se*KE-iDTQVMZ-RrKV&*L*y9@<2!JTl0+w6k zWj#W;MPD#|0z^=+O4%UzohCpaA9*%NLSWkTiTRO8KK&Ce#_``H$8}WT7MFc1zra+Hw%D*w%@wjNB z$vkcefW^_)(|aIG4&B~9#m&EX#&f4$%71k_H!JIWxY0JZbmA(J%wVs=aB*_BTFs>J zG5A{gQyQ)@D2{}%CoevAn&s%MsxZ$e#N9`0n8S&l26D1V8gEuz(dg>ub7{wukpwnmb8)YtXH~Zn*GF@8eg4(DCX7$-5$Vl? z^#!69|JOU?n}#PMM$#z`oniHR{y3%c;}djRr4LZM&a@gbaz~EXJUcF1a2t)z^+9vr zzWV7ot(R5dt2XF(UMe<;$ zlhQsH15Vd^(EO6cyg!Qyzw@45kh{xtwAxbymHuQF_4&aS=k2{LRG63X%m&Alg6m8) znpG@-r51TvEg2H+l)Xr06y~u&QXI+j_IaImcfSn{aow;H zve-V*t@(&I-Mv>xMDVWEbH}*?bvJ04$o0cXLMOP@Xyx9xfGxtQb`_Y)uRuTcadj(hO6C`YIdjf=hyca5vw|4j=i~hE=#@F7d+tV? zCq&OzgLfbX9*Rzwcu0cp%yQ;zKypXY=Lgjf{5Uz9cY0BaV;l9SuWKT zK=@&d_Bdl`EGo%Q3`FX0FDJmi?;li?-djzYh2MUi8z|vvE~8zDt)Nl6RClh>aZ1gj z4ar762cFRjkiKhyx21)ZR89*+{k>f?`8E3jz+0Di=K5(P*lq)(qe?_W_iLTz)rH|f z2~g4EVED;^`|K!-^q4&u8rBOPCYs!;cw{v-@@kT6-4es{uBpc5cEiAd;%0l^;Gjv@Ydg;8OFN{A!Ymn( zr-OZ9GQneEc`uiK8@k-v%&?qd1<3Xoh{F7gQ{EE){_JcE#1jWk;Ng9L%`3h2fusvi z`y_hpreMU{(h9P1JA^@h7R0htYTlkuIf7=jyx@hhKY7)s;aDYcv(K4C&^~5zYV8WF zaVcN(MVV*3yb<~R^hOzERx(`uJk2|!d(DrF86Hg4K*d2P>|w8jXF+8w~LBd8i9RB$;D==E46 zkU0V?Mn>?GiIkAZ>T*nb^%z-IQ?;L;tV!@i+6{&P4G!< zj5*)r7+rH`mc+r*JRGD$>-B(ieiTe!RW@XC>)Bv_5lKq5IWu1o+s}e%&^-bo4W0G! 
zgwOY?c&h0oti;HtKxFT_)}r~C_4m7lg@~vw)KhLhbtSZ)&G+Z|-(QD}mGJ3IV*W$z)qLVX!WoW02LSXO=Q`%6TP;xskBS%h6hMoAzYkC5cDB} zpmY%guF(Y29nyUjbWY|PLv*;?&+TqLa*pQ6c4iGBLTjxx5RKMo$ zg744PI8XUp*+-HC*#>xN<(&j>F6TNlU7xr1{{D`=Ah4VayEFYo<{$G%fta=CAH>^Q zJA^v69Y1Gq>3hT6mv{T4(&m^h3TW>{M1nI`w0tpB_er371m7kWPt_MRKIC(Pw7i!<@-x*Jz>}$>>6?zE6N`Mj9`Q2pQ*u%-CddoobNFvHC@ zW57TZI$y8`!t5wgX4hJrO(SCcd#L`B;UYe=De4!POh)@{&NY|@N#Y(}#F%1n*G)!I z=+9@UDlPV4YUuOSUe~eHE47OUM}hs^eNGz8F>(y7E=pkdwYe(Ay)p+s58sgC!#wr` zr|ooVkm&ByKc|cV`7ULQ2?3YVQi#lS=hN1r_J#97($yUXj;$k=Bk^JBa|uv$`$$xT zx}yB(W6-SyE{jIS^r3#&)Rq)izZv@mD}`hFrNhVx(W!ckgbXSqXU5G6o;h{cLk0-`*M}%FA!}w`t11aq8WujqAxK?&VEVgQ z`*dUKpgkZ~3ga>na~`r6bLz8=5pEvgC++BZ>X|wE%I_3hz3O?+v;bDKZerZX+)!Cp zgcllj&A;$@*dCj&WHh5MyImki#Wr!;D@|lE+zJ_;?=-4>Q|^I)Ef%Bm`c8$R2K9QLuvW63dW_2S&|U62B8{-I)5h4w*i*VM{8%vuS{%>)=*x#|D4WUh9w`$z z6A~n!a(L>V8>)I-k!B?>i~lQtnKt=s!lb(EL4Fcyquj!hLHIJ~EIoy|fUMfFAZ$~X zXOq8?H~PBvNS3E=(Xzf>*{5?&zch&))?8*ls1iYW&MepyKAFgl(gshel9^3Yl9tRD zGL5q7547!bob$0*T&AZENi?B{yKz{-@c@C;Zh7TyX=_lqOakyy=vi#{?ilDcmjxUXKLDn5RMEFwI$|K}CW>*0i=H)RP5tYYSg}& z8Nhqsb@P>m^c%=n?UxH%OP#gCz@NBa_PKIXI z8QB1bX@f8Y=DN^$l0Y4;iPl>o1IPQK(`lg&_ZEXOVWRTuj7OOMJ_b)Yek}10cu77> zXo~J$RA6@_zoPi+jNAh=wWgo<3L|9oln{67Y;S()=$A`-?u3#M^WA?cEJsD9U$NQaEO^f2bE#Z;@hdgCxhU z=s@V_$u%*nCuJsjw~&yG#VI(`VnNB>x3`vQ8ZYJPjyKYdiM|VGU_U-*<2Gm%^Qwih z)Cg=b(2ML+;OO9H8gBf8js6CL>%upwj;_n!KZ&@`jCN5z3)>E}{I{!mNBixEvKxEB z#^H!#cb`yl+Z|>(cjqRa*JPlidAp9WATzoiv@cf5)d>%NDR!0G8;kG6E5*}xYXqm= z`tU@gOVRgi_~UFwHO~MD?GH;=#MeNqN$l^F>+^NzP(rC0!E|0&A?MMioo67b|D?G) z8#`~~0N%OW*7sUPcHte_*{;wS+18x(CYRzqnOuDtvw_u%E_9>SfDYe#Zsqko7ZcG) zLRt73c~Hk*<`1b?rB${Z%B+pRInNH11(AY_>lA#5+To#Dhr5-w#aFuYhXp2Y50)_InN7BPG`P| zA)`@>ZI_-Rx?@)c+FMl+?b$3{^QGZDpFS6dXvE;1jgW|j-AkihpD9^|Ix$G!dPOr> zKIp5+*Hi_9<7iJ>wnGuZ=}FK1PLm&*zR-lyIx-FTc`;S}Lo{vPyYJ)-b4UluHyjHF z8>--JmQX*M0eQ~x#tXMU9%2a7!iTIYQ{|U@Ord;Z)*OK+R*M#MH$t};t7B5Jcaba= z5y6(^{xRBJ>|51-O)I=4fSC-go%g2i?Ryp%FJkz*t9d?W*v`H?-mR6$6|P)vIg($k zX%&?hJpotd&awD5f+`XLyPu88bbk3k#hhUl0-Hq 
zI6N>Nm)Mh=da%))`v-A1svnfKQeu0O-JhHmGXIV~RZF<=cL))ux34l&+Rb0g?<;<2 z_0=Y5P!=ZekYpCou|PixJ0i-aCar5&_BkV0h{hWJU#hMCA}a`#a^llO|NjOG*p-%!C=(|kuY!^( z6h6%v_v+3e8G;3Dsp2R}N~!mO*MGnOgEK_r1+rlGh=*x+04?LxldES+hx+I0DU*$O zu@$To1yd1l)y5O1Q4uEmui-xUCcCSDmCNT083HAL)WuH!0m1~R(x?A>{yzpt3wy|) zC=!`MGnu1p3Ig!{T#(y^4%N|*w~aMKy=(Z3CP3nM!67l7T-VQxQZ_2f-^0cT>==7$ zlWD1+4NSJi&YQt8zT&RyllI~7f6(+J&By47*?4VRhC+T?eA$m<8K>8*z!#ojQs?K~ zv33^$9V$jX`qP;H8v5{*x^%FM-Qg*rx{;qcFklw}&JX~XP5j?f$?1rQ!gp7EY6U|DC*TYO=4TpGJ9 z2zUx_DSDhZiR3V&#ZBn<$MG+Y^WZ>9eQ^a^?pScQwXIc8GrA-G`k{5;%LXXlP%((~ zwY-6IQhIPa?DQJgo?^f&H~awdF7guTkh{+h6V-Q`8D;=50}2RE?}d@m3S}ABND*Wi zq@hAil+rRH21(0-;%reE_itAl18!1)_|1fXlnQJVDM|ca0Dze}Qdq$%N<9h3$BV|5 zGbL~IOWV$&hW_;2sL0OFW95`c6UVw4<{Bp@gK^Yq#73SoQ~_+u=DKp~wU;f>J{(*1 zY|bX_=E<+T4MKdJQUtFhMmx)-P5R&gG9d0qxW^@2ydj+gwg54k8lE*KavXTTaeZ%Z zO#(h5V%8K^(Cz_{Kny&4XsYL%i#6Mn+|N=Rgw~FCB8!-;@OHzeM6w(KmXCrydLV2D zfSi7tWXOo)`?(#nCTnIjkg1if)ZbeQTK5bPRUtsk1=7Qep9}ZjKc;9! zp08+^us!_%;+PMel=iChq|UGWr9?wbPFP~o-?@)n1LgWBn+mZ5+5DGX?*xM#Rm85b zasu93@^3L=EooXhc2S5V(PI01g>t~HN5=6Jj?jZN)fG1*Bv0+v3e`wv4dx6DkGntB`#C}xjd+!wD3XOE%8=rtc zh0^>T1b%5=YKr7vbc+>to_iZ*srC`Mq)RL>gwm%<$)py%2R?-7+ ztNq3#V0($t?Z+!oJ98jUL^jh<%|Ixa2tHt>mRRyB13uNXd=&OuU0@P-F0SKTBko)m z+^6H#9g}ZB@E1q$rJ9kEiyA#xmSYg=ETB7Q&^MA{8-K5!-Wh+XbzxGvD~8bWrZLsp zsQWD6&d_1uc1VeP(W=Z}b%#g|3QSp*;h@ISH28yy>dqgqHiC1{HdvWnbH9!ejqQFh z-KosqT3AEwl%4FF2KE)|B5)2fY?6mdo}j=ic3CHn16;BL{#R28d+cTQ4$+gCw`n$o zfONzpdhhp>P_9+OI-?)dKV1GCoJSrL?>j^vjVJEOQxycsuz)<1PAedc-xE}}Uip#k z*KulQ1mdn>*Iz14odL(IdS5d*a4sk>DMBqctnYV1nY@$ha|&e^XiPT!)Kik$kTD{& z5;FaCZKI7?(O+rLJHp?0UJ}-GDSWldeJRaL*8SYUSm;V?4+Lh;twBGvZKr$IRuiXZ z!YX$Vz;$DyW)vWVpek*g4y)Nc?z0#zOb)G?)u&i@8SZAy{4&3L9bd}uu$j;iFv*yU zBd#B+GepDJge|_*8oj2YJrY<*%8e%d=Vs7kMLoW8$%#u<@>8&k_OIgsH19OFbk(|9 z_02pSAZZ6}m5$*FQ*q44D*>w@qwznU&oSB=b!LD{pvFjr|23tJ{?eXRQm0{*L$YE#nnzxPY zHu1R_0=0eUO`iqntF{mY={F!0gNNbEriYS|0&&g@LzWM9ZW?#TJpj?MNRcAJDPcF= z9yJ8eyn64=th!{fn%FcO!(6B7S04k_>aTYUB~ynq8WRq7PTE6aa0x*b;%blhBzI*Z 
z27(PA8+P9@^vstfxGL=4?!RL6*!mQ;0*qI4|KbqOV4dtyo&v6i3DTqbLFsp^P!;!^ zg&>Z>nVQq{t_yR3?XD;60GF*3JlVGxJ+l{db-(mhCN*m1SjaYOHofC{%$7q_^%b+-kA6zYv`(62 z5n2I3fEn7%K2nl&c4rk8Ohl7^#VSa~MVvFVLUJ3s${fvU)XBq;?nFdNmzAr##U`1{ zTB!&nCP;w!ARn!7<(wBHXm&|7EIVV4lTVGK2l^x1yYer4?&rP|I<9-bbFZ+#;h~?Y z)n;A`oV7c=uWhqxBJinCCfyCY10?%+pX@h8U{MGwD07S(=}6OL2x6Ap(&w1MagE?x zzmk`!*C(59P+&*6pw4cQ8SflZm3J)H1j+kKD%IUHF6P(vkM5e~4#q{$v?|cqPft(d zU%BQcs7nSSj%ANM0BguA^0e@9nAx5Zwz?x&AjDwIcIu*?4gc}9W~EEqB0IjA zuGuCNsvvyJ=QXdrHC>PFLOE7Ov*(jxeB`W`{>r0Cpc|y0-BpbiU~kTK z*ryHaP_euzPw>KoUK<(f=|v09gNG*e4O9wNaU$clowabL_a75z0I<+h(3h}R=jt-N z!Oup$7cRlhOjAI-!)+_vZDGR~b79>1lfp|q<xI3X*UuS5!<gB z?ugT+6nT;CZZLgclOw6^G%MrUEk|55@z`IU?j2-)exy|WYPp_kOuFaWr>c{SG55=|3_(qRq9iwb7K9*nLy zI8dsebkCq^Ur)W-=E#0k+B7KC92XM@2w{C2Yo9_P-5Q<=v{4j7kRr^`*X40! zaSRiZSSa(KNh>9|_F$9-W;EiY+wz{i-NiQvj)wbZZv+b51q#=Epw!+zKW9~SV}isi zhihD6!gzpxKIoJ+1V%MDdtWI7B&ZeG3yX(5()*jKh~3nN`mCf;QN!aDBdql9(nBO@ zN(CW|++qY$!aP%XV*n!_Of;3de7bchV^3$?+RQzh*a5nGksB$fP9P^{^^FNQ(O-}X z-P=*k#iQU`@anAmK<@dqLI)&NPUKK#$Tf6+vF%!$)>dK@_d~Pt$EhSEO(9t7?4h*U zR3(xs4Gl{Od8^JXuQj^fc0o=+pE+q+smT2wDmnNjK~`}~++?zRLMQ2*u(i{?{mx2D z^q^`e2kFB+59sRkMbdK|8qjrNHsze@6(h&doTNfxqS$3xZU?(f|9EU}t|;>Mu<+4_zQdvvkT z_s2dAPY*K{Dhq`%WsE)Y#$7LDfCLh%+N{!(NBm*qZ12B~xepiH-819}F-OKc7FcND z2dJ+s0)E(^3ts_ojtfWsn;Ql1BivpyYE{+yC|HE}na~_ZeWw4z>!n?%ahN$ALJ4Ci zQ~rdG)%AS#<5`M336dVa+%E>->0V;_x1N#{CmrEIf7WY`411HpY8+KRXq!1NfLWrjv()eJ@he|=3pa0CsaDxd-a4sa&Wy<=0 z048hxnr9f#bvD$WghD@su}&Rez>~DIg`nlHm|KGuerjz2rI@Zkn3+V_m;@vxG6Htq z{zKz#aMe9DkB$bi9Ii3S=lmg=Ito8^pVDRE=l|D)K6I9dJb_WniR<^<2ajK_cg_3@WZ`v7x^o zsz04yPaSN`1gk**>z)0>SvW5U;od*`el0N-5Y=Grn?mLDT${zVgCe21N|EPOQNOnq z`y2vW1vlB_fiSkWo*psuQkBK9eZ9E9qSph3YY1dgEdBU`WE?tpOJpT&px6u-w$k<9 zj%qXTQ8o}7a|faQLO#K{qY7wbEAaicA(s#K_f(UwJZ=Gr7M*hyQh*=_S-YWQz9r`L^GM|0XMRv;NpEQGQ(nwmw5u~Zrd zv$P20ft*M9LEo2%D9xn?r-}@^Er3N^BB!G`&8)K3v3!@$T;JIGj~Ft zW3J~$^;V$R`2L*l3U4U0J_gCKQfzaB|2zw`shxA3Zw2nOJ7iM+N-t>BTp>7bzI{9C z4T_`^5Q@;Gv-9aiET>7nf&r6Wn 
zJr-J>4g%hGKb-aSkXB=+67_wRMkU`&h*}tI$&=+G)yBigAr;+Q5fT!nne}h09eh*u zGa1xNHier?uH+J`2@?HA-anI{zRy@4x%+`iB=t%(AGP%E+r@PxevjlBs4(h+gZ5x0 z>6}=dQVZ>)4m_wgtPipxcy!xvcdFoMzg(K$!jw4`+*GQKPwzapcJX-{X*;wvw$1OI zc$*G)I`(8^8&+q&Hp9Xv-UeoAP^^Sf#rzai4&%C2R3COaqvd=ls08dEqT%D3^{s_5 zA%Vpe)X0#SUm9NcH3cBivmV(S*l3-CG$S)Llx)8$x!sfT%D`85x3SX_8vdgozcz)O zMvF`i*n5#$lv{E4+m#*n=`9^A{p2g?*l;8)D|qk#prW~1{Wmv-J?yyN3fc6XF9JU` z?^Hr8v&Jb?B$CL}YhHeSpk#h4!%_Mq(^Gurh9?q0hD@nbuZ=RX>XO1XW6N+mm(ISP zyU?@90~Iq^o1DWgnc-Ui=sdz)$SJdVm3=egw#l-!HS0L0?zK@8sx7e%J-t2k&<#?2 ziiV`jnrIQ}LNvT!&z(DW`o?^=)sX}b=ZfuM<|e@UR6zyM3S+D#e>;F5SkPoK29-hI zx&EQ-)U6yc{HcpPJPLJj@F+$hTfBdmel1dv4Ct0ttb2y2L6X7+KAx*u)#@$6RKgs; zuC>F`hIACAZ+hi@`PW`(;ah>Z{IFopYNl#=G&9-L-QB$le4=z&?P0AAcTiKx3E<29)vEQhd&9<50+j8-Ge>e_8ed>xYX=Ns`)Q^uqN95 z-U#tQ)l-(dG50zruW;>`1w?m%ftKd#P+d<`q4thQpBFgLgHfw zplt`KLG{MO`z(3xG<1(1O;qTnZq5ej$9leg(C`mLlv;X_pGRA(&S# z=#?S3fbI-4Jnj!I+6NJ0-RmkQlY!FcL&uLCMKjCR6(OoeG8&FP*2kyn<=;!oOx_{j zew&;m$17U2HNC5`#K{bcMHs|$5(s_Bo3?g<|7?L((zrx@LGkU#jp<74d8^H(Z|lzS z#)r`g_p4`0bQ)7>z8DnYUQ!W1ZVIN5TMTNuh~A{N=)B}O@)RJ6KGqu`dA3o^X?rNs zq@-Ox0yQp*J<6k1xVNui)}X8Gh=T9M_@Jg3Z0+%a)LLer{}SSG0UCi95r%Ag59;ZM#okMb2p%Y1`2sgeajjn& zR2-dvh1#sGkc(LAlP7Q|(Y|g7ci*$%kU21k-{#J4QHhh%d!-Sqto{9XVBrlURK(aL zTvs&CB+SnR4$Xj{qH9K$`c_=|Ev!{-O{~v_(f8PHXs}0wp=nP(IEId-{h>L&QahrB zk#tg0Xn4smRbfc4ME?18i>j~cfsI)BIQPjPJ%FX&=D+WCsg85%0k+Ex{Y5DLmt(sm z-RkniVJB?FSSXi8jw22pu4)-$Rx@}1!t^8hP57?=n%pImj^xlU9g>--)?mk}afBAC zf+922bSu8lJ+AXC`q33LeeRP<)E#xz%hRwMxVxd?!IlGzFw+JgHL|5<;rk8fNNd|C z96-h{{sHC-A~52I?$f0R(2r;9mj%Bls?lFjTe)Y}v`JdsO`%W~MEjs~mic*MN$eTbdb)ItJtrzuUv|a^oL@Ulk(MiZ zv5Of|Xo)np9_%h<0!5KevL5E}XuzA(f_}!pv5k9*@#@@9Pl^a8GFJ1yU>F)$8H%vc zPfxmbI6J9dtNC!8uaLV;X%>@J1MFSpI8O<7NkO#&o>Q&Zm*(sY7Jr`Lovxvy?R6>Mw934NltAy_ymIps7w|A8$Xn9Q=z0<_(+;uY<+ZLh1dR;IYXLePozLM;nsm;YA<)UwD^90@_|^J+r*v5 zPgk=xj3w)ejmaOq{^oLNes!8kc{BnUySs3&G2|)vsuN!V1>%-pQPo|a-50Vmf^Pe_8{vpWj&Wn3Mxz> zAr2K5x|cS`Bz*tyF3~BBIGe`X$?tXUqd#O{W+%E$((B{X1$ft*G%aI8zozYJKXfQ< 
z{kSK$eS{F@es$Ic`wjbSm_4@y+`1p~+z?IlyR3F1yh~PJvTT)^+fhkR z);B$1pZ&S}VY3wUxbgN>P2j0U@j6!Y#P<$TO!v-Woc;idu(b=%(8U*Jdv8pO4@|`B zTA|IXcMVDkxYB5elSlRcArob2Kl@zaJ>v76Dtwdjh)dGtYORab$hFKpdCXOcq*SXm41TO z;uSnX0^g4b++(H-5QvqntBb=`V1v0crR3T?G7|Km1rmbsD3M;oCMv#8y=X@CHL$xA z!pn=sKh)Wu)-iV>s~e7I^vbEegR{ii;YAUP*!8vR&xM|i-+3c?FodRAsFqQyPY9i- zgrEH?WF^uotW)YjS$89K7x+h zBDIsxY~12Wfegk)OPbRxiR)()FN5ci9!lF8@gM5_8Zn}6(p+H-aUp3Y>8e&z3qmB+ zK#_3c1Nd%aguv;_sh@4a^+$AOKz>b66$I<}e&ZSIG*<1mpSU})cQvG+j<5QKATl@+ zon8;xJ_$o#3Sr$y;PM|2CE#w-tfe>*TBVpYgX+a5KZsGZlg@%nE?T?5D(a0%V2lUP z=EN%fi$`7RN`gPpJchBrD&>TTpwGnjQ#}-((v|6r9@ZeJxY1cQcjwX>yb!MVUy?RO z2Cx%CdX0iq`WkJuOxC)7^Thd`r5O#4s|3#h7TUt)JNnW1#;I_6zI85iAuLK{1m*k% zdU|@1rS!GE=3k5uH&xj6rQta?CBr1 zjbk3Sc@Nia=&}crTfLqxrQur1?A4U~5F=#y-I|b@A_lFF1SYMz9p6QATH|khNoK}z z#@+R!GMM%ps`2*5Zw&JZmqh|s)o*06-=056yVUKGvlqagSyqmFj(EC$y$SfpS9dn; zm3nOO?1v=3qyE*d)5^4gxLvJ9j#jZuUfOVdVe6h21LuA-ytXpCDy9qD6Os|`La15y z^ByA9pvvMhm92VorPU#Gj*^o|7Suz*mc4oL^JM9rGU!jm`~@w+T0KF2Mwr%4?X{VQ zX%1J+EVzrNfn++ovx)GF5>UuRg427akX~;ukaoAj6c|r;-JO4spm$wgT8W?s|{ zdT%FKiNrW+s$H0=!v2y}7$*(H>s*1A_kFdT#E>@oslGGG)rK=X8I*WLTw{{#b-g}H ziACqhYn#glPxg(n+k1DnR%~Ocy~n*jK58%MRykaQnd3V!xzEpLZEV0{ zR78++10w!{wqQ=3TbU8=sRoBl#rocLla+!7=Azk6tjSM=?z?;j?ND5;#8#4^v=XPs zdUjqfW6BdWVmAI6ghpbxa6T(u>o|)8TBn!nW;xtr=^OHOLLVdA2oCyX2xJ5QnjAoS z!6#_M&GC$EO}pdi6q=%1czT*X!=mF$hf28C8{?W05!UO*%${G?GkK4gXJp+;d(xp& z7vm9@oNmKsCcQ=4cl)w$B%-F8D}wem+SS2NeP8g{83Byu8>qjwd3lA6vP|AheVtor z^gLIEgElf{Vg*jDiE9$H$Kl?$EXLI^?5pk&$Ugb=hEm>oR&a0BC{d^$57&bs8Tv{^ z;@#eO;?-n+KE5tTR%|p`<8HHfY34)uU5TYyB0SwI0pGa73fI$1v{^-Q)PPA6ZmG-07xQwiJ3F)t1j6&}{!QN3 zr9bj3a2MDxQvaa)hX-7B3eElzN<4>`MPbOkWKg(WE^3|-`d}b{zDp+a*e-@SFIy}L zxBFiD;9q(wnbQ8iE7anU3_F3;C8CbhI-xPodJR;^A0LDd*HIro-Bo+SA=4q$$Q`!3 zECj3_e?PvH0L5_u|4nYL3z@=KPB4hjo^OrOxnNi!K=H@Cs!$!h^ck*?)vY3D+soMU z)w*Ybk`@MscYA$_NaxIwiTBt8hNcd7UQw3CSzed@qptba_QSeOnI$VrqJ4}z_(O$G zk_wFEVl{$C6a1WrXa4Q@!*&TZ{DrdV=`|GDBqewKQ;pkz z9VlVR14A#1hd!w8wtUc=W>-#on@fq-u&h0O2i~7uEQ2q=$5RNX_u}tO6qZD<0YIe! 
zTj;lNj7}3Qf9VBi@N=$lO#-zpYE0gjlP|_lyM`ZNMIY@@MHIP_T*&7%Y8`M|2@RIo zUj%YFnU40Cq&a0U*`X;3jmc0y^=(i8I2bJ3jllSNDc=RH@Ykwzgq=`h3Deo$zOA7H zvT86-((il|e`O`*JA`*%v+{stxnCowi;NJL4sps~#ZN~vF?GUGWx^pI9kg5CRRhiYlU5OIe z_Jl{%MY(=ZveOG$1ZNx9RL}2df=saMwGPh;m|t8+@c3j~%h&;Y!qxT?w-sC{UD4Pd zTy>XE@azY+l;RdzyCcO;(^`P`Fo-k6frROO+YuK)$afr(AWOi|zLkp>;GzI#iH#P! z|2n@o6Zl2kR9pP|6sWAD;BJor6^Faj6X?}CbM;cKy<@MxS&y~s+TUSBohbr`f=70> z1&B~62|E6Yzki0kTh$6(?vS5UP;Jsqf1aOs(@43WG7A((v538fBrz;{6=0|sAp4`B ze~m`Oe^(xpM?>aj7eJLwYk2v9leZqK4?MxA+gc5}AC~Ne_n-njUMt+Fx7}4#zN6oj z-k-f*fqHh>^w2-rFj?-)9x~}R3gghn2qTLe;Z73KM& z_!2QKX9Yv(B-ka_(J94zw%Aoh$YCvQFSF?vKv2Yn)u^8Y=y$r=TI~Pxl>n3f6-Ff^ zDw$C3vdHW-&<0@!eIXKN{8ga$IBKsOtqzze9(0R@0P_t-Of`a-#_F~|C0DKFnt2x? z@EE6A-T#&d_L$%)=C#|%F1@)mnA*^W@&suJurgbOYklrISY+Og2M;7(2zWT^^1zii zgtw!*{)wFa&%zScTw>cR2~oESP71NOxaV91oau*qxjm9H4OH?=*Foqw11%~XHkWP{ zA*)#t{rn9s4c0Cf1Js#up`jd*P$A~Nu>ZRC#G=V!2*RU0CSG|z-GDUx#|G_X>Z%`x z?ZiXoua(=jI?*+q5=R>mV2dqu1ih>M^be@6aKD43o5|QZb?I5>z1+e0{%y8B?`&)x z^VHDf!6kG3w(&Syw1)xj`su#$p}DJX8`!@~$*yd>4L`k6`PBa%wOyN;x#U#AvP2}k zdO+r#p{H?eW`@rf+FKo?mp{5H)q5U#5VyjtY?RO(z*&zKSa_HxzL$aS=BLbmVe9XIO!rt%u;qDbquU(@^%WIkA#V*EXE4@JXB2qf-Im?}Z6m7I?rm zHESzhXjnRk$Z1h11v&U3zwEU(MwylBL^pH*-e1F??av#u{gbYjmJ&w!dm&Htq(1cK zm$p~I6W4yybx^k9mM7 zY&$$N;tbfud^$73Ysz-FHjn^3wY`4$_uB|&NxOqma$o&ovIgOLhj>0ANNHm<_BJ$)(+5V4`I3%`E=D$Nfb9|+(ZgbAqu$8Cscr$D8> zzK+z0Q`nhc$ijH z0Dfozkfq3W1N4U(kZO#Z>CRrCLh8~Px#xYkT$#)7m~|oL&ES&K?+@YgF`|%OL#xKi zbMVM-wa3Y{+EAs1n|K>b#N#FC_UngDU8X`>W@apS-=e^wJ@C6$7bYM|!^F@rn%_T0 zM(Xq5Ffy4D<>|k)9O{Q8;h)DF^^E8L`|rvupw1&+x(pcKGTa_= z?bX3fkrG?UrwP^os$S@KVf|X)4)V!X1VBfG2VL*(nPOjSbAwCk)@$wT+rL$TAixTZ zj;3E3M`Ri}YYKy-7fdqaQ&wt#M?Y-9l}9fE-E%?$?Pvu(wt zk)rJg27r5_9IVj|?Rw_%6CRZj@NJq!V019ewjkbX9RdgLP#}fEdd+2D2(F0J10OW+ z_#p-KGUHX4$kcIAfgcQFG?HX+@RJ0)Iy)bf!X`E8$p&J!olQ4rVdf9)+nfd97Zllq z340BIMQ{Jwp!&=M`>Glp)OPs6`^DeR0$tQ6Ut0?YT3k?!v~r)<7ol z7UZ#mhX&#P<}TKQOw*xi9#Gb(Y;5L$t%d$T2L8>|7e{2cF18;XVZ3yg^;`o9+w5jX 
zRF+Oo#Gc4Ez>Wpra1?~+Rggi%<~$^hJOz{UMPyDv_Bf0?+sG6iP?0+!PtJvIpirac z#Dix{6-I3UY>J))lo>zh1yIYp?RJk|bUDbKijW1b+|ZR61X3yVEEmCv82c&jU z3xlbW;WF>dVv9*okQWYng95pK=Xk*Z$Y~8dO%XHZP9I=-(t~@TQUIUQu(a2%YkLDh zTIXg*X(I?l54qLUhtcdI1WU?@H>*yAMiLl62Isj#uZ}8(5xf;~RIk6^p(vYuk=M)WJhg+_lUc>FUGAj{=?ovrB zSf@+r>3l)1XbLvATW14j6lu8Sc-Hu$nR9H35w|X4Xyo6fW=nN_nJCzH=>skAjq^YD zdJP<3E^Y%0Zn$_6uJPx@>igI6N%h@=?GmQsv}2V-TB{%anTKZ2IWiHi^sbP?!~lV-C56x5`)?)2^)PAl(sqg7@_9XE$9hik2AU*F#P z5IifoVaf4SfzPP&()W2Q=WyWQWh-r2zc#3=f&5~&VgzgkQMPmnB^T0|tzsU_6%}$F zL8pbd1||cg)M=Ce2^aX9C+=fHpf>a(NJ$FY_N;^(x8?KmwBzl|U9bjPTt9OAE8>0M zg*RP2b+gL7q_rtt+PfNv){rs)Q6yL>f~`(WY4V(TwHJiBtCu z`fWz(gCN+~4QC0o+6N?_LjgE$zAxPHZU2JH0+x1k{_nY+E#>79)2OV+v@JjB&zbP7 zQN&l9@#-lSM3z5|OqWaVDJ18(gAeMzfK_R30@g3tE-C7rirG_pv3=DNe>PKl(U+8Y z=`O8|)jr6KbBkJF<7Eq+`ctv@)In%^Jy|f7&gY{)J9sfr((00HVQ!%PnP0nLS|1gv zCdQrbLZ+YqQT@fye(sW~XYKKvdX*!r;XOH1h(Y=%reC-ej=$?NseSSCl4M zPJKNH3v8a$gUF~Ad`NakVTk2TglGX34kETAgc|oUiml||)f={XZp)AS*QfPEOm8i} z!OYo*C5}uA*P+^89J>2(&v^?l9IDuADz+lmz|#l!ch+X(wsmbe$(vCycB(BpXLD}Q_;ojz{M&-yRgjGr#)WIsxQv|EPC)=$8+1vWn z;wIKrzJ8EWqz;*~dIy{iA8mwZ=r{}&M!=<}P4XFfLvx^${2@iogmGXa^+yvdMe^a13N#W?;4ZuH#7Fx?`voTmJur{XlR9}Y z7$i26^{kXCZF{qW^!651;VXGXkdH9B2C{xRc62y<4^ z1lczGo9zjO>nRNCE-6m9zz(~helMbIi6`3t z>^JLN<>QBF8hgVjJOHk|Pvp^T2cxIgGPCuHGJe-kVng}X2DvKRC|dZEjK(ve@u=oq zhcg~+*sxWj7F`bg#*1Hve7d{2p_WEJtAL_r21)@=EWZm#u>@)$@Md%#9u95XwQEnR z>_L6j&pZfYwnyT*hxB4jSyje^U+~4M9gaWCoQ$j7*90~eM_Hy)i9@V+5sg36jTCQe z%=HVdK=)YYAR&Xv8Q%GW%Scectjt9KH2b7K=##0u6M|y5Y?glK#Xo0S0%WDA~SYN1XMdO7)N!0>| z>mqw`Xf5V<4Zt4K0#1U7_eTjEg}tDSoA7vmID>d}Ueh{2Hl5QsChlMZ?}t}TqaJJ1 zz?}dg@tgS2goFe=kmj@yqRBVyW=CthQgauN++;6Q4D zV1!n<`ZkEQrTMf|KPW5_MGPpF>Obzf`e>h&rk2`Fnm0}rsDbMe>xZTou(fv1hpdfQ zr0tR`B~l2uaU~{2;-o)+_y5Q5WDMSbNfsJ#x0{Hv5lO`u zZO?8Iy~bp3WI0@3GW-;zfs@lBjFAQo{=+@+iB)*baJZQImkfipHK3umFf0NX=W`S- ze1>B^@X1?Xv9Kz5#(rzJx@>ut5Uq-bCyWu3aJOC?iyfUluR$mGDzm|-5^-tttFWV=JwL&BYGvL94(m9lPDLDIK8M_d7hCHDs9Nu% z$h31Woj3FtyArYCE)n@-3cAB(MxY8wU?%!ij~rff#t&{mYluUrozkhYi3m9NKqzT` 
z7?EmC5ujM5&m!_epGRrAm&(BBVaqCD1Y*xS50Pvo5g;rA23J5Sllc3k6sN()BTPg> zc^*P%0=Tu(ovvKN&3~ilMw|HWOaN1jZ9Q(pcrP*6gKk7JA#V^ z0l)@W6#%P5JgX`o(}U;~>-P((|EIYt|A#vN<3sVN*=^e*9SXbk#YBf8a+T;%iVm%; zYbpkr(Mrx(M75Qjbf{b*jiX}5II?crL6{JBjLAb^wB?GNt!el^-|au~ogX~>FlNkW z-k;a`JobIN>eq;hr?xbNlE8vFI^^d}w-*r|5zPa{e>#Qd0v zS2(VoB`6{!3ql(zj<~_$;U*=FB47^}-jlBB?`XfdmzUr=2QQw)S1G2FSnjdU$g_>4 zQ{%xY>fH`ei+>s0Yh)s9ENhzR*-VwXLO$&Ka|*ru1GqRP#A_TRK7YadNE~ZVxC~m2 zx9m|Duk1s0a)mDm1|Y2wPO$DEBP!D){cG0*-6f(2gR{z8Uyl(=8|m~EE9GH9S{u8@ z)Ubg~T{R4$hA$u<6Aqd_ZokH2j+JyWb?!^;eu57lCar@DF?vhNJ(6ZIHKO$7bhocb z;E)%5(NZ>eWl0LP%e+ui6PDDATr9adlOd}16(u1dcx5b+n8Xi5A0>Zpe6j!AYB|*s zQv}EZ<^92JL(aT@bW5zXB^1iD@5lg5qo0V9Wgbko{2PR-Vvi%#K5eJn3A?E(G(HNH z%(!a~PYIJsY~cO+nc`|gRtJMLErhMzZ9)u%ga%6$3JAZU;7p-*K@R@J@NOH|X8AbM z8dF?(-pZEIk84r9LysLncyK741KrwNwVfRkW|nxHZ|R=J`!?bQ*oL>|ZN>m3y2MKC zsyG+qHo8}AMI27U&6P_^fwN|7Hlv|sY%CwORPFP#;uS&YAyxUop-5j)Co4rItB$nV4UFwh1WiCNo%g#FCxSXF^({6Bmuaq`z?Tu~3c~J=8$ky7h zL6~QB{Lh%Q!J+P|ZeOcG=ivWdaQujHUR6aNRk)iArO!rSx+p_G`&!xJi68n0I3`6)PR&!d^ zX=zRIlFQ&Ubku?xBFs}W0%o(@zs!&1IiNK=YP)c+Z=8lwcwY<|hs2#F410ok9G*hK z`jVI(Pn8OO{WAoxE-U6=W*|@*-W-@{wKGBr{H~Nk=kMmM_U7q~dt$0xDNUZ;2hwMZ z%cfPrB%>#?9QmBSSF}y`Dn}Tbw6t{gHRb9)WDw6N>9Nc83)XT479J_azt;c@_c%>< zi&#MQUZ6MJ($hp`RrqpIJr%T|+ZB~wE%%*6Y_UF8BI6m2{`rG{^kri`R^g0+P+dS9 z-}ygh(;Fdr;;=1?VV(VaVb5wzN_1gjG5wj~(_9}KoW{DlIBcU@ZT7vx>j(X{z5X_k zb4W-|jJ`xI&aS$ZUEZ`kScUUfz|6LsKSE2wMUT(498O5hz(_~)$%pa=AC$os#VX*%@J?JGzcb&zO6JkycUj=UF+8 zIW>ptzZv-Jt{`XP*5j1&hPB+&&l>JYY zCQ!?K(X!2m1NplMI$gs8`DOSW7HC4yaR&w$Deq%^?~R$vanW?s<1>QX6I}S+R_^Wu z&OE3&!2ahC?$$0KCglmPlD74n40vZfjE=mUUzZV7N^mQ_PZ}I%>D2&!20~OO=897@gsMuoeg%NpI=N;7 z$^~qSJS#D$=>TOP5pUyc?OBkd*M=I8DA-9$iCxv#7OpnqF&B|A#Ds&OFK)nxqOl65 zEC|*cD3+-iGN6LbJ^LmJvoYasPz;4F>O*z?(YsrYViBki{RST}ZiE*6Ks*M8PBqi$ zl-mcv!b%=C^jJYVrgVj#YOm<nUHUChes4wbovsR!>2*&9`BqC*C_lmIQovmyv=!0;|`&1JuwW@C29vIfc2% z%!k-jSkoI;2NDuDYMsv540uZ<5t%b0(;4v7(aoVkQgQLBAmEZ0eF^QIKFJI;)PkHr zpPwuh+SjR&{3Nhxu%9(Rr5VTACLPjux{|fO5afAx@6Bp9keA9ifyRt&oGvKA&CdEH 
zT83vX+$(#4mpSg)c4W6>c{Z&qdt`Zy7KPFi{dG9LPB)(Vu+PpTCb8XCd@z3kkc!UN zH3;BIsEVE|>snv6$8)22#CI}|51`Nqly6L%(Y7~cwRc}2)(RF7g8cW|3EG# rdHC&8B;_z3J^vQQ|7XALH{Fi%o+3w71U;nnJT#rHJiN`_EK!urJzN}|JsfP`(Ro?Ax!XEB@p8R3ca-t$8e3MvYUywqDQpN#!Q?4+mK zOIqHj&gpaHzc0BL zDAH2@zSMhBNdEUJQgpTdzE7f!C;s2})wN#!|N95Y8=u}R1rJ+k#@wh(PgEE-j?er~ z4SP(U;uLiAw~xJ0`S99)x?I0-!L;06Y}t+D0#w!NPb)wZ`);Kxr0`&_#j|W_m>vWI zJ${E09m#^y+uQqDEJ*aQ?L;NRV_Y5%hl?vH;FfBYgvxxPTx~lU>FFU9i`^Vdjqns3 z6+vcPg&OF8$1&aa5)!>V)x>-D0wq(>Q(`#%H8de1hsJ*W=O@<@fvuJEdFQ^ps?NBB z`Nq9z6%fz4KhEX(S`2pz*0qK2Zb?z-RS*u7{k>@`BQtZpNxR?nc;Vx4LfW|HpnH*R z6pI&qvFF{%5*9wPsn$IYxwl2;cm+iB&J6z@O$o`anv!D#;t3g-^Z!}G7w%OvRbY#g%TZJN&KXwVW*b9b>$XuUO(i5^&9!=YazmD)h5 zK2vEt2)PZsSdADzh!;86E3E5>1i3k=Z;Jds7Rm$vM#50hv62ouEeBJI>tixT*5k#N z)mtmpe9>Wa#?PiF&gz+R@FVq-2u6e2Mwu)Pi&JXPDiXS@-d%*nL1o0HVKGvnb90 z30TX#H=6X~YOm^B(0$;|EpmB#qA0#4R4-rV)A)TddqK^!sqCP;l1janD*nM_CEw>o z8E&|{(-mcvUWbdVx(CdsFo) zlnKnLV-QY>oZekK^2o6~#k{|<>4y?P{ee^YC~TvcZAx8wGFer!V56cT1%)!y)` zRi?&bgnu2+<7kOmIsKJ-c502|e1qc!;FweH;P|aDauYTECfHOFc|UH;R+hiQJY+Lj zcdh#|x7MsA?DA&LzKWii8QZzzk_mBg@^{JqNc!|{uw$e{j!ED5t7>*!>tRdTsk1;l zbkxW9aMO3G$u-~Z9(X3#l`hn?XPd7R@tjQu0?!8pK3IIL(ywJI?vs&vDRcDb84;9d z_P}dPn%r2+53E}GOcc4`J`x&=)_JdKWV9c!{^?Cdg+aZ>z5Di9ZqAYJC|gIcR$O6$cIPet10{n^@{j%7J3#Xp;y0uvJxiq1#9V=& zcrbL0J(Fid0R-!-vz<`558Qyt;w2xnSo9NHSXw4qi7tB0JBlI`-Jh4JGi~uo)U<+Y z2g!Y7pE9dRbq3yQohPS__(sZ!JtU7ye*KiSVkZQ6by8Cm|Kc7Td@{gZmE(7|i#i&* z#l^-hNw^R|O2JuGT!h#9?6qAI<+xgBAcPSNF5wm^$_^By@!x;BKi^ojfRgkjfVG(A zf`p?P!n(kQxnRkkU8fw*JCKkFe2#Kp&r0}#BGYP*vLFTK;THsleE##oUPRpK*{G& zoYiSiZ&$+o2Z1T_nPPcEpm{BteUcHwO~}ok2X8CWdkN>l^N|;sFhx4plx*=<1NZfy z8hA(1d%-n=@@mh8zdMi{$BL}&12*Bi?vFU7+tsav3Z0?{brZ+i`SI@naD6*E!V>>B z4pX3VlW+4fdFyhwtV+i;t3n*@`ViSd<<#^Rq9gasQC&^#Nix0LoH^wF=7jK+PI`Lr zXpp!#O~BpCHke_o?}<&wfn4IM$6VK9J925y+Z3m!--VA}ax&<0ll)m&60VE=#8>6! 
z>w)Y1)*r#g@^t zHto`4EniarINFQxrq}?d!n9y#<7by7XQO=gB&tu}OYcJ)0gn=xp8oruR~F=4-#9wK z-dPg&oPbueZK2P|`8_QtdLGL&#t?9V?d+jc=fH+uxsV&Re06xm>KQ zt#$ct7{|6Wlw^e#*wF(%2nDvRi^8Gez+)9DjBURc6&PBS8F%LR_}B$S6{dnCv?~p zN$~wCq3DI#P6v>a%1`Ni1Ox;|>gwvoApn9I7o`PUZl=;~p?}Bw769Z&d|r&BdFM`i z8@)_^mlg3H>vsUX2o=O$dm;=feH4@MlqmV{W~!;5C8pH;Aa`eMTbuC01K{aKL-13_ zOp-mQgng$$jCkDoe%$%AHT#=T5IfB6StXeW=!IMg(42dm}# zR}zL|JolzcSWC{B(oA5a^gXJKnt1?;uxlqFPswZ4;MXx>@Bi(8{2lv4$1My-{JP-B zZQH=X;Fu;-`$jIooON8LpQH8i&zGq^?pZG2AhjpT6~jX;WR2bW!MoN00Rhd8%$kFP z_Dogr*8GWU0 zv{`OtYdY#?bywR`wJZ)7noFIVk5sQ>gvs3k1|agQ_N+9VRXafJmIUe~HG}$cPBncG zENv2mN4_e5DeYs?cghYxM9rx>$sVfm&@#LYC$PPatgYQne4>V48|Kt)Sy>W6vMM;jFT+c7m2mDFx^1)vS}9{07QqK1~r z7l;|tppqf7S=GwZRL%r0U49|2eLX)zw=?&#oVOzYBajhc&V44}Rv?=a*NOK)z6Als zS$b|iTT@tDP{3#xKQS{ot?^1!V`4;B5PjfwK^|4#bo9%$UNRm}kY>>JU@**qx<%s^>)=OHT{pjROAmZdyIVOrN5Is0-0|>04)RB7MkV@(lXs zs~xFYNvQSiTPpFOpX+_`NM9jd>k$sf;iiCbvs>Zl?SK`($l0W(D3VIZ%V9?})x@ua zm-z1P`pB)8hw8Q6Mx$7mLbOe*^Zn&kHgaIn-5CfommfSiDcq)qe|^uV4V|_9u#))D z6>Ani@=Ia`K{9rKr43n3YVz3;i{8t=0SzDX_pOsV3$2FJQAtIz#exEDUwZ1Rh+bN3 z0q(FGaDC7?WnJE;bBKJu)59PWtM;@8;{%17lok?Kr|%g`h! zRQ)Qfq>Wz5GJ^Qxs;1Flwq{&B!3)Z52V`q+6YoXO5mwEw7q=26_1;HIIfOLDUI;cd3t0Ba#?KEN=OTaR|9 z$`n5kH7*CX&YVxE2vwYRnEGy#b~ZBf74m&1vqMn`Z)$FAwE30S-M6b+fUY(t-mR@` ztbOQ+Pc2yIu?*1H=`GPYQN1NR7vcmjWDo_L?k|7&f{x!2LL2gmS2_SYoIDtK@{VFn z>-;ygk7?c#ZX^Bn9)YHY2y8a*!P)}qC9NkW*CrZE?&K9HNo@^>2lv;@_Zd(Un_#A~ z-qa4rb&6BApr>7!BP_VuKZv4(>~K9X_?PciW)atR8-Pk9z9)YKtcO15q9xDG=#pIq zRKj6kMd77u2q!|HcK3w?V3lGJhlGlh)>pG{DJ0jg*(li7Xz~~^>ntNReHpb)qpxw1 zctHj{`;y@=&-Q00-nZkW!d69(I<70zan#1|3L|%7DR>noP9gYHu7s@#xxA#L8STK~ zDk53u8|1`PDr!dr74ffMviY`Yn9XOOjEXJ4k5r`tYTNv9@7uq9w1rjiLqpn2c=4+V z66>48Ni6Z-MKn{}Mo38y+c#4TwVOghQvpSn`5@&41zYW*+i`*H)Y;wX4! 
z@C8!nV|m(i!Y}(>d=C<2D6%+u_68VwL`;2FF<*y>`$8c5zrX#iHkZ^pUh%&WB?r6K)zTnISiu3}S$&p_(e-Wc*S?h{Mvl*9uKAvvpi7$v7= zr=HC`lW8cL?U%{w1OpYzd9qJ^J4~-bV$1XV-prNs6H*ImtseBdv(_hU_{PP=^dzxr zYE@*}!1RaNKyO*R#g))RH`}>L+pXr-rLAHgZ}_+dydE5vO$G6TcnRZu=r~!QKMk?$IP$lmP zC{hh`{M{KO7I?K&5$Rtu=O-hYf);u$6# z9(zwmW4PK)m*X&ubufj9%HUWjq72;L-=V3&@U_&T>t=B$;&Qp}{l^L1iyd;)Mg`HC z&I?WI5Gc8ukv#2bYX4-pKCkDE!ipvjrI6#Jmm}tQ*{nUVVoEUwS-%AGvJqzWn=7(L5Vt|s&o7$?|~g~}z#6Lxm`kRrR<_I3F_ zb3?Sq1WYR1&kfE5xG8i1y#_fj#{j}5jaVTfvX!fr7kZou)dZpn?UkXDAKRPHk+QgTR-grq<97iow+V!b(T`~Oa zKn_(CELT`HH0!0-6WgynN`O+jnua&xaH;;7BzZ z$JQJCipp(~>6!NvR#g<2CzG^}UR4%LzGh{lmlfcG)iI z+;i)e01ECZ1{K#i;+(sFl=$*eU7fCy2lHAuG9x2HVHg7^W%3)0gR}n$P==AhV6aIw zhLU>CUkP2}@^o8(e#mm`zxE@_4(B6YpFbeRNmLFqNz#EYz@8sB^k9BAh~_`sE6hF3 z5+%k3A}SC}BgQ?Ma8%spARIkuwx&j>MZtQh=qjZVK!!`cJ7llcO!1;UxR*(6`wGk5 z7|}W<{df=1XT>07YFH8Y?I3{pDz_%;#-gA+vj@3VE1YE23p;0HuqTp;f$cdc5Bnh5 z5(lBh)K8qiO?K3I-!W>gInL*;3_PrwpQuV16c4B5fZkMLQ=xe>4N8|pB7~IfNNVNf zm!J;SB675nqC4|t+w=J~jf%_YpP$E(D2^tA0Go7(A1<{U;)lHKej$8dPrUv922i~M z?mD4zVlL$Oap6F@qJPQE@uB+>7v1m)8#-*0~6=dr9bps;9R6f?goM50JLx1~AWM69pd*Dl$I@(*F z`JD6i2A&&0?kgNEu^5u~a?_t1-4;hX;ps8Ssecp|8TuWN(A6n;1{1vuTuMra;K{0Q z1is%d-{0vPDDh9OO3u-9-0{>eV2&>Xbun&{DObFO5r)B2Irx;0x4E%t?U@`tj<+&y z2^Fadz>8^_sWX2mcd#@^$n+;2+}W*tR1D?ed7Lf=%lr!zsi{`a!>EG)Bk)A1$-lHZ z7)w;EDNzwRs2k5w@cnMnPwWF6BoUV*ibV9|h;+!4A%A(g%8sku@#SovwJ0}avboiv zS?16Nc?rF)73j$K(Cj1LQ6i=Aq5k}Up@o=tcywDIR^q3P7>uxmvw(u)jv<#nG0(RS z@t7`!6C6sS;bZn`0>jicN#=$f!pB|cg>Ug@gN|3eznw~h^>YDY@kYH#7B^1i0~_X& zvE5rpCNEFj=ki{W2^EL3S(wtaE>@z&I3WzKOB0Lm(FXVHNsWzQqXut~{&Ym@j1R%G z@gd`P%N`><^-X`N4^ZI`;*Z0pR7*ZFng*H%UT!AT5E#_is9*U@hmZ-Zrg9e_CXNAe zdI`rTgxp(ofsmBh{Q;A=$xp2=-|4GY%XLH!juChVD|c`tXL6Z@FUYA$!UF+8Cabho zgXYqWkpji?11U5h!($S_pOR3q!`V+j8z3x##*bwU$xxMs!Cl8YU>k8{gU&^Kp)B{P zN-!8NHMvFf(7g7|g3E_=qH^K!KCq#*l@EvFbp{@-s1auU0Bj$ff^Bo}C6;2xewN>E z$u1J0LS^giZRSQAj7u}eU1m#)Xs&=JmyE@+dFe58#;O?3shY+kealrf ztjewB09-l~P%%@^9dUx;TV<^*4Prd2VMaqx3N%Y5op!i?zEC6^sswV(owGz54Edof 
zT6=@Pn0oSfmMMnseeM3SGQ~T+B`Hecj1*)e08S>ioQ+$~U-f23l|{v9A*bruTP(0k zuan_7saAgS=c#-bN>U_T(|}Zjcx!b6O;oyc<9O}9@%YbJuH?(W8z0YPZ4)eSl z8gp0^_=996R)oP(kFv_ZA_bL3_J4#>Q^rN+Lj?He?dD{AJcbt|KNFHOjZ>q*aE=8P ze5pZ?N_QE`rf&quEr~6N8=(5~T|{Py_#eX=;)|bZvi{&oV*yG1#2M72jqKrCs-hRzO$KkpjKucUHCE-bZFCWc2 znDD&+IPtw&M@Ii&_IODiT8q!x(%∋i+=X_&Y%w*a|gb78hTHvGkd5)oBv(uwf{9 zLtacr>WHRpU+$7fi$1vo6`IzzK;qAd^QmT07M1-D_K3zd!j}HOBqgCsR-h2(gCX-d zab-G-dkS8wB&^eoCOqaB^Y!++o(ltv1PDR-V6~;aLD+k#PzvIkggg4a1dBm|e5I|6h91>&b+#Ck|n=z`- zm>`NZSBK48G@kp4rb!)K*) zu;l6`kru5Psy6-WzpOq#RmXE9CW4w0MD$#aD{>0}wL4=fF1kUS*qzT7P}=T#8di1Q z`R#}zTDhB>n@8Tqi%cImdLPV9ZQZvYcRwCEJv|McOU=mGzX6(c!goPHcV{FvHa3_x zpW_qN8?Qh_Kl7$&kXP%XfZZgEXLiVfRlVI*M$p}5(enIbq-%5^jemLf480cOs>yji zl{WAq*Q2#AKX3kbjiq8Uk{o`}-1x?EZu01$+^}(E{j3vcwoQGbEq!QvEv3}`1r!Hv zWIfOy`T~?p6GsP_CmA*nKtvk>lyXduwjX_cLJzfccH={DpONxvF~<3yKcwvL5-xD-w=3wet7{Dhzj>b01}P>65r)QRdF#~ zz~T3cDOxtcLkGSuCqJp2J^pNLVB9qEIW26rAqLnpU0q#Kspm^dN=iiljn;GbV5cbC ziwO))o;UH$W?^A@bm4Q36#ctjJBoFB1J@|i;oK5BYST2o-sErFt8AUz)A{A7Q=-u; z@cw6Puvie9G1|_Nl!fCEfGeeRRBv&M75;=9aVC}3z$zH@`FVyEYkU|J#lIS+ly$jn zYVtrNz`AcNBjYWu0?ucx*;*xPxY$Zl>BA--%YrIu!-p`}x!Y0<#0&#ddY>6-xh`p`qGXJky2BJ<8<4%&!rwvgyQ#%q?~ z&~4vjD>JH3X0IG4Xue{bWpxWX&x_$g=y!rN;u8ZBcecSo5+>*r=O9Tr@Kc2v{*7mZ zsYYq3GdeYb3%9ay%)S+jgKCvSvhTmjQ?Rii*>%r4ZxD)}0g70WleX@(qZ*w`;osqZ zIV8gZp!-kzgJJJgv#5OW+!lb&rQt2m1iqbWkQ2q~_e$7jE@5Ci(+j-2v`&VADBio3 zD|SM!L=n%xJz$L^FljSj813^^Jn1XIxwV*5EGpn9tQ0!13i?}PAu4kYa@y2&P|`kl zhx~9g+s!}KG|*Si5oSIHd&_)ufbf&Iim56GQ`nsYMtcWT-=+71FIoEHEEeVXU>4Qz zZ{E}T)+KxjDGo0-g1|khaa8@E6CKztjjs|drU$-p&Uzuz7>{J#0#Y+KB)DPftUauo zBH~wtc#o6l?Z-{}H((T4o3+fujC&e2BaC}`HLomvs)S`>JM0icCai=lxteMVowxeB zL;?gwDw(BMMUxjWCG*RS)cGVNmq031Nn1dN@$u;LH9e6!zXNd??jG60ki)Bu!I;*_ zZy40KQvwft|GeMdK^y+VcX=tYY)sX!o@Z}JOO-^}HA})3zUCgxRMCoFtb7=^Rkmb9 zWMn))YgFoELoNd#nsD^t-?0oh8^;$^Fi`YH0qQk5(3%so8qNB2b+#wF;nf#QksqoW z6&3ZF&ymv2-5r{l`3aa$DB~zzZ1r9TP){0&J*mU^|1LH3u#~>S-@kup_R?S5hXa~O zSTbt8^RlR?5YG>1puyw=AdEzK+hn^1>*yn3n>t4~pwT{`-|m98|&4DgPl0!hpo9R;WOleQo6N 
zOL}*UyYnV5RQtaN^eq;(n65&_sn~-b65xW$^M4&^1&Q@R0;|TlU~S!GGr@5<7Nls(r{XlZb|uW^lY5* z;#RC#On;+c(UIjk=+JYHujxMCk^d^l?TbXiwVkANl{w?w%AHuysANu`y04?ly=mS2 zlc<@oM~*M%zm@A!ZFo0xB9~V?!uM675d6)25i!$g>f7?fuVBUtrw_wq5qWv}jo`iY zwT%WNN0)Zff%|4LLrbxl``+GWF9kW5E~}GUO2JH=1k?DtA9By!Bm|iumc~op5~r+8Xh5{)Au#+I)KGmonW^?;u#i z;Ud-2%q8XP;3SgeTh=#k2m}OH@jt`b@PZ%)b2K9!HdCejtSv{#A2$J1H8tI3IEy(U z!lND6iEWjK>mnq!w&G{I9b(zIk3%FQy6syxDt|jvVzrE}$A+S^pkDxyFgO}F*ZlMj zNtag)Q%&uOICBcChcjP&#OcC+Bgd$CG_4j)ZeoGJ2jLQ2$GeKn6{%v&e1dWNWzDOI zkm|KeBJcafBi*pxVpyCztfboqI48=tFsl+At)Ico@Uu4<-ehp!0J8#i`P`acQF_AZ zIS?Yv`?evigr56)q<}&ZI4>E|GPga5O^c#y5>@T6i2KTMxBc$Zo8T`_xVK(k-e6Bx zA=^2{mPz4{`ktzyG%o(}pTGFql^-7-IM2$85Sh6@pQ{h~fGybxNh4GOR{G+MhK>#5 z2)f+_zO;{)u-t(3%F)!fup;ysI*?k)xeYta+|~3W#qxHVHx7j1wD!v*4E}6 zrx;&RRfglEPQyUaTmlSV-Vpj+n42w&wi~{8Bj=*omk zeI5Vz=J*5qv%kn?zs8U6atY6l2-{AHs~ytzjuKU|vLIyJ3j)>m+(&|$7(P6_J@(lP z;bO!Nr!RN>f5?q-Wds*9PF}ko)Qe|}K2r3JSW*>}_tM&K%>29^G%v9A@xH}wOMzG8 zSpEeHX+bzXRVm8kh+Fs%+s4-kibSih4@@E=H}mv)2w=pah4H0VIr|G@6(>$@rIDX1 zVvC+$(`Bi9?IX$>ncx>-R38Aa>Ey`9XoGwL-zF_D2U7XmLqrT^zR94;xheS0T2?wR z^bbU7A-wB4K}^i8DK=43S>a-b(S*NEYL(Xvf|tUUuW=06ou5GIr*??ZmZYdfOR-i;brgn(Ua5K#S$S z854Qno1HVJgdAH2T)K+TI05(IU-`z;xC4XwOz+R=LGSK=eb=|f?5LWQN}-qWUWUAy z$Zv~JdmqQz`BNijruhb%u7zBpx^mP_(tmoGA+u>&E=PPhr5oXW-64b9d_9KPtfW-W zWjkV@?In-6Z<($zyz|1!jx=&mVcn!*_%}_g0-Wt900+ zGJx_FKw-%}0`1AcztXI)mYDK)3adSQM}$VTlMF-+A|uO7q2JeVQbvMd2Fi1`*ozYp zipMqpzo7~1PJE4S)atnxE*(RbYd=$|*W{Ah=Lqat7qbeOH~0W$kS)D`=z1h1C4D4i z*U15B5!=ONgQMXBFx8Wc;E#$_l4f&JIFm`kmHR30u<-2neT6={kxNKfLMUG1MgkcK zG~qTXNmR0Z->kY9t)Nk%_uF;!_HLnC=Tn z_oW$s!bUJ_~HRZo`v%ENpTGbJizF$LNH z3|6D?LxZQ%>u9OMTQgG07HH${T0d>pQuuQ^D%N$p+C^@!2=$zDo*fX7l3t_OS{lrg`SG2|x;g&LkXMaD0UKar6hwPqoE zhQrXZToQU9Lb*H3z1bt-8$7)2i8_`a>)I@@6+DtP4VC4mbNVAWn0lWOrLkvqp(K5$ zI0&W(jVI%^```JRo!YF(Es8{Uy?`$VVaJJ?9Hl(z#Sl1IY9WOW9qz@sBLu`z_xt$o2n&p=AzM00Ov`W~tZWwI3koTV3O%%#1geZ*(cwfyP-*z& zwBtM)CZ_C+cF9_LO{uvgkQFTW$Ca~uu>fro5fkA&i}uR?Py3P+wu&f)Ui0`_W2{&E zCK;WmX!W(K;)#mEnr>feo>gzyf{%`qSj-RmUH7!jD5eOEra~ 
zXj@V;%+~$c0cZ>E@$64Dp*5&12i9Y2y_M#+8ssIi@hp7la0wq8S(3XK(-#*$Js3H9 z=mQwf7#MMlb#zjel<9aXi5QGd^6mTZhtt*#;vT)+-zf34U)d=NQRKFyi&lDnVDQnH z-{_jc<9xQORJ+oF9)ycGx*l0hs%VAiY%WFad6OREbYklK`8=^ zIVR*$oN>*7`+c>i5;QmQ6%`EbKte5IWob}%r%sH5zGGHG%bjlUK)8h0PoVW{IR6Pm zf@gr&KZ`&I=1R?W#s`|kSpD3$GP`o#W?{N8@+y?_GlR8NgdzicPLx;fu0tT#0nVxN zAj^-XjD!ltOhEC)U~f_FgO(T^6&d+1EfyQ*yqG)fgPWqIlmg=KquCqy=*F#qsc$9d zw@ptc)ao`T@luwnHAPp-D17nqv8ep2%PiNdI-V#njgA>lp~e)cmy(o}L>2eGbGQ{5QZJmxGWQq=a3-eA?tFP+DOHec=hTz49=-yIHxbdW*i&&m2PKWcK}e z^k=amlRSYGyo_c!x?#SaB#x=gWnOk zfokd)3AX5QmE3s73S(w=Vai{OI^jxQxzmuVJO7o#){xG7g8iV4`TJnht{CnXGtBWas$%Hg%ZG0*`jxROhe6TCFU}2h#~0II-yts#TgSqufMZXSX86$i^jR>^cT@)M zUqJ8Y%n26?P=*pdLunyyJ|c=*Liu({3ih+BJ=s15Oo^Wg@|N2j5aU^OC}{oL_ymKu zKZ)m;H|WTv#{s9%dx(nYtoB5IU*tsOe{kq`L}Yg&6pH#~_$LYpX1Fvws=F4LNk6O2 z*MfpU?x(rSf$rKdcbnwf3ZeRoEvTwu_!%Cs4#!*#Y3Z(8B)a<_pHZ9CY(oC$9rslx zB4*tfW-DC-Wsakxba}@QNhd-Y&T8YDff~tT;mOYt0S|!4X=nxB>38^|P`&YU8XQDt zQuP}CP44LS=`eyeSW^YnsbluFlv>`i0(JpT7~SMjd&mJd<_}B$tR&hp6YAvX?6*>@ z=Cq(19gYwRH7x78mtTNlw54|5xaX86@b~LpeM{)7PreN5?KO6a1cM|xM7#NGb^q*M z?I;>@QTNVf_%Sy|mOp{bOeK$@tdx`mo=gn$3lvKi86h*EoW_&<%16cfQ+rI-YL++M zpg#ER2P4l4Jw-OQKf~yi9drJW!IVmrDR8P(1IIG}7t>zx=n03QvOInMf*=ANn0y^? 
zWtIAR3Jgw`WcNLHk6lux=K*V;(o@9LTi~eaJ!_s%dT9cL|6od3km1>0YcwHdx_~sQ zYB9e*U-kQltt*wxV3YRX4U6p&lG_M)*Mos3fWeE`c=4z#xDNw~VG+ITQQc1`AabSp zoo8gnD<3K;uDZeX1&oG!rxOgn1bIR}ABss4bpd3d0%BidsDRcZpXBc3ql^Jw|y)YC#lQ2xA|eQp9bQ_tXjWOo^t8H=Z^#GOS9Lw{YgGrO&l%mh&Eqgu-;G^pc$wL3gO(c&H|R{n}$U z8SZ9G_A_Wb;i#rT2SM4qBPc5T-`$Pv*v48LcWAh-3DrLXMl27LJjNTbVRZ*KPl6?n zOymEhO{?%TCv0!GETkw4EuAk6+CTes(icYWeaH24fOWJ$zB1y6fhzF%=@@30M>N2x zsarzV*5ChZ(g>++0^Q7i5hAyl#*2(OG1{F(#UjM=I+Cfwjac@kb(z)|Pvy#rVYE#K zqk_|v#&nNa1t^{g5bHG(Y|q4hX9hAu*}MN_tatJje882QUF*CsTqLnJ(|In-^|kX# zQT9d@fx~hdFKPtC@qCp5sn4nUOw*gFQ!Rn9ty(@#uPNDc{R^@8Y+l0l< zWvDF)b?DDef zbf>G*xZ|E%l&0xKW#w;Q2<7XWrJL2BuI*oy<=~*O9$p?J&+NK_pMEi9Z^bBqW3KUe zKnEbBph^OkWJ*XxSNe>il!Zq^cC{pno2>Y<$k53ZvoJ1!SQA!wIwslG0qMBc9*|!Q z5_^d3mR;blEQCMdkv(J)4D8sR)h@0epp+;!!X~8A60@HwEqtf|q9E`MK4+QLW9+|I z%=O12emPHMA4B82pJjYpHYQ?J4q0BkT>opddJyPbGg|1S6v(wy6V}5J_1~zr{asDJ zwzh`%G+1G*6;0cEXYNLJzQpuiU(o&%Oqv*MrTbB9OgN?b{z+H_cV>7TthECvNn9H4Jw-{`C>_T`mr-oFMC?zpGnM;a~9`<=1<`2FqDX71H#3(WQY zE@rZiRu&OKd$qJWYi&HMrWO4^vyA1SuC1@*C!$)GqlRoeeg9D^cW`7x=bzJizVBsr z;c|55IG+O0ERLb{WGbCwd49^*5{E-+@T+~^r04$SGhSOFNEF(j=hF~9d;pAm9>C~d zzUnx{1Qg2T8wAaSX}lc{~QmncU3g{RSI42gNDAOekgA90E3GK%}`(+H{mcgSJ9AQM{R!b8F?J2(67&ZE2&=Opd8H4=ll0* zmw~Pqy`h2MF(a4j_qMM@{DYsWpzKcLA9!uvSGM3b9q`eyvc4Mz1BQgWM6pBzlMd%A zNd^8#$=&bNPp0&bx9abYD^ z=T$7=!)(iO)LpW)sQ0(nla&Vo>zPK2Ljwa6PEK5WjtY{kUVO$fmEg`xrc$_)&o8>y zW^)ag4wW2L3Z&#)lp6f5Ahjx!&1JENGVT8<96+VQJf+HOG^!$)BxEhq^>&yJzg;|N zaYQ#aH%r8|?CjVa{4i|e^+5lnKFY{;JhI4;V6&lg~#xm-#VV3b}; zH6}Xd6e*H)pm|af7Aesqs&#pH^>}-ZwCUX7XeH5w|M{+Bnm-g+T3Da{g7G=00Ua<_ zk|5$tqcB$&k{fXQ+NF=W@{u{p|5B1(sOvy(qN%Hfe{iIP#+ymot|&Qk+ESy=`$Jq& z>gMeu&9hx`jWm#|4vwfTDRMhxjW$SXJc1Uqnrezd9lkkRQwzPh+{$7%_3eF4fC#~6 zy9qj{{SV94XHV&(_xUduC7~w3cG)fIiY8r}A0L0-w3QXG;AKd!p>b+systq08n=YI zZKd6ONqA*|{mh2=pVR$`8v&#nqTb@hB?NUq3t|9`!#F)^wTB{LT=H%`K7Sjg*fuTX z+(vY#ZJ94s|QY4h;xdZf)hcte0_sroxKd@;n%zwrTzjA>LsRC+Ghqr~YF9|t;iBk$^1oO!9SsIpFq({zP2|ucOiNxIPM4YU-hO<~>t%}_)#AaHtZhVog3~{3q^*DFSBUgR> 
z4sV-TVxM*VmGauj?Ej^E85t}GJ}P^0^_7iaAnxEek5vsKCHh23AUtUXX+J2mT{6Bi zue;XSN9=QVi2r3Rx>z!wVB+U)d!h%lyZZlc?sx{Yv_YWpei*oGp*d?WosaW-0=*?j z!{XUF!`tGFor%?A(`EZsWgfn8LTr9`Fpw?K0XCzi(S1cIl{*LAyZnc0Lg~Ln*4uAh zo*Hp`R`kUsm)*#IAkCNEVi0IX%*(y4o>lXh(&lgTU_~4?V~^Bu|DG{L&kh;^Hp28z z5_{OlVCN3y!KYNX((j0F18Sp_#@T-?A5-WGf6VeGZ$2fAke8xZ$TM$v|PMnf|_wlE_SpFG`IsZt2efjNuur z0#|}%G+eD}gWB6YQY)iS_Ip3u@(c|VO*Dg*kZ|3na`6QCF@2Z_v_ZL5E@ z9OQXvAxBUA^vsSz`)i(km2|&rpKqm6)058vnpI3hX=D#i24rX8syt9zDG~dm^Zx>B ziBfdUuN!$z##S9alA8E8KG7c4&-q>-ba^lDj57kx%Tg~C_K8Oog(ORdD=|5Pt(P_Wlct6DEWiy0T>8K zch4Lbn4oDE78b?-ptfiUJLg&9px@13;exNbzd$^=)}JW(FQyHvXDSbx&FA@Yyq%Ad z1VD)Hf86WA7Wvwh%sh#dukHR_qXX#1m#_K5oK=IQnlb<-}J*`AI_x}1d#AmafrNdNW-ORYDi7BiOY;}<(Z$_$mvuJk zj4`%Wna*E)MPt@9%c}Q%j~YM2_5OLx&|%v;GH3)aDO8dT{Eq`w4-i`jV>o+FO#IB5 z85i?9VQ{S%xSG{mlVu^323-2CCm|&*k&X`dT2Sz;avF_deZbw8=r@eGAdV^`iQM!f z=R(=ITRA=rUMv;%WCc~oG%*F?e|%DP1-KDe^Gd*cRsOO1_Pch%3RA+Rfcil;=_s61f8o@(KzP@7{sX zRY@p|o<=<;qjTJl`yre6Q@Pbk{Aj6is8|iIh14}#y^@!bN*Q|)2(WULT)O0TOGv62 z^H%<;K;eQEQuI;1qIC`wE|;4?3S62$Y~y`OD3Vkbs_N>h#8`xIoUM_*by#X^zR$$P zO#o(iu8+KLRSIhL0DDIl0K+0>91{5$IiE*u z(AsJgNCYX!9y&SoqIE#^`L{Q?^9U*A#mFEqOXCK^=}p<#*ogL4kXOeXu*8Y%2`QMZ zh4OiEteqc7O3DB7NUJ!ip9z*vG@?)!{w**2?Hmx~&-BoEbN(wL=y)E}0a|F;p{9O? 
zVXE29g>Z&{Wf>h8bjd+qUoK6Uf|b{*i>%^GRij7&H%D8l231w~7iqg?I}HfbWAmU2 zqd%x;=;1(mi;j*Ce$0hJ#)&X8EsRDd6#X3k{ljDE=VINx$eWn?CZdu=CVKOJVnFAW zY=zzg{Mh^xzWf9Qk7Q;G+0>1!?7e>1OTEwee1GTPliTg}dXDRQtZTGf zaXIB&q=Jo2$(|ZmyQ`z_>r3T#dtn|LPl`MpVC&lS@sWYv!lSUOWTBs%vm8vdn_TSd zM4sVs0indD zJeVo>y1(bwrOA&kq(1suUpz=T*TdlZ35CHYE45-)kO&Z8J5Br6RZrn~e&ER`oSfM` zd?Jm)&Hi-qBj>>B#?QdUu$`wfg7Xub#jD}9W~I#>QqT_DCW}0W2!a#1Rjrz&BNA7` zF?)N@@po$jeE`L;9)^NI(B`Gbfkg6cB*Vepbp=2$&HxPk{dy->(KM!R#^RLA>Fg)C z(h+Usv{ z7X}jAk^)exUbHyJsXa-~sWc$7H28Vur2{V%B;F55pme_CbZ0sg^)RYwl3e~K_0lk% zTnu@I9<2&j;lU3`!a-|&C$>lH*UXae1!oZvaa8FSA zt82tdqiPk@M+-OjLV^WstU+4#gsYW0YY(1IX8x%GfVw|CviGt1J(`X*&ex7xUiE^k z@ucU-8uucmX>VYTqtH1zV9>A*yHf3NeqjvW#ploWH@Vad-_k3UugfrW#V(bDs}>Pq zWJ>B=Jci@Nx5VF{@cRp%hHq}V2p)gZc(q7za&ZO0*=*>o6X?cL@k1s1{w9r}fQpvw zm6guBaNT*g^qwFEh?@dU5rT-3bW2BqR8xeANX23g%YMo^?ak3FL|e_`z{(i_&Lu+t zxk#g_$>oiCUVM-L^T6fg<%9P8!xov04|>IdmA|)^mu@G$P&SkRX7Gg(1mBuo`H%p1AVPa$gk+-PXQH(80oI9 z)dGs6AXyyWk9p;VA1om3_Swiy!6v#;8~byw7!gvbaVuVZI!iCVDgNud1WaluQi*26VEW!^QG;(_u6V zFRnkvI(uP$fQgeS(;1(%@eZuXIv{(z+w~#92Pu?P(@9Jxhz}0ddumI~9s!LMefT~R ziM27IGxu;m?U-Bj`;LkPW5FcUDm4C-P$9LQ0wpX$+GYQ(6!bm`F%(uu^p~3F(>zMJ zC|_)%TpY;jm@2~}w7}LC=D3Ab?SLH_>2TYRut^G@mcvbTw%1}UM4iTB4eT&qsr z|Jig~i$h72DBljDBJ;S+o?M&pG9 zPK-Q)njT(MPpP>!BJcH1wtG)M(K_Nb7%o5K&6!y&_$MXagi!;*-2p zq(^dmeEcic&zZS3m%vpjuAyP^5=wSQB2iDmdoS~!&qRac9Wt3vDvBbjo|nz)qReZk zHcT&98zXcKRSRD2Lhk9A_En9zP|TNuUA14m9z(Kd&yWbOjjtS{pO9C54J&c>b9pY9c7 zi1eI$=Ues@UWmVPx5)RLES2}9poSvoW22xK$A#_2Orinv0=7$EsdI*nvlC3=cp_aG3NdD z?J4P+hv=|G(4ZkD{)_VT#%VDYI$5CTLRX(;Nf5!HpZBjp(P8GJv+FKWd*Wp{fAC38T(KhDH8 zj3?#m_chB_L3Y02D@Q|H*mddU=WyVcf$>0jtM67bwL3gFzQyMI=&2wdtj{roZY<%E z*(5P9@gXq?&$z!W>~>Fq!5PDjHs#mX659+*j`X<22DJkY9DkoV#OHZ|<**%oy|-?- zIOxSCWikXv$k#U?2V=?HGE_${+g9dh4PVq81blPJ(5&R4_gfGW{7&Mt1_!EB}zKw z1DjdFqphAAam&jEGG<&KcpgL<#FThO2=J&e2lPLR#kJ=c#qb9^7v)w8L=?!xKH;_R zEUuS~6qG+22PN5DMS~YfruFIDVz&%DvDKJ8R*LV*r%TR+M?|DfWt0Fjd?R{Z``;M;S0S^Hw|Y8fCe7Td 
zn7}-D2(e={xgLuRM_-N@zQ^G#$XZslpmM{qthyMUe>-Auj*owO+puHUTdR8QmXQqc z;+8YmQT&=;GIXVH6-SsB1eDOW{fw|49+aaA$g;NlRXq)Gc?Gp0hY>4Ln=8d-9K(-Lv{?F0|y+Efh5eRpks1YK^NL6yMvw)b{+7 z^Z0aGpmUM4$otc-rrk6WmUrQQ|CZS5*%I9q?B+4vcN;B*pQe?(jBDx+-@yvCP_+DY zZS>hhGp4-1&23x6zwiEB%FKBt^Ic=WrF6UbFB&;9ScA1(D@ezu{2z07kZi`)x zoLcvpb>vu--P>SJLiVvEmQ+cw@Nd&$8FKIS&btt9w}N}eW-;w3{QVE1NzBur3HR!b zwXzAcH;dgGcBWv^)x^n`Gof1yZ7mosRhi)Fx257l%;Qd6B<$)dT%jBE+6Ss{1s?KT zE_r{+WArS?Kg%{!Qh_{r`086rXD|DyN6ib!Tt1*noHWKQ6n|=UDVtUKRMr0P$}%Ja zLRS3nont7~f__VMyMOAtoyYnIJF5`6!T^TmPli1Uy`TI(n#!U!I4j3_C!E}oiep$| zYT$J+8%z0}rjQGX-DSCZ+|LIdB8^Y5+Fqd3U)jnWx={q_&CAyUbK zxA7oa=F8#!jTayN@mycv;XR1tA(A{xxig;dT!kw0YXy|m;edrCnf&2?1Pfv|^|O4H zNKp+_qYOkH6ECcPQ-B951J7y1YNowDDL}~gj`ch+wl4Rp9X6#(W9(-lcOagsyV}u~ zO}*$rM^Pyxh`p5tEfE2ctg3g`h!=+JqmygsL!DNaaDV;Xn0jVT0{mE1i=lhcOFfZr zEKIb`B{E75^e}zE)r7^H6p@e2kDMlbuP63B_Vt0U#^uec%2ppNx>8ge(I!3g%KYpM zW)iL+W#LbqAv`-6Cgv&#{mb1u1VhW}#U#Wm5M(uCTDMdxz2uDp*Q?k(XTei;QlDV6 zvA|+uQcDPk8J?+H7P^*`BwY!WsB*M&>cT7S-B6vE;87@l&X4z5vpWky3SG9Ju`AZl zB(U#MvYve(%#gvY5H0^CKFB+($Ln#T&yE(?m?|OS6`^`bwps@2^ zxAaVYEaYbX8skQha7qW+q@a3bk3e4+`EfqK^ID{QR-uWpIKTy*}h4 zp~x^Kmo52%mfgL6zrd%;aYD{GHDhpWClqy;+sr@8j=)&5#S;XTnvPIT{gA^+?7m{v ze#?XowXl&uBK#`MqUX-tPwrOz+Vn>kWC1)M0Zz3vbIs%%DkIu`GW2U%Ic;yZ^N)+4Q2IPl64=-LlORAPc z(wbbXQ1K~?Ikx4rFh{E~MmL#FpK#Z93{PTcIM=)i`(P^ydXNz42O=Lu*UP;3@BjD@ z^jDu@CHo>^O2s4QIH!V3#LQfnF|fO^p0nQUSNRzvL33u_Cbwdq6)`~1Z^NpX^5^Q5 zD_m+>lB$#DOhSWnTH0L$Y1gR0QShY1E~oNf4JX>g!3Sx13eyzu?32Xpa|hqgF`^RF zf_!7b!Ps?I+1Lpp3XmACeCK_e+n5|E`7UxBpEH9Xhd?tGvs?)F`EW}QKb%eN4S)m* zWo4=zHO#Mbz)6^f6{_59N*3v}(N$2WM;5fYANWvab1tvqE#a)E7W=(bmz z`9scA^+BX!zpSCfMTwp;l|=oShYZwIR}u~mxNh38t#pfu=qAQ?Byl1#MHjaE2joTH zqs6cL$btmNe6UkDn~n?%wPeXW@K`Z$QZ?uOs!LicBc^{^&RO17T1efAG$IIF1VRr_&) ziOV;tDLx2Lf$5|mO3YwI;5~}~R3#ZT#qv0E;n~;*jFxrhL zghCS+Y@Bt5cRwb_gksn<-}5)UKTX@-czdv{K-$~;&2-k23z6Ec?+)%RzwJ{QV{X-o z;v)sk3aBOPrn-|N0VVtlv|TggLr$qdXyo3IxXe|b77FKRm79fmoCm83=autvZtdMWspQCC91wC#c;=-`dG8d`I)@q2&y=*o^nfSCk 
zz=)I9sM?_=iW5(5!o^QF{TTB#j3NkDLh|XC?5286NFlT@F@TwDPd{0k|BGf4v+dw0o-T z$btV^?&6FZ&0m=n4INe-fR~4^J6g~t;(sZCuoK6Dpvf!vu znxk=Szq`Z0^r+2X#mAelqo?Sa<+vR#9`0!eQ!C;t1Wrfg982&6C&_@7;ur|7^rd&uMmyYb+R0OIdGmq?^C;Ro(W+}xW67d!D3>CUY2~njK=Xq5qKmixBt!w2(D_L{5b11> zWSRTkqgVXM@XJEKbW;^{z?uKOFf1b+6oDpm`u+FOds}q;$M&1bDm|+|I3;B?D=2i@ zLIosQt`>xXIXc~g4VbW@LZb%x+M3xt%}O_E4(x)$O_NAQ*!74U{RH9(TkvUT5ERs8 zM<;93su!H%+f-7SQ8Y}MEumzWy7~MuhtnfgF?}1jdgafmMRC!K;w3~IXfG&dVo|+U zQAkwjdl>NNCH;xbmZv*|1sFoKJyV{p$M)98+gCntF`q6r$^XSi8W~k>WrV3(BZz)! zP^0C%ig(qcm4B6wloVIa@M-DDCNQDTOvfse+0jbn&c?fz@uH_Emr9+@BPRRqF6$>5 zNbB`8UG+@{PiLlwuIQ@A1=O++a}_W6v{n`i9k^RJ;7CR^@ql|yV$PE%22Rh+WO#V9 z=8~MJtPqb0y<*1vf`@r;yQ?|3*6>GBpTD&Nn&KHA9-c|UcX>X$n*^SOtLn>8bNyqWX;4Q6%p!-Bu7jo@Tp**6BDnr^3S+>zz2@wYL8H47nTlxD(?ijh6t; zy#6^vb|%Tn2tSsEr%bl+-D7w6#C9`@er()8hK6PC==Aiwj+g~j85!&;WE1{4v)Bqs z70b_`Lnss5mPFIj>wX5@&3mCn$x+EBn;pf;cskBCwqcUIk>ZTNtsM&YA3uY*^zfzL z-|d7IlDJ;&ScxOIFt{)vz;*I@H*o+0d>v!u%l0z$V)%l6HlVje`!KMI(M#Q7s`Uuyby+D1z^Is#bTL%f>l47UltO0LOvD{)osd+%wa*b$@$A!IdonJ-5aaLI*PyNaBQY{LwIcRB`_?JK=B_NP`a^VpeM zK{nRNjan?zk@;R8R6z=Yq5eSvKP#yIBXE$X(PEpin3|Y~QqFY;v1MYCFPMrW{Vvek z9@z1UeNzXgJLO%w%>}Q~ zQC&EFRn#y0_)BqO8_=@R;LsR6cCFwa|a3;p8p-VR9*~7!$i6 zRHVx*$oOP^_i{Ptx$P6u(q5$VEiI#LqZi-e&d=+)L1=skEYvDK7dCC&gToIHijhL_ zEIySduSNXB;}n%XLXZD#KLea-n^2Zl;1>afrC1F@|2QUcRLBL;#JE?Qi31Rv-%S956JW5S<&cumaw!pEthMBNe=hI($O+#NXC~tD}R;RbR=;l z{E5K85HU5Dcz}4;k3ZKd+kG<&@#~l1k>nfnvjVm9TFlSMycUE*594B)=^r5Sff7+K zIUu~xrM5a)GBMO`4Ql6PVu1?v{omz!KzW&#zt?qTuAR#d3K#$>(Lq15)W~;9gns3B zGI_KtmrH=&f>ewE(Z75iy5W6)3$j&#mdClSq^6{#RPp_V*n@`~IfB6-=j$3sW_8A^ zs2P|SdX-R$-)o#J)xF=y^I2S#YBRj+yz24X=hmOiRmKrPdEweVuZeBMb5lz+|A4-qbqy^+bhe3j8uNwd!*=0f0qDx z$&YhJRD#nKPp9r*WoPewM2`wNp<}HsJ#xEsKe2myb~e+)`+cyE?cu&-PL#0l`P?dn zk>_QirDptm`-{}z8oe*4s+vZbD95K>g09cgNtgf`6{3JpHTm#&o!xvaM7gU}LUWoF z&kS(ec_V-z5h{}|OC3+c;hLP>Wckq-a=(|`W8>PP5XbV#g$LpY3P8L-wJ1!Hb3jTz zY21x&50wP+-#?7-_tcsV(FZ+WRdgMVc@LJbTY50!v^X>XoG4_9xa zraO2ARhk>?ZH{)=OgDZ>oe7kBx{&C{S{nnr9U=t)X6!T}SPz=xTyc2Ef9=gl>n4&H 
z8W|-y+IGH>Km9P`LQz90NmJQ z{&ApI^xRxaB-iQpdhgRMw4cb6Mn`D>uCUa(`S)3gwLH2SYo$%@kp&nptot;DR{!NS zLEWF?U7M?`GNYE#*y8>yBJVD}4yX}ka=UQ@jk&x}GS}LZMp{s_M{q5Knci%KC&2W~ z!T_BN`S<+R^JmK~LEt5_G<=ibZ>@Rhe{LK77v|M?8_Ub(9M@WX?AHsxZgAd60N9t! zM$_+Z8?l0#N-*I=+4&kOQSKIa_DH1> zgJ7lY!Uj5h5uye?d{O!EVO2|O z>%DvTuq$kDeEIA_T=ZhD?G1r)0!0|u#E?%rujzbyt;9@h_>0f^<%6cnZtWjzJNeVt z{=T1ool5!|5QEh6^{XQDB4F|l9!y#zl(Ac%4{PbfdrZ02_3`burSyY%vs zHi`=z5QTo))tBbRD(Yt}LUXJJGEa|q5{WoCd|Qq?IrW&o%#C3Y9K4J0egk4fxCmN& zG5d8OLmi|sSpywLPmg3q#)KN=(It4 zRqU&eH_jKJbMs^b^I*$&6KO5J8fsxJ}>FrcBumz3< zq)dydham{(Fcu-)l_=VaVvoX{$9+Cm@_YsCs00He=X}hg) ztv&(jj-LUx=b8L({?`P4XE9vMSp^ozx53gga;DNafo>oN>yZ z$1J4fA;YkFsJF0u8RjQQ@Tv#Du2dQiRodo4C1 zxZSBFau>&E$^TnkCq-LiWTXjQ!5iLjtj9SU8n#Ef9>znxq{aK~9UaPN!Pt7}mDSl( z!frX!DYkXlye{&?ukx3xx|1_e)u}ItaQ=Q6(O+9=;WbvvN78Ya%%@C2J`Z|U2eKKH ze*8jw4wUR|_q~XFDl3$o^i7t&6lk+~r`?C!R62Ovkwn?NcFeIMO&**=bYn~>pZR}1 z6Iqi!%mVJRiLS?Q((vO(wWD{ZBf-uFKMYW)=n*c7o%uyB0IoU$%|eg`{~4Ja6;y9J zPAIUSHCwUq>NJf*ng}RJNlER4u#~2hRqV5E+vKW7%y74s2R~_o%@MF25jX=*6cflO z0gsmUnT`;he_|J43(#86qg^Qu5%=!NCQ109S4d~uMnMtx(sS0t3-^Q2*zItqc_pg2 zOWhrRJ6DM0Zs`T|5LTQaR0U@#zxU+CEHEV6uLq}fI;EZmDeRwR*O-ja$FzUMbNm`U zK0-{3|GVZg-;o_Kw%4n!Axrq|e>QWWg2KX}$VflliHG6~1FL+?0(zP~uqk_eNAhu% zPZOi>56wL()^h5(%^l#ai~4)3xQD2@>A&Rjy=iB03%O17?cO)N3)V zwLAbArq^9UnHwdfK|+Yelqnqo5pLW7%*0S$?y5@q`y*43^~A*mfRN~RXlj8M3W;|3 zbLiXxzn2H^xOcn3*Fe|(C5!M~!jNIP&gVJUM>`F(wd?=l9U?VOfNqw&7)qo>aqaw~ z2Z%wWvvJAB>&_eu%}~Nl`|;zXm2-&tE5(968RS>!V2ug5;@95jCy%0ib*W@jV=`D; zAh(A4U%->|br~NJ*a^VsmZ*ET%NbY6(((`CyBty3EfqUq0EZBJBMUO03MZQ6)KsV* zk)smR+LJ<5Fx&n*$;G*gsL}?z+Cj2jG@$?Ais7&Dg=J044m*(ZqWb2-$m-#CrCgyQ zij16Gr*LS)1WbqW=*Ht+=H46SU&0QP`wN*@=2AHiW3(R87li)Tl~L$XvVp04Q_FS7 zf_flZT0%jA?BqpU+v|>)tIz$UE?ANI*2P&yfr%I?hDp zOgEQ9jPtnFVWbh|)Sxz3PjTka&n?IVN5taDqjGHolCcVD4)g48S@cTQ1=4*lH6()F z#71$V2s$AkT2Hz;eeS!q6>XXrj9t!hcf!Txsu1_4ckfomU3XS=0C89WUr3wEUsz`R z>V&IdyGS;N=2O=hs`xV1fEm)r*oLS7M~&9T3n#UDXOQ_e%s4@g#ZQ<#fm!NhwbTGW zoy}`n#l<~e%24X#3k7}rn1Kk_5XUT*&D~xar*qeGt7lvxQQo%SvsZ^l6~*-xdb>~5 
zs<^&5O~bV4gpfLNfWH|mhm0mgmM0ZK?!8S~MQ-ZOmw zwh(+~TWG=wm+%s1?;Inlujvmco`DuVCUjbPJ$omr!-tB?(&AusZ_o+ZJ)*7Ysi=h! z^B^v3R%+`7hvk*65jcq)xZioB-a?!DjzNv};=};<##m8gW?HY7hzq>TlQ^;IM4|kH2l#Dn`AId9zF_%AV zvvU*lxgv%sW{>C-Cy{x3;Z0K{`V>z}ux>wh$bqRTI2RW`hjtBPi)Ye`=u;w=l%ONU z=-Bs*{8p1`%3t^0!^B#B9(ur&ob&;8G~_zM(N|q;PLid$@TJPW{MUFjR(d+|#+3*uRnJfI%rPxJtceK`oI$lCPw=gwnE1OD_z1B#Q`n9qzM0N+{8JE@9SHfVA(;W z-CGSK9iPZ3kQh=sPKM!VPVwxYLhSjw$`4Bz6v#OOD)8LgkNkK|=8PI~ODs4V2 z-0&I|&IXM`+pV-BsgUrFhtW(-?McC(?T4E1@^#!0>A|~TmsOapdG=wPRx9##z2NKq z=S6_JUEO=*>TJW4(u(nBKVM%ZEY|dg*vd&GC+Im+3!r=EMD6vO>eOa*->>I4yWyT@ z&9_E3%_1uRUWvEg&#!2T44Ckh=8?vEMToG)*iA10&yyIBk}WX7mT{h;?%|LuGUM-m z&oPyRU%ga(YO)?FSy#1E9`MU&`EkLg{i_yi5w5&qcW6C12n7^+GfhDnGLanlH7 zzwAVInTG+zKcoR_H5m6aouzeUmqZW)FpSC@d?Fl~yF@$F7seo`uLewx!I2^0c1NO7 z&dPJ2r@)HPoH^D2dYG&|E)K@gvA-Ewgkg8!2@`-#`)e!V^U~aVM2Ug)F9TX$jV|XJ}2h)&4!w<8xS@J>(k)fYzz=Kga|0I5AtSa zIe!j+}Q5kju6DaH;gQ&5~PgjW2>xwga7x^Bj@#H@zT>eAj-532qa>9@M_k z2OxbywM(6wlfc>oY?DEr?dBJfKG!V_OMc9u#p{klmH3@oN?e5%m>-I)MVYr<(8yit zgf?Y=RbuiBdD6gR6g{HV0~I_}9t%P;Z}ia@d&?fO$MMbMCUgLtI?!nZR2_ zB0s%BXB2a%B}#xwP58j=2Zv@=$q0iVBm^h(h=!_5pR{6y8$Rcip>YspWkBy~Cye@9*&?g8TX0xItzhoxEc)i6!oT5&rIBI#8~I zAmcuyxUNDpvWN2sv6tvrC(PS>3t?<1ets&VOWbvkZ|x&N>rAol^vIn;eEu55puxy0 z6FOf8{^Xpp`{QwfgK_qTq_NICBs`bd^dQ3c?`oXu_IklXq3Ob3FySamRK3138z)lh zGEO^*rc~C=(;MsMuZMpuy8U}BABJ#j+QLy01PPTeuuh&m&UuRp39>9JoBU}(rcnW6 zU1;iSGis-KGE{QkBc##T1Pof~E;N!wB#l1;{Je1@LPL{n$y;>wIb$J-qQOgWu+&8P zjas3lPO<2FQB|M&P+S0;Q(*hsKRS|29+F3NLa0PXYYxajtOzkQg2xH>aN}Rp`Ff+; z3aI47#KeA$RhAOqOK$b8O@CEhku;QtB+ZWwTv2yzIhq1!X*Wz~r>7Gn0%%~eK`An6 z1w>yYG-&9yk%%pjKVE|IIPY8Zbe6CZyJ4D!0y{bg_|H58zu5});zfi4x^42Ic1BSR zxaR*owGl7h8)aDbj3f}}=r$OoOy zS$)-Ud(aLq?u(}F*e6enr1&?`1zuEoln%3yBoB{Zk|z504#u$OBAxuFKp~E^^2y(q z7%%d$kt?Q9E7hmY#&zYtTDc}Hto>(~qx3css&47YfhzL#(n0-coeA0?`M;7$R+uY! 
zqM4L_ZK2Ju11FAA=0q)wuXHna9IvuxVPjKjZ2#1xXplFOz2E4`GOUUfj*?`G3+%c` zH8Ykf%MpG<*#CMCi)UD?oQ*BJ7?Siz5_E4*&Pl)$%BEE{BjvKpb-x>*YlTtnd6(J3 z&l&aq`1$*RHpIpr{GJ_W65YUI1auc1WL{7KD~o1;bK-%dr2M^fByDvWM!}-#-z88; zGNU3u%9Q58at=bAq8(Vf^QL;HLlrefFwUB9WGzYWEJy}Hw3}TAL9uE|95-H*rsdVQp-OPkaZy z3JqVs19EER1q*z(`zO!mf5m>QfZT2A;?P= z&a~gs{?2R-#GAxwA;&?->?8Q)B)sM3YRslDLwLeL8A)k+8I#{8wDx@r!yjb5MqS=& z#oUz;B~V%rV8d2eNwoH{to1o#b3{o~)QLC}lPTP~J9A!zPkHXNJ>PGG!5p&NIBO=$xMWd@$b3C~3p$lMLf8GXOCV^w-@ zgk*X%)Oz^mgIL9-b&f9gTAr45<_jwNL9;3Y&xA)X=1Z&0xgNDV#sT#9$K5pntqa%p zjrS7-CJJQsT^PE`v#e;pl2L@#7K3#21_&SVdT)T(kuP|EyC2_FICNxX5P#c)>6ssV z!o*oj{;rApR<%+r{g4Fw;&S-K+i3evgP5o)rnS-%l9uPp$VaSQ`g#WuN1h$tR^Wep zP)rx7r?wx7YR;?WIc3TABtckmA_yn%_YtP)2s4Dg#k|GrdMK%7yJY%YIPU(u?9dPk z|1<@K_AAmYSndO*;9XwG^|}ywEnwC$Twublcv-A7FK9o;E7gJk#rk-4ThrW z&GmlD2rQ=-D0m=I0~VQBPNij-he`NT{u~k$vn4C( zdB-DVCpl#Df$X+MtBxI7UZ+)U1(}Pe;mQ!q-XrR&NKylaH$<%*6dH;lU1gUAZMRfX zJt*+AD{>2Rz^%|8Cro?WTl%eR{Vp4MyLG(UBBgmVRY!zGTU{LO_zUFPP z+>o)U{cs=&-(I&E@9SuBwwB}M_h#XPnr-Hns51C|TWMW2_DZ<~ukf@gNj>zRKO_@- z(%?zd@1fZF=7JcfKH8|m9g$=GgT3mdJ^se)2y6ig`nZZF~uB(Z4&^kmWk(`gSjcuOb zp&GVH-ntPK$q&``O3Wqw*DP^151%^>g-Uhs^u*tDk2l+g1YDKc(&vMnFd}V}v$GIl zME{Y1afzzOvJ_t-w=)H<%E=U&u>8H)1&-<>EQqo_#t~;r*RHKWpceKmS|mmbEFGI# z&&Qj&fqcq@^okhc2MeO)v!ZA$qF( zcge_FPqI&bZy1<}(t^FF4mTqRje4>L(U0}vBg-HI|63PK9J6~UIJX7Ui_?bI7*zY4 zwWrl+D{Pj0;k3z^h_j!eira^{?sRU0jW6V~=&~Zk05W+C{b7ZrZ=cX+nUAr#j2vkHk9Zmnw=|_Y6bA{@yCFLQ0msk`~|6 zrZi7ODbqCbs_j|X?N+%mxHt5=Vg)~OeLqh@5nAJ;>AczhLC~?}W4N3&j;tRgyTXvn z*$W&S_9NiYC?mLe<_xEDu9hZV{HXE_ki|3)V+HsZn@=#-K9)WvGE%jAzZ*EVqf_Oe zIn;I9*5qk(Vt__W-an1g_*`%R1NlEj{TX>z_^`fAvX$u*Az?sy8_!DG#hE>tAoJS= zrUi@R#p<_=@y|xPs}&kf!S+}u5WlwIVA-D${A!bW-1(zW9RP8ll$u<1hX$J$Gzg1q-zxp-e zZck+?`SIbbFW=^zqFW@_`i5C}!t>J%3=D``uutchj#GuUU}??TZOw?q>>frgQ~h{`yz+A^sS28~a&=`RmbBW-Cu8fB0z@ zqF}&68kNE){HIbQ4p6OKtJbcD7=d1gC=3L-u=@;^!iLa~n_SHV*MX3-8;AmX&{jjc5RngwmD7WY8t||w$E~m3Vy(%mgh=_Z4WdS$@-U*F5 zWTBFJWTmy=6!2(R@&5c7n3(TNHYdh9By;JkTHoGg-M94E1(R4+QX 
zxvt+jK~W%q6NAw&4lI7PM8Kw|0M7IR`O1mgum2lSfZ`+st2K;~2Iv~a%X<%xs;_VNC8F*Lir}>8?2Wbi zY&9Ls2pM+Zz-x^` zLF5`)V8O_yF>%l{F+nUu-82e=>(ic;a)6b?gF~|-9vV%W{VZ^aL8o;M64qE7-pO<0 zoDc%c%o6PSSJwFDRaLzqEFo-HA5d_!;AwZ~znB&H3?OPK9`lx5K*eQ?>Za~nWR)7Q z(Pdz#K@9dJ$j6c4?G~`nHYdmY9z*d2oHIUBGQse9G43dRDI4(t(7l{;l({#`wzrzGO4a%RNFyTnv{9^F#M$={U zv?<#o4Y+?;Zx+Ku8ROfnMHp#HP!$#2Frc%7Z2Fy<%V~8Xgw-K&?JgPANY(at{oQd+ z`{jFG?xscjpaXQkI0wYi_~(tl@Apj|(t^8w)E*y+0)?CJv}?JsJfVH~TUI5q_aC|a z;CvED2uWATja6@n#w2$5-DnddfC=qx(l?5AIzE!Y#GFiz#M~1P0-{_vr3AM`Sv$Y;#Z!b4Sg8V*|(7)&J~=3hV2)Dx=<;rC2X*|zzTPhdeJ`+wlVqs z?)#km`Aqc2FL{y)mnV<9z%NNE&xS}@GjkJ@MNfm5GM#rg^Fp3gD!{j#?rC`OAZ5vN ziM1Y3!(Lh@RxmI!lE{=yVJs9N4cdKvTJEXMA{OSpZmAih$~laG{j*%_jckMlifi&$KPRf8nJkggE%?$E6tMU4hhwi(VeC2@ zKHlKO3QuVN>!jDU1D?&(Qf)ZW&NK?v;ZT(QWHkv&I_BxHnm{=%c=RmeqZV3g|yGJ=%kI z87>f$Tv;2Zq+R~PUW(D%U3&=FZ($s(;t2_S!gLpy(H79!Qd?@-<8>C0CpsPRBlvI>3k zjO8g~9aw2^KVQ?&DfpC}{FflpUlLLpEo5G63fLZpp-y$7*BrQgTDtS!!pi{I^;Bw~ z5BxDxmm}|1Qj;^we0f8!sDjvxdrUy!P8F!1F5Qt@uo;`7^m1}@5D{3W+Z;=0;CX3Z%OC<<+eOXIVN(OI2~q6bsBNDN>3J|yz~dO{jf2EqIXbueFK4fSnR~5zXKSQd zvDQcT!8aT^@NYFGiurXzQ(`L4i=}X~(d=o!UB73oJZvYa34@L+~`9*-@U*iV34r-2H243*PT@_dW zgay#9A)68U_be9xsCd+ENxAsAuFBi}j2+X9YvohXg|IzfP(TI@VHd0t;Pd;dGxJ>% z3Kce?@O9XRSUhUJbKh$;BY5Or_<%tVxh_V`Dv1;(4z8tKBqMDs67_Ox4Pjh314&1$ zJ4&jm{{R7Nf_rJH4ODT!^ZqX*u2&K=;w(vj7Z1LAP8l?fBL~JbIvRTnDI{H_B5p~* znPZK|!lqjzxM6YEyPxjAAJ%1d+OQo9GJSqmlKdL#^Km`ykS~r)j%RU~oe2yJ`}Fq# z*s=9^DJTX3_Gs}fcR7EU-;w8Ruwr{78Cjxr12`)S_acLWUPF8N6o5&H@cP*Ux-J0Z zb`OPjIoH6|D4wa7qJMJMB2})-K!&pPa>|#`M!(k}iwV0Pkh4E^8d4f+3ACJVp9Wnc ziUag7A}nu^d=nST8TFGp$)lO|q^lsvt))p1CUbdfdgw6d(|d;b*Dz0k3maXh$m}Rj zs(*+dj(ym$RJEXyXPi_9Yi-uUGYmM+@N%2@8xX%D6-Lg*pBtxz5wHHlWgB!M3*q8u; z`+1o?pN*4T)a#pcj+Hhn&n9Tje3Yy}>hCQ~$eM@jU`dem5TVTrF?OtCi>c4XW89>q za}T6$=^N4s-H%RsVJn%{YPX-pQ%dQ6|dq| zPq8LCtnp+31F<9n!PtpdBxt=?S|o%?BI0_^3a!(fpZ+K|jSn8Fah*mht;zaiq%sWm zPnhEs)oc<;tTt(T>$4zgJ0a^6l~Vjz3{6g#Eu5CyS%#BT%mlyES6~18>hSZS9ZU%I zt3~515+MQ^uu%Oi-KX-^X)TM4Uk7uFHOE-ZR-_O!`M!pl#dC6Z4S*ViH312+IPA@C 
zm_hw&{#!a{)KjT5XTtV4N!6s_pxrck!w3oXS3&uK7f(PCjLon&dV{giCqZ-_=4;`x z7=2dc3}OnN5l7QO7fTRueqT<0qUHgVb-lHw!Bb#X?N0K_SwcOR0&4AUc@8%|!UZPo*mwI*`Gd92yTSFRUM>IrSfQLFKnZ^O z$q+VTujR?UPZ5+)+dtrNn=Pho*eGs51+{>)Q_@PZs*{DA3n*~K#+?DR4C zN}yAEAuT@SR6M5o4lq+uJdC=A4oz=)UI_gi6N{F`N+n}04id8)x4*#hf`s4 zjEN@rIk`XT595k^)~KfM(8wF8S34B%U0W7ppn4=SL?&~Z7%NE}8{`pfZkCtt@|@-! z3r+XEAO$UJt3OZJb+L(X`tvhUyyiLoQ&b2%UJ`8urjeCIV6uE|z3a8#jRA=Gkw<(9 zvn%3fM8vZH+4Z|ollDe-?bEb7iICjA6tHMcA|p_5tI6D%W7ym_igWe^hDUCR+5G03 zE5=8A$NPQ+s5U5ZKnY{~U2dM+o`ZArcfSe}>XM$?Yq9ba2VsN0d_mV-Xk^)S3jZr) zpvl}yNR+zmE|&*v4A?_fC0!zJcc)iqcB!~8CE zm>B1E10QSY2pn+29&Bq*9`|LckU~Bj;dZ@PSf);c=id_1qd5Z(sZtK9PX1=AF$vn% zo2ap2^Z#a=Dnu+v6IH8u<3hoMp)?>cmSr9`ptg^V6&27$UJHgVE4>zLKzS}on zsiSrMHNaFrlL|Dxd{kVOOw1i5ZKTZ@4hJsQ(TSK{C79@qv?|G1H%UDVq08{zL}B_; zzwSd`@QB%UkwQZW6&{ER8_a1;Q!ic{k!aeQ2znXy_&S!f5*3o?sy8i!0e)Pp+L2yO z@l6v`>%|C-9~m(g`dgfd7J6qC-j1>)4g^E5m-gc)jOwieN3r_UQ9;}{D*nTbz9gsx zA*4<{@lw!qf2Imaeiodtw;HT88$4|Uj_ha%H`LJhkCViSP7npC1g7#GXxlCeUhzp~{%wCRq=ftf2$|5zFkbbv+R@v>;ACs7d6A1G(o zneD90PC`z6Fj@6SaY|D7eQ!Fb8t=Ux)OHmFdN-RhieYI1x;AmpTgXA+8YntWhHt?* zDLD;|tR|+mBlGIb$3-9Yq~5(x)`Fp@wGgM}{ZMLLn}nwQ^Zs(gWQ=XbkK(~Kxy9)(PFUDLlnWsZ;9kUe~VybwuvCk)=Pys8Z4F8K$)jE)vIqnG2xN0H@6;NIa=3)6mN>qI^bnItf&iI!Y z+i@RI%_e~nasEtT9Jte|e^)GU6dytojQ&b1$w_=Ul7SSi`WQg8H*#P!B@~kH{{J|8 z%eW}pu6q~|P%w}ZMM>#YBot{xN}2&_rKFMWR6tR>Bm|_pLt0e2yF;Z@T4Lb82d~@v z@_C;3%lp-z%*=TnvG-nkt+fGE`zLY2m1EiY@LBkUJmobxgS+c>6x~Ej^c*%&$X}Bu zioU=ALg-e$r3XYMfaC4lf&OqI@0J+YGm-Abg@)>8sNHJjLw^(!7nk$rV!A!#w_>kM zb_-1s%8J6#=os9w&pw|r)0YN0Inu#e(pcbRg^zyPFV}q0L;N))vz@sjSlMoUK$Gcd6bjU%+)qg z*o0v|`Y|$1#tRJjN=?a37T#kf-u)U49q4AvEM?){s`bzADUP<8H7;JK{d~|_g;<@D z_c+=(3!}Wf;Fu`iU6c2H73Y4iXf#qpi;5(u-70k#NvF_!>S2~m%6-9cS8aN|Znm&8 za&^pWAls{n+-5pTXNG?eMzH&sfxhHF7XrP?Veo&Hd!;7v*9Y^k@;-jH!wPMFd0G+9 zwMW0m)hPcSXVaNku#ijCxH?&LA4rqlsJca6hC1mg4f^wB;;A>`5zJnTeMrj$9(Euv z4m_xHZ~C;zsdB&??blDGUm(nujElFqba+%5vFdpFHUAn-r4XBMGw!66$QJv}Z-9uS=HaGVXow z4|nG0S7!>8BFHI>&dJhoL+r`EHGj+lIuwBbkdhy=5ztU|6FRJ_qa;n`y$}AjZom%@ 
z`(RXRq~LASc`wG0Ie{lVLEF2HO)mz7R8SHT@2&@ZWYq}fwaaoK1C>K4Qy0(FsIFcyn8EuL4m? z0^VV;^f8!fYZhVR)FJI{0Xb9L!v>#h{@8~^;TacOQNp|fM!Q6eLO`8%V7|*zk4dQU zwcO`dL!!-4<**7h$tP~hT}_7(11vZu$}8aD_@5V0fJcT88blyns;|0Eqf@fWMbU-a z3kT2=RkA=!Ki$1EmWMqN30`g4?&Qoiufxt}rov2)Ooe1$^&9|_JPY;shiK{VZ~Xd! zGLU7-1M`yq>=i$^1%^XO`VT*sviS!`Qd~Rwesvb7u*pMKaQ}rdu>LAGkKmO(1ZVSM z;JlrBW$%5(2IhI?PA=VG21#du?-4zAFiDlRl zJ^*}x6O14Y&>ug9>|IyOnO-R-dt6A3cT;>WNs9O^FOYS(hHm|O&_hcSqW@3u_j7CZ z>sOwEKG)vC2T_AjA%67-*mJ1d817N^>)d8ATg5%wcftT14EXuUK=k<8Y5sk>d*})3 zioJVyCd4j3j#YCZMht-3t{wKe3;mftS_vYIUm0XrTUp(PD!stN17mYb3Ap8j0?N(K zGu|N?AofMQAF|)}>-*nqlhoHIC0H~}_tRoMRdpg-`#u!Xu<;?!a`>6>9GC9G;vD}J2` zac>vDYHhcR#Z{h5jx+c*F=E(#;4S+y<5K;Wc#)F;a$5;yck{T~v9K};I6=%ax|7L4 z7-YP^@C0yLu0%LHS?|c1%t?Nd%tMnx^GEgFU?#GG;M!fH3lwWL!m=FCx(1Ua9}r6k zy=)}T)2vWLPNql__J`cQ>F$p{1u)sKjCLNjYk_-Eh5T6KAV#)`agalgMk7dhB~pz$ zVERXp*ZuYI?90UK%wFknH>#L!yo-r3`Dm=n$h|UIqk%N*PW1ZM`-)&c`QUfC-04ne zn`@o@r5oooegJp!+2SBKhb#-#q|g)+Jbj+!s^m}w3PF$zt69!{h1tMts<=`cA@0k{4Fw}jDhcHvSJah|mFbV}BaObq!4 z&LJ^+D3AJ6$9x1O+og)WG%AaESsFGS?aEOVWz6~6s4u(A`Dn|mO`NFQ5kC_h@M?y) z*Lf~M!p?oAh40(!%1vRWYXTfb-ITT~!}w5wd}7Y8_C_A=WyjDFB$|vA8GMu}x{w?& zLc_7#j^}*V3KaBZ=3Yg2=KKviq;6Sgk~{CRJnfL+xRDGaMeqE+J>{R69q!#k>D4`G zw(69Z2R$*N5sh9A|&A5_B@hfDmQqne0`ZF|H)IxpUu<8DM{kDYx zrb@p3J(+;$DvF~R57ItTEcY^h1|7yY~ytU9ks5EzHl>vY$DRl%9Wg3uX^V`JWLye&#T0E%`E?X=UwsT!RBbL^POKG1nqRfwA!6zJ5v&cuV>^@v>`z#xYlf{+HL3U z2N0_J)J~Q8AmadSz8626W5%$WPT1m&|!1Q-=;pfJH`TBM9b)HTazv*;|_~*2d31ezy$U zh2ua_mRDW$cpW9v0ZAWmf(x5}r{I*$Yz)_@kk|FEwnHz!zP>nFQ`Kti3@7Y4R1`P) z`D<*)?sV}f9URg^m9xjKPsi5-29Oe*EsF1mAD)tSca#oml|xi@u*xj)kOE`nXY(rN zj7~b*+Ftajy^OUe`Vn|vq)v;PAEgvYaK230YXiN<0K>)#G zO493xubKt_`87Pu+TRj|C3^JvM=p_Lrbtw@zZJhtC@mrJN@5tU&EC)}tm@O5>G7YT zr`FGc6$BGzRKJ?HhBLZT`~nzq14SL3f~g0juj$nM>%Ol&P%hL9yhoRa9FLYFU|?d* zBpn>nv$Dd!MA6AyqlF{Va+uK5_=FJ4TlB{%s}-gTefT*8!{0+e3KoYMANaiX7qwY> z!X>bNR?ZkKbt7^qeEF)>aH0EyV8j!#>S#2h$?RKE}iC75oSX$c!3DFKU8 zO~-FOAts>)S5NI(P_)KWzD;df#z_Lt)|M8pFZkMnPbFV#O$4%7S6RT(1B23xCMs34_Bbl6w3jH 
ztLnF7IJyr3FFWvVB$bZ`97uWX!ixt)0yumXQ2<|PI%u)xJPx_KxvKSCm`dBR9^oP- zF&EY|c?4viR}9#IjMZrH(12>8Mur1gHyDjUXO{*ClI@7}AmCvpG8CPCt1pKNDyJq? zfK~T*0mnHqkC_3@unQYTIr#hIBGN)J6ZGHRxD)Y;Z*{6p2bseV7W}FjRoB3A;`n}Y ze0_ke?uM1$rTt0aqV~D2sF4PyDq|u8s1xN$z620>a>#NqZN35ht^e!{T7m}~JyZO$ z!@7Ndxn=kCLS=p}h5(Z2@yBkj*Mv3-ilO-&;rdr^;XAd$U?f4OO}SIjJB+v!^Q}>$ z*FXMsv$yfB@Mg8sp9>?RApAQF{PkkTgth;oVkOv!0)4bpD;8Cn(%-x5D^kSnLPtgg>`Qi zp~s(#W>X3VrmKOwU|gosH&B(>Bx5o?z3Zv{f%3pfquq}MC4q2Cx8;wl zY+Nu^U0xzRy-sHT(dHm0U7n|>VyP&5%YEdc|eNbyYRSQNve_me{@+%?A4kpe(&Nub(5?tT|EJe>zA_&kG zTU54q@!D;Il_+UZ(f3=r57^dfs|sun;mr4UH3nRFInoiqxJ;q5cuJgsd+@t=d}aU_W~Q+`E$}iBvMO9&-|9M>rRsoWv((|Z z4=!2)#|LeJa)P;D3dy+ddH~G=gqk6p@RTz$q`js2J{)swv{dNLl}fIF9O6@L<+VYG z3CII<=u8bEk>$*2wDsu#co6~inrbso6z8bx^_$m2%|Q`+5vbyn7EPCE+_pM;1~|@t zgXxwOu8F2w&Acd<&byR{g+>jyFnTCBF-)JoMNwWpu_ysxyAVpfrK3kv%kiH<6x}57 zfQa{>6esB==1hAWj?0Ky`u>)e&-on_Vl|XT3dkz=&Y(A3&E#@wp`EO!H%9}0HkkT? zN@C!9*Qpj5HF1p^#>Ssc>ta$H$ZCd>lrG4Er*XtxDJ$466DalE_4A|h7g}1au@ZJy zHl`@Xr4OHrT zj6&5!T=uYGx|fYx1k|oK}h2{AXJwVH!LtTfOx)U+c}bO{Z7KHwLYFa8)b2 zcs;Na`$5|2kOi>fr`BqAnMT<^Pdc~V|3w7Ng!brr3%#i+ieoTh6ISZY%Ia7$06sx# z8)k6A^qMfj=oUx0Ie6Oe>H!G)zoZnAu6+TC6*lwE7cTnK_|a`X(#N83!{oHMAYq?x zAAPUSWr{wby#b5lCAsUVR~BT%sL9zN@J3a{?wb^@mjgLQWFfmjZ}Fz?dkz)rlSQn0#h(PmQv{*N7UtAjO$6ZS9J+|2VF&QL z>}H%gN@q`pwI_0!k8*OEM(VQ$PzVaF99FK^9{z;d>)UppoH(b%^xI}BOK%^aimNi|YCPdt+=PKo49{ZZcc3*(ZnI@{(HV)R(geJwp{5{y+wQ09{px z%P_1>KrJ`wTJgWlq#tlSVpm34u4=oiauK_dLRi=RV9l?c4jw)q7exYRmDv`2(F#zj(?C^rTAaubm1k!aTcs zQ>Cr?WOy>s6x24cK0<3`DIGH#NC8CKZlJIQug;(b7%N{0RNf=NC6|sLq{OQvBXa3+ z!zV7y%wa4p3PBmTxw)xqs0+%%R&Oa;*~Y=#mgbfXSEU?R+x6a~=G4J3cUfM^a{G{d7;@k48JgHzWYE^X4rowFms^%fu77ctfCln-wS3Cg zRxnhIR!s%qMp3{q@LBEm>E!e|IP^6irdkZkC;8S%p-EklJQKmJ#&My{&h#vd_*^UG zv?L#y?gKg~7z_M+Vct^B`;f?6uM~oO%*iS5bKY3_@*o?B4m1|C8k#1qNdQ1iu~!Dz z^I12FY?a*4DUxB`z#8iP=Gvp%L&H^6u-NC-66RHE>UF2{)t6A(OVF|c8jrpJ!jLIL zpQ-ilue*?Bg3zx!WV>;Z)G=?WxQX~>Yin<|fqv1ct0|;IOi2ElH@~dAubJ0B9H8}Q z5Z#?QUrNL&CoO&cK5OR`wRzYJLy*3t9!hh+#_Rx2vTCI)_&+P^HJQT;G1(R5xpqKv 
z2%u20?fd?^P&>t_CK+F%R$hk&*L_BhHs^AbZtRcqv%3UvtF&4Fe_1~})y%E~CKr(x zX8hD(0#Z8ZAxY=toUm_(qlp>%Z55Jq!7De#HYZM*{dhE35oBN-bd<|~tnZi=xY0>28SJK)qs=?;Vs;~ARbcljS2lz?sYnN9NP{EJ0(vEzaHdTJnF*b^F?UVeP+ z;j)K5ryPy6I=Y~=pq0v`@@bNodDumiq-r_qv5JBMzDvFK5CXqJd$4S<&P@zvAK0T; zd7tY&+-^D9uW@X8L>Db-9oAA4y*}@>7S=9fy+v2?jPhBh%jCk8IH_KdUb8Joy&5OVU$Xf4wVqV$tW4zE_Zu z(dL_zQOX(#t>#gj@L+NncD?&^E`Rdz5 zk67VrM+2|opETMH-mcm=EOy&fz%I=6^Yxr3+~f!ycrs3?<$I;ADi`eRB`FTWm1M1E z?Nl|b#9s6raxhk=5I1un6@E;=2$L4bt+p=ZKmJt|N1O0~wywvZ4oy+?0{Y@jcQlVz zAD%D)2E|Oq&2k)I;+9mWZ2Bff+jsn&IvW+dRft(RHRSv!cOxA%f`$!EaQODdZFYfx zF$yymjGD+@a$=WAYQ$6(oDP%b3)9~8Dq-fBil@5J%g2Nbe zBNF39i<}ys$;P(8TC*K;n6Mk7Bcr&i5~ZVE7rGlP+PYGE z7};-u$6xYQ;MnoQYK=odD?d6K8k+v&Dti^@jfS#?n!Oy|^_u0W<6%9+D9z9}Ayl55 zMK~28Y8;ilzsgzO`#zGUpW|asUfr?t^?TMS(kjN04{E!Z4;i_FHnb4TqAHL}Eu6rUQ-Xe>A5r;!PG5<$ z$vNs4U0w~kKxV@2?kd~ZABQ;05+$$il$Db5Ron3!))@F}n{Cdtlr>z@_!$2PP+^r! zgX(rO=pNdG7E`^uaD~CU#9t&^W!8SJLVER`o|O5{zzYux`^8?`AyG7ufU6k2;zwx5 zmqn?R<)5@!R5Yl!zkAxSgg(2PmsQj}U9a-<_|RlDu8)eFo)sff`rM!qM~=tIabC|a z(23+&PSvt)q-=JOIgIy^J1McE!Dy`%F!JBg@?3N?Bg?JX?!X>Sz{zd0BIR=iry zbGX??EQm)@J1#5emIqUI2r4ndomC`<8!uvF{n-zj;@KW2W20|L5huR)*E#Pmrf6Qs zR9he`aK#{&M_bCs;^=G5)ZUal-pS{-ugB67-4Cj3ywPZNMp*dI4Rxeof3T=xzumL5 zD=y1FumXc+IOsU}3FOgCGRZgF?{sK8rLG-cVWlpca<7jOya4Z)k_H#oZ2uz9`5|CngkBtA&*@;eIxU;U|&(9V|}7~bZgA)PO&t5 zuE*GG`p8in`kZ$j6c54i_3w{Lknp=yw!P7mn+6sJJQ?CWwTD?-u6e$`bV7uip6Av3 zZoxP_53cLCKd;oC1b7^hyM&K@!DBD}{5g&M8a$GxE8^G4v%#k)$lNFUo0U8aI6LDu zjU>TG?ey~KSe&8<{2u$WhR8#DSpoBEtJDB8=}jE?Ga*BSWo0I7%`)4YuPPsFekDat zcMcQ;8z>9;Q0ip{88yDWRzZ%t>6wJgOmFYJCvLO!V=BGimTnV;jvfV(|jdmvH z4s5)@VM4inIhu%wh>;10*!OlS_xEg;2f4Ui9i!hno2Vrd=fP5-1rtk0srw;!i<8*v zwmovp%Wf|Z7p~O~sN6X`oKk%>V*1eeEK7oX_%b@0gpN$W$QQi)499~XL3O*TDk`H% zV$w?!I(_%>_zK3?>gv7@s--u=0Q-LD9qO|e;nOMoMoWuny96zB;XcGom(}X2F2ff~ za5rHEWsk>~!}TVQPs4?VeiRn=6LyB#c`;tXHxZ}XKut+W519Tgur4=91t(VqH9F=x zva38gZxmbB<$9dd?5bPnHMYOj@K#e))QyN}7)%S`I)DBO%kwDk?C#u=_mrppPhh=F zst6ZiY30U4Q!e`)Vr2yFiI5vU?|L;c^PN%)^Zgv?BCOq~B$k(mA&@t!)pqGHOK@w? 
zQZnjLR#P)-Xjo;jufLq@N!gLfGbPf2GcU*{H2NoNc+dIjD@AL`$F6 zvN~k^OcXm$)eONME`cdqrSosSy@}m&Wchk>#Dm_>5_tEY^~cS)tSq#6Gv>D5L)0_= zT4F9I`%q{2utIJI+=@-%4o9JLQ$cs1139Ss@7sY_a>pF(C z)O$J$t7sM_xGclQgaC(hh01hmOZB0YvP#@5Jp+Bd#lGW{} zu!egJt=%q(vhurtt52Uk#K3LFM;?P+N^z}QhwsJ1>?O%Dzz{SsvcN%YFbV7FlHa7N zIa=?Zq?(HJnL&D-?m*#yY?pxP?p*8@71!d6URg|8I-4DB?6Kw_zxiYChSD;-2l1U8 zaYQ-xrbQQ;kCs?n{4w1H5Wx7qBeF=bilS~?{>P-_<2zl-_T1qWm;~}v^6~|`#GEgu z_NHQ2tMW%6E`fY|VyyFiekrStwlTEWlQtP&Y-^kRJDz>0$R2ld$R0ONue@J()VmVz zA`&f~h~oq=*Z690f<-!GHP4ipirTdk0kB9PEAC<|bVqs@6EUD(52b_pqxUALoC!B6 zE1sG;S05|OVxgZ=c^^}X#gp!N`)$J^EfT?fWkrI*J~;qBqegA zEmHltSqelJn?qCxl~#!=b_NgJv@qf!025>699`DVa^zjkIXE2Bu4;{U)Gg#cSm4mG zk;#fLGVYAK_g5T7XMRv=3(1%fmdP#m-H|J4nsCG}CtkC247jGpZD%s!R<)*XK}gS5 zrsZ&P&>wR@%XBF3?@#dP;jCV&bxYoTH}Zx(2D$EWB~lYZruht;k<6FiYNxICLA55c zdhfbjViNK3@T$_mq$N}&MhmX~zVRa$Id!b!Fz>t^4PMA<3JTL(IQ3F~=uR_5ZA)|s z`U~BPzc-_2IAX237_n08GVwl)K6b(>-3J|_Px{-`IdgcXJ42$vxZC%iBnNQL@8-J! zd25V5fu$^v45uQE6OwX=idLrVnZM%oxs3dVy(x{R)pDJlGfY|H%3v|OEdA{L;qF4u z7Z?Vf=_#_=jPKhe!!sZw&;q*fP}f#3n5*(v-GL1v-+x7wUh-f?q1SK!Ow&%P`>{soyGY6#&$_Rbv{AonT}W5bevseYdlLv zh9~aFL<5$MsGC8YP^1zM37)tX=GHo=OzN0Lvg=p?FU@|-0#(AHHNVPYTE1lzOe_J1%&6Dfh$1TrA`E=`p_8I_Vo>?l9jWNdOr?x_Bi zVR&K_5sz$U=VjHMET*4{MWv4^8EK7XzkXI8HU^rTQn+?E(#BQiLYbsqW)_|O^t+rC zS|}!FulS!hl=u{J=B)Za2PRbIFBif{zEW>*%>Z1~LwDa0 z(|tB^RN<_DIm8ne{ow1_ri(l|kCAMnPyHgs=wg-UO(cvSsHsJdp{4l!i4#HuQc_FD zH;C^Dx6uZB;(D2|roW1d6_{8#SYBQ;?MrK9tIXh36%`>c?2KFd>tqK%#8U1js!oJx z%|&(Ds?$qBTfR<gVw!Ra zp)E{RT;w*Y&RX1Ti-PI#G8{Chw}D2wj!uj~C+4JqHBU>U8xl2Au^;bQdGFA=G8Y>) zEXCZw{$kX58TbCQ=X4&MXC2Gqf=_>igHJkm8SrotLOw#>vN5jd56y{*Q_IFI-k8t7 zUzRJ>aF~#~e&4==m}o}dQKILw&-uSfGK?dH5Eg5c5MIV*FzIRExf~M#0Wdo!hcoxF zQ#}?j)XW|TUbt*>GG^joGf$!woU{**rG#>zrsTvT}QXN-=Y5~xF7vFg;0(nyL}(NdVsj%)$GkzbScz)Ea&kfZD^{s%6-#tVO+0ZyjGq^#d7 zeHuoussT0b^js^o+tPgMzX2o6lxR=?r8C4d+pr;N9|V&{ zX{F6-scL#4cXVC0I^d!_j2sp?#AH@_yNv=LE66b}oBa10ASE$|ol%0?_C{rNa@^PN zq^;FnA~P{NT+O!O`n}AdB>8TSgqR_B`!K?LXjZ)RPO}fLLG`A<9Oo!G&h>jnHVIz0 
z{u_^`^D&r=;K9-AZ11c`sJ3gnm9Ev=4ds^T1@4=+NX_^GNvP_tXZXDiRF4TU@KwP^ zI?pdywZG|SPVv%u9o%2%hl$kmg+Kym{eP}r{utFKf3xV+S~;ck!HM$u_*w4fPYQ~@ zG{3c&sGM~^CzSE0G&}cG_4Hwtes<@y$Cm5PRzYTsf1N7}jvoeU=_qYr+t-`1ib7Rp zVq#*=|FYT&s>kzJ`X_j7jWTmfudG|usu$_M{_B8y*5RVI@jGu4)Din^(n?f(e=Vab zUpmU<{(Rh%yvtyBecvIk3dM%a^TT*a@F&UhY!vf-2}>x};Mwf~%+Tb0Mo#i}~FRrww#QupWHJ?RV(o3mZ$ zruzF}d)6U-!gT=1k0*=c+_mDZ&g{CArEKZkS41qQZvc-u(c2eJgmP2obx|qqsc|#p z&a!Orb-s`7*q3rzN8WYY0;zuWuY2lr#ZS|(%L-?1rsT>|6rUh1D=yyns9QOa7OOIv zTh7+CQ_%eYRV@Ese@RE_BZR;8)s>NzwKOzTv1)J9$$mLkYXywnbM2NjgK%XGfNRyU zbJ=pOX^%3W*QYU| zV6HG%lSbh8{C$gUW@GqEmQsBhpP!kq4I7bWkDjoWe!W_0djI~hWATLj&92cnfGC;& zUPqx&-LVdU&YSZn+;6$;&k_&a48d?g?x?I;mrp#dBt|_!Z~`?|re_i-4Gr>b_s5>( z9qoY$hhEA0mE)kaWa7)E{{|>MX&Pt}sXhM0ih+TF3UZNYRyAGwDL1>l+)|Z@i=@iY z*;G`Nzzv)IYY~IvBvy#3i;9HiPR{_VdN93NyC_~XtG1@Y6x5H68h${0nEU%E!kc1r z{3jwNzULH%THF3`)ou@XI(mL&SK6KOjnttn((*sn+z?AyOpMY7r=Ij;u&>pR_B7et zs`Z~=+5qm?3^<2+YEL>v5khNyi2y0r;O^Y=!Kw5`2T!mVnEE^POfmiY+czg14B6}8 z=R!F)rf#|s|I`P16o039G>kMj49*a%o<4n==Xd!o0%54v_)g3Eb!23Y%a|m6?z?qd ziT@+WET7&vkWDi%Ff{L6#+VJ@Y45!IWhK^fX{u~c9l>oRZ@}j3<7omsyuWKeG!J6= z(^n+2z?2{>FtXxD@qG$vu+%FB4^L3fil#4ThD9XP9FoUmpfycI96j+|)UF1pRi!fa z-hr*qZ7=lz$F2k9JAXgB4&ml$*rig2jc*Qbdn8eq86zu$L6<&3(}cpKdiZ#kCsgj0 zN2k%h%Ov!D1bPEe^<@Ud5j+-x*Tdjfn46R|&Q_5!?(bhYoS~2k)~*G-hc_RqKsQ1Y z5B(h1zv8Bf5OU-tpHaojCjr<-LO>Q;)e+_%PAui$_*$ficUlQ}3L%7u8PJKVG+ohd zVc~s{hfR^1nyR9m*P5R6iIx8KbZ_;1Vuu^{=YOR**3=CRI}XQcSj04_MC9H{uU3gs z1Gou9XS-(Q<&{MiZfSU7KS~go`0FK5N^aENjEXwTb_xRnlRD?#!M0Mdc1=4xPvcw% zvyEg%XlSV6p*tl%G+pifjVzy&C_Wn=9(8uD>JBWcQD9#OrOt|@A1nRnE$j2H&-t2j&<#v(>R;21O%v;rIy!N2N%jH8{? 
z?2XZp>hkLcIrpyKA4K2`T77@L?d~nu7RBE6_sub#fA;sqERvYPa3Yx2Dph=2Z)r`# zAi@Ol(^k!kI8M6%JI4H8Db$wj7`)Sk!nh~gn#eWC9hjJd{r^W)IgQ6Hhvf3W7AOw6 zDXtFFo^xelxd&6FNv_|0p2&an^NTTAeojt%yxXqYeHdYG4%825bn-q1=lLC|Hg}XH@5|V3$v-@6CVz{z_WtwBYjWhrpah8QL@wnyHkIYLDrI*MPMk%6yGB7*f~q zE|>vd2)##^)KnaYF>@Z^xM;3+fd|ZBrJbopwWHN)Eht7BP+pyTuI@W)6Jr5GE*JNW zdTEMT%QC9gEj^fv!H%Po=jad^)#J0!&WvCFfcD|RpDK$|5`h>@1)Tm6pwvnon}BSZ zBjAUyoZBqJ$b!rnSl)tB9~E}`VD!2f3Xp;_c?AB$8iBZ+z^ z#VT;pTLmp@!RDs5Mu|~~#`|y8Tb+RGi=bHID`1S)K5E*=<1nspx=q>!spE*2NB=6U zLjC<&2s9pxVk!4s<99B3Z$E0B$GZq4@2w=0jfOOB0<@mjW5FCGjYDXc<^o_LNwx># zxI&P;(!>S1u9gms0!e*6Z~0?(ZduGaE(4BKBggf*E*|KRl3wTM)$L0s8Aa{E<$U&* zor2SYdCsS^#zjNecv0s*=(e1voSr3a;y7ub&+a(#t`vM(qD|0;!QmGEaZE}5eT-^j zmUV6~I(`NU9{(C$4&yAp5U|wSuu{?|wfX{YZgj$-s}xA@OJt`X-QxdMOVf-zUXe(#h$QT`7H3p}6l0RFs;c^I=ADyzii&jK)q&XT%F55X z6XmVgm)hG8u_cU+GyhIegCc&mww;NRHVVn_XsY3HesPgo!clOYTc)Qi9EXsuUwi8L zmy36-+aJ*uc}rXzMEIm(DuczWO?8OX(wV3(nIkW=|7Kp(dbHJ>n{bdfInZ}NE;*#V z4s@N*hfd`k0Kw?Bgu}89YfV{HtpeG_U2@M!s>Ik#Dh<@7a;1B!ckF08K;@=XCAZgC zyHI!872KzGVV4YMa`w6zw!c%TLdm2p5=b+}^SM8`adC02J=0oxW=V|OljSkWm+aU~5;|pq9N1id5yIP3Q4J0Sqi+7Wj`P;lw;wcp2n;d0 z@Re{QT$T^j_%}buDPcbl&nkBL!3yL#4h88{wk9pRptb3dT-G_-5o46A!lBPH3&_aN-L+={}(h7f^Y7^MFEWS@2Z{_r z&fn{vCrK-Uw-h!fi>?+rRLD`^N z2GG!o!S1KNi0Un|$a<}5*>c`mTu?Tu(*R79!5Wvz4m7b(za(#+q4Y%)JZ(~*`>BAA zP{9AFDWa_+l<Q&fKecbq7n?R_n>cXmql13wi5Vk0kxc!AKv9Nj=Rot6{RdjL9-h zdOo@L%0qLS$YB6+e-)!se;Dh`;Y>-0zR-&;Mr4SobT4577B1 zMG0-A7#_Kw)AR>~&yt1r7M>hVomBo{-cMO{&})3#GW>DN%i0}02s}=_&PI;-QLvTa z-)zl+O7JbJiwzwefZt-OWrY4O!Ui!hG27D=!*3kmf4K}4xA#q#NI9DgaP6)wSIoy3 z-weF_nKpqFX|Xa~sMf`#f#gIjw&)< z9FxHSx%bi!KUldPY@=$hZfX=Oit3XNmU33bOEczIoCZ!86d~8Mbci-v%oLEN9fjH7 zx)q78s@vC6MP7XW9(Mr??fY?Hf0}PvjJpgM5v<#$aO`>Z5*3F@wL_Z}Rd$V?u4Uoq zR?-~XtFB4|VNtH?#;FlGO6jsPW!v9B`xx5H!;dR94X|4f3M8!fF*_{DDKzi_@MP;c zdM+}J>?vPWmMBUh!5hsJj(c`v;UI3RhKM|$v?~@`SY*r*G z(|rE?8Lo@$cW(-dw5V8Q^on})L_txjuB2gyC>DD|oksu(&?kgIh^t7R!v4$0>cJv7 
z8IalRxrEuIJY9;$0>Bx#bGK2LBh$IZ7hnT>>8E##hQpcNZB*N7?qSOS7`{DEEbbF^NE^{A)2Nv2Ne<^oEQYDHvsG=tNVQ;L4`r zaIn086EZ$8Q=v;}!h~8IUwLo9t8Z?@%|O%jRzA1DzWr2POX^*(H)Vs|W8HJ-k{aHS z035gUpQnxU{lPUbaD@h1;6hhx`CMA)4>Bo@IJvk~5ne(9?|3ZZt5$#ljsmhSow7mD zkC_s~Jx%ZvjU7>M-DrzDS=FxJ;cze)8UQ#r449JgKhBUBWI+FFz;$p2H-kWL<=IA&V zcUw=4KuS!m*s!!Mtx1{td}py~lTi+xR(co!&LHX2Co9Jo%}Q1V%T z&BI^_=iN%Mg0D()&K&bay3!p)%06T94zDLpA|Mc zsrT$Hzsh_$udykK3U|QPIMJq8vjPYGv4x)G)v1%?@@(=E>o!&!-iMrSN19d*#ATof zteOdxUW#KL)n%+m##COZ2yX^%yCt|+7{3?8W~=?BTLpF%E6@i3@O!KaAuXl>_W}Uf zYuUx%09$LdcL=p0Kx>`;8p5f#nN{nxW2jW+5K6=_ccf5BZ*T9vHVq12L)D1knHna% z0v_&2fJYwYXjMIVKkPnfN{dk)!Og)>j3>iBuoxu8mzkBJny+03%t>{n?F_}zff&=g zMR)h5%=A#_afrH>&*GiZsst>??=q`;Hg@Zr-y1b$KLmG?706&ae>V4M=9G<^Kx35m z_m&MMd98#`P(n8YfE9*$z4yT(=PDQ(A4v!}0g#Be|K50aWn{-fHKHFT9T-5$QDe8* z+aMFPG*wq8jomJ9lk;#KOzg@AR5U7RRDt?v4!M$DUuLglyrY{BCl+%|8evwJVVDfJ zRaf${N_Jjl)@s3*OL+%rs5rE~*~Xr4bMyfDrds>K(u))m3dA5d_IHt~zK-!Y`ayBA zb{E7I6NpIjf_m)1S~r*F{>-q8z*<7YG{o+v>$e8LD9}mz3Dugn+kDRL%^^3<-5=3y zL&{%H2qb?QufBI+Ot(P5n?=_Lyj{c;@oOyY;ybOW!knTxAe`x7kf=H z;n|AI_l>L~pjL&26ejKW?6;VN0{0a~nmYGW4W0N_JwvYWEXL0X?HJ4E3U^jTpfFcG)6%nY^rJo5zepiWj;7V#i*HN(w!*GR=)+CX$%&_ z0_87VbF|q8RM*%B{=CpzCKkEw!n~c3M49K;9%N-d>CwaE`zbV~r&H@%vAmb8XS%3P zn;A~icjEvAoP#Ly)9V7RAEj3qgzegIHw}xz#2NDJ<%>J)in`L0IbhmYff<+eJe1P0DO9khLge6b1^J^XLxEm1Z397@&p1 z2i=9>kj8E)0rM*JQM{okGH>}Kb$+X)5$-YloqK7IqICt4+hJ9C4=^mym2Kr;7+-DqnGL-h_kIC4M{w&lRf^mIKPpMp z{QQ79fHC?5qMHxsxut#Y!Z|lX<&xM>q+Z%C4|X6$Gil`c)FW8R>Ptu;EIVxlP9M}21G=3%pf>>9A8dYbrm1RVJ^5dAwbpKp^J&D&}Kvud8^Ipg2 zJ2_=`lziTWdS)5+9$wGyapHU*D-|5?alF3^QX;hk0k`prpXcT z&rY-dNlkFo=}_&^TkH2+whgA19a2&nCtcQ84{$Q6C5X8SBY04;MyXDhIr#xXEXzG~ zIQ~)mFr!6$av*P(y%<+rbxL%oehg_;29G>1H_RDW?nxG}K`wdG#;jdgg$%$dN@}Sg zxmI?mJay5|c}groj&~kq(jN6r(N12p8i9-ON?YQf#qBc}S)(mZBGimdBi^mPZCsf| z5E@gN(OQnWeq~|g2OQ#Dx3|ZHgXNYwRp7cmZaSn_gFW}!F~7;hO^C^PcLiGMnWt2K zDoQM?&{CO!K5n%~^`gaTjFu*vtWw@a2T@E4;qBeW3&xMNty4b`JYmNV&Y*HgYH}V> zZsELb`Sw6D;OgzqJ5q`^&k-jKEkAbNEB0;Kdf?Nl`+UuQua 
zoy(rR4ybAVoL58m+lud1y$WH*fo6=P)`R!ClGg%FAAjZIdc*dz>yX@ ze%*ZrH+ot|W^% z?5OIvA;#DlUnP_}^ETMPe4SxQ`u8)WuFUtgV2vvlEk>28QuPBuNi(ImtY!n`){XVL zNVM#1M~%}fvNFNuvOYj(j1Xp2ap#VwcQs{y?VV3b78h79p#W6pK$TWvWkhK&PKHQ62NYTAkzB5rmim9c63$Yq_A zqVw$4-4k;WC(a$0e{3+hbC$A@^Q$m-HT2pvxWqB(_X}y@gg7GZsRyv(BM^)s$ui-I z3Pm!nugOd)itd>8*>cF_Dv7Ds3g+r(Bn;xv$4zr93@QEs`m+2^z9RrIQ{e=cbd+nxn2&T@_2&VXuF*XFC6ff?l(ADUeFo#nigos@Wt z(axs?o~BQpL`1}DDT$wOll)1&BRz2kUkZJV$aactbwR4#Rme7IdsW)nHxu@3HbON| zOJ;R0)-vyXSqi2UsM9p%Xc2Q?HH2jb7%s}(bAiZ8I-ZaHt*82BxXRp{R76LbTtfWz zvZD3F0imQtjh<@5*=uT6xBXSVhn)c3u{J1GKuNoJYj~qcF`ovZW!xaJUL)m85EL`H z2g!y(DK3*z$@fI2`tT=;MI|5RmWm))a(!C}W>>x^zH=j({C5_knCKIwoAwRMT=)(u zjI=dAjUh%TZ>wUL>NP#-pz63H^Gvbsrc8?xfz)YM(;-RBuRRc&A!YpQ=QnQCL6$ud z(tI7c97yc4wO?4v5GvV)GQ~mS`H{DLwWy7)G$4E6uxSgGiXS${7J-LZ11S$KCA_rU^2yN}2MYIJAF$rH_SE3=%iPf{cp_YLxNsw$ zc{{^1_ynb1@L1HdqTFCChPTcUS_yDly{X=1}(|8k~Y6RFbeW?*hXy0j%BWSm+k z%~tVMX1@{=`xTf=Wt%-^oD~*IDivx5IaOY-8K^~I% z3C$N~2p-07$pT91JF9@o7ebvnl>OWZ%w;A_PBEYC&x*wsGRaCx8|GyrjXU@K(`Ld# z@t3ZrEV{?2q?wS@Dx+HQ@u~q;dg(Td!X*Iy4?|->`Tm4lP0CTux%97*D)!eo^j~Fb z_qMubaVQQ165NbY*@N`C@@q3np+%m&C;FP1i+jl5G3Fp+si+I&p|nWO4i@Kw#L04~ zCMsyc0&|<3F3#@Ut%-NczCvgH3H|Kw*VlyL6m2CbGuFnnb6aUAY*L)dvJL?K1I$BWzxewnWthtGG9kavkT+-Z{}s?SKNi$&Aeu3 z(5B@&t!(wz@GKh(aOy%K8JeM@#Nr3K`P$M6?gyC~JuLXNPTP;kl=%I_W)2*OD+p$dhp?X+t`Ju{QLCPg~^MJH0@wmUwsMK}DvE9RE(ZbZ{m`SSTTBvg*dn zHJIiiEtRP*`9txGsd@A}-!iBMD;b|>_4jNsIi=k;CVd~ON223en0;Z(i0dxxNw}Gk zU3iRn9Yju0$PA@6iS70D4s(^t>#2syGz~7RC`!8}acXfy^eXKHD1NccWr+$oHdRZr z5nA^GsH;-@Bdd^gEq2EH)t}O837wb*E9Cz*_nlEqrd_+BqGA`p zf>JDzrl3fXDhdb$LrX$0BTYKeAsE1hg7hjNg7kzYNUzEu2ntB=K`A1=2Be?;p!3fA zeZS7Xb6jg?F>5&?Pr2`V@2hRUOF7i8!b}Qvdn(%xi3*0T&npGT zH?Yx#e8-F^qF9w~Y&e#}ch;Tf2_GlL&!s#eYb&>k&vlB-{eAdG(SR*k6xVd+!jH>k zQad%-oqiwhNhw3ebu{m`C1^?L6c!xUD=A_vya^KGy=DY;Ue38;Ybi~x;vkDhH}P8J zw32ah`lT-d9w?o>@v}j^n8#FgTDlJL+}1cc0|>2UZF{udaQ~&c0q&Z ztOav?%6{@5jgtu6pLL_-?6Wf=_wl#cPel_l{)HJ-t;%*~ynN!eCruigp;Or5YwNiU zo;W0#yY^IB!0F_O=|aj^fn%t-6Myg92m4Mus|x1k0DiPl&#qjrbjL)CEYU$;^34j> 
zYsYLP0{bgoHl_iLb{l6&y2yZ~O};pMbMRD)m%%=~KQrN+d@2u3 zJgIsAkw2amS}xy>MECcD&p;G!=F1cfXqGB16KrWIy5}d;9_(1xvd(Wu6i9i08Wu!K zWY54I|CgYkQ+k{@PbMz(#dr9^@TXEDeup|(JIGNBDU^=|Q5FLgARDP~PxJcn#_{xQ zDo|zCG-|({THCDQS=#n5F+qH~MTRfbL3EgB#&4X{$u4L2t z9C37_9Bkgf=!y#2Cz8%7ygDxD!Tra zo^xF97CYA96;+~alkLbkK(#SnQ+-X__$t>8K}=xNO7+gj@6ew1I4t?=uiLMY(%z-E z6&20vv0nXE>t3u05TQ$uAzl>cLU9}IS>Oo2y!5JN3EokT2b6x70g372)HtKS%sI$d z?=I*v++PZF8>|BO8|>EC7n%?XRSb}x7LOZW*E4eKx&OT;HO5)KrM5DGMEopTae_mt z)Y)INx8RhBc#uo_H%t`&MBOb;D@{#9V5RZUJ0Z}SBd>H~M(XBVWMV)ZR5r1}qp}_`A`U6=4*n^^ z*2!Lo%`NzV@S)Z@?E=4(W3JmJxU)(qHJ#38`wAJ4oqtvGP+5HMeRxC%iHwoma01#u zW}$skY2C$`F1&^wlQ0*9ga`GM8iQm~&CeJ~NLHcX6=;)d@Fk>xm0uVti`1_Dp7vc>DYlhSG4fB=@Vjw5TRF0FhxikJE z=}8>9adT#Cvq|kLxK)==ZG!y1&`O2D48mU>w@4);5jywz= zz*c~w;?L&L0}t;gDwaO87z2gJTZF=XQ)@dExWHqreq6=tw#0QFVc z1rFDUxg6!!;@$9|*kUA!C5Y}pA~jmeQad6<49bs;^0}JRb{Z_rmc0NR>V+6VspPbE z)pe$P*$Uz(mmoU4Bq=I@X$q3AauNQkUqj(6>aVcBpoj?6y*1n*Jdo!OYk6rC?M0Gn zMi{dYJ9Gt?IRpq_S_6fGUdWx9D296pImkO~bLTXSi&h*a5$*#lg*doN93Y}>S$TEC z4Q9agCsVepiho2~_N{pU#8hJL3fn4`_ywgHQKTrSuZ%M;RrOzTtjK28RehUXPtmc4dzo@{Feh179ztLZV)68os6?11SH+9-Yl7lg=Elg1Q%KzfyO3 zp>K}y5$HEKNZsb%MwimumLBe5H3LA*OYu4jak1H^GT_Z{E7YZ9A$OY#mFmdqq~h0gKvE8`*YM7FsaB0e0vU;)<dRm0v-D8Ks3o0ua-BhC?|?4ltvq1ZB$xQy7FXd=ViQ z=$CGF9L;Ul1+|vR!3kUg<0C#^R9lQYib_wY z1yjG33#p#0Ngjmj%oWH~!;!+R$7z#cq{Jw?CRuv4NBPEvF`)hx5&)-6OITZJBLS47 zyzTq<$^QsE@z0Si<$;$%_#H=P&f35EX1}zCWxJMDKTYvOEv#7dygmcGx7~~bkfB-^ z-SGUN+VOFR;RI8<2AuwvDQI(auB--|TL<95glwI5bo7qSuL}tCFf9^(VD86?Wta^oKCVAG<)fQ|Ul)eweO+I% zL#Ri;g(2%&33U~LYqQc--O5JQp0SX7!4>sTC#oO%Y09LmXF#`e9V)Wu31o;$j;DM?Nw*3Hiu^cP zNUAGY)%RiWwpgT`oOx%FW-iVSRxkV0UTy!rUWh_o@Ys^{Kr#<^-k{e0NO@RL%%HE{ zxPy}}r#UPkV7PPluF_|r6-DkKRPOjj3}-xwAAMq{Pp}HP=v=wFiz}x!2!Qy8&WQll z#~~f4gCj|)RxkZgj3z0~72Dq?E?fY!8Lj_s8TE! 
z^m)F1U7`QybRX6@3>%A^k`IUVYx$)21wGt`dL_oAD|w>&2~6{k5&)#*eUmgCvdT_6 z?1vEZ6jf)0DFE1 zg8{m76?{TKvRqm-ir$6Nj=Fplk1=PGvPDVZf2G+gHZ%&3++k8;MQP$Uk{Z&h39}#e z=-JEpcTlxM&AGESH3soUf^?|bwpqRKdjO_8YuBoHUS^Ry*g&mFJLO)6KagmkV?-~H zRLkFw(7V#%-hLMT_G_${G5dWgCgz94VdZwQ=x7vj$XBhIGYK{0vhE1e2BvVC0cM<8 zn#H9#2YbSnPif_7Tz7>G_Dh+*a!y|ED>K=Wl(cWy^K*A3;s|GA;U-S6LMPY!jfl^c zKpJn5Iq9Jy)Q1#nQetezdBc+|xzUWI?`5u$V_%J^owragfj;v@l?_1b@er?|^-;6u z+_Ib;z^|l{9L064^TQ{w0&erhg@$^0NHx|-wr>s#Yet)w1Tq|- z<4!|F*4*~|-v?jtIk#({mkV&@nf(~U{0oF^FlF*JlxL0j9Umzg2R|A>M3swP_6G+L z6M~<%b(}?GrU3>x1c9wp+~Qm2T4U6;)TJEE&2;v0H$kUac)0QtC&y9A<_{?2zH{nV zae+MLeB@niBD(}+h+Sp)i?m7JzhSF3aL^C5OkGQwnX(XP)!7(+m+GdkLe~ao>?8$& z0-H10Q*JkEKec)9MLX?`4>F4jx!4V7%v$mA;}klJ)h^!B$wBoBtdTkDSlKT8C5-ig zx{?@dp4Jc$nQbkfsW0V2U?tgxFuPe2V$`Fsp$j15tIK3hHMclzofjPA{IZ^Gbx<|j zpFugn+!H-J2_;?@N0>T8+jX4DalNz2!=dWE*zid10!IVPRcwm_eVTqbp;#tCuC>uo z7ef+eEHpYFf`Ox%=D@lb%bQn;xFfm{JxY*M8f}+)V&L)@} zj+ZCygQ4&DdVqqO$+8@e8MruGrFHnkMQYC}FJQg$APeK{YHdd()%gvfNrljgFs949yRc)W^9iWOTzVIqVfZHwE zDBUFjv~Hp%W#~fhw1}+ExWK(L-{KM$H zNV3S8pdEtid;Wn-TjF=&tOU_g@at&>Z!N`mabNP=Ren~IIo={1l)Acy6T7PqdsT+* z`ju@a_4UsQRU-d6LjA2HiS8(Nb{=OH{Fof~i}et)K}xam(Ax|~056lIy^;{2nq!^V zlUFzS((A3t-_{^%zXq9kU?%(Xl8%8ZjtO2;T;+iMlg(`hH_dQq$TxK;f`uO78rtFIymLF36Oexhj+HY^tZ1VrM!u|R7=6mn&KynG?B{>$ z#Rr`iBIW)gKw_^XY}Zy*0;11rW1NTavQNYH_t#w)fUo}RGnywe?7yyq8Lor3`l4mn z0~qBH{1gwsg2Hb#^^vN5oIg^YNMSG-wPl^$;p~Znc+tyRxfdLD3Vsp^aP&?ENgv8- z*D&bsDX^Z{Y{`X9p%>x5Aq(aBaljbjL1)$t&ELJd_dB%iY1!H3AoRvT$_#=5XDjvs zF%i_}ex!U%1SFAGI#xMP5izpRy~bBXfw5Qa-kLIE9_m*PR2T-pE)T0L(Js_ub{_O< z>G?MOJ%q)dcM&2NQB1!PEgWoiOIenhapRqWMtpi97P3N=YJ;1O^XmTmm*hH#lRDc= zpBCrAfBMS%$%lKs5U4G_N9oknLW;E0Kpgfa?V{QvGjK*}i7Wx-W&?oaTOvTx)8#5H z=17y(vmW8mxh$!J~E2@KLu}&R1R$o^fK-<{^d+fBwG~_n}_U)YY$acD~ zc=`d-ODo<&>S<>HmW@r$G<<}cZoPB&fwu^?GYv7`x`#RVE`xPc&w0F!>t9WZ*LFW2 z&LLr??O=U_KfJQ;nBLuVtr@rEgkear<6xlX9x5gXa1 z3CN12Z710r;FI)NphqD|C$%(u?=wj24bKsfBlEVpKY&70z_k)NX<=lMEp!KY$kq-r 
z8)Fy_@(dylGC$Oke33_z?r?}}EPtU?AY_6`#O#{Pv~yK25J$PvB8;h>#v!U!Bk#$zWfECH?BG+#K=fIAM*t@(rma@BM?K zP$|or`V0znl}Jzl%9hpR*Y~j3%0BAW1YXQ&sNGOa@i_SoBFWdASMW@BhvRuj-Eyq2 zB}^G3(Pp`i2laY&d1i*Y^wAX-mG%&LfI3;vLLzahY5&=9e|NiXQh4Q|+RpyY5~u20 zSN`VZ2y-RC!|$WQ5m)gcgo0HP`H07*<&MC3){K5n;Im3(ssKcYoXz2@_?1wqT*hUJ zuKL-9Kq6ad5N!1kZ;X&UKJJ(B)2jTRfKswBjU0D`R6m2w-D+ zBas6WJxt+~s7&d}Dx0s*;+_B`K*@F8z;mO$yb?+I8TDpR1gS<8;5O|d%W7k^$tPj4 z_4h4Up0Ug-P7xb$oUTpmd^0rW;0b}|)=d)2j-zBs>gB?|+z-`52>CvpZXOu5sS8`M zmkJkeK|BIt`4j8nt~Y1oy6Z_N#JwaADIA~%Iu&9jaQx-5rq93*lr zRlH<#pUiPux&fiCh!&CwPrNS!`zljBjNt9StMH0t6Afdfbt{H-A{#|JwJd#Wk zhEvl3JAEOFXq>ka{GdK$il^g>dIud4n0?COiH&O|vECk!OERuLBsTdiM4fy@3}69f zD_6EAKrCl?1vkC>pval-B6x=VQmf>|_uh#W&Hpf0qt+fKG{@Xysco4ONyR@hog4f( z{YrHQ=jpO1U*XCJqkl}S|bm-EJG6NDWBYbQ~Ldk&rXNr~G6?2OC*gn#QIgqQq5>5-Nmj?Z1pYmeA$ z&+_7>&Q<^x(55&3!j7NkwHwnN@ccNmpCVsU9lV#{uI^BGMDUWvv0*a{q0A@Y40ixU zbe-A+(NR4r3Kr>%=hj5UOF4}bq!JivB991*aG82PkW^;4TgP9jg%~0o2~`BS%rJ zGIj4jtJGE#z%miSXmK*mV%-%lg|^}}4_2U9c!=HzTd(s*%q5xRR1g{3Hv?HGCQU{d&C;BqU4<_#>?BoB!7mYg``CO7M))qa>)tq#F zLGl?4W+}eVr7FiVl9D$r{ydp1gP)v&z0WD67_i|o_p(O8*!?+SuB|I+WD%5Zun2k* zyr;jx2J`qmjO%8X6y`g0oIKSJo%N|0Y3KlbWuJ+We}+W!@&ar6_*uT2jJEqi)NQaM z&np(?ILEpzJQ_tD!=1bc`UsK}CiQs}3!6QPiRdKQU)nBnXaE9(Yc!v+3qGXsg>A zt>&VyLvE5vpj_xrZamDlG8}7bA)37_Is#h(N6Db%W*b&HGZa3^+Ntr9c3zrL^9jXp z{R9gx;|TK9zZlH#E^}CbI-M9eKE40LptGCm!ykBlQcseA=^^8?F&FaqTna1K*|4-8 zbR7-RCt&$R9JF@Vr#iPOK^(<|Zy_IFzz1gTtiMnq$>uvGrjx ztwZ7WRc|D`&Dxd76*L!O5Y0`hkM9}=(>eiC;g&6zNvb6pM^hw?b8v!@^|`f`8%r_i z_AexFByj1Mjiu*EB#Zmh%%CnGi?yVPjKms!IzzOnyW~w9|2u1M$GV18at}W$1|0SF zkTULF@3cH8R&rN=CeFArjX)pWH4ee?IP-)}QP?hZf$v5Iw&j&Vf|kv`P&w?cw)65K zD~bWr+1z1fWQs!SvzO`{SSP%6<%6=Ek7xmP!S@}J9{yOLJ>`^|v`4a{jz5EDCCU8T zieH?#ELLp>hHu%m&X&~-=isJBP!l-2l<_Penp#liIBN2~e9h*q;^wumpEtysXYMW% z%NFX>Lq2=U#8na=nd_pTkiWY~C)B2zJ0E1}EX6P>*3AZ_E}Uz=H;LL}K!ph83$o6* z`k?2^d)vkKMI{dxYhd>NhqdHo_{N3EsNY-0zxuK$2E^bML7Gkr2Q=eYxwVyVV#sky zNj8ESmiqQY~Omzgv{5v ztA03vflcy-|=HiO&1=0pITd!_n6^Y>rQi6A|4{pO~ 
zyNb7)16vygj}5xQHC1!#V*(i0w0`E0dDVuDvhM}29{8xUp`>sWbp)53KoO17e|q^S zRdJsAWvvcX8N0xoP%|-SCM$x{PI8%{C@7yjHe)xJma7nU#>y&R3>C#ZfgL8Aju$&Y z-PMDXn8=~5^FcX%+)W+IY|$A7$E^gjX#0~hSD{w4$KNFwv^`maVm>cr&|f+mUmhNU z8Fa;4Qaq;z&3_hX>VJH#F?Aw)ADX`8=V&A2%qw(3;pvbQw)=R!hJRZ%f}m_`S}k00 zE{$vs`Jq{>ibVggC%%Pl*hRSfv|B1=`YQIodKRaQulgCrr1IE-(gzYEn=XjIR@Xj8 zO_*n}NHG2ktSo|~I$T8S|-sMIz48bm9l zUibjcK=rke_QWJ_21bC%vh~Uv6_u*zxnmbU_M}sMGCk&XVH};hn%W?PL5b&Lo_G_& z<3zk^Vzl**4l!?T5_b4;NBHU6oLvC~YMQ-#d6M`zL?69CMw=@H%5Hdu?ZvVsxQ1&` z)(To36lXyHH*^}9jDi`}(iSqui+PkC1>a^kX6 zu5*(}DFgZmm!c_h)C0ZH9b3eB-Fu||Arw^4XQvmqR~8T&ifOmR_}v1at4gcGC2JvU zkr3xjZX4D|gu|WtFR!lTE7L0z3z+X+bZcnOS3}ZNzUs=Rc|BtDS)Hg}n~t`6!OnuM zdmeAwJ>HegRM}RlklskzC)=8dFpgj%ONt>Ub77)WQpu>xJTs_-!R8^L1@t0SR6jl^ z(4(|Rl-f_}(>-T4<2m~d+KuAZD$&eoNiSh^KrH#o?-VN%Tk3jWx-Q{@Idj(Vfs(SJ=yMpvCs zoO#zsi|^ukI25ZW8lfLb5!+yzfwtNX+sohGB-P3jem&-fXN9{yQ6D`#<_4-SWsT&5 ze&^n9;9s@YPGY*T$svO`{kuE0?y)Ru3+l(Vj;AIq^txNusSNN^nXq)dh;J?A>8ci! z5y+R!)5ypDY<|u}u0FuLy51ZKP^9ywY#kTj_ zVGq9MN#b9nF~}Hw4dcTzaa~Pdl|?e9YxGk?XvhzmE!*75<1dkZQ%Qn~a|e2gSvOb- zM@r;dCevVB(;!$Xl>}6Pu#wh=9S2qZeR^JhxtU0za+jV|4`A_PTkd5`{zb5Tc8FzM(x}{% zKtvQ%?rhW*58&f@Z-yx#Ew9vOT%CY%MB~Gs z?NYQCekUCxZ8`UI2M71}?k@DphS=qq+m5XsG<9b^5YrmF(^CTv zbCxYD#W%?b=Oo;F{7zlFwD*^R%iij>mQ=mg6w-`BvHxie^O*`AGqFjPb~BS(FFoCa zsrIvYGC%^lM=fNs43kSLZPVM55QZRhV4x*cqMERPw%ze(ObiXejTI{AyzzX8c;cbK!` zb=yAE5((!LmLrZ9(^;2$Uq0%*LUmTdQb90MWE`(P+iLMJnpwnk_AkJcTKk%AGL@6r zawC(%?g$0!iQfN5;weYR_&}j8MbRLmoT*}QM3j)s_nt44x3g=X!`*Km>FCp{F|DU9 z$cNHSRnEqF{v#Vmdbp4CS8&?RERBsMbEom9TRZlp1@3P3+G3KXP5Nyf@@iA1eVU=N z1ARK2MapndV&^&+{j8^(vLCU}eLa%J6tRNy*i$aDE}@(IRh3BKKFyPmkPg4eN*%He6hafeA*UMSlLNelBvy2k{c`wz&Mf7nfP;SvtjS z)$gQnA$71h^3rBi;E%=_Q>K2!gHrP^$$S5#q|{)94HUw&JPIFb!dof#OQZopWb7YEN={|W%t-GSPUtB#jgu(cfy zKZ_9zFDmJjEe75Go<)L3Wx^bZQa$M@~C!{hPO zqi3bP!8{=mVeeo_H5LWk))Y*C%!=)4CF6N?N*qaR#d3kHkm0y%qPGyd9$&!|t5YIi z_~Lj&+Pke5#mMflR4gP>n}R%OT8-HJ-x0D>gdR4>Ag0QV0%}LX9SK6)683X`S*8~` zE$uDheM80Jq97Zatv3Xv2o<^sBqSB;0Msl2;UUQM!Q=;%Nbfxy8t}$w?4@YIYw0FI 
zsEm36bji%+zs7|A9MQf}j8A{gLUi2tF+?-2Q*{5${|JUafQ=FB9pfN|DF9vc2kk-| zYh>p|1P3;rkV}I2@WB%wSsIsJIxPzF3N6T4&p}i(LPwL~mnMw5g5bK(0mMIr@CGlN z4(H?W?Rq)ppMv-&`b2E^cY67acVx!=uiu*aD6){AGrP9+F2oPHbi-&I?_w@DvOYn` z*{yhY+Ppac)zdkmw=)Dgbz3n_p@^kE_wG*>a-5ej=|;@|NWTeU<&KXIB3uR0irn-n zfEi57l4y3s1~#$c*}=se<&k)Q%TghYrJ{ z0-@S_CKcI;R7l96jGwl8d$Ve7tN=7;A3^9AlN2c7Hb0yczypC4LMHW$y$==|E@PEM zNf}G|v}Uh5?`J~;`ak2jceEUX2F@`-tPG}u@gT7ZZ35!$MjQ@(#(|mP(+p8pjcVea zT9jj6P-ER~^8HU9wX4f~^ypC!)B55&2JEWVT6z0w(%ZLR^~RUFYGIZH+bGLt_g5|% zu7RJcPYz1(hF`6A*F91j)VMb4av8_&kkGbPbUQ5e)-Ym!%=gj=Bj`*ujSl9pUzzou7>b;zPrvJFY)n@OJNn8 zt3FwEeYXVf(m*^r@>QfYC8cYEO0R0BDyb)0|G4u4hu2aWHrc^#g}i6C(-WCX4trID zA04cN{|Iy2WE>M)BT{)UkeM2{bGle5ZZpgQj+zbci|jEOPD0W z1{Z7)ds2O^(vKn%>*?Pw0NaSk4hxuMk!f8){qGqYi#eS-w&h8I>BUjZh@=s=V26lE z`ZU<>d;Tjt;hj}CKSF7gR)DnQB5@K4+|e7)@k#ma)>Pae)GN@Hfw-|D`&R@RqjAQm zO?7%67I-0Km?TrC66FG&d3w0CoW9=NgCh1JzTcz}^};AuTdr-)z09U zEt(MZ4oEiggc|&Bq;4BC*g4nSBf8vI| zP%V5@s$1dinC#qhC=Usf5wijDFQP9+g{=feTV+QFnLP&T7Sa2Z@rKZGj%n-RU zL<+i`<8**Uv9>LTdWMj-vM_qjcv7n3qcVhzNdSK83=Ci|$Vx0&L>=G4L_v7Ho=pLR zwFJ&&oeK)N|z&?HTd{c8$@sqvL)nQ!Nb6^wfEYvE5!P&fOI2>+m0VedGgRnm?wn8$Q z+a7LkC#xJ>UwNGS;?JWi!0o-8kM_D!+mr?Q&ugyMprQ_j#HzOLLXdOHfJi@XyE6$UC;7FAw^TX;ep=8-_OL@oq1{f#(Uc`jYb6Q;QStpu9PjB+9N$*tWVMMqyb z=pF!nH2Zm@J@XRYi;+`cN$r;W^=+riB_NEcoGty)dT<7AXQ7f<<2VR8}Oww z(+xYfMqzr4F~YgLh)PzB1TMK13G2AOj2OEhc6_?7C4Fb~ZMazoJe70w{B5N2FolQK z^vTX4MDdfE=zhC?*aCSUcR|hh2{;Jc%n~Tdd-}8dHb8NQU45YiNvVBC*`Qh_6e5%R zrbV3AUS2hNP1MByWYpG%V=glopotg)-ed;{vD+oiHUC!X`e`dD;jcj0i z&ASnlSIb?9e`en;_5oua58#IQmy2NjnHAer*;El3*3h1noQuHcEc z4_A!eL8fK@^Rc9*p&4kRDP>mYV(u&0TM?eTvQ(wkb>@w zr>8#&cl)y8P30kBRgbO7wV=;l(S=0jWY2&A?|1fnx+4s~*bF1aPEdfh9?nY7F&1C_ z>e6+0&$fVEfz2HZWYhMbmCl3ru%X3#7p2mEW13o_@_)mz~W> z9{U8?(>-DkBVnEBFEoGEFn*=_E0Br?!W%jm1H`;`!2hHf>)a6P9U`1ZqSyM?%$S3% zpp4)OWclPWwb&{#4VEIoLSLSd~MqMZe2T?aOi{GR^EGh8du=~1zY1I?@ z{7&J%`+xhfocL6DQthx{W>o@SEA07>c?BUO7@|aRh(rpqiR?G5*CDFu_1ezoA!_5x zb8%bi&zQ81YIpm^fkC%Evm@CK=QjxwR~Vyh|{Xw=D 
zM>!*dTsP_Y2Z0^PE~lhxWItJ6uvw^Az>54n{G}>tm8IdCPa@*9&iq{k5leJcoV{0* z#-#)a_%CU0$rT4^RxZR`T?DGGQ{BsKWdihQUoi$#T{($5jMAR59Vs=#7NRkbt}k_R zNG?#-%FH5e)2s;8!rD-BTBc--Jy>1-iCDON`k~oaeSnhMT#=9~hIsMZON2%{>8-2F zHrV4gwreTe@7q)m5=jNbr2^uKBCpjBLmZW@M9G>US|KwP%`ns@Lr=Ckb$p2b9fX3}%S`K@_fM+7`5NP)* zL0B{+jZ85dlaolgMX3iOx-*Tj2K5Z>!$riui2eXeBs7h#C7$abWdvZtX>LK>#s%;L zQYCT-y1Mqi>@Mfx&DZ{r~4FUVDCq^k{p--S$y#`NE|P}sH361wS)UJ zPJPNe%`Va>LT7s}BztUBAM!4N*=L+kMoV>E1P{GVF zYg+81q^TyhNFEQGxw;6epc^)(appyYea`X|Bed4*v)Zz?pWd68^2_rT9Qrq=i3!W; zIs8X1!VBsNc3xZs0Ew zkn}~%nTaVeSgphgX7whII|j~VT3 z_`oYt525iEJLFiLa8oDxUAvc|WGx+|UqxlEe6CY7LdWhMm(L!_HX18ld}R+;zD76* z6-i)WZcsq>FyJZvr>f+c? z)&Nu)kO1~$D1jn$$@k%vdi8aLTgb;x!qMmpjnkGBxoxU5+aNld=9BVchj7K`>=O{U zd>&yUWNeo~Rd)jJtg^-O07#>Pp#*a2g;!!+!ui|#htooOH89U5vZ!~vhGLX>_FA!! zI?>6erm7k|qCx+J#{+^fyXE1(N;lx#^T%mu2-i)}&W{bm%S=N^!1DFYn1>I78crgL z5(}e|s{MR^mvoE1P8ooQcNku{%3G%vCQD=HU5BP}uTCPi%{2XD3TMLv*Jw$lv z)FTX7jx_%AQEIVhWjo#rqtx#WeEaZN&v4(~cap@_Hj(UO)~j0@UmfW0e6T#H{ZR%} z1`OI=P|f7m*P}HoN??cT0S$mLSW0B3bbsJrtaA^j%uEpFnpi{t$te8`?w|qn6+x2y z3t0awu1`aG6fkit$P8Bo>JD`rZa5&by&SS-JlzpDbIJzIqhLi&mE+ura=U(4 zZ{||l&!ovf90mb;Qo{M*0{)ObhOf{{1arY;b7xSBaWf%5_ z0!%2icJ>UM`O})h5YcU-c7Y)WF1?LqZ3{eC`~)i+8kG~<7qP1Ynglbr=TDqq{MIH| zxl9+k(BjYqnoO_$_SkVJ*6yL9@jOcl1MhOlP&5@7*M$`I!qK7YUI|u?Yu`7VyY4=F zRk3@x5!tl4!^7B-WayeePsDi*XN@V84f$Yk;uu0ci6_Vl?3h}~+@a1>JsRMZ|iSWpVMSV70nz1hcXG z#?t`td3lhzw`V|79ufyl2wVXlopXqi!f2x(*m5H&@SNWds11u-gWsAaF1PwP88p(ZuhtE}N@yhD}ok{GX2lTl4b4 z|NaH>m37`w$hL(rg#x7gMpQ7R1Ht;G1pYCJ*0lj>vczD?>U~tZ_~o@kFOtRUvH{+) z0CzSC7xKZW^4H4Ei^VW(ZgaYX0;oWco2=ILi*R?;F<@JRH1uUr`glloa|cfwa@h_% zpI98eTs_D-1ba^|qT~dn>#>XXktSk0nS^SFK4T7vvZYgCKXvrU)Xx+3o-@-$gc_6JZzI3kg7?C)7mB;@WG4`dY%NzqT3auKCK`iFEITr< zRnnTw3tLDe*lt-P$gsNKF9D4PdAh1o6-dK#4U(XdbU-);YSvrqkf8fP8{ne~tXd~j z-#L`0!T>u5pWASmA~=x7dVTE%w?;-$Q5P}*QrH^pa|Zu8twp}>94g{RLrmoCoW*q_ z1cG#{>sXI4972SUNbVr;xdlCIuRutd)gmAE1&)D2U3`ELUI=-@yityy&OYmcf(kqO z3w_AVCmATd2GmE#b-Mk>yPz`sRzrS$`yhq~d*fWcazi1+Q1T$WV0w+3p#o%BY@w{8 
ziLfNvFUH{{5Hz$DG7HkLL4-Pf2b>r83gGoFxuDFws6Vlx0aOe#>L)lZQhSyWE2ra3 zrhY0Lp;DH~fJ{M$J@>QL^aKS!2Zc5J%?8~G-6PAIpf$Z8Nw|TyHVlklMcK)~#$=Be z332AZy>Res?w(2SEniy#3~wH|&$iMgKv^tiuEARiEf%Q7vdf9E=?8{IM%EH}SyJQU z-}l=>yrkU>R#o+Tz?-#6L< z=>}v}rW4n}LOCQ^3!4niJq|IvP^JKZa4a0#@D;S`y5bT;i338soHF&|1YbY~h*LitgW?vex%Z z2HvEupO2;2>Ye|DGLs^9x@_zz+0b`t1Z3G`#duk!Cb$kgHlAjIDr0l)cyeP|%{Pbu zvF!6e16`fD@}{02?^g$vmI>x2{w`u_F5LvsO}Nfs2kc4P8{3;>*`pz;kRm*7-;tq3 zKa=P^5`8Q36TrF_sZCs`Ld+IIt|Ne~c}r4Ht!-}I#c4I{0%EBWCi0%nR+UH=$ZSeX zit5~@+dABKMqD*lb|YD)buA(E-oa*Kd4;k?7UgSKK3TU!prNT1g1RlaMfnWl*x)eQ zjqK=0M78ZMAI)UsfIx9&SGG)<$ma}-`VJ16FH?M9%jh^VsXMj|0%&31u9?7{1kbh` zEUy!*A|!1xjQu{JeRau+yiybyG#_H*lTo?3ny$?+-80Z%e5iC&0F2L(w zi%N2gQJ-E|U^X&HjkaOi8j|$00yserIb`#Y^6q;)H+ininVfh#NKsI(fN)TC8Bh?$cJ^qNBDz z#%tHQhBO;4h&=uqv(G&=@YP@G%cvN`6uRl0*v_0KW;kYK=5Ff#l6O z%5r!mxa*?Mch~2*%EML_B(Z%NhDb2r^nBF4*LMCD*GK^HJ41Kdxrt;V`?2WMB_Y6q zo+ltY$CT!;w?Qdg756N~~xYV?pjXc89Mb1~{Pm|Cw@!hB^pmlZ&F zlNXENP$^w1iBv&*OG;;NMK1RNG2wt*GEH@t;fu?xC{4n(T2v1svKfnUzpELBm#mgoj;otU5 zcP5gP!FRPnU_rL2NP`D+vX2{jj37YZ6B>=~qar}oa9yZgC>1>CyV;hZE$$Esf_Q)H z2mX+K-loBL50;$NyVT)e_0Ofyo%io^?qTS5Y{g7WVV?$v2>JkZ+4>DMye3fym|7}{ zh4ZfvaUWRE)nu3x&_zkNHi99J8qrTPNdrz7&?|l!NMB%Bx)84@gOdk?IaEfDbsTtY z1d~;eP=Xj@0vAA0Czb_ zUm@B%#dLstzr|5N@VJH`E)U6dnXXu~LDC^~2QJ_bjM)*^Vehntk1dW#iOfWw_O}H+ zjg1vL4GE6mZB9z+gpi(La*05GCY;*lES}(=W&ElqZxQc>cT{UE-ND=9dwksnOFKHJ54lO0Q zl4X#j%yEH}O&9Rb&B=r0^sI4+i}VEo!}$1C36y;7L2b$wAnv)(Gfqmlq|+9r*S7ou znNHY!lMYGj-Jyu36C6=)-J4I2CBNYxLu4b^J!{J|wouZAGch%vW1{i0`lc)ne|K+2 zWIpIl$SQ?~#7|Mb(P bm`%31@2BHP!^gMBkt)b4$z(T!K!thW~ literal 0 HcmV?d00001 diff --git a/examples/plots/note.txt b/examples/plots/note.txt new file mode 100644 index 0000000..c028b32 --- /dev/null +++ b/examples/plots/note.txt @@ -0,0 +1,4 @@ +The plots are generated on small values of hyperparameters for testing purposes. +Hence, the graphs are not identical to the paper ones. +Nevertheless it depicts the pattern. +Use default params for better results. 
\ No newline at end of file diff --git a/examples/requirements.txt b/examples/requirements.txt new file mode 100644 index 0000000..feb0786 --- /dev/null +++ b/examples/requirements.txt @@ -0,0 +1,50 @@ +aiohappyeyeballs==2.6.1 +aiohttp==3.12.15 +aiosignal==1.4.0 +async-timeout==5.0.1 +attrs==25.3.0 +certifi==2025.8.3 +charset-normalizer==3.4.2 +contourpy==1.3.2 +cycler==0.12.1 +filelock==3.18.0 +fonttools==4.59.0 +frozenlist==1.7.0 +fsspec==2025.7.0 +idna==3.10 +Jinja2==3.1.6 +joblib==1.5.1 +kiwisolver==1.4.8 +MarkupSafe==3.0.2 +matplotlib==3.10.5 +mpmath==1.3.0 +multidict==6.6.3 +networkx==3.4.2 +numpy==1.26.4 +packaging==25.0 +pandas==2.3.1 +pillow==11.3.0 +propcache==0.3.2 +psutil==7.0.0 +pyparsing==3.2.3 +python-dateutil==2.9.0.post0 +pytz==2025.2 +requests==2.32.4 +scikit-learn==1.7.1 +scipy==1.15.3 +six==1.17.0 +sympy==1.14.0 +threadpoolctl==3.6.0 +torch==2.2.0 +torch-geometric==2.6.1 +torch_cluster==1.6.3 +torch_scatter==2.1.2 +torch_sparse==0.6.18 +torch_spline_conv==1.2.2 +torchaudio==2.2.0 +torchvision==0.17.0 +tqdm==4.67.1 +typing_extensions==4.14.1 +tzdata==2025.2 +urllib3==2.5.0 +yarl==1.20.1 diff --git a/examples/train_univerifier.py b/examples/train_univerifier.py new file mode 100644 index 0000000..989bb66 --- /dev/null +++ b/examples/train_univerifier.py @@ -0,0 +1,98 @@ +""" +Trains the Univerifier on features built from fingerprints (MLP: [128,64,32] + LeakyReLU). +Loads X,y from generate_univerifier_dataset.py and saves weights + a tiny meta JSON. 
+""" + +import argparse, json, torch, time +import torch.nn as nn +import torch.nn.functional as F +from pathlib import Path + +class FPVerifier(nn.Module): + def __init__(self, in_dim: int): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, 128), + nn.LeakyReLU(), + nn.Linear(128, 64), + nn.LeakyReLU(), + nn.Linear(64, 32), + nn.LeakyReLU(), + nn.Linear(32, 1), + nn.Sigmoid(), + ) + def forward(self, x): + return self.net(x) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('--dataset', type=str, default='fingerprints/univerifier_dataset_nc.pt') + ap.add_argument('--epochs', type=int, default=200) + ap.add_argument('--lr', type=float, default=1e-3) + ap.add_argument('--weight_decay', type=float, default=0.0) + ap.add_argument('--val_split', type=float, default=0.2) + ap.add_argument('--fingerprints_path', type=str, default='fingerprints/fingerprints_nc.pt') + ap.add_argument('--out', type=str, default='fingerprints/univerifier_nc.pt') + args = ap.parse_args() + + # Load dataset + pack = torch.load(args.dataset, map_location='cpu') + X = pack['X'].float().detach() + y = pack['y'].float().view(-1, 1).detach() + N, D = X.shape + + try: + fp_pack = torch.load(args.fingerprints_path, map_location='cpu') + ver_in_dim = int(fp_pack.get('ver_in_dim', D)) + if ver_in_dim != D: + raise RuntimeError(f'Input dim mismatch: dataset dim {D} vs ver_in_dim {ver_in_dim}') + except FileNotFoundError: + pass + + # Train/val split + n_val = max(1, int(args.val_split * N)) + perm = torch.randperm(N) + idx_tr, idx_val = perm[:-n_val], perm[-n_val:] + X_tr, y_tr = X[idx_tr], y[idx_tr] + X_val, y_val = X[idx_val], y[idx_val] + + # Model/optim + V = FPVerifier(D) + opt = torch.optim.Adam(V.parameters(), lr=args.lr, weight_decay=args.weight_decay) + + best_acc, best_state = 0.0, None + for ep in range(1, args.epochs + 1): + V.train(); opt.zero_grad() + p = V(X_tr) + loss = F.binary_cross_entropy(p, y_tr) + loss.backward(); opt.step() + + with 
torch.no_grad(): + V.eval() + pv = V(X_val) + val_loss = F.binary_cross_entropy(pv, y_val) + val_acc = ((pv >= 0.5).float() == y_val).float().mean().item() + + if val_acc > best_acc: + best_acc = val_acc + best_state = {k: v.cpu().clone() for k, v in V.state_dict().items()} + + if ep % 20 == 0 or ep == args.epochs: + print(f'Epoch {ep:03d} | train_bce {loss.item():.4f} ' + f'| val_bce {val_loss.item():.4f} | val_acc {val_acc:.4f}') + + # Save best + if best_state is None: + best_state = V.state_dict() + Path('fingerprints').mkdir(exist_ok=True, parents=True) + torch.save(best_state, args.out) + with open(args.out.replace('.pt', '_meta.json'), 'w') as f: + json.dump({'in_dim': D, 'hidden': [128, 64, 32], 'act': 'LeakyReLU'}, f) + print(f'Saved {args.out} | Best Val Acc {best_acc:.4f} | Input dim D={D}') + +if __name__ == '__main__': + start_time = time.time() + main() + end_time = time.time() + print("time taken: ", (end_time-start_time)/60 ) +