From 369a9bf906c22c68860005d096cb62a2af875e8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Sat, 15 Mar 2025 16:24:50 +0800
Subject: [PATCH 01/53] inference optimization
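
Split inference into two stages so the reconstruction network runs only once per
input image: infer_single_view now returns the predicted Gaussian model, the query
points and the neutral-pose transform, and the new animation_infer re-renders those
cached Gaussians for each batch of target SMPL-X poses and cameras. The training-only
df_data / canonical-view branches are dropped from forward_animate_gs, and video
encoding moves to the new LHM/utils/ffmpeg_utils.py.

A rough sketch of the new call pattern (tensor/device preparation elided; see
LHM/runners/infer/human_lrm.py in this patch for the full version):

```python
# 1) Reconstruct once per input image.
gs_model_list, query_points, transform_mat_neutral_pose = model.infer_single_view(
    image, src_head_rgb, None, None,
    render_c2ws=motion_seq["render_c2ws"],
    render_intrs=motion_seq["render_intrs"],
    render_bg_colors=motion_seq["render_bg_colors"],
    smplx_params=smplx_params,
)

# 2) Animate/render the cached Gaussians chunk by chunk to bound GPU memory.
#    batch_smplx_params holds the SMPL-X params sliced to the same window.
for batch_i in range(0, camera_size, batch_size):
    res = model.animation_infer(
        gs_model_list, query_points, batch_smplx_params,
        render_c2ws=motion_seq["render_c2ws"][:, batch_i : batch_i + batch_size],
        render_intrs=motion_seq["render_intrs"][:, batch_i : batch_i + batch_size],
        render_bg_colors=motion_seq["render_bg_colors"][:, batch_i : batch_i + batch_size],
    )
```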
---
LHM/models/modeling_human_lrm.py | 16 +++--
LHM/models/rendering/gs_renderer.py | 105 +---------------------------
LHM/runners/infer/human_lrm.py | 54 +++++++-------
LHM/utils/ffmpeg_utils.py | 55 +++++++++++++++
README.md | 1 +
5 files changed, 95 insertions(+), 136 deletions(-)
create mode 100644 LHM/utils/ffmpeg_utils.py
diff --git a/LHM/models/modeling_human_lrm.py b/LHM/models/modeling_human_lrm.py
index 5ad0f02..3c5f2d2 100755
--- a/LHM/models/modeling_human_lrm.py
+++ b/LHM/models/modeling_human_lrm.py
@@ -777,16 +777,11 @@ def infer_single_view(
image.shape[0] == smplx_params["body_pose"].shape[0]
), "Batch size mismatch for image and smplx_params"
assert len(smplx_params["betas"].shape) == 2
- render_h, render_w = int(render_intrs[0, 0, 1, 2] * 2), int(
- render_intrs[0, 0, 0, 2] * 2
- )
if self.facesr:
head_image = self.obtain_facesr(head_image)
assert image.shape[0] == 1
- num_views = render_c2ws.shape[1]
-
query_points = None
if self.latent_query_points_type.startswith("e2e_smplx"):
@@ -808,8 +803,19 @@ def infer_single_view(
)
+ return gs_model_list, query_points, smplx_params['transform_mat_neutral_pose']
+
+
+ def animation_infer(self, gs_model_list, query_points, smplx_params, render_c2ws, render_intrs, render_bg_colors):
+ '''Animate and render pre-computed Gaussians, avoiding a repeated reconstruction forward pass.
+ '''
+
+ render_h, render_w = int(render_intrs[0, 0, 1, 2] * 2), int(
+ render_intrs[0, 0, 0, 2] * 2
+ )
# render target views
render_res_list = []
+ num_views = render_c2ws.shape[1]
for view_idx in range(num_views):
render_res = self.renderer.forward_animate_gs(
diff --git a/LHM/models/rendering/gs_renderer.py b/LHM/models/rendering/gs_renderer.py
index ebf4b1f..6fea5ad 100755
--- a/LHM/models/rendering/gs_renderer.py
+++ b/LHM/models/rendering/gs_renderer.py
@@ -1337,40 +1337,10 @@ def forward_animate_gs(
):
batch_size = len(gs_attr_list)
out_list = []
- df_out_list = []
-
- cano_out_list = []
+ cano_out_list = []  # canonical-space outputs; not used during inference
N_view = smplx_data["root_pose"].shape[1]
- if df_data is not None:
- # accumulate df data
- df_c2w = df_data["c2w"]
- df_intrs = df_data["intrs"]
- _, D_N, _, _ = df_intrs.shape
- df_smplx_params = df_data["smplx_params"]
-
- df_bg_color = torch.ones(batch_size, D_N, 3).to(background_color)
-
- df_width = 512
- df_height = 1024
-
- # merge df_smplx_params with smplx_data. A trick, we set the batch is the sample view of df pose
- for merge_key in [
- "root_pose",
- "body_pose",
- "jaw_pose",
- "leye_pose",
- "reye_pose",
- "lhand_pose",
- "rhand_pose",
- "trans",
- "expr",
- ]:
- smplx_data[merge_key] = torch.cat(
- [smplx_data[merge_key], df_smplx_params[merge_key]], dim=1
- )
-
for b in range(batch_size):
gs_attr = gs_attr_list[b]
query_pt = query_points[b]
@@ -1383,7 +1353,6 @@ def forward_animate_gs(
)
animatable_gs_model_list = merge_animatable_gs_model_list[:N_view]
- df_animate_model_list = merge_animatable_gs_model_list[N_view:]
assert len(animatable_gs_model_list) == c2w.shape[1]
@@ -1400,47 +1369,6 @@ def forward_animate_gs(
)
)
- if df_data is not None and len(df_animate_model_list) > 0:
- assert len(df_animate_model_list) == df_c2w.shape[1]
- df_out_list.append(
- self.forward_single_batch(
- df_animate_model_list,
- df_c2w[b],
- df_intrs[b],
- df_height,
- df_width,
- df_bg_color[b] if df_bg_color is not None else None,
- debug=debug,
- )
- )
- # debug
- # for df_out in df_out_list:
- # import cv2
-
- # for _i, comp_rgb in enumerate(df_out["comp_rgb"]):
- # com_rgb = (comp_rgb.detach().cpu().numpy() * 255).astype(
- # np.uint8
- # )
-
- # cv2.imwrite(
- # "./debug/df_out/{:03d}.png".format(_i), com_rgb[..., ::-1]
- # )
-
- # TODO GAN loss 2-19
-
- # visualize canonical space
- cano_out_list.append(
- self.forward_cano_batch(
- cano_gs_model_list,
- c2w[b][0:1], # identity matrix
- intrinsic[b][0:1],
- background_color[b] if background_color is not None else None,
- height=768,
- width=768,
- debug=debug,
- )
- )
-
out = defaultdict(list)
for out_ in out_list:
for k, v in out_.items():
@@ -1460,37 +1388,6 @@ def forward_animate_gs(
out["comp_depth"] = out["comp_depth"].permute(
0, 1, 4, 2, 3
) # [B, NV, H, W, 3] -> [B, NV, 1, H, W]
-
- cano_out = defaultdict(list)
- for out_ in cano_out_list:
- for k, v in out_.items():
- cano_out[k].append(v)
- for k, v in cano_out.items():
- if isinstance(v[0], torch.Tensor):
- cano_out[k] = torch.stack(v, dim=0)
- else:
- cano_out[k] = v
-
- out["cano_comp_rgb"] = cano_out["comp_rgb"].permute(
- 0, 1, 4, 2, 3
- ) # [B, NV, H, W, 3] -> [B, NV, 3, H, W]
-
- # df_pose
- if df_data is not None and len(df_out_list) > 0:
- df_out = defaultdict(list)
- for out_ in df_out_list:
- for k, v in out_.items():
- df_out[k].append(v)
- for k, v in df_out.items():
- if isinstance(v[0], torch.Tensor):
- df_out[k] = torch.stack(v, dim=0)
- else:
- df_out[k] = v
-
- out["df_comp_rgb"] = df_out["comp_rgb"].permute(
- 0, 1, 4, 2, 3
- ) # [B, NV, H, W, 3] -> [B, NV, 3, H, W]
-
return out
def forward(
diff --git a/LHM/runners/infer/human_lrm.py b/LHM/runners/infer/human_lrm.py
index 9677c1d..b31c837 100755
--- a/LHM/runners/infer/human_lrm.py
+++ b/LHM/runners/infer/human_lrm.py
@@ -35,9 +35,11 @@
resize_image_keepaspect_np,
)
from LHM.utils.face_detector import FaceDetector
+
+# from LHM.utils.video import images_to_video
+from LHM.utils.ffmpeg_utils import images_to_video
from LHM.utils.hf_hub import wrap_model_hub
from LHM.utils.logging import configure_logger
-from LHM.utils.video import images_to_video
from .base_inferrer import Inferrer
@@ -161,7 +163,7 @@ def infer_preprocess_image(
constant_values=0,
)
else:
- offset_w = int(offset_w)
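+ # offset_w is negative in this branch (the image is wider than the target aspect),
+ # so flip its sign before the symmetric center crop below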
+ offset_w = -offset_w
rgb = rgb[:,offset_w:-offset_w,:]
mask = mask[:,offset_w:-offset_w]
@@ -446,8 +448,6 @@ def infer_single(
shape_param=None,
):
- if os.path.exists(dump_video_path):
- return
source_size = self.cfg.source_size
render_size = self.cfg.render_size
# render_views = self.cfg.render_views
@@ -540,15 +540,32 @@ def infer_single(
shape_param = torch.tensor(shape_param, dtype=dtype).unsqueeze(0)
self.model.to(dtype)
+ smplx_params = motion_seq['smplx_params']
+ smplx_params['betas'] = shape_param.to(device)
+ gs_model_list, query_points, transform_mat_neutral_pose = self.model.infer_single_view(
+ image.unsqueeze(0).to(device, dtype),
+ src_head_rgb.unsqueeze(0).to(device, dtype),
+ None,
+ None,
+ render_c2ws=motion_seq["render_c2ws"].to(device),
+ render_intrs=motion_seq["render_intrs"].to(device),
+ render_bg_colors=motion_seq["render_bg_colors"].to(device),
+ smplx_params={
+ k: v.to(device) for k, v in smplx_params.items()
+ },
+ )
batch_dict = dict()
- batch_size = 80 # avoid memeory out!
+ batch_size = 40  # keep chunks small to avoid running out of GPU memory
+
for batch_i in range(0, camera_size, batch_size):
with torch.no_grad():
# TODO check device and dtype
# dict_keys(['comp_rgb', 'comp_rgb_bg', 'comp_mask', 'comp_depth', '3dgs'])
+ print(f"batch: {batch_i}, total: {camera_size //batch_size +1} ")
+
keys = [
"root_pose",
"body_pose",
@@ -565,16 +582,14 @@ def infer_single(
]
batch_smplx_params = dict()
batch_smplx_params["betas"] = shape_param.to(device)
+ batch_smplx_params['transform_mat_neutral_pose'] = transform_mat_neutral_pose
for key in keys:
batch_smplx_params[key] = motion_seq["smplx_params"][key][
:, batch_i : batch_i + batch_size
].to(device)
- res = self.model.infer_single_view(
- image.unsqueeze(0).to(device, dtype),
- src_head_rgb.unsqueeze(0).to(device, dtype),
- None,
- None,
+ # def animation_infer(self, gs_model_list, query_points, smplx_params, render_c2ws, render_intrs, render_bg_colors):
+ res = self.model.animation_infer(gs_model_list, query_points, batch_smplx_params,
render_c2ws=motion_seq["render_c2ws"][
:, batch_i : batch_i + batch_size
].to(device),
@@ -584,10 +599,8 @@ def infer_single(
render_bg_colors=motion_seq["render_bg_colors"][
:, batch_i : batch_i + batch_size
].to(device),
- smplx_params={
- k: v.to(device) for k, v in batch_smplx_params.items()
- },
- )
+ )
+
for accumulate_key in ["comp_rgb", "comp_mask"]:
if accumulate_key not in batch_dict:
@@ -606,19 +619,6 @@ def infer_single(
rgb = rgb * mask + (1 - mask) * 1
rgb = np.clip(rgb * 255, 0, 255).astype(np.uint8)
- if vis_motion:
- # print(rgb.shape, motion_seq["vis_motion_render"].shape)
-
- vis_ref_img = np.tile(
- cv2.resize(vis_ref_img, (rgb[0].shape[1], rgb[0].shape[0]))[
- None, :, :, :
- ],
- (rgb.shape[0], 1, 1, 1),
- )
- rgb = np.concatenate(
- [rgb, motion_seq["vis_motion_render"], vis_ref_img], axis=2
- )
-
os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
images_to_video(
diff --git a/LHM/utils/ffmpeg_utils.py b/LHM/utils/ffmpeg_utils.py
new file mode 100644
index 0000000..a981bb4
--- /dev/null
+++ b/LHM/utils/ffmpeg_utils.py
@@ -0,0 +1,55 @@
+import os
+import pdb
+import torch
+import numpy as np
+import imageio
+import cv2
+import imageio.v3 as iio
+
+VIDEO_TYPE_LIST = {'.avi','.mp4','.gif','.AVI','.MP4','.GIF'}
+
+def encodeffmpeg(inputs, frame_rate, output, format="png"):
+ """output: need video_name"""
+ assert (
+ os.path.splitext(output)[-1] in VIDEO_TYPE_LIST
+ ), "output is the format of video, e.g., mp4"
+ assert os.path.isdir(inputs), "input dir is NOT file format"
+
+ inputs = inputs[:-1] if inputs[-1] == "/" else inputs
+
+ output = os.path.abspath(output)
+
+ cmd = (
+ f"ffmpeg -r {frame_rate} -pattern_type glob -i '{inputs}/*.{format}' "
+ + f'-vcodec libx264 -crf 10 -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" '
+ + f"-pix_fmt yuv420p {output} > /dev/null 2>&1"
+ )
+
+ print(cmd)
+
+ output_dir = os.path.dirname(output)
+ if os.path.exists(output):
+ os.remove(output)
+ os.makedirs(output_dir, exist_ok=True)
+
+ print("encoding imgs to video.....")
+ os.system(cmd)
+ print("video done!")
+
+def images_to_video(images, output_path, fps, gradio_codec: bool, verbose=False, bitrate="20M"):
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
+ frames = []
+ for i in range(images.shape[0]):
+ if isinstance(images, torch.Tensor):
+ frame = (images[i].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
+ assert frame.shape[0] == images.shape[2] and frame.shape[1] == images.shape[3], \
+ f"Frame shape mismatch: {frame.shape} vs {images.shape}"
+ assert frame.min() >= 0 and frame.max() <= 255, \
+ f"Frame value out of range: {frame.min()} ~ {frame.max()}"
+ else:
+ frame = images[i]
+ frames.append(frame)
+
+ frames = np.stack(frames)
+ iio.imwrite(output_path, frames, fps=fps, codec="libx264", pixelformat="yuv420p", bitrate=bitrate, macro_block_size=16)
+
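+# Example usage (sketch): write an [N, H, W, 3] uint8 frame array to an MP4 at 30 fps
+#   images_to_video(frames_uint8, "exps/videos/demo.mp4", fps=30, gradio_codec=False)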
diff --git a/README.md b/README.md
index 7040cf3..ba393c1 100755
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
## 📢 Latest Updates
+**[March 14, 2025]** Optimize Inference Time: 30% faster
**[March 13, 2025]** Initial release with:
✅ Inference codebase
✅ Pretrained LHM-0.5B model
From 0bdb6def60b4f36f4b1154c0d65431fb9e5f7ba0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Sat, 15 Mar 2025 16:28:19 +0800
Subject: [PATCH 02/53] inference optimization
---
README.md | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index ba393c1..bf5bf5f 100755
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
# LHM - Official PyTorch Implementation
[](https://lingtengqiu.github.io/LHM/)
-[]()
-[]()
+[arXiv Paper](https://arxiv.org/pdf/2503.10625)
+[HuggingFace Demo](https://huggingface.co/spaces/DyrusQZ/LHM)
[](https://www.apache.org/licenses/LICENSE-2.0)
@@ -10,16 +10,16 @@
## 📢 Latest Updates
-**[March 14, 2025]** Optimize Inference Time: 30% faster
**[March 13, 2025]** Initial release with:
✅ Inference codebase
✅ Pretrained LHM-0.5B model
✅ Pretrained LHM-1B model
-✅ Real-time rendering pipeline
+✅ Real-time rendering pipeline
+✅ Huggingface Online Demo
### TODO List
- [x] Core Inference Pipeline (v0.1) 🔥🔥🔥
-- [ ] HuggingFace Demo Integration
+- [x] HuggingFace Demo Integration 🤗🤗🤗
- [ ] ModelScope Deployment
- [ ] Motion Processing Scripts
- [ ] Training Codes Release
@@ -160,12 +160,12 @@ Thanks for their excellent works and great contribution to 3D generation and 3D
## Citation
```
@inproceedings{qiu2025LHM,
- title={LHM: Large Animatable Human Reconstruction Model from a Single Image in One Second},
+ title={LHM: Large Animatable Human Reconstruction Model for Single Image to 3D in Seconds},
author={Lingteng Qiu and Xiaodong Gu and Peihao Li and Qi Zuo
and Weichao Shen and Junfei Zhang and Kejie Qiu and Weihao Yuan
and Guanying Chen and Zilong Dong and Liefeng Bo
},
- booktitle={arXiv preprint arXiv:xxxxx},
+ booktitle={arXiv preprint arXiv:2503.10625},
year={2025}
}
-```
\ No newline at end of file
+```
From e3ead81d1293ce431fd9bac1afd7dc306b12ea86 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Sat, 15 Mar 2025 16:31:29 +0800
Subject: [PATCH 03/53] clean inference code, and speed up.
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index bf5bf5f..373e676 100755
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
## 📢 Latest Updates
+**[March 15, 2025]** Inference Time Optimization: 30% Faster
**[March 13, 2025]** Initial release with:
✅ Inference codebase
✅ Pretrained LHM-0.5B model
From 1595046472b70ce5f4aa1c09d10b7a41b5f4e180 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B0=91=E7=9A=93?=
Date: Tue, 18 Mar 2025 14:44:29 +0800
Subject: [PATCH 04/53] update install guidance
---
INSTALL.md | 3 +++
install_cu118.sh | 3 +++
install_cu121.sh | 3 +++
requirements.txt | 4 +++-
4 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/INSTALL.md b/INSTALL.md
index b5388a9..fefce15 100755
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -27,7 +27,10 @@
## 3. Install base dependencies
```bash
pip install -r requirements.txt
+
+ # install BasicSR from source to avoid a conflict with torchvision
pip uninstall basicsr
+ pip install git+https://github.com/XPixelGroup/BasicSR
```
## 4. Install SAM2 lib. We use the modified version.
diff --git a/install_cu118.sh b/install_cu118.sh
index 66c1bce..777f069 100755
--- a/install_cu118.sh
+++ b/install_cu118.sh
@@ -4,7 +4,10 @@ pip install -U xformers==0.0.26.post1 --index-url https://download.pytorch.org/w
# install dependencies
pip install -r requirements.txt
+
+# install BasicSR from source to avoid a conflict with torchvision
pip uninstall basicsr
+pip install git+https://github.com/XPixelGroup/BasicSR
cd ..
# install pytorch3d
diff --git a/install_cu121.sh b/install_cu121.sh
index 991f7da..23d68e4 100755
--- a/install_cu121.sh
+++ b/install_cu121.sh
@@ -4,7 +4,10 @@ pip install -U xformers==0.0.26.post1 --index-url https://download.pytorch.org/w
# install dependencies
pip install -r requirements.txt
+
+# install BasicSR from source to avoid a conflict with torchvision
pip uninstall basicsr
+pip install git+https://github.com/XPixelGroup/BasicSR
cd ..
# install pytorch3d
diff --git a/requirements.txt b/requirements.txt
index 8582d3d..acf632a 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,8 @@ einops
roma
accelerate
smplx
-basicsr-fixed==1.4.2
+chumpy
+basicsr==1.4.2
decord==0.6.0
diffusers==0.32.0
dna==0.0.1
@@ -10,6 +11,7 @@ gfpgan==1.3.8
gsplat==1.4.0
huggingface_hub==0.23.2
imageio==2.34.1
+imageio-ffmpeg
jaxtyping==0.2.38
kiui==0.2.14
kornia==0.7.2
From bd9e91bf8a688916193a3900d08392f87350d775 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lingteng=20Qiu=20=28=E9=82=B1=E9=99=B5=E8=85=BE=EF=BC=89?=
<37240099+lingtengqiu@users.noreply.github.com>
Date: Wed, 19 Mar 2025 14:35:49 +0800
Subject: [PATCH 05/53] gradio optimization
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index 373e676..58e49e8 100755
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
## 📢 Latest Updates
+**[March 16, 2025]** Gradio Optimization: Faster and More Stable 🔥🔥🔥
**[March 15, 2025]** Inference Time Optimization: 30% Faster
**[March 13, 2025]** Initial release with:
✅ Inference codebase
From 99ab201b1e592374a4570dbce099df601a6135e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Wed, 19 Mar 2025 15:52:39 +0800
Subject: [PATCH 06/53] update CN README
---
README.md | 3 +-
README_CN.md | 178 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 180 insertions(+), 1 deletion(-)
create mode 100755 README_CN.md
diff --git a/README.md b/README.md
index 58e49e8..88fa613 100755
--- a/README.md
+++ b/README.md
@@ -9,8 +9,9 @@
+如果您熟悉中文,可以[阅读中文版本的README](./README_CN.md)
## 📢 Latest Updates
-**[March 16, 2025]** Gradio Optimization: Faster and More Stable 🔥🔥🔥
+**[March 19, 2025]** Gradio Optimization: Faster and More Stable 🔥🔥🔥
**[March 15, 2025]** Inference Time Optimization: 30% Faster
**[March 13, 2025]** Initial release with:
✅ Inference codebase
diff --git a/README_CN.md b/README_CN.md
new file mode 100755
index 0000000..a607246
--- /dev/null
+++ b/README_CN.md
@@ -0,0 +1,178 @@
+# LHM - 官方 PyTorch 实现
+
+[项目主页](https://lingtengqiu.github.io/LHM/)
+[arXiv 论文](https://arxiv.org/pdf/2503.10625)
+[HuggingFace 演示](https://huggingface.co/spaces/DyrusQZ/LHM)
+[Apache-2.0 许可证](https://www.apache.org/licenses/LICENSE-2.0)
+
+
+
+
+
+## 📢 最新动态
+**[2025年3月19日]** HuggingFace Demo:更快更稳定
+**[2025年3月15日]** 推理时间优化:提速30%
+**[2025年3月13日]** 首次版本发布包含:
+✅ 推理代码库
+✅ 预训练 LHM-0.5B 模型
+✅ 预训练 LHM-1B 模型
+✅ 实时渲染管线
+✅ Huggingface 在线演示
+
+### 待办清单
+- [x] 核心推理管线 (v0.1) 🔥🔥🔥
+- [x] HuggingFace 演示集成 🤗🤗🤗
+- [ ] ModelScope 部署
+- [ ] 动作处理脚本
+- [ ] 训练代码发布
+
+## 🚀 快速开始
+
+### 环境配置
+克隆仓库
+```bash
+git clone git@github.com:aigc3d/LHM.git
+cd LHM
+```
+
+通过脚本安装依赖
+```
+# cuda 11.8
+sh ./install_cu118.sh
+
+# cuda 12.1
+sh ./install_cu121.sh
+```
+环境已在 python3.10、CUDA 11.8 和 CUDA 12.1 下测试通过。
+
+也可按步骤手动安装依赖,详见[INSTALL.md](INSTALL.md)
+
+### 模型参数
+从我们的OSS下载预训练模型:
+
+| 模型 | 训练数据 | Transformer 层数 | 下载链接 | 推理时间 |
+| :--- | :--- | :--- | :--- | :--- |
+| LHM-0.5B | 5K合成数据| 5 | OSS | 2.01 s |
+| LHM-0.5B | 300K视频+5K合成数据 | 5 | [OSS](https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/LHM-0.5B.tar) | 2.01 s |
+| LHM-0.7B | 300K视频+5K合成数据 | 10 | OSS | 4.13 s |
+| LHM-1.0B | 300K视频+5K合成数据 | 15 | [OSS](https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/LHM-1B.tar) | 6.57 s |
+
+```bash
+# 下载预训练模型权重
+wget https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/LHM-0.5B.tar
+tar -xvf LHM-0.5B.tar
+wget https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/LHM-1B.tar
+tar -xvf LHM-1B.tar
+```
+
+### 下载先验模型权重
+```bash
+# 下载先验模型权重
+wget https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/LHM_prior_model.tar
+tar -xvf LHM_prior_model.tar
+```
+
+### 动作数据准备
+我们提供了测试动作示例,处理脚本将尽快更新 :)
+
+```bash
+# 下载测试动作数据
+wget https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/LHM/motion_video.tar
+tar -xvf ./motion_video.tar
+```
+
+下载完成后项目目录结构如下:
+```bash
+├── configs
+│ ├── inference
+│ ├── accelerate-train-1gpu.yaml
+│ ├── accelerate-train-deepspeed.yaml
+│ ├── accelerate-train.yaml
+│ └── infer-gradio.yaml
+├── engine
+│ ├── BiRefNet
+│ ├── pose_estimation
+│ ├── SegmentAPI
+├── example_data
+│ └── test_data
+├── exps
+│ ├── releases
+├── LHM
+│ ├── datasets
+│ ├── losses
+│ ├── models
+│ ├── outputs
+│ ├── runners
+│ ├── utils
+│ ├── launch.py
+├── pretrained_models
+│ ├── dense_sample_points
+│ ├── gagatracker
+│ ├── human_model_files
+│ ├── sam2
+│ ├── sapiens
+│ ├── voxel_grid
+│ ├── arcface_resnet18.pth
+│ ├── BiRefNet-general-epoch_244.pth
+├── scripts
+│ ├── exp
+│ ├── convert_hf.py
+│ └── upload_hub.py
+├── tools
+│ ├── metrics
+├── train_data
+│ ├── example_imgs
+│ ├── motion_video
+├── inference.sh
+├── README.md
+├── requirements.txt
+```
+
+### 🏃 推理流程
+```bash
+# bash ./inference.sh ./configs/inference/human-lrm-500M.yaml ./exps/releases/video_human_benchmark/human-lrm-500M/step_060000/ ./train_data/example_imgs/ ./train_data/motion_video/mimo1/smplx_params
+# bash ./inference.sh ./configs/inference/human-lrm-1B.yaml ./exps/releases/video_human_benchmark/human-lrm-1B/step_060000/ ./train_data/example_imgs/ ./train_data/motion_video/mimo1/smplx_params
+bash inference.sh ${CONFIG} ${MODEL_NAME} ${IMAGE_PATH_OR_FOLDER} ${MOTION_SEQ}
+```
+
+## 计算指标
+我们提供了简单的指标计算脚本:
+```bash
+# download the pretrained model into ./pretrained_models/
+wget https://virutalbuy-public.oss-cn-hangzhou.aliyuncs.com/share/aigc3d/data/for_lingteng/arcface_resnet18.pth
+# Face Similarity
+python ./tools/metrics/compute_facesimilarity.py -f1 ${gt_folder} -f2 ${results_folder}
+# PSNR
+python ./tools/metrics/compute_psnr.py -f1 ${gt_folder} -f2 ${results_folder}
+# SSIM LPIPS
+python ./tools/metrics/compute_ssim_lpips.py -f1 ${gt_folder} -f2 ${results_folder}
+```
+
+## 致谢
+
+本工作基于以下优秀研究成果和开源项目构建:
+
+- [OpenLRM](https://github.com/3DTopia/OpenLRM)
+- [ExAvatar](https://github.com/mks0601/ExAvatar_RELEASE)
+- [DreamGaussian](https://github.com/dreamgaussian/dreamgaussian)
+
+感谢这些杰出工作对3D生成和数字人领域的重要贡献。
+
+## 引用
+```
+@inproceedings{qiu2025LHM,
+ title={LHM: Large Animatable Human Reconstruction Model for Single Image to 3D in Seconds},
+ author={Lingteng Qiu and Xiaodong Gu and Peihao Li and Qi Zuo
+ and Weichao Shen and Junfei Zhang and Kejie Qiu and Weihao Yuan
+ and Guanying Chen and Zilong Dong and Liefeng Bo
+ },
+ booktitle={arXiv preprint arXiv:2503.10625},
+ year={2025}
+}
+```
From b862d64a33a527624fc8829d923f714eb20de747 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Wed, 19 Mar 2025 21:42:24 +0800
Subject: [PATCH 07/53] gradio LHM
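
Add a local Gradio demo (app.py) that wires the existing pieces behind a web UI:
SAM2 matting and SMPL-X pose estimation for preprocessing, infer_single_view /
animation_infer for reconstruction and animation, and images_to_video for encoding.
The default encoding bitrate in ffmpeg_utils.py drops from 20M to 10M. The UI event
chain, simplified from demo_lhm() below:

```python
# Validate the input image, create a temporary working dir, then run the full pipeline.
submit.click(
    fn=assert_input_image, inputs=[input_image], queue=False,
).success(
    fn=prepare_working_dir, outputs=[working_dir], queue=False,
).success(
    fn=core_fn, inputs=[input_image, video_input, working_dir],
    outputs=[processed_image, output_video],
)
```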
---
LHM/utils/ffmpeg_utils.py | 9 +-
app.py | 761 ++++++++++++++++++++++++++++++++++++++
2 files changed, 766 insertions(+), 4 deletions(-)
mode change 100644 => 100755 LHM/utils/ffmpeg_utils.py
create mode 100755 app.py
diff --git a/LHM/utils/ffmpeg_utils.py b/LHM/utils/ffmpeg_utils.py
old mode 100644
new mode 100755
index a981bb4..9c0ed6b
--- a/LHM/utils/ffmpeg_utils.py
+++ b/LHM/utils/ffmpeg_utils.py
@@ -1,10 +1,11 @@
import os
import pdb
-import torch
-import numpy as np
-import imageio
+
import cv2
+import imageio
import imageio.v3 as iio
+import numpy as np
+import torch
VIDEO_TYPE_LIST = {'.avi','.mp4','.gif','.AVI','.MP4','.GIF'}
@@ -36,7 +37,7 @@ def encodeffmpeg(inputs, frame_rate, output, format="png"):
os.system(cmd)
print("video done!")
-def images_to_video(images, output_path, fps, gradio_codec: bool, verbose=False, bitrate="20M"):
+def images_to_video(images, output_path, fps, gradio_codec: bool, verbose=False, bitrate="10M"):
os.makedirs(os.path.dirname(output_path), exist_ok=True)
frames = []
for i in range(images.shape[0]):
diff --git a/app.py b/app.py
new file mode 100755
index 0000000..454864d
--- /dev/null
+++ b/app.py
@@ -0,0 +1,761 @@
+# Copyright (c) 2023-2024, Qi Zuo
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import base64
+import os
+import time
+
+import cv2
+import gradio as gr
+import numpy as np
+import spaces
+import torch
+from PIL import Image
+
+torch._dynamo.config.disable = True
+import argparse
+import os
+import pdb
+import shutil
+import subprocess
+from collections import defaultdict
+
+import torch
+from accelerate import Accelerator
+from omegaconf import OmegaConf
+
+from engine.pose_estimation.pose_estimator import PoseEstimator
+from engine.SegmentAPI.base import Bbox
+from engine.SegmentAPI.SAM import Bbox, SAM2Seg
+from LHM.runners.infer.utils import (
+ calc_new_tgt_size_by_aspect,
+ center_crop_according_to_mask,
+ prepare_motion_seqs,
+ resize_image_keepaspect_np,
+)
+from LHM.utils.face_detector import VGGHeadDetector
+from LHM.utils.ffmpeg_utils import images_to_video
+from LHM.utils.hf_hub import wrap_model_hub
+
+
+def get_bbox(mask):
+ height, width = mask.shape
+ pha = mask / 255.0
+ pha[pha < 0.5] = 0.0
+ pha[pha >= 0.5] = 1.0
+
+ # obtain bbox
+ _h, _w = np.where(pha == 1)
+
+ whwh = [
+ _w.min().item(),
+ _h.min().item(),
+ _w.max().item(),
+ _h.max().item(),
+ ]
+
+ box = Bbox(whwh)
+
+ # enlarge the bbox by a factor of 1.1
+ scale_box = box.scale(1.1, width=width, height=height)
+ return scale_box
+
+def infer_preprocess_image(
+ rgb_path,
+ mask,
+ intr,
+ pad_ratio,
+ bg_color,
+ max_tgt_size,
+ aspect_standard,
+ enlarge_ratio,
+ render_tgt_size,
+ multiply,
+ need_mask=True,
+):
+ """inferece
+ image, _, _ = preprocess_image(image_path, mask_path=None, intr=None, pad_ratio=0, bg_color=1.0,
+ max_tgt_size=896, aspect_standard=aspect_standard, enlarge_ratio=[1.0, 1.0],
+ render_tgt_size=source_size, multiply=14, need_mask=True)
+
+ """
+
+ rgb = np.array(Image.open(rgb_path))
+ rgb_raw = rgb.copy()
+
+ bbox = get_bbox(mask)
+ bbox_list = bbox.get_box()
+
+ rgb = rgb[bbox_list[1] : bbox_list[3], bbox_list[0] : bbox_list[2]]
+ mask = mask[bbox_list[1] : bbox_list[3], bbox_list[0] : bbox_list[2]]
+
+ h, w, _ = rgb.shape
+ assert w < h
+ cur_ratio = h / w
+ scale_ratio = cur_ratio / aspect_standard
+
+ target_w = int(min(w * scale_ratio, h))
+ offset_w = (target_w - w) // 2
+ # resize to target ratio.
+ if offset_w > 0:
+ rgb = np.pad(
+ rgb,
+ ((0, 0), (offset_w, offset_w), (0, 0)),
+ mode="constant",
+ constant_values=255,
+ )
+ mask = np.pad(
+ mask,
+ ((0, 0), (offset_w, offset_w)),
+ mode="constant",
+ constant_values=0,
+ )
+ else:
+ offset_w = -offset_w
+ rgb = rgb[:,offset_w:-offset_w,:]
+ mask = mask[:,offset_w:-offset_w]
+
+ # resize to target ratio.
+
+ rgb = np.pad(
+ rgb,
+ ((0, 0), (offset_w, offset_w), (0, 0)),
+ mode="constant",
+ constant_values=255,
+ )
+
+ mask = np.pad(
+ mask,
+ ((0, 0), (offset_w, offset_w)),
+ mode="constant",
+ constant_values=0,
+ )
+
+ rgb = rgb / 255.0 # normalize to [0, 1]
+ mask = mask / 255.0
+
+ mask = (mask > 0.5).astype(np.float32)
+ rgb = rgb[:, :, :3] * mask[:, :, None] + bg_color * (1 - mask[:, :, None])
+
+ # resize to specific size require by preprocessor of smplx-estimator.
+ rgb = resize_image_keepaspect_np(rgb, max_tgt_size)
+ mask = resize_image_keepaspect_np(mask, max_tgt_size)
+
+ # crop image to enlarge human area.
+ rgb, mask, offset_x, offset_y = center_crop_according_to_mask(
+ rgb, mask, aspect_standard, enlarge_ratio
+ )
+ if intr is not None:
+ intr[0, 2] -= offset_x
+ intr[1, 2] -= offset_y
+
+ # resize to render_tgt_size for training
+
+ tgt_hw_size, ratio_y, ratio_x = calc_new_tgt_size_by_aspect(
+ cur_hw=rgb.shape[:2],
+ aspect_standard=aspect_standard,
+ tgt_size=render_tgt_size,
+ multiply=multiply,
+ )
+
+ rgb = cv2.resize(
+ rgb, dsize=(tgt_hw_size[1], tgt_hw_size[0]), interpolation=cv2.INTER_AREA
+ )
+ mask = cv2.resize(
+ mask, dsize=(tgt_hw_size[1], tgt_hw_size[0]), interpolation=cv2.INTER_AREA
+ )
+
+ if intr is not None:
+
+ # ******************** Merge *********************** #
+ intr = scale_intrs(intr, ratio_x=ratio_x, ratio_y=ratio_y)
+ assert (
+ abs(intr[0, 2] * 2 - rgb.shape[1]) < 2.5
+ ), f"{intr[0, 2] * 2}, {rgb.shape[1]}"
+ assert (
+ abs(intr[1, 2] * 2 - rgb.shape[0]) < 2.5
+ ), f"{intr[1, 2] * 2}, {rgb.shape[0]}"
+
+ # ******************** Merge *********************** #
+ intr[0, 2] = rgb.shape[1] // 2
+ intr[1, 2] = rgb.shape[0] // 2
+
+ rgb = torch.from_numpy(rgb).float().permute(2, 0, 1).unsqueeze(0) # [1, 3, H, W]
+ mask = (
+ torch.from_numpy(mask[:, :, None]).float().permute(2, 0, 1).unsqueeze(0)
+ ) # [1, 1, H, W]
+ return rgb, mask, intr
+
+def parse_configs():
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--config", type=str)
+ parser.add_argument("--infer", type=str)
+ args, unknown = parser.parse_known_args()
+
+ cfg = OmegaConf.create()
+ cli_cfg = OmegaConf.from_cli(unknown)
+
+ # parse from ENV
+ if os.environ.get("APP_INFER") is not None:
+ args.infer = os.environ.get("APP_INFER")
+ if os.environ.get("APP_MODEL_NAME") is not None:
+ cli_cfg.model_name = os.environ.get("APP_MODEL_NAME")
+
+ args.config = args.infer if args.config is None else args.config
+
+ if args.config is not None:
+ cfg_train = OmegaConf.load(args.config)
+ cfg.source_size = cfg_train.dataset.source_image_res
+ try:
+ cfg.src_head_size = cfg_train.dataset.src_head_size
+ except:
+ cfg.src_head_size = 112
+ cfg.render_size = cfg_train.dataset.render_image.high
+ _relative_path = os.path.join(
+ cfg_train.experiment.parent,
+ cfg_train.experiment.child,
+ os.path.basename(cli_cfg.model_name).split("_")[-1],
+ )
+
+ cfg.save_tmp_dump = os.path.join("exps", "save_tmp", _relative_path)
+ cfg.image_dump = os.path.join("exps", "images", _relative_path)
+ cfg.video_dump = os.path.join("exps", "videos", _relative_path) # output path
+
+ if args.infer is not None:
+ cfg_infer = OmegaConf.load(args.infer)
+ cfg.merge_with(cfg_infer)
+ cfg.setdefault(
+ "save_tmp_dump", os.path.join("exps", cli_cfg.model_name, "save_tmp")
+ )
+ cfg.setdefault("image_dump", os.path.join("exps", cli_cfg.model_name, "images"))
+ cfg.setdefault(
+ "video_dump", os.path.join("dumps", cli_cfg.model_name, "videos")
+ )
+ cfg.setdefault("mesh_dump", os.path.join("dumps", cli_cfg.model_name, "meshes"))
+
+ cfg.motion_video_read_fps = 6
+ cfg.merge_with(cli_cfg)
+
+ cfg.setdefault("logger", "INFO")
+
+ assert cfg.model_name is not None, "model_name is required"
+
+ return cfg, cfg_train
+
+def _build_model(cfg):
+ from LHM.models import model_dict
+
+ hf_model_cls = wrap_model_hub(model_dict["human_lrm_sapdino_bh_sd3_5"])
+ model = hf_model_cls.from_pretrained(cfg.model_name)
+
+ return model
+
+def launch_pretrained():
+ from huggingface_hub import hf_hub_download, snapshot_download
+ hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='assets.tar', local_dir="./")
+ os.system("tar -xf assets.tar && rm assets.tar")
+ hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='LHM-0.5B.tar', local_dir="./")
+ os.system("tar -xf LHM-0.5B.tar && rm LHM-0.5B.tar")
+ hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='LHM_prior_model.tar', local_dir="./")
+ os.system("tar -xf LHM_prior_model.tar && rm LHM_prior_model.tar")
+
+def launch_env_not_compile_with_cuda():
+ os.system("pip install chumpy")
+ os.system("pip uninstall -y basicsr")
+ os.system("pip install git+https://github.com/hitsz-zuoqi/BasicSR/")
+ os.system("pip install numpy==1.23.0")
+ # os.system("pip install git+https://github.com/hitsz-zuoqi/sam2/")
+ # os.system("pip install git+https://github.com/ashawkey/diff-gaussian-rasterization/")
+ # os.system("pip install git+https://github.com/camenduru/simple-knn/")
+ # os.system("pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt240/download.html")
+
+
+def animation_infer(renderer, gs_model_list, query_points, smplx_params, render_c2ws, render_intrs, render_bg_colors):
+ '''Animate and render pre-computed Gaussians, avoiding a repeated reconstruction forward pass.
+ '''
+ render_h, render_w = int(render_intrs[0, 0, 1, 2] * 2), int(
+ render_intrs[0, 0, 0, 2] * 2
+ )
+ # render target views
+ render_res_list = []
+ num_views = render_c2ws.shape[1]
+ start_time = time.time()
+
+ for view_idx in range(num_views):
+ render_res = renderer.forward_animate_gs(
+ gs_model_list,
+ query_points,
+ renderer.get_single_view_smpl_data(smplx_params, view_idx),
+ render_c2ws[:, view_idx : view_idx + 1],
+ render_intrs[:, view_idx : view_idx + 1],
+ render_h,
+ render_w,
+ render_bg_colors[:, view_idx : view_idx + 1],
+ )
+ render_res_list.append(render_res)
+ print(
+ f"time elpased(animate gs model per frame):{(time.time() - start_time)/num_views}"
+ )
+
+ out = defaultdict(list)
+ for res in render_res_list:
+ for k, v in res.items():
+ if isinstance(v[0], torch.Tensor):
+ out[k].append(v.detach().cpu())
+ else:
+ out[k].append(v)
+ for k, v in out.items():
+ # print(f"out key:{k}")
+ if isinstance(v[0], torch.Tensor):
+ out[k] = torch.concat(v, dim=1)
+ if k in ["comp_rgb", "comp_mask", "comp_depth"]:
+ out[k] = out[k][0].permute(
+ 0, 2, 3, 1
+ ) # [1, Nv, 3, H, W] -> [Nv, 3, H, W] - > [Nv, H, W, 3]
+ else:
+ out[k] = v
+ return out
+
+def assert_input_image(input_image):
+ if input_image is None:
+ raise gr.Error("No image selected or uploaded!")
+
+def prepare_working_dir():
+ import tempfile
+ working_dir = tempfile.TemporaryDirectory()
+ return working_dir
+
+def init_preprocessor():
+ from LHM.utils.preprocess import Preprocessor
+ global preprocessor
+ preprocessor = Preprocessor()
+
+def preprocess_fn(image_in: np.ndarray, remove_bg: bool, recenter: bool, working_dir):
+ image_raw = os.path.join(working_dir.name, "raw.png")
+ with Image.fromarray(image_in) as img:
+ img.save(image_raw)
+ image_out = os.path.join(working_dir.name, "rembg.png")
+ success = preprocessor.preprocess(image_path=image_raw, save_path=image_out, rmbg=remove_bg, recenter=recenter)
+ assert success, f"Failed under preprocess_fn!"
+ return image_out
+
+def get_image_base64(path):
+ with open(path, "rb") as image_file:
+ encoded_string = base64.b64encode(image_file.read()).decode()
+ return f"data:image/png;base64,{encoded_string}"
+
+
+def demo_lhm(pose_estimator, face_detector, parsing_net, lhm, cfg):
+
+ @spaces.GPU(duration=100)
+ def core_fn(image: str, video_params, working_dir):
+ image_raw = os.path.join(working_dir.name, "raw.png")
+ with Image.fromarray(image) as img:
+ img.save(image_raw)
+
+ base_vid = os.path.basename(video_params).split(".")[0]
+ smplx_params_dir = os.path.join("./train_data/motion_video/", base_vid, "smplx_params")
+
+ dump_video_path = os.path.join(working_dir.name, "output.mp4")
+ dump_image_path = os.path.join(working_dir.name, "output.png")
+
+ # prepare dump paths
+ omit_prefix = os.path.dirname(image_raw)
+ image_name = os.path.basename(image_raw)
+ uid = image_name.split(".")[0]
+ subdir_path = os.path.dirname(image_raw).replace(omit_prefix, "")
+ subdir_path = (
+ subdir_path[1:] if subdir_path.startswith("/") else subdir_path
+ )
+ print("subdir_path and uid:", subdir_path, uid)
+
+ motion_seqs_dir = smplx_params_dir
+
+ motion_name = os.path.dirname(
+ motion_seqs_dir[:-1] if motion_seqs_dir[-1] == "/" else motion_seqs_dir
+ )
+
+ motion_name = os.path.basename(motion_name)
+
+ dump_image_dir = os.path.dirname(dump_image_path)
+ os.makedirs(dump_image_dir, exist_ok=True)
+
+ print(image_raw, motion_seqs_dir, dump_image_dir, dump_video_path)
+
+ dump_tmp_dir = dump_image_dir
+
+
+ source_size = cfg.source_size
+ render_size = cfg.render_size
+ render_fps = 30
+
+ aspect_standard = 5.0 / 3
+ motion_img_need_mask = cfg.get("motion_img_need_mask", False) # False
+ vis_motion = cfg.get("vis_motion", False) # False
+
+ with torch.no_grad():
+ parsing_out = parsing_net(img_path=image_raw, bbox=None)
+ parsing_mask = (parsing_out.masks * 255).astype(np.uint8)
+ shape_pose = pose_estimator(image_raw)
+ assert shape_pose.is_full_body, f"Invalid input image: {shape_pose.msg}"
+
+ # prepare reference image
+ image, _, _ = infer_preprocess_image(
+ image_raw,
+ mask=parsing_mask,
+ intr=None,
+ pad_ratio=0,
+ bg_color=1.0,
+ max_tgt_size=896,
+ aspect_standard=aspect_standard,
+ enlarge_ratio=[1.0, 1.0],
+ render_tgt_size=source_size,
+ multiply=14,
+ need_mask=True,
+ )
+
+ try:
+ rgb = np.array(Image.open(image_raw))
+ rgb = torch.from_numpy(rgb).permute(2, 0, 1)
+ bbox = face_detector.detect_face(rgb)
+ head_rgb = rgb[:, int(bbox[1]) : int(bbox[3]), int(bbox[0]) : int(bbox[2])]
+ head_rgb = head_rgb.permute(1, 2, 0)
+ src_head_rgb = head_rgb.cpu().numpy()
+ except:
+ print("w/o head input!")
+ src_head_rgb = np.zeros((112, 112, 3), dtype=np.uint8)
+
+ # resize to dino size
+ try:
+ src_head_rgb = cv2.resize(
+ src_head_rgb,
+ dsize=(cfg.src_head_size, cfg.src_head_size),
+ interpolation=cv2.INTER_AREA,
+ ) # resize to dino size
+ except:
+ src_head_rgb = np.zeros(
+ (cfg.src_head_size, cfg.src_head_size, 3), dtype=np.uint8
+ )
+
+ src_head_rgb = (
+ torch.from_numpy(src_head_rgb / 255.0).float().permute(2, 0, 1).unsqueeze(0)
+ ) # [1, 3, H, W]
+
+ save_ref_img_path = os.path.join(
+ dump_tmp_dir, "output.png"
+ )
+ vis_ref_img = (image[0].permute(1, 2, 0).cpu().detach().numpy() * 255).astype(
+ np.uint8
+ )
+ Image.fromarray(vis_ref_img).save(save_ref_img_path)
+
+ # read motion seq
+ motion_name = os.path.dirname(
+ motion_seqs_dir[:-1] if motion_seqs_dir[-1] == "/" else motion_seqs_dir
+ )
+ motion_name = os.path.basename(motion_name)
+
+ motion_seq = prepare_motion_seqs(
+ motion_seqs_dir,
+ None,
+ save_root=dump_tmp_dir,
+ fps=30,
+ bg_color=1.0,
+ aspect_standard=aspect_standard,
+ enlarge_ratio=[1.0, 1.0],
+ render_image_res=render_size,
+ multiply=16,
+ need_mask=motion_img_need_mask,
+ vis_motion=vis_motion,
+ motion_size=300,
+ )
+
+ camera_size = len(motion_seq["motion_seqs"])
+ shape_param = shape_pose.beta
+
+ device = "cuda"
+ dtype = torch.float32
+ shape_param = torch.tensor(shape_param, dtype=dtype).unsqueeze(0)
+
+ lhm.to(dtype)
+
+ smplx_params = motion_seq['smplx_params']
+ smplx_params['betas'] = shape_param.to(device)
+
+ gs_model_list, query_points, transform_mat_neutral_pose = lhm.infer_single_view(
+ image.unsqueeze(0).to(device, dtype),
+ src_head_rgb.unsqueeze(0).to(device, dtype),
+ None,
+ None,
+ render_c2ws=motion_seq["render_c2ws"].to(device),
+ render_intrs=motion_seq["render_intrs"].to(device),
+ render_bg_colors=motion_seq["render_bg_colors"].to(device),
+ smplx_params={
+ k: v.to(device) for k, v in smplx_params.items()
+ },
+ )
+
+ # rendering !!!!
+ start_time = time.time()
+ batch_dict = dict()
+ batch_size = 80  # keep chunks small to avoid running out of GPU memory
+
+ for batch_i in range(0, camera_size, batch_size):
+ with torch.no_grad():
+ # TODO check device and dtype
+ # dict_keys(['comp_rgb', 'comp_rgb_bg', 'comp_mask', 'comp_depth', '3dgs'])
+ keys = [
+ "root_pose",
+ "body_pose",
+ "jaw_pose",
+ "leye_pose",
+ "reye_pose",
+ "lhand_pose",
+ "rhand_pose",
+ "trans",
+ "focal",
+ "princpt",
+ "img_size_wh",
+ "expr",
+ ]
+ batch_smplx_params = dict()
+ batch_smplx_params["betas"] = shape_param.to(device)
+ batch_smplx_params['transform_mat_neutral_pose'] = transform_mat_neutral_pose
+ for key in keys:
+ batch_smplx_params[key] = motion_seq["smplx_params"][key][
+ :, batch_i : batch_i + batch_size
+ ].to(device)
+
+ res = lhm.animation_infer(gs_model_list, query_points, batch_smplx_params,
+ render_c2ws=motion_seq["render_c2ws"][
+ :, batch_i : batch_i + batch_size
+ ].to(device),
+ render_intrs=motion_seq["render_intrs"][
+ :, batch_i : batch_i + batch_size
+ ].to(device),
+ render_bg_colors=motion_seq["render_bg_colors"][
+ :, batch_i : batch_i + batch_size
+ ].to(device),
+ )
+
+ for accumulate_key in ["comp_rgb", "comp_mask"]:
+ if accumulate_key not in batch_dict:
+ batch_dict[accumulate_key] = []
+ batch_dict[accumulate_key].append(res[accumulate_key].detach().cpu())
+ del res
+ torch.cuda.empty_cache()
+
+ for accumulate_key in ["comp_rgb", "comp_mask"]:
+ batch_dict[accumulate_key] = torch.cat(batch_dict[accumulate_key], dim=0)
+
+ print(f"time elapsed: {time.time() - start_time}")
+ rgb = batch_dict["comp_rgb"].detach().cpu().numpy() # [Nv, H, W, 3], 0-1
+ mask = batch_dict["comp_mask"].detach().cpu().numpy() # [Nv, H, W, 3], 0-1
+ mask[mask < 0.5] = 0.0
+
+ rgb = rgb * mask + (1 - mask) * 1
+ rgb = np.clip(rgb * 255, 0, 255).astype(np.uint8)
+
+ if vis_motion:
+ # print(rgb.shape, motion_seq["vis_motion_render"].shape)
+
+ vis_ref_img = np.tile(
+ cv2.resize(vis_ref_img, (rgb[0].shape[1], rgb[0].shape[0]))[
+ None, :, :, :
+ ],
+ (rgb.shape[0], 1, 1, 1),
+ )
+ rgb = np.concatenate(
+ [rgb, motion_seq["vis_motion_render"], vis_ref_img], axis=2
+ )
+
+ os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
+
+ images_to_video(
+ rgb,
+ output_path=dump_video_path,
+ fps=render_fps,
+ gradio_codec=False,
+ verbose=True,
+ )
+
+
+ return dump_image_path, dump_video_path
+
+ _TITLE = '''LHM: Large Animatable Human Model'''
+
+ _DESCRIPTION = '''
+ Reconstruct a human avatar in 0.2 seconds with an A100!
+ '''
+
+ with gr.Blocks(analytics_enabled=False) as demo:
+
+ logo_url = "./assets/LHM_logo_parsing.png"
+ logo_base64 = get_image_base64(logo_url)
+ gr.HTML(
+ f"""
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+ <img src="{logo_base64}" style="height: 60px;" />
+ <h1> Large Animatable Human Model </h1>
+ </div>
+ """
+ )
+
+ gr.Markdown(
+ """
+ [Project Page](https://lingtengqiu.github.io/LHM/) · [arXiv Paper](https://arxiv.org/pdf/2503.10625) · [HuggingFace Space](https://huggingface.co/spaces/DyrusQZ/LHM)
+ """
+ )
+
+ gr.HTML(
+ """
+ Notes: Please provide a full-body image to avoid detection errors. Currently, only motion video input with at most 300 frames is supported.
+ """
+ )
+
+ # DISPLAY
+ with gr.Row():
+
+ with gr.Column(variant='panel', scale=1):
+ with gr.Tabs(elem_id="openlrm_input_image"):
+ with gr.TabItem('Input Image'):
+ with gr.Row():
+ input_image = gr.Image(label="Input Image", value="./train_data/example_imgs/-00000000_joker_2.jpg",image_mode="RGBA", height=480, width=270, sources="upload", type="numpy", elem_id="content_image")
+ # EXAMPLES
+ examples = os.listdir('./train_data/example_imgs/')
+ with gr.Row():
+ examples = [os.path.join('./train_data/example_imgs/', example) for example in examples]
+ gr.Examples(
+ examples=examples,
+ inputs=[input_image],
+ examples_per_page=9,
+ )
+
+ examples_video = os.listdir('./train_data/motion_video/')
+ examples =[os.path.join('./train_data/motion_video/', example, 'samurai_visualize.mp4') for example in examples_video]
+
+ examples = sorted(examples)
+ new_examples = []
+ for example in examples:
+ video_basename = os.path.basename(os.path.dirname(example))
+ input_video = os.path.join(os.path.dirname(example), video_basename+'.mp4')
+ if not os.path.exists(input_video):
+ shutil.copyfile(example, input_video)
+ new_examples.append(input_video)
+
+ with gr.Column(variant='panel', scale=1):
+ with gr.Tabs(elem_id="openlrm_input_video"):
+ with gr.TabItem('Target Motion'):
+ with gr.Row():
+ video_input = gr.Video(label="Input Video",height=480, width=270, interactive=False, value=new_examples[3])
+
+ with gr.Row():
+ gr.Examples(
+ examples=new_examples,
+ inputs=[video_input],
+ examples_per_page=9,
+ )
+
+ with gr.Column(variant='panel', scale=1):
+ with gr.Tabs(elem_id="openlrm_processed_image"):
+ with gr.TabItem('Processed Image'):
+ with gr.Row():
+ processed_image = gr.Image(label="Processed Image", image_mode="RGBA", type="filepath", elem_id="processed_image", height=480, width=270, interactive=False)
+
+ with gr.Column(variant='panel', scale=1):
+ with gr.Tabs(elem_id="openlrm_render_video"):
+ with gr.TabItem('Rendered Video'):
+ with gr.Row():
+ output_video = gr.Video(label="Rendered Video", format="mp4", height=480, width=270, autoplay=True)
+
+ # SETTING
+ with gr.Row():
+ with gr.Column(variant='panel', scale=1):
+ submit = gr.Button('Generate', elem_id="openlrm_generate", variant='primary')
+
+
+ working_dir = gr.State()
+ submit.click(
+ fn=assert_input_image,
+ inputs=[input_image],
+ queue=False,
+ ).success(
+ fn=prepare_working_dir,
+ outputs=[working_dir],
+ queue=False,
+ ).success(
+ fn=core_fn,
+ inputs=[input_image, video_input, working_dir], # video_params refer to smpl dir
+ outputs=[processed_image, output_video],
+ )
+
+ demo.queue()
+ demo.launch(server_name="0.0.0.0")
+
+
+def launch_gradio_app():
+
+ os.environ.update({
+ "APP_ENABLED": "1",
+ "APP_MODEL_NAME": "./exps/releases/video_human_benchmark/human-lrm-1B/step_060000/",
+ "APP_INFER": "./configs/inference/human-lrm-1B.yaml",
+ "APP_TYPE": "infer.human_lrm",
+ "NUMBA_THREADING_LAYER": 'omp',
+ })
+
+ facedetector = VGGHeadDetector(
+ "./pretrained_models/gagatracker/vgghead/vgg_heads_l.trcd",
+ device='cpu',
+ )
+ facedetector.to('cuda')
+
+ pose_estimator = PoseEstimator(
+ "./pretrained_models/human_model_files/", device='cpu'
+ )
+ pose_estimator.to('cuda')
+ pose_estimator.device = 'cuda'
+ parsingnet = SAM2Seg()
+ accelerator = Accelerator()
+
+ cfg, cfg_train = parse_configs()
+ lhm = _build_model(cfg)
+ lhm.to('cuda')
+
+
+
+ demo_lhm(pose_estimator, facedetector, parsingnet, lhm, cfg)
+
+ # cfg, cfg_train = parse_configs()
+ # demo_lhm(None, None, None, None, cfg)
+
+
+
+if __name__ == '__main__':
+ # launch_env_not_compile_with_cuda()
+ launch_gradio_app()
From fac5b485f50aba6f5516a41781ddfe5abb489fe9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Wed, 19 Mar 2025 21:43:35 +0800
Subject: [PATCH 08/53] gradio_readme
---
README.md | 6 ++++++
README_CN.md | 8 ++++++++
engine/pose_estimation/pose_estimator.py | 5 +++++
3 files changed, 19 insertions(+)
diff --git a/README.md b/README.md
index 88fa613..028870b 100755
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@
如果您熟悉中文,可以[阅读中文版本的README](./README_CN.md)
## 📢 Latest Updates
+**[March 19, 2025]** Local Gradio App (app.py)
**[March 19, 2025]** Gradio Optimization: Faster and More Stable 🔥🔥🔥
**[March 15, 2025]** Inference Time Optimization: 30% Faster
**[March 13, 2025]** Initial release with:
@@ -131,6 +132,11 @@ After downloading weights and data, the folder of the project structure seems li
├── requirements.txt
```
+### 💻 Local Gradio Run
+```bash
+python ./app.py
+```
+
### 🏃 Inference Pipeline
```bash
# bash ./inference.sh ./configs/inference/human-lrm-500M.yaml ./exps/releases/video_human_benchmark/human-lrm-500M/step_060000/ ./train_data/example_imgs/ ./train_data/motion_video/mimo1/smplx_params
diff --git a/README_CN.md b/README_CN.md
index a607246..3cbb81a 100755
--- a/README_CN.md
+++ b/README_CN.md
@@ -10,6 +10,7 @@
## 📢 最新动态
+**[2025年3月19日]** 本地部署 Gradio
**[2025年3月19日]** HuggingFace Demo:更快更稳定
**[2025年3月15日]** 推理时间优化:提速30%
**[2025年3月13日]** 首次版本发布包含:
@@ -134,6 +135,13 @@ tar -xvf ./motion_video.tar
├── requirements.txt
```
+
+
+### 💻 本地部署
+```bash
+python ./app.py
+```
+
### 🏃 推理流程
```bash
# bash ./inference.sh ./configs/inference/human-lrm-500M.yaml ./exps/releases/video_human_benchmark/human-lrm-500M/step_060000/ ./train_data/example_imgs/ ./train_data/motion_video/mimo1/smplx_params
diff --git a/engine/pose_estimation/pose_estimator.py b/engine/pose_estimation/pose_estimator.py
index ef7dffa..317c5f1 100755
--- a/engine/pose_estimation/pose_estimator.py
+++ b/engine/pose_estimation/pose_estimator.py
@@ -103,6 +103,11 @@ def __init__(self, model_path, device="cuda"):
self.pad_ratio = 0.2
self.img_size = 896
self.fov = 60
+
+ def to(self, device):
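+ """Move the pose estimator (and its underlying mhmr_model) to `device`; returns self for chaining.
+
+ Usage sketch: PoseEstimator("./pretrained_models/human_model_files/", device="cpu").to("cuda")
+ """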
+ self.device = device
+ self.mhmr_model.to(device)
+ return self
def get_camera_parameters(self):
K = torch.eye(3)
From 5c875467b459b7d8a3d9bf565183867bd0ed9c35 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=B9=A4=E7=A6=B9?=
Date: Wed, 19 Mar 2025 23:28:45 +0800
Subject: [PATCH 09/53] example-imgs
---
train_data/example_imgs/-00000000_joker_2.jpg | Bin 0 -> 382842 bytes
train_data/example_imgs/-0000_test.png | Bin 0 -> 915109 bytes
train_data/example_imgs/000057.png | Bin 0 -> 370211 bytes
train_data/example_imgs/11.JPG | Bin 0 -> 336824 bytes
train_data/example_imgs/14.JPG | Bin 0 -> 252733 bytes
train_data/example_imgs/4.JPG | Bin 0 -> 77897 bytes
train_data/example_imgs/7.JPG | Bin 0 -> 132501 bytes
.../C85463B5-064E-44F1-BB96-7F6C957D4613.png | Bin 0 -> 1351693 bytes
..._-videos_clips__-data__-326594731337_0.png | Bin 0 -> 203787 bytes
9 files changed, 0 insertions(+), 0 deletions(-)
create mode 100755 train_data/example_imgs/-00000000_joker_2.jpg
create mode 100755 train_data/example_imgs/-0000_test.png
create mode 100755 train_data/example_imgs/000057.png
create mode 100755 train_data/example_imgs/11.JPG
create mode 100755 train_data/example_imgs/14.JPG
create mode 100755 train_data/example_imgs/4.JPG
create mode 100755 train_data/example_imgs/7.JPG
create mode 100755 train_data/example_imgs/C85463B5-064E-44F1-BB96-7F6C957D4613.png
create mode 100755 train_data/example_imgs/video_image_20240913__-videos_clips__-data__-326594731337_0.png
diff --git a/train_data/example_imgs/-00000000_joker_2.jpg b/train_data/example_imgs/-00000000_joker_2.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..956af0cfaf00eb2f6d988a63bce81296471c967e
GIT binary patch
literal 382842
zv?CY=Az2IC({~r$f-IzAM;mFj0$&!)Sq-68iILH6|9g3Njm*{ew@ed
z8L)R>L82@E=#a>)G(d4%Pah;ND|gY7b|X4^t|!3srBe@rDJq3A`}VN^jB^ME`oNni
zCM=rzy%g8AmHpeDw7=lmkUurEK^Oj~1Geuvw|-c3c9;*Ty+{3eNF`RbV6{^fYBB|IFCfpC{u$>;@;K3p2NY(dk?a@kZpErd_kc9p^cEM-o
z2)w&n6uUdAlqT}b81tQR)T7;SP{%N;;4>js#1gE5X>{)!dS@J@Q_hN5L8NNcSX-(6
zUYFdeqL(*Wau89CygTrG%0*qv2Mk-+;RC`_xmJ>9H&}LhgO-LwoYABXPA-TLFI?e!
zOHoF=6`GVEptBO1f8oz63UAC(Go<
zPG6qR3gvSK9RxXl>^ei8^xYxlt^Q|!EnCK%5{ww{2q&B-1^%X|h+L3jQB2+HPnvam
zLDfLo-tz*cMWL?4shtU^FeXmO&9XsKR-30tH7m`c=;dIaf_v|^5=3asrRgN3G7*AL
zWf6S;1iN|p(tam)@Vde4u14cxIriW5(GcqTiqx7llheCHG0oE%3u1zZJ;Y!%&nP{#
zU?&T+84ATlVcs$GN)1;|st+AFO(?$_R$&5?;(r#}&_efb$$4fTC4HO7OeXH;xh{zj&JZAp6a@
zo$k-x{U&BnLwDnTwl&1R@K;JS|LvckasV1|s5jOxx6BRn(JYLG0d(@p`0zU>k2S|i
zpT4chBRRoupry+Uk!krwbL$jrXZz>--PkebbkvWd0so-#SR=Kg0xYPlqTC2RS^I
znd7uHhSVtCS%ugFwC#HHLtVKvC(26mP|o5)dJ0N0y-q5Hyk*M(@L%r++oUlq&JSbC
z_Go0hAWl#HshL9Di9@J2`=#C5@L5Dw18Su1fW9U(0{oHUgs|yybhJQ2L&CXKvJb-ASoqCgVcu(VbToPMv2lP(h>@?
z5t0JZ-GYG6zCS#F!meF=@4oMI&g;B~gu0nnQ5i>CQwbZWhh3XqMrUo?g<)%H?3Sm+$&OG?SIFeKC5$Zga
zdtAQ_O#luRfbj=Pu4=gZOyUH0s9DKpjt3r`&)6SFb+_1w-?74Uq_k0jNPYq{IcXhV
zeKE;WX>M6kOnukc%9|9zSr#!Q#zDUYqALi9MsW$-i*XZQ7KDC#^)mCWT(C#A_Lnr6
zz#pJ%AThit&FSV3PC?$!to>njZL*S^94j6R!x=#Y^}wP4bmsi%5n1J}L2P~xC)nzGSPcziK18Vxht`wy-Ff`h>-)$*fRvZOq}6O?Rwqpd$e-2@vKekl
zt+rSZxU_|7TN_g#Z}ewe*R=dI2lBpD@}#s<`$mpq^{{lz{ha={9$(wb#Dd>6n!gbf
z)gn61aN3r?B#5!%_wH-cH_g>=%bF5Ta8zVwP^vyQe(T^g)X+MIpg^oce3=hKUyhAo^8h;?hkLRt1Yo8;d6l)Z`hRzTdA-L{8FC?CB35>
zTFjLc8TL};$pupP1$}*6mqphMjQVGUSiN!<6y?I);c?#{`emK?Tolupp
zju?d)#fukkxLu}^$$01?4t(NVAInz54SURqt%0Qt
z)Ib@^ABj?nkZyx5iq2NPtYinv?>HQ6vvNQYzwyA7LX>MZGlQ`9dFGPU?Q64oI|=!U
zk(y0$*_{$YE!H2Zq!7OZikt4$@)LP>m4JfCerRRNW<|1G0e^`B^W?7obnBdU|CV_i
zi~K{9Y9SJC?!KVa^Ab;M1ZEwy$=3K
zk42Ys>fshWf4(S@S>>>->A67t0dTd*YTvtB(iO9_E^$cP+yv$ECdZ^R
z2yAsTBlBDO^Pu(*+U-EbA`@C17P`RU9*vpEu28vY?LlJO-&4XOopVnU4c)}cA+W1B
zt1YVny)WV7Cza&wZ02v*_wMEYm#A>1U#y6xGSUU;7)&_1QvCy{v51%+Sy+rh-oG_Y
zYs&8VCX@{#12UBW5*Ta$4&t{T<#ak_?3
zslJATKv3%`FViCFEV^>SSOp++FTwB9=mQj)!%xIZQz|R6a+R1X>n8#)
zR@J=4T|H%$wNg~BE_~}}s)7@f;Ot;srDEd**$Ro)qBXh4ldA5L60iW`M;i%S
z3jT_JlilBVy(XOt-OQc
zOy=Dn_j*t7CExaPwocvTZ^Pyr@F!4nK3;!rG>?|w^!&?wM|y16kjH_i6JFPnl1?km
z%oOO=@x5&~U`gvI?rym*u}eqX5K2xtS^mr@=yZeUFyDa*v|_r`>C%|5Cmrs^t{?<*vnMedVJV!N!Ui
z|9~nSbkP(=%qO#U$Sude$}KN&sTKENna}zM&=Z-`A(|B8v;PO!hS$ECCtK|ed+ZDa
zp-i2fJ9uk!vdZP=io5r?pO14NovoxU^N2RiUCMd5TC|o0cc8BO+}9@xlw#b~2o4Mt
zMppif%26%WTs&2{A@3K?2)~5Ffv^rHpQMjAAE;uY*V1QrdTLhG$3unTZ5wI$up??%
zy3%&&;7#RDcL)y_TVuwImE-ZNGMV;iUK=bzw(m6fOHVeNT`beMuxuIgl$Ka+Mc$HX
z3e4)$JPw%@!cbZRJYOn}KO@^?hyx7`8F{z$U0C~bh
z-b{J7SrrVmC2T(2if|!)?JscfVQFjdMU2pas145q8kR=#8k|WnQVPGA4&6xIP@WSi
zd;vpgP5q!2Qb>F(?C}qw=VI4|TJuUCGojTn4v5
z;Z21^`_LhgO_g2aDI^()h0Q0LEfK`2J_oW9H`y@EhjLvVCWC_0gRV97^z|Q6X&>X{
zwp>W@FgW^*i8ozYf60@?V%xtXt@ODlLF+b^=chQnSef()SSdb^G^2_luP?cd;4Cm9
za9hF&nTw4g)HBD=;WD(J0Vs>6DZkYUQJiF%3~zwhb%P97Ix#=Rsu4}1aKuJ^Ac5G%+PVrAYMMTy5CX<;~0UAjcQ113N)?AXI_s{*X&Qd
z&vCcYxXgP0P{$x^)8Kqbvu>k~T_=Nt`VK8`!ALCJ!>b|&RaTJ!6)?&O2^E-e4SF~+
zSlGsbMM>23!w9P^nfbnu{&*f#l#NQ;Q+&W46
zfzT4JknSjh2?yY&a2I(sw6UpB7AwYrGo65IqV5?eh3o)4_GX=4#Y@>eo}B$1If2ZE
zFd0k)qY8vw|DMlyL$uSUxcI*A+_x8Q&D6f%qelM{&1hf4Fq2Qi@heTn~Q`XtMg=KgmAXOeuMP)Qayt`aT?EZg~a>#m1f<6X@#sr!S>$P(!
zbvFL;7{UJ66|~umN;h)f($TrALxM6yRJ`Zr0Ob~Ev9<1=3SQq;e&9v;Y8E-h;5YPxUgCz6uZOdk;dY#9Y97i|bGu<1u~gqT`noN~eBeJGikfX(GoY|Us!L!c!k#2r-wUM7k)RBk5s)l2Aye!iak
zPwCn3M|*6)ynV)U;OOG1AJnvxr|hh%E9lSe&qPcZEONg`5OJMe{~q;uJ%F^sxZt1K
zBJk)RaN;x#AOMy@#8{hN!fAZcug>0|Aw%Af$YhGdkbYJ2_X22KO#8|z9|o-#3=}eS
zmsR*NTa=~|o>BwAwqJV7RrDVEhb_~IGE&S2b7@m9A_~}4(!ddR;NU^Z?@z(|h~N-U
z=x`g2CO(3|SBOs#Zk=oS_1{viXB}JjwCdyN3B&8*0{+mOUx;RgfM-l=Cx8bk2)e<#Z9i{-7ky%5-JE$Oqr^p8wqN1UGF(FtH{`9n=PxFK`L%9P||VgL`-S
zIvX~e#QTf5ZvXV_(g)HoB%+zj&@9`%O1a-{NXigY3I-(@Ov%kNqC#(4xEv-kwwHsT
z40o^!Wac_lRzzmX;5iS~l+{@3M~Z`S5~6K3^Ri^_s2h+d*m9J~g_83&op~u>Ea!=s
z{9WARFBQl@kgU0(Aeaam6CsNq^JowKE#c8T-~S^CiHAdx_B>KRChCw)4Nl`h#4bI}
zTFU0)^43g=s|(US5o5LmNS4PL8Q;Y3^>Rchkh5rg>`W?z{YXMZVd2E44!|J*q7rv}
zb>#nDYMACAXQy0;REr5El1xOkYqI1u7DmRu?4>XBU}`;9sMC__#DR)OAxJ6Ar;dg~
z$sLON5i0{1L5o7EqN}q~R-bk`Rx)N3;y!n^2UD6BtGy0yKOvjsbrX%{1w7dnjcm{>
zviqO2s*5QQQa!Iwu}6*3L$-xn0JsEpZdDLmW$ewfNAL3PAi)?rqhyU1(#^cQ>)Vq7
z+gDNl04DOIx8z7W7|u>(OOm*3ZYDH*ENNXdId7r
zA*8hJPo(yX;nBEIo^)+4hc#Ri(kUm|kcUqTBslC}^=u3gctoA)<(&(_j6~`ZK>_2$
za*}_5J=~^Hkazw{(|QnYwt*=v%d`UZ1$Uh4b@t22_VhBo*1mJ;5y#JW&doCx!*vEm
zVU3g0MAX7);t)45zdr|;ysb2-+TK~j#%Y){BU>aY*px+1it)qr_4h9Li*L+N`5Id=
zII{v@MAWl#z+P>!ep5jt{!m3Rw5fdP&{GTB>S=Cb7reTyHm8RSP~l|LrRWouy1OzH
zg3W0wR;$hybgd}1q*%j~8RKS>AC0|)?xSp7?WRhb-+^M?Nij~~?OdhNH08iNToi@U
z{mz!(*-xg>ZKIQ>sA4qXqLxld43Mdq{p)7JV(2@xC9&&nnf49)W`gxkFawdKaVGcg
zgVSG*j^jPip60Jxe;#GGR|LaOj)0!#90pTiNN@Uv-!Mg~$1
z-qaa==hl^aQxr*4JQ?C6=?gF6Zn2Z0f+S!nrFe1&wYb~lcLJ^V3!OIp^A(GgLk5(G
z2>{)d2}-VdMF-z}`KQjI*Y)eI)>DiG@Bgk|F=c@O!JyuHUHYl@3n!F35h@M_L*gSr
zr18A>B7{8`2chHbmL->=r=1H5gczwpM{zIG@xKL`;u!vEw+eFu4QAN&&@M2ONq>rh
zQKcWdUfz1r76FF~W=L@C38C|*>{^rk0B-!n6f;5!{}rbz>-r6%C}6flFo
zk({FW6YD)khCk2m|B00iV-m~m<0GYHY?&wOkC%8CNWXA?h>LvCy>}9SJ>*BCt(WZg
zupTT@$_mOm=NM9#Iu{O;`3K0ed=NDP*@D-|ff94-jReqH#DKj^dav*>D%-(zU#k=D
z!RL)Fa99Uiwl-yo{q=4QieJ|?R9l12e@yJYk%T|iWno2e6?b!eztZ{Z
znjMv13}7-b&{_A?2sWSGUr8Gb`~Grr;gf7vMU6r5S-&Zzx-?{{unv>SdqC`Ha4P2P
z55f0aR+S}!URflvWQ&w(fPsu>Jh=zW++kfW*r>GjJuhEoKdOPl8GtDXf*Q%4H>vYJ
zr7G*|x+5HoQwZoTXK8_Cq?{H6qDcif^QU!eikp?a2cZM2lGj|!ANirO)zzK`hKuOF
z34LX;_KnU#VQsJE#@wsy$u*==5REDasgG}(z`7|kG4rlkt3#f~b9*Yo^;76r7VB-C
zbyOzf$8zfs5O^ZA0e9onS*bF73|Fg0HlH^_F!P!W9}S>dCd<;4@lrgV)PDPNQc3PR
znQbw4#9D|+NFqyV8UPAM-9#!MGDRg;8;|m0o;Vmos~aD?@Gvi~JsAD2Ql?j(-~VCu
zld^6sRfVOU>=tPcc{#5Kj+w$vcKXEWqo8A_bfxS997aOFrp%QcoAU
zg;njPC@4gmL9c16L;qfHZxrQ&firH;Zz1~v@B_u{#KE|QeY{w3v{v%7#7N)ZHkODq
z&D55Hb&9YD*Rh?LCGq^H+x#`4gXrM~NMgAG7lgrYx+~nP$!TKvKq234GFHad?Q_RN
zF&~g1*zin*fP>;q{q84WZtS{h`|-C&O>Fm^o23rxoG7oTC^R3YsdG;T`#oZ4;>E0(
zsd3nAkBjdD`_e!FaT2wZ{Cro{97N;J`+H=og6p30-`$!`?X-1+d-^#lO-(vX=JhJy
zHokX<8#=drqyW3EH!@^BvWhTL&w(1(^BVHBrZ696etMO*B5~j`h^fGCmzfgbO_!aD
zb4k0wTpP#Hy?mpbuWY`Hx>C0U3*p~`N}fE?>Oui44CBYQ39?F(6N8q)#!0-G2&;ig
zZqA{Z7p3tV#QH^tbfN6Gf_{1sf7HGalpW8N4l9h5!5aYRR$P=1RlZD($T!S>gj=w+
z$GUX)Pt2EVR>6QnBu2`)hc_^7+FyG@WG>blax7&La4iwP-7HYE2wgFFF*s9Voe`@E+`L
zh7(3O>?gDGm-8#>!tSoh3s&kIHJJ*$@2Y+4@I<1eQRwW=+v$M4jOGIyOjq1R{v9-IZt613+
z2$;wk!9gN?ro#Q{Z0@IAQ~SrXwd7|O%PeEijL(rW9JzQm$|*Tz?DKwe&oa1o7Oqb7
zG2x}SJ;ZbI?)pj9W_HtajBJiq8md^LT@o8zvf&2IoP-%|px0CO3sLTqu8^)82QfOL
zxv{;{u2BlSo6^U2w``m&G;1iG0gA~mS@Jnh_IBUcX|R|4Y`&z_Z(XxjgENCoo{#}6
zG`kXcRFAkycgxZOk;d
zxK=yr;T%?JJgZ}P3}K}O3T=2>8C+^W-x>Qck^?U0UiGr31jV+%);MXTFJPPk9j;JO
ziglhZo0I@X^myv9>@xMI#aL|Slq-fxcW65UxZ4;duVPoAs$w!MMrA698QohB`U7t;
zE5n&<=r7Hz;zH=r
zRIi|*P-)**v)x0_V69yH8CZSW0#I{Y_^)UHLXP9BP@Qlr337rfUD&
zVhgae2Ki`zA3Bw74tVM>R#N)d2Gelcv3kP%+35fLl(~!Vse=*
z@%jV=-zC#Xy)Fj-MJ1bTpvQ+uB7=-9F07ve`Uu@(g
zD3#?WFGm6-a_Pt&HQG0DwR66rn1sUkV``ML&}3XXgQ1Q808U)=Y;P#;R@raaZKFzb
zL#1U40;DeRx1R8
z6XhIz=+m7!yEq4-z$<+keVKdYlWmNuz79kMG~oiI0l-}a63!RzHZDEC@B%0-=FYFe
zG6L`WPxL-Ej%mhs#UC&ZjY-JaCIxBel@D(`FLr(Ct}AB_;vK#2MdWgRjhAYwjIZXZ
zdEB^O?pBO$tt|!1h*CT)<~luBO(pyMY!%`A18x%!jnmRHSN_Tf;A)ujsy%$dl~|5(
zsDZIuif)hVeV~aZd0HL%D9e~S=h{q`2Ui_qP?6Rt;H!L&zbEa#wDdM4Cr2g#9o6RY
zrmxswPu;PDz6&r4*?yPp{<_rZ6j(5dSPU7vJRE;E?J~t@6w1d-FPToVN1BRqGmf+R
z6=W7VG_le*RSr_ssEF5~Wc-sss#6q76V~CgTnqd8F>42l#PXN>GeMkAv$@W13>h37
zPUSzFjnwrlO|Y?{xJXknYr72c824&>Qx*|612Qg;-}aUeR6}i)`$@bKQ8T3MIWse6
z1$?9bth;u@HI0;vY4xK?Nxzep!*IdkOw(?}NOu1Eb
zxj6(T2%eyT={LLn4LQWkW7KW9v57!DlVO2V$WUIvWm(nKxiHw(BVl!`?;DPA`QdMgZx9sZH_S*QHNHx|
zJq+!xe)*0qYf;7YDfsg3TN?MS&!zlfNR#``FTd@5-|hbh5TPb9D!w}*x7UQc@n25&
zNLC7rfA=%df96a}s?ZtycK^_F2KD@5|Egk-z<>Y>uPvUUJ}?&T1&oC|bSuwB*Q;
zko`vjj`BIp?69X@p7a2+Qx?|c4977F-yE)fjgL4%~qm%JDpDQsTt
z7UZyr*S2x~oGh7cCPJmE+^4fQr=e~n8v~5ce`)%&s|$&!(a088%0A(`P!3FC
zPh~C?t+%Di%-2lQayPTa2#MWIKmH1cR3iN6rjQ1_IS_AHsV?bf84XHl=
zbag&NG%bSwUVF6#A1fbYR7Qe!MjX#yLJHqr5yFDu_RVrD_SwT#yd}JRJU@Cg1kFl|
z^>BL~g%6Cgbz8uwt}T|*Z2gPuVyS&uo~+QIKfiLT>K0}dw6gh2=g^ht0#XLB@;%Ob
zBp;R{?FI#H411;#c3P&*Agu{7^NFUU{cjW1*xLCUOuyG?R_1ue~aC=mNy5%Su>(=P%ln$8C}
zVWo+|U=l3RLCaDod3vwcvOvz{y;b}+yn+hO_awK#7!@U(SrV_5q59_|i!JDrvl|A!
z;D#^)$w7)E0Rvw=n%97#f9_YewBtJF_uxseMi4q91F8fKA8YtJ&h_8niw&EAN9(eC
zS6@wX+#Vuk(h7utxxAt6f4z&l#;oZ~e@|DbY9*d-)(E4n7tBvHKWDfInS`;3{60KO
zn;w3B;8Ph`wa!1&kO<5$=M4`Cml(Z$qkey`p{ZTb>snv`o&Kl+i1$sDUc3H1=jE`f
zc8|qcr~MMSBK(e(Su~SDnuDy&6_@;S)jE&jww*Z3y#^4g78%KpYD3{B+-2a!$`In&
zO>gDJ=>|33qi^Ts(_9@tK^QUARI;_G(mg&w)r*wX+kAoCO0h7BZ(^U`WhL57)WKmn
zIz*&>%xC;+$eZ`IcAwa;o&Qc}Q=+$#J4}HYa$INlQ01*YaTz0aO9cTptxyN-LsdjX
zX>h}g@y!=pm`GviTI6SZ2))*GecCEEAIETb8>kox0l~Cz0AlJaCVH>hwrCaW*%yH?
z&ZnmsTD_rd&-OJmRDoedC=cNz3!9PMsL6;sjk+*)YOI`AlV!HdSu%_2hKVP;|02eb
zCV8u;E3w896%D{R>fYnAnxcPwAgP5s^|5x}=@1#)54ZEBA>Y0|t!;SsxC-hY2>}y7
z(~I*ZKNhejx}UPas=R2pUDJc!h7)4KrJ9R6%*6*Eatj}ZJ(Q`;D+sm-;AO?X9in3D
zLNb4t)?7N479zc0{5y1F#J=ELzb=s#pA$1_w%ZH4FUs7%ZY{!|wL4Echw=ugKEiko
ziHpxsBkbI!iBS)VMKwfS_?(zzg*5_{E>?u|p1AKARb*l*sf@UbdWgX|Ft2;pYo**3
zb6U15ML9Pzjp=e9mOcU&Ih$o~CO)%&wPhjS4%rG!bzOXS-F)Z
zB%`E2td71#m*>&-pBBl+IUcLW&7v&RfC}=iiltXyS|R}Bk37f^OP}_vbF6JhGTpFz
z(<%K(1>)-X5;k}nXDlyt{YS-K!sTT#sG(*2gh|**D~l$%0e>&OBTZ1^uq$=8hHazO
z)C3NKVQQZ-0ZC?<8)#E~_UMYlZ&|Y*wfv4WGkX7Hjf@H`pk-d^ppc8-X=dSzVEbd&
zhHv*dG?!sZ$1d=auFEzVv^
z)LnemYyAMkg60EJrGuump4}UdzraVYgr~i0JYgFfor4Fv|ACKB1}W*hbtqN3a(ao1
zuD(~4aCIpyT8XnE^H~{n~q9FOkxEoRx2`1`R^9_vL?nsyEDmZ
zWo6LF)@>ZKazD3aN#dc5d#atjkqz=Ih!mYu^@H`ZgZ
z?&GHs(YT7$<}Oa=n3m>$fM0u@Z7LtnXh8nSO64MEPn7p4?2jtazwl7kTR;8O`BXcY
z@64?+bDg08W`R23+}H2I8Ph*S@DL)}LC?Oj^je($x5*
zK5wEu9B7$7<1s1y%xpuH+S*bIUxtT*BOqYjP@ON!q=&ofb7l*M8@$iVJS$KSRrNK9
zh&3Zf_o^v@h*sy9=rYf{PgVp_Q0&o
z8W3h2i!nqCQ6=!393Qwir>^EMiPYTl_?1Yj;sPs5U=dkJAf|
zfkXv4pLbJtSlb2`AoSua8TE$#S~=w!K%Nak=rCOP$fox0L-wSVH_chw=EprKGC>^%
z@(7Q*S0tYEB(mI-bVzQ~H@ElF0-of!&_gBk<<=z}2IMfcrW?HLa;=|Wv2)did6)63
z9SuT;B&3^$1;Wwimf4D9>0SdhbW^jX`Q~n)vqgZUD1TahU_5hW7d5?v7_Mi9_GfqzvjbRH4)gu%wQ&
zjEqEJuJ)2~Zz5AUG00PK=R1p&nzH*kThO;jVYWNz
z)?c62u(Zx-fTAZEi10iU
zE_5fs34-5#sztFlob~49Mc?Th=nY0Kx6MsjBWk!1kPP})l70um#|d{VrRyta*0zhP
zwPTv&H!@De>m{vBLUsZFD%WhW-f@O!uX=S+XnBali
z3TZD4?prL@o_J~{ZcWqT0YGA6n8_-qh73i+(659S+HDp(t>!TqD_Ct`^HNNP5+*}0
zy-kkTZ`%GYr{zQD?2nEw=|o;1a0-TZ^H_Nh(ON$*(
zN#$FJgjY!k+a3LFV!AYauA3D{GOeP^+cwk_iS!2SFcTrV&dq#u59
zzO3CVXjd+0sw&D+`44cpWO7kAosjRJRQ3zr&eD2&@6Thi=v~nbiOYwTm{_P
z-LwE5rw9c~R(CXh>V@li9n^)*{XO~$06*sv)L!Qfv{MgDcLBH*;M5i~H@ES2L}eBABHnivZ@wQahgT2K)oOh%)_!
zB7P!&9``>A>HSvFO|0S2!}ko3n%d0cg5i$C+`Z9i&!dY~E_PKl
z2f8A^Q?(C`Hd7HRHG(>^4?)^3R8S2GPBf=@oql_9
z(C@XpSXlouCLM1uOEsfUh?#c&5m|BWv=CI{$uXqA`EuXv^><6Ry>I`A=O#^L2;irliS2(-0BfDFGI>Y(
z5Lj-ienF|e|H4@O;_=of63GezVG#vnx}u1*KqB+%)W6=ux;^C;1L`bOC`#2asLo$^
z;Y-9pH!~^pofGy?)jAC8k0W^S4xXGZ|4wRIy!mlokC)IJr*Z0TZoInuIQP$@Ho0uJ
z?l%~E90ke9a6(Yc^hGW@D0^q0RNPKQTf=K6%{MrbE?r>!dPRBDw}-mlkEcB(-USO`
ztXYteO1Yd73VQ^Ax7O>}tWKZw*`F|UnO==QgvgAOz=xUj!`1V;cYhRI1294%*`B`w
zaRWqxL(0AN%R59bGACbN?aqtWC@q25z`(O1V$Jx1&_P5B&wrziIW&;+&)Ka8vS`gM
zA|SdFFmc+4@~v`-J_%m+<>?TWLL=bSoAW;vI_q9R0ir8QDN5w
z86Sx!&6U>s#kg0;Rm)-N>@8{yLkN9jMdK<28UlEBx?O%ac|i`mjR0v3Ur30Ra
znSozt0&$zqjvLHMSIdgYE=8hIX9J~v=gJp1kB0YLZl1EM{z`v7uA}7p`;Kb=18f9V
z_8lp}fhboAp&X&3ad2Pw;(^=IC{0vmiFw_SFIteds88=ECN(I2W~P5pXYoJ|i!Y_{Wd)r+*&
zT9(weCBj&Gu&I*UGe*_(TU+b@6;?4v`ck|R^B!8h_PH9Hcw(BEE=Lu`
zMmI~VLwQ4I8@nc?@|TWwpXFyeT(+E#szlF>`->2B6Uu|<`4{biCnx3`TbI>15?QQY}$W6pXX$7
z@Di^7^kMZKxuF;+!}T?~BBmH9C@RPcNJkNo+;#|1_13b#EN5qdZOd*C!>}Sy6!)+x
z(7ifZ-JWaF$JTRZ&wBU%M5VYpUX!r}ZlA~f>-!0L*pX%P&-b%?FU93VCr<7Q%285f
z)721QTskR>Kbr6TCVcQGeBJfr$^T4e+$+ywZEW(I}H>vW8g0VB%$&^Phq
zZ^?6q1sGFoa43wI#%$W*2`9!3kiEalixu&$e*ZTYo)7zO_t&79;T;o#-5f;21J771;Fy
zav6{lr7%u^I~MbQ5kSB&)<(fhUC+c&02d_((w8A$C>(?k!13tG&$rijY1?G&xiy7k
zYcg={o&iY*-ilHAdxiY1Eo-YsLd~Gm_{5?0>4b=imt+Zrqr0gt6a0v
zJhlf9pTtfjbK!PeOJzy<+_eZZg?bV71>d5YV}48Xv{nZ;dt2WW_nNCL{Gydsb%~!|
z@zoI=&7+8^)ro?+3si5_qUt=rLXS9nt%tuKUs?|PMS-ZRxHQ~lm<5)0t}El7c@phM
zF2`wwXwI
zKDX0`&6}}f2~4Q}-hVsr;M`!_Nt%GknNka*UTW}eNn>(T*B^N)vLWaDtKjeD(!ng-
zS&@>wj1@o79F!DBl`(ERp7wSb3MCVzs0b$gX@gndmeS6w@}>#@%R6fi8`M@n
z8>i6xAnE~~WY$Sc^4otq@#HUd#?;3``^LoWY
z;9sf^pX?s~o`z3XX|iH4Lr=|!)1LtIICVz1uDn0|6e89th>hbfDsUKj%6OX{tT$%Q
z2_1WR#sNa6mD`9}pbfia(P#HEgo6ErU(YSjtx?DotSK1oIBr9|Ed{Pp%x`en^
zqcoKCB4~jKu9k7N=PrMyc!mR}M>sI>*3Zqs&tm+tW5v_72#pIxge$e0R6wKtqe7!s
zBQMMSmh5+Ga1G`Rbx^ZkB8*|CY*)p&>!Xyc~nS4gKEPeyymQ1pRvH
zJbG1Ak<`@GTxF;8RsEr2l-@^$BG>j&HMubZvDp;>~b*v`tHYL?Mzz9eIKVwPFW6E4J*vUp`+N6BN=#&
zLr*yF+<)vmdhGIM*Vv_^QS?)QdId%YLL9+ymw_rlpmp_lCttSEau6SLX@Z#`UdNJ@
zeY1#Q&Fb3`u#;0h=*zClGpVEBp1t3suM6om+LhF%$>D0*f>zW~rr)W#)>ku0e&$8BE3dxqmaEZ^Mm)V>
z4~$)7KU8PoYZK%1OxhxN>{g*c&s#;~d_SIy*aVmq;+(b{J0Hg1smNes#tE2{(xwoQ
z{*Opb4rJEcljdxU8K0Fr9yM}P^WbpR6jYVk+e3{xDjQ)iG*svh|EZWKx{()zcL2P}
zoSNT!Yh1bUu6z01?+G>yg^``h$|HNM4DeU#bMFn$IX*t}qp7=F0AaVxnxJ0zVQq0A
zAjk7yxZz^_d{xy`1=qR2sn%}|eL1!2Vpe96ESY6d1vWpKkYrW9N1A@|t9&TT!{x5C
z(s^7U@(Y~2^K)i-HVb-4c1>s0&@cMZN0D$Zkhm!oVdElYmmXcs4T8KGPEPhS*U;^m
zhK0!rubFZfdQ~1T6CJ7M*?f9{u%z}7X|6HTvG4+G;9cwgL`{$$D5qS}2EDF*{a2-8
zrP)f+3esI^T?!>;HQgfp!clb5e6M}wJ;1JDq&kW1O@mAjs(?U+vp;?A3o{>9wGyaXqZtE%r3xzXh(kKaB=aeu&8gVoC##_XTUq4
zDSlv{kXq+JJ|~L_u6=_d=|Jsp_kgN>#&=e?clfbow*;9kK`wNZ^oyt1BE1Kqh8qP^
zyC4%W9K{5)GJ=ddo5w4M{_04p-Fp9#y{1)cHz@Q!d)Q;$zS86;S-6z$r^)J{;syzG@3)1|g$*0RYrui5X#Jp8r$JyU0n*8d(WcfCHS)ilg
zqZ%Uv*F8NxXJDYz#ra$VV_}Gg?)}o=S>lP~Ze+Z%23x?$56N+?%89iV*$Nh#*5z*g
zS|T{D;#gw6`Z~wYN0+JP0fG7;Blp%S@bOmhq*j)bt7BJaxT3l-3*_z;VNGaDoY2_f
zSt(Es?8%XXy|lYX?
zuud6vUr8LaDtpP0q}b8|I{X+0mq*Y%5hf0ZvzyNt_ECB%Yu&}p^
zW{4^h+*ra_x{Z|>Ne5&%7wURx`SUfa3Zz-Pf-O#zkzUMA?%JQY>}e2J%-BZH;7FW(
zrraPj4~)L%S1v3}CU6H%5Hg>8KN*eXOxijGv>iGrf>RyZoPDHDCBYnsJ(Xs5lRvn-
z9C6!-iv=zdYKaHt`p3c>l+m-%D=<6}PoMD#!3?xB$t-+!pCx}>+-GeVoG3}*ktuA_
zJna#5cVUx5eEcJ|z8#a_1*Pds;PyEOPbe5%^ZEnUu%F4n;hCr&(A7mb@;dHreCyT;
zV~yI@=JTgcGZp9O1)3Ao+d7-PA_D{*^}0Y8nV4X;g4+t#fKR8O{qL=}PQcQ7x!?hmtt}N^{V47vBWC|b|61Y$ARp}Kb|A!B;#hIaGw|eJ3
z-D3{o)5L{m+4Jtr(irQME;#dI$kyksk@(69-kqk)-__*-JWM@YrQm5WQQm-l7lV)SpYBt{RsmZ
z)LkBrD{^FO*-4wzpY^Lirahea{C1V^odZ=b$HtiJNDefdeH>(k7_
z%s*2DePfU1$-CSBKWwDk
zWY+V7RG?2$kOtl0p7R=QFii?@Q>zx|OMU+Ua25$9f^KO1{{HU-pG$SVTapNZ@-%apmJYi}i=+EZE;LYlwt2lc1e8(24Imm&rFgSZVReLo>+f&o(
z5Go^~J&6ZM>^qJ9xQUeIL8|73{{WPI1+Ez0850hqk_M&U`%~+9&z}|vQ+whaQ%~^p
z8dD2gXcfAYc7lFsKY5)Ybhq;t;x#SS%4!u*lS!}x<^c2wjSNH`e=WFsyi@Yo?cWf(
zIA)TkRPBna7ZO5}OlkzlA7AIjSaQ0+_Ro;erFT-5Q8m2po?sA1x!=F%^*m$Zu*54+
zbA0(fO21o(I}BVoC9=IwICyfKopH;mv?x=8-Kx|=Km`1x$%N1PZNeRf)2nd3RI2Z(
z0;9T`s(UQNNzhDp{XmhR7y6A_Vm
zY^AnMa&rA0Ayt+W*P)q`WP$l(CF~#|H%gyrs(D3|tY{o4$aO>ULjYNghZ(OBN
z56qM(`+I}59ZuY5_c8DG%G~Fs#pPs_rD{M5f{?UU5ThW8fj>|pWy=Z*3?^}?HC
zgH;ui_eiI?*l1_D)^sC`ilU3Qea{ELwfUIAn$cq$qr@*``}+
z_;s;V-Bhwu^va&PhL4k=0H%5)r0hq$jd)`*Z1&G(wN>T@ZE?7{
zB+zSFOCd+){ANCznT-M0w8ah$i^5R=7a&mIqzXABgIwtzNOeplJ!wip
zqyeanNc_jseG~qCX6EZc-BsmLC{k7uRjCpPjdh6aKEgYuI~bwCv3U7-c+J?_aysoU
zlvPmISadp*C#3fTf*|VjvpJQ6yc4&`l=!m)ZU2#00se%yy^De>a7ea
zkm{CeZsW3=^
zKK}cS_`hfP*|y`67W%2gVWAXhX=5O$f{yy`d6GVtx0T}4Y}SL#s}
z>QgkyCPa`2=iBe}<3V|@<(-olw&ez*gZhc5HDv0703ecmj~v9s%)w9hZcc7nkxNqk
z14uSs#DAq;k)7F}iU;`SSo;II)mw&kXO-zwqOwRRDo{iMNQnOcNZv308yq+0&pT|m
z#m%Akb>gY2khJ`XLZw0h5fP;8AD0S0I=N=`{{S@X8BHW=Pr$q@^V%*bGbReplVCkry+nzfd3Xo)_Wt~j@>D;w@;=owG)$pts
zymxq5wBhgZzr3zAH
zqE4e}SJdocVt8+SMtHlvWO=%uT<9vTt?1}xf=NPDtpHAuBIC}}zhgUh;k>QQYZor>%F3HjL8E%HmvoYpf)i0GAZ(ouo9jEC0T&$l*Up?nExrn9
z7rR5_U9Dn}DkOE3q$H6tf1kNL4yid!L$TG5_Vni(7)~acfQA(e3B7hdO+_hZv%?C$
zFNtK>B?_u@O`B@<5`tuW;sGK|YXf;C`|X*W$+pzS^PI7xxuh6Aekq{H9VK6V^^V`9
z@a%X7%AU2(FDx%K$}3#$hyW=nAx5Q0-09wXaIg5~YH?0(WXysgH(E}dQz0=XqL8gV
zx|r8;3E+`H2Eay6B)sLF$vz~Qv-oXv>7OropI*7?WpcONDbuKQ5TpU`q4|LQPYr(~
zp4;tQuFow6#8h4fkyT29hsw0F6$YbXCU^Hf(Z%6Qku`JQ3>gd7WTE|gbE=6X5Tc?f
zp5z?~nV$auek!>B9g$&8Igwy|mGw8pGB$}?fd&n&C#3!3^(S424;r9>!gq&*`z2mm0cXA31
z&b2gxG^m47URWfPu#FC}wvafnZ(`X?d2af>V>g6i#twSt5t}I_dGaTvI_l!wdz1Nn
z?R75>MO3#bQ;LdEQ0VI<>Q1{(fM`b*%!8A+&x`u?sa4uiq*F$ql2BElNQ0p>8+-jH
zNV&z#j$d-$4ZT_!npCc2bkYea2cbZ8#2?Q=0tdF4Nn6B?+Pin`Qas8ixRldcpv^TI
z01|Z`(02OqL6+guR;jL?e%yKPPxh*G)D)l;vVdl5c~bV;@HNd{{_kBEZLLPLO4VB`
zkt;%yR|JAmNJ!LcBVo7J{{YL|o$j>?mFQF|Y}t@aFaacxCvgOT1k7v*o+df7eX;X%
z7_ns5TU9zYt3zZW_GqCa`c8mV))EQSm@+_<>>$uNXPNvirF&Rxij|fMP=yiG3IqUs
z#GQ2-fjnj6Xip+((==U+#itb29|*CjEFJmzUHR%SCbJxl@5@V_h4bK=bg3$%I>kU5
zYDgq?>NOjFdO+8KBJWqW4lB`hR28YhlDmL13R1ZpCU=4QeYghl!!f_IcX4KyD-7wn
zMF*|=wWm#F^#BC+Jw^unH4g38MYw2d)e2!%wKpk95~ZzUsE(tolOu=0ooH)Uuj#LR
za?{Y0inwg10|b@3#DD!`x91
zXt}t)WkS(FNw?JL0W|^+gbe{YXh#7~Yh)OOqY8I#x;UchrN9>HsI-shR5bZ#PzI9*
z2j{@31QOw?^AUP%IuA(3yl^EI?kFDBA|y#lm8^E1_#oPGnzxE^RRYCDFaj$bi#ESNg9aU5#ESAR2-=uWK
z(psTMU*Jp7WDqoznbUCxKy>2f{{SSTMT(VmW|gZ_qpX5-)PttKNY`OJ4j%$+9kpn#&@gF+7S*)c!U%*F
zN{vmmol3YBvLy7${^`1dTxK2h@MEc#FOclqCsC
zl_@$w(@{DNkJDeb4x5gn))f6azHvRv`;{jdo;^^Il~cMP0r%({&cEFskk^N6oTsM_
zZfYc&bn4JO$RE*^k8!L=f1wk`Xk&LOw#>rLuvCzX!*L$rRaTwmb?G7r{QGgGJkL?P
za^|AIg&{Ol2>LIlcqjZPT~8QXRVO$sT&BJh@s0itdnpXe-%pex*&~W8txS)U#R4PT
z1c)H}K>GX4@qT2KSFoR(PbiQ708sDx`~9bZZbH#>O+cvponQV6);o{=pnp9yf<=Oo
z>e)&pljTH>^-i6)(stGd<nRQ)zVTAIeDp^ZMI5IaGEGJh{<%fe|=ZB2C$
z6jZdpi5h$TXY`nO7hs1zsXJ;5I5eGZ?W+mAv^epFf9Z1?fBa7qLcSc;uF!ii+L*Q0)F&!IpX
zh)E!*NF?_|Et0h)5R^dn1Ogy;jpln9;6HcI%lCp)F(e8aQ%yVI{fs4@
z#r>FuHVPUoXw
zLDWejd5G6YC%pF@Mmx%%-7T&Ig0h7u#LPsKGp^fj_3a!Wh9JF*+xLr;G!c}$vja~H
z{q?&$%U2^Kx4f*PI;qiGfG1**efHZ;G}GUO55!%`N-JeeWlE-0k`h!5i5iWJe!o%0
z+Xd!e#Tnd^rIe&lSTm-jCPu?k-%Yg>!o`#2b+eqMbP!EMRj9HANPpxtbyKs=6G{Z
zM)1%Pz%dq1-q&!Xq*Si$1WW@eSkxHbW4NEsM~fHb`L{lAaHb}MRZ?1s{2)Li#+r>}
z&ibC*CD?@vi!@Vknrj^ukyTsj5=@hzjlWNL;i6wP9$WE;VJRx9P*XxraR~`K56k)Y
z(zGO9h09ZRw}ewPJO2Pr)uR2*Zf1HbV=FUSok2(_SVFdt1VDk<4{axbHNNZUaO#St
zvTL{tsQ~=aOu_FZCr|a@0e{M$UoN!P=>4CvsvZO?B7kWbu5qJvEt%9beH?
zgmlb}NDy@!{{WcQBZCrLr1KuJMA1Td5$fMK9AmjAeUxYDwbE~07WUOeG?Ek$KrmIX
zohRJ;#Br+pqsJUmOInu^)lo*B8Z^k>qP|ruX|hsP^%DYMaZ=>xDDCRAjF!79RNXj0
zDLz_4f=XnqL~5NUaRPMPLys+4J-KHIw=<@q*}XwVfa!rDVpa$L06jZM_TxbUiRQzl
zoc9WMzLA=%o~PC>IW@h;6LPdZr%geXhjbrZ&mVi)>0eqFBO
z)Fi3eXw^s&B}E--i3e!Z4*vWnoaUt6R@OGEh#s+}YC!4pd5Y*DfJEstr}K>%3?4=W
zY_&Tl`X<#Xu63X2epH$IfFnWo(h0+*u{CfTTR2G_KTCMUQ`MPC0f?g!-hD(U9NTAh
zwqEj!R4gcS{elK?<6b(qwXb@0Q^JEiK?R-Jb1twFVN(u#;C5|h&h
zarqe|Z95z9_8S&cYIfE8bfhBM8&+8Z<*n($2$TN+y(dyY+iBo0;eOMK;m0h;ZnaQ{
zKNPQ4-PAXy<|#mljUbO;4`ali{jp!d=cS+i&^jKGY@+SV5PA>o7M^EwJqx|H#$0p?
z7jIEQpkGdasA;J`-Z34u17j(3tZlL8O|sCmrkxv==CuW@Z|FKYfs!Nw2>$>BJWX@t
z+$(b~(R8<0T3FG$7T}5Lfn0SN14PEMMx%>AHSuTm(PPgo$}rsYN+iW{-2kiQpO~2w
z{{Zi4#dGE7B7&yP%s36F)%1%Htf?iyy~}7K!_TzaG0R=GgLS60Rj12}M_*!KPjT<8
zjc0+MEttD?2~7%uks)6)NYYBTkGJ?wZU&gPcVC!%#baQ|YO1nOpc0yfM99(%{#p(n
z(~CC77}2QFst|$_Lb`iL+CJL-zN6v*37`V{a)I!K=DGNWr)XOF!1PhW-CFCXr9x9q
zdjq%w>pG6Z`KLU!!XM?l!nNAP3aexk3X}mUf}l*sn)@CcUU*k#yIn(FX-d{KNFYgp
z5&r;V{(M@zE#mYjGI5l)L+6M#?@B-|Aq`mfG84e6juCrAPr@
zol0p$55%N&%m86qc8zzF!VAfMLsDnWiur5it4#+ES5%UZl!dNTb|ws{@1}=P1hxJm
zv-fGGZKT}*p-vJC00i`vz%T^;cH2Ts>SN`@R&{XeW!2MUBCSd&QCUw(R+BRTkv+9D
z`tUqT10Er)XsKnTO3Og>^rR@E8tDgfyq@|G!y|>A=1|uY#p$(rQ%b*+3JUp8;XBOz
z{`0QT?(=f#Z>4JMZ%8dY0O};DqzPJdPNG1;7
zSvu+NHSRf4W-1}UXFc1(i=Io*oOZ~E&Dz#SsBzWQl&%RRkY{b@?-QpJcr6=7S&v>X
zP6{dcjaOVK#Fa0H50~g`*&MdZPB|Yk3ZoP$?F8`5!D7X{Pv&IgvZ1cn&PX?zJ;K!vt`yQS*AL_yE_5iPt$1Dcr`J<7b4+O
zqR6jARYFvEkuj;AKf|={CV4q#VV`AfZMV{zs9VJ$3aO|gt7%aG0DjtWrVlBdZ$kNpKU#=mVR9F>lWq(D+M=H*u0@=L)u{wKcq^gV0R>IKbH>X
zQ(n`R@)GXFbx`WQGMaAaR*t2@K-ir~3DzV|y6_^9MXEgF)h+Df)-lVTc|zD?&FrfA
z7K9b3jR~D~?sgg-Vn&8|%v^-%%dtw{9AKW9Bt;3?7CpGVX^;QU~K8X|H$#>+kdc5sucO
zXt1f(B%}sXq6BP_FaR3urkb53@jJf?#mhbA1+zM{p-X>?Eed`VI=Tp6@T~{2h>Z^xzJ7C2
ztG-6zR2y6guSuP%okYnbR6+u34L13bXLuTG#8)nPv3bL-{K)0?A|jw#Qk0MZ5J$>2
z_ZohOzX?_H>02hG#}}&X^wWRTP(gqMkpeaoF|@$ja`1}DEK4D(maifQ=*j`_7HnVw+o(P#WyGTlj=c5XmeAcDFzIH
zAzGP`AdMqfi6@6!nHKkVuMCaal%%`IE_4Dm>IejEbuj~R`p(N{1wpO(ou$jbk22MA
zMKz?r`BHvq8XZStB+N)0YawI+4MPs-8f3N|K__~O_-HxS44mC#LlfmL6>)N)E*Hys
zkSWxHuANSSQxo(ap3)0bA<1C=(Lc0+J>+J()nB`i^2LRO-ZNB~6rwA8>8uNkf4
zZ!NZSyPH1}WCufKE|I8#AxIFJfCphd)1f-Y68!tS-6av(g5J8WW
zK#!-?Pt;L|F{3~`zq~7bBE;nQ){dc&$=5{ewL=S^Gb=J$$nDRZDf1kWwRf2Hx%FCfZM|>$DAiI0H4`9!g#jSzp!@K(wRq{KOIa=lT^h~`
zQlkbUm;qm}ao%;|&*aA}(?-E}%+pw@#ne@5ged9&>L5YZ3`hOAjv+t`igOxfh)53O
zcOESQEYBv&vn!3kE?(ET74(jp)f7k)bosZ_bD=UpJW#SrE9-rQtuIWb^{TAOsmW0)
z8~f`s^we%Rw77ic*O)qH@wujtG6_Op4aaSSop&=LPTWZ3cginr_UmmmbflmuK5!AB
zk-P|Fe*Ao5xw3#XNU3@aGd|a?w&eyN4HkN}?71T=hw+r$+d2vs
zNGMQAGC>nM_xCza44*B08$IR);~2c!7OQ(prQ_lCie?m`zjCDgh@E47I7z%d<=G1^
zWM0@%NPA~RRJVSLdZiT9Nclu-0Er-Q)<(SO5ut
z1jz$JN7Ns%;0Ks^Fy^igc(5|L-hiP|_oz`v;ptHX$m-Sv_S>`*#@=1I({iVb+-bW~
zt`zOpI&w@z3>rWwVj5D>?b3&~!mk$C7rcZKLu`?z~I*HKm
z?PA=H#o1F*u2oY-+hi?4Dp&XQS4jlwB4oL-LgQgsVvk
zI?v`l=ZeCw_gkge%0I{HM1Wf6hM%Jjc1cxGK6TRbu4I6aj>z`=?E`
z+JBt!pxHyq!&i2i_Sd4lX0_6{G}BO!QVfznCU)4*u5xBC?-yHg%6$`7=z;-&
zPyrwvq@VQTI(mRmVBM$Mulv`eX-ZH6yn*UJX!H4UvfOaondO0Uo0XCGqgWC;nytlJ
zOPJh(i8@Hr>%mKw_I$G9=cdhXHyUhK6u3;rl1_toB&NSvs%ZZJo!486{{R)m
z(v?w^Kq^v_XI{l+L~r`cF@6oM6xm%fJ9?w!sHCMN{tyn^4Yr8azPw&O5vOP66t3f^
z%)-;o>kVOVW}AQXR~Lkfg8-5OPvJc<0D?OW2f5$|rWGvqLeim90@PHnB_dTFQU>Q<
z{{UUM)qLL@$8rkSOMz~mSkI_}5CJKYeqA8McKZD|LixF5Wo5Y8k|?M&d`VIgol;5E
zl*GiJmSeZn@j!5+SMR7kVVlMw0sBpuy+Z9vt=C0r1|&r7{vhkxeYc&VhqYjW+*77P
zRHG$B`_b+8;z9F8*=`}Ss225VBq=9QrAUz)lQ2fn^c#*V&eWeir=57MNo2`DQTa!2
z&N#R!sHz>>6f<_9iqq0bIt}*hJ8L8P@M7hiv1a#1sRv4lAdpXecbU^|f5LbIVXoBS
zX#^25+j$eef6M8%oHm@gY2~=5RHsl;)tN00ha6HUQ3oG@oO%@nA*L
zn|fhHi4cFl?LX)GcOpP{qu)o`BRq;!X{hG|rwQ^h`(H>PBIeO*0(SEWpU|HBdvU87
zYXe`EbjV4SGow;xQwj^8HG3BiM0~s=J}aT#A$O(|JM&{30p=xDh%>(@i&>fO_~n
z<%?4g4X&9d>E)#CN4I#L_2Vat;fhFBV^A)Z6w>j(!W0_f`;FBZ%aY9^kglNtUAn4>)3w0SM#RGO3*$C;)U5{HOiQM_`Ta#MR}o%ifXl(klvx1zIXL-(BSP&>k-w
zu~J-PRhkt62~t3j?0bG>Z}imf+@zeKIc5~nIJ`&$GXO8A^+jc^%_+#8Rj#k-kVfRh
zYDm{_@h7?B)pER0x-Q;^#4Dz%0f0|%J4x4F_0*qfkNwlA`Rma|CQK$%COgKUY(Aft
z6ZqyGQG@1`+@(kQPNCEcej&7ph}Tcl@peS+d`HqV(KU-T>3ww5?ct82g{Jk1w{Vnv
z$fBO8-|(dN`w~FgT{YIFZpE0XUkh4NDk#K221)yg_tJIX@sB>D!o}!}eAL2H4F3Si
z0FR*D=^AJTr;2?%ZnE#xA5-cS)UL7y#U#(rY47@u7D2i4`bEC;m!ItmCo?dykJ;`k
zLGssCD+z^ePJ$*k5KpxIbprCk9Im{frCOBw(@`f=-U;4x`iYS}P6
zz(Rmg+!)v%+5tVc;*rVc4JtpjS6wM1q!mQyd(UszssZ1L#gTEyFgn%UTPylts)_9Jn@Dcq~jJu6D;6)6Kq20Lx1V12e69j{o!-ec6GZKjomlu(ou1egHE
zglVLLBWR5xPX;UZEtz8cv#DwnLTb~uq$HI80DVb6E1Xi(Q-S_7i
z!E3~mbZc;RMweHWTm-5PkWc5_4Rj)Po+L2aaC3EWXRM+W9ibZOpgMsk_0vuMoE$CI
z_V}JM{Z0@Gtgx_D1PRx_u_H~p3EZwYjqz^t%dJ{Or4;T-f;Rke06Rg5F)()=PFWy_
z1)LXS%gQQHaC}GKlx%JYq*}4cXs1m{qHUc$C+dJ81G#}4Ndk8>$J++Gd=n^Y{hMW_
zYEqRYCJd-;9f$gAAf7%r4cWEZ6K<;1!fS>}Nl4Z}00Z>|f%J$Fd-dEh8-6cR*In~^
zbg2;~WB{lqu_TH1_ue++Vp6D{LG-mL_shF06ozIbQ2L5#9JwY(bj$O`sz+TzNyw<;
zoDz_qFhr&bNDu^0ygfFKwph&o;*ksV;afpXKA4I91RcA3725*FEbs30au&!6r*)&}
zq6DQW1d$UkG~8%FACoLiXTjaJl2U0F386?P40PyXCs~;KckB$YaYMeoFfUjc+p9lu
zO8m7fsl7!kk|cnVWbOR>nI!52g6|mp7gwgPt*%fjCZfFIGG;`Q2B+~Em_ISxaMoH;
zV8*FlEccqJHQZ5H1f9uAIvq)g?J+Wa_*<#kTT#eey=vuUu&TA75=uZMsC@wJH;JCm
zM{@4<-(MIb3#WJeur=p(?;pi1OpxN4^)4#YsnnvPprq6WXI(+>An77C;%=TvLo?+H
zmqsd6XHu0|B#@MZ1n=8Poj3c>3I~dO;@x(>vayE}x)iFB(qw`OAjk)DBps%9?`Gv*
zV{ga#NqVJldZSLIwRI4aG6;~66Q-km_4;;kanyRmF$TdVtzdcbh<;Mx;E!nt?bXVZ
zYSE^((<_;f3V;N}{)28WJmSeym{p^X)~HqKoG3vuKuVx{cK-k#`|vg+;2r*v938;b
z#Wl1F@|=O!G9B93(5#?pRj1YgB1DPMcK-k^_|SYkG;qFC)wd-gop9Y9RU!{pOw5Qs
zX)_;T!e4;n7Mp$DomD7xP*M)N81MS&^go{rrfZfot&F?1Gg?JaNjidb36E_^F|gx4
zHqr2o{{T}9b@uG6pDAd;%ca#%tEL4q3d9A0r)hz?pVCJTJ@(;Et*aTaP*RajlX_)d
zSb+&bV5$Hc_b2t-q+FJ^)W~^&@VTw#29=ai){;>vQRxK49f&cg`cAEWK+5{3>voEi
z{Ua2VCO`nC0%0RSeSibBhy?J;vFYnblvg>;cx=xVY?>7fM--@51u{yH;sQV<_Jccd
zjeNwVSyi81-?UQAXAAPdNl_!LfgfS_*G~Lu27P#D_R(F+>WZoer~sJ-LQ+C!cqTO+
zr|y~GEhxP|wXky8C8N5El29Pe&;TRb>Lw2vSio2iNToyb6@)54-UC0lbh0dB&2VQ#
zD!N`#Q3+4Y(iD@dLEGEjG~&n1~!DjF~z@>?5`Fq
ziqzxNRYhZQo}vgaN%}+`W;gms;k4vOEvy;eHg0xDbdau!W~vgnj*tl(nD4jS-%}qo
z`9O>={{U}ArKL0`jaAfA!UO=F$uM^InVADpc-c(g2uWb%0OjgwL3*FNn9NtjKt+6d
ze%JL<`GorQWald!jTJn`(5IUjfeEe(v3|>Qxr1_eQ~8D*o^w
z7=jGM_Lw_xkaM#uX7f76e73+*qUMq+2?wNw1g$DCMuq^=5Axx&W-8m-Tk;!AH1w7<
z!unxA6Hi%z9^{_DA86pe%esG6M%Hto@
z-=yd``tpApsaB#J7*A3bYQWGbK$(G{AfIC-euPY2#p9SczD-5#4>FBgrBtU*pprq5
ze?QJN?>t{I>jNAj<$NzkO6b$5x?^b6kOb@uf%o5xjK@1>{Z@W^MuQVzOb9uhkh#Zb
zRIpjwsl`t%Lb}ijhzb%(K=#ytckkbVe};=|rZyM1I#6FZO?uT5Q_!@MlAt>kK?Vn6
zblmU|uwGM}^E&2==moWPA-y_rDe5wvCPw36d&hlrDYhGJ<>m)j{WJ#7E2Rmg(h63-
zxCEq_>~)PHU!l`U%g-ns=UsOX(k#LxX6*q&PiS%@UT^?x#_@?}*Bib|LZe2vz`9iG
zR*;gVq>T(gAebkA<~UAyk$roN^Fb8aEh=j6xvfn}jY5pbJ-c`OxIcKK=gKbB?RfQS
zC8PU&R4G-dC#0w)Nis-Ja0asyK-*pyy7<+6@ztL9MyjeRjT8d4!6c;uKv6NUFegoc
z9q2=XoxvV224oHrMG4ZK;vct_UDo6q=XEQCMbT|qsOr!aM*EKb;x`5e;Wx58b!HCg
zx2#PPVG~S`(_N~?}fa<$*5w|$-O}Y>C&OGgZKS6juA|Cg*8w`
zD(mY@l9`nQ8;y5~nb7^a@u-yw3mO5=D#XhHG;&A-omibVZwRmDm-W-BYI^xbqV80x
z(@Ioifhqz4)DGj?L~hS}HzLligRM%KjY%i4kVkR{
z)DgypRLAL_O<6DZ?vE{%q1uoj4x#Dk06-g%G}}!_+c?_Y5N=iK(@_XrWvL1TgV0Wp
zcN#!3+-t82E;&ue@ewxm&cI*1ghArZk$QY7qQ?94^qIi4v{L=y}>OLNF9IT+74wXC3`h84JlR5@ny>|Nh>Vg&!
z+<9nJ7=aK+zi#>uv~XCxRy3_O6$Wc5YbYaMnNl=>N8%c&&*jI$)nIQ}TdA5u%yTPQ
z=@T#tf$gO01pQ}$u+DP3>3{OY@Y(oN!K}=&j$&kvXd1UP+no_nL=xG`!3bHAuSp<@
z+E3@Fyp(#{*@0q~`??6o6